diff --git a/.gitignore b/.gitignore
index 306c9d2f5409bdf2a003e63b795885f268af391a..a14ac887e7f573d4e37d483bf9b33e7de42970e6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,10 +1,11 @@
-# general 
+# general
 .cache
 
 # C++ Build
 build*/
 install*/
 cppcheck-result.xml
+include/aidge/core_version.h
 
 # VSCode
 .vscode
diff --git a/CHANGELOG b/CHANGELOG
index b53d52720c420583973bee58f8ba2b290f0879af..91877c347aad809ea1c45dedf4e035575fd88689 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,5 @@
+# Version 0.4.0 (February 2025)
+
 # Version 0.4.0 (December 2024)
 
 # Version 0.2.1 (May 14, 2024)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index beec9fbb427afadccef156139bd277d743e6999f..635c9c7e1b1048a749dc491c97335b13d057013a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,19 +1,34 @@
 cmake_minimum_required(VERSION 3.18)
-set(CXX_STANDARD 14)
+
+set(CMAKE_CXX_STANDARD 14)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
 
 file(STRINGS "${CMAKE_SOURCE_DIR}/version.txt" version)
 
+# Parse version.txt to retrieve Major, Minor and Patch
+string(REGEX MATCH "([0-9]+)\\.([0-9]+)\\.([0-9]+)" _ ${version})
+set(PROJECT_VERSION_MAJOR ${CMAKE_MATCH_1})
+set(PROJECT_VERSION_MINOR ${CMAKE_MATCH_2})
+set(PROJECT_VERSION_PATCH ${CMAKE_MATCH_3})
+
+# Retrieve latest git commit
+execute_process(
+    COMMAND git rev-parse --short HEAD
+    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+    OUTPUT_VARIABLE GIT_COMMIT_HASH
+    OUTPUT_STRIP_TRAILING_WHITESPACE
+    ERROR_QUIET
+)
+
 project(aidge_core
         VERSION ${version}
         DESCRIPTION "Core algorithms for operators and graph of the AIDGE framework"
         LANGUAGES CXX)
-message(STATUS "Project name: ${CMAKE_PROJECT_NAME}")
-message(STATUS "Project version: ${version}")
-add_definitions(-DPROJECT_VERSION="${version}")
 
 message(STATUS "Project name: ${CMAKE_PROJECT_NAME}")
 message(STATUS "Project version: ${version}")
-
+message(STATUS "Latest git commit: ${GIT_COMMIT_HASH}")
 # helper for LSP users
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 
@@ -29,7 +44,6 @@ option(TEST "Enable tests" ON)
 option(COVERAGE "Enable coverage" OFF)
 option(ENABLE_ASAN "Enable ASan (AddressSanitizer) for runtime analysis of memory use (over/underflow, memory leak, ...)" OFF)
 
-
 ##############################################
 # Import utils CMakeLists
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
@@ -40,9 +54,9 @@ endif()
 
 ##############################################
 # Find system dependencies
-set(FMT_VERSION 10.2.1)
+set(FMT_MIN_VERSION 10.2.1)
 
-find_package(fmt ${FMT_VERSION} QUIET)
+find_package(fmt ${FMT_MIN_VERSION} QUIET)
 
 if(NOT fmt_FOUND)
     message(STATUS "fmt not found in system, retrieving from git")
@@ -51,7 +65,7 @@ if(NOT fmt_FOUND)
     FetchContent_Declare(
         fmt
         GIT_REPOSITORY https://github.com/fmtlib/fmt.git
-        GIT_TAG        ${FMT_VERSION}
+        GIT_TAG        master  #latest
     )
 
     set(FMT_SYSTEM_HEADERS ON)
@@ -115,9 +129,12 @@ if (PYBIND)
     include(PybindDependency)
     add_pybind_dependency(${module_name})
     ##
+
+    target_link_libraries(${pybind_module_name} PUBLIC fmt::fmt)
 endif()
 
-target_link_libraries(${module_name} PUBLIC Threads::Threads fmt::fmt)
+target_link_libraries(${module_name} PUBLIC fmt::fmt)
+target_link_libraries(${module_name} PUBLIC Threads::Threads)
 target_compile_features(${module_name} PRIVATE cxx_std_14)
 
 if (DOSANITIZE STREQUAL "ON")
@@ -162,9 +179,16 @@ endif()
 # Installation instructions
 if(NOT $ENV{AIDGE_INSTALL} STREQUAL "")
     set(CMAKE_INSTALL_PREFIX $ENV{AIDGE_INSTALL})
-    message(WARNING "CMAKE_INSTALL_PREFIX set to env variable AIDGE_INSTALL by default = ${CMAKE_INSTALL_PREFIX}")
+    message(WARNING "CMAKE_INSTALL_PREFIX set to env variable AIDGE_INSTALL by default (${CMAKE_INSTALL_PREFIX})")
 endif()
 
+message(STATUS "Creating ${CMAKE_CURRENT_SOURCE_DIR}/include/aidge/core_version.h")
+# Generate core_version.h from the configuration template version.h.in
+configure_file(
+    "${CMAKE_CURRENT_SOURCE_DIR}/include/aidge/version.h.in"
+    "${CMAKE_CURRENT_SOURCE_DIR}/include/aidge/core_version.h"
+)
+
 include(GNUInstallDirs)
 set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR}/cmake/${CMAKE_PROJECT_NAME})
 
diff --git a/MANIFEST.in b/MANIFEST.in
index ed911dd75b59b65b8bfa023584aae8585de6325b..7d4d68c220e882a3aad520fff5980fc5cf758c18 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,8 +1,10 @@
 include README.md LICENSE
-recursive-include aidge_core *.py 
+recursive-include aidge_core *.py
 recursive-exclude aidge_core/unit_tests *.py
 
 recursive-include aidge_core/aidge_export_aidge *
+recursive-include aidge_core/export_utils/templates *
+
 recursive-include include *.hpp
 recursive-include src *.cpp
 recursive-include python_binding *.cpp
diff --git a/aidge_core-config.cmake.in b/aidge_core-config.cmake.in
index abe55b6faef64aa61d4df4076c035ac0c5f998b4..d69d2467533c0949ce27dd9db882a2819115b27e 100644
--- a/aidge_core-config.cmake.in
+++ b/aidge_core-config.cmake.in
@@ -1,7 +1,7 @@
 @PACKAGE_INIT@
 
 include(CMakeFindDependencyMacro)
-find_dependency(fmt)
+find_dependency(fmt @FMT_MIN_VERSION@)
 find_dependency(Threads)
 set(AIDGE_REQUIRES_PYTHON @AIDGE_REQUIRES_PYTHON@)
 set(AIDGE_PYTHON_HAS_EMBED @AIDGE_PYTHON_HAS_EMBED@)
diff --git a/aidge_core/__init__.py b/aidge_core/__init__.py
index 32042125ed6ecb1d935e240837afe6516706dbcb..0832de2c472d15e2ce1667cae66e4f09303a5add 100644
--- a/aidge_core/__init__.py
+++ b/aidge_core/__init__.py
@@ -13,4 +13,3 @@ import aidge_core.utils
 from aidge_core.aidge_export_aidge import serialize_to_cpp
 from aidge_core.show_graphview import gview_to_json
 from aidge_core.mem_info import *
-from ._version import *
diff --git a/aidge_core/export_utils/data_conversion.py b/aidge_core/export_utils/data_conversion.py
index 401fc39f2a70245a67719699b5f0cdc61108e0cf..6dba5b78cd7b8e79baddb160a1110c3e830c7cd7 100644
--- a/aidge_core/export_utils/data_conversion.py
+++ b/aidge_core/export_utils/data_conversion.py
@@ -1,8 +1,9 @@
 import numpy as np
 import aidge_core
 
+from typing import Dict
 
-datatype_converter_aide2c = {
+datatype_converter_aidge2c = {
     aidge_core.dtype.float64 : "double",
     aidge_core.dtype.float32 : "float",
     aidge_core.dtype.float16 : "half_float::half",
@@ -19,12 +20,31 @@ datatype_converter_aide2c = {
 def aidge2c(datatype):
     """Convert a aidge datatype to C type
 
+    If the datatype is not convertible to a C type (e.g. int4), a ValueError is raised.
+
     :param datatype: Aidge datatype to convert
     :type datatype: :py:object:`aidge_core.DataType`
     :return: A string representing the C type
     :rtype: string
     """
-    if datatype in datatype_converter_aide2c:
-        return datatype_converter_aide2c[datatype]
+    if datatype in datatype_converter_aidge2c:
+        return datatype_converter_aidge2c[datatype]
     else:
         raise ValueError(f"Unsupported {datatype} aidge datatype")
+
+def aidge2export_type(datatype: aidge_core.dtype, conversion_map: Dict[aidge_core.dtype, str] = datatype_converter_aidge2c) -> str:
+    """Convert a aidge datatype to the export type specified by the map passed in argument
+
+    If the aidge type is not convertible, that is to say, is not specified in the map, a value Error is raised.
+
+    :param datatype: Aidge datatype to convert
+    :type datatype: :py:object:`aidge_core.DataType`
+    :param conversion_map: Map specifying the conversion
+    :type conversion_map: Dict[:py:object:`aidge_core.DataType`, str]
+    :return: A string representing the export type
+    :rtype: string
+    """
+    if datatype in conversion_map:
+        return conversion_map[datatype]
+    else:
+        raise ValueError(f"Unsupported aidge datatype {datatype} for export")
diff --git a/aidge_core/export_utils/export_registry.py b/aidge_core/export_utils/export_registry.py
index e5b6b2098cd760c4d425b96caf7b41cc8e82c46e..8927ae5169978da81e39912ebd4e26e2655137ad 100644
--- a/aidge_core/export_utils/export_registry.py
+++ b/aidge_core/export_utils/export_registry.py
@@ -80,6 +80,14 @@ class ExportLib(aidge_core.OperatorImpl):
                                     aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.uint32]))
         aidge_core.register_Tensor([self._name, aidge_core.dtype.uint64],
                                     aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.uint64]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.int4],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.int4]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.uint4],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.uint4]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.dual_int4],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.dual_int4]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.dual_uint4],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.dual_uint4]))
 
     @classproperty
     def _export_node_registry(cls) -> Dict[str, List['ExportNode']]:
diff --git a/aidge_core/export_utils/node_export.py b/aidge_core/export_utils/node_export.py
index 5777814a0b10c49d0f75245bdc4e9681027bdfb8..c24727adf11bb936cb99c1f40312c4da8c0705f3 100644
--- a/aidge_core/export_utils/node_export.py
+++ b/aidge_core/export_utils/node_export.py
@@ -3,7 +3,8 @@ from pathlib import Path
 
 from aidge_core.export_utils import data_conversion, code_generation
 from abc import ABC, abstractmethod
-from typing import List
+from typing import List, Dict
+
 
 
 def get_chan(tensor: aidge_core.Tensor) -> int:
@@ -14,12 +15,19 @@ def get_chan(tensor: aidge_core.Tensor) -> int:
             return dims[1]
         elif len(dims) == 2:  # Suppose NC
             return dims[1]
+        elif len(dims) == 1:  # Suppose C (for bias)
+            return dims[0]
         else:
             return None
     elif dformat == aidge_core.dformat.nchw:
         return dims[1]
     elif dformat == aidge_core.dformat.nhwc:
-        return dims[3]
+        if len(dims) == 4:  # NHWC
+            return dims[3]
+        elif len(dims) == 2:  # NC
+            return 1
+        elif len(dims) == 1:  # C for bias
+            return 1
     elif dformat == aidge_core.dformat.chwn:
         return dims[0]
     elif dformat == aidge_core.dformat.ncdhw:
@@ -40,12 +48,19 @@ def get_height(tensor: aidge_core.Tensor) -> int:
             return dims[2]
         elif len(dims) == 2:  # Suppose NC
             return 1
+        elif len(dims) == 1:  # Suppose C for bias
+            return 1
         else:
             return None
     elif dformat == aidge_core.dformat.nchw:
         return dims[2]
     elif dformat == aidge_core.dformat.nhwc:
-        return dims[1]
+        if len(dims) == 4:  # NHWC
+            return dims[1]
+        elif len(dims) == 2:  # NC
+            return 1
+        elif len(dims) == 1:  # C for bias
+            return 1
     elif dformat == aidge_core.dformat.chwn:
         return dims[1]
     elif dformat == aidge_core.dformat.ncdhw:
@@ -66,12 +81,19 @@ def get_width(tensor: aidge_core.Tensor) -> int:
             return dims[3]
         elif len(dims) == 2:  # Suppose NC
             return 1
+        elif len(dims) == 1:  # Suppose C for bias
+            return 1
         else:
             return None
     elif dformat == aidge_core.dformat.nchw:
         return dims[3]
     elif dformat == aidge_core.dformat.nhwc:
-        return dims[2]
+        if len(dims) == 4:  # NHWC
+            return dims[2]
+        elif len(dims) == 2:  # NC
+            return 1
+        elif len(dims) == 1:  # C for bias
+            return 1
     elif dformat == aidge_core.dformat.chwn:
         return dims[2]
     elif dformat == aidge_core.dformat.ncdhw:
@@ -162,7 +184,9 @@ class ExportNode(ABC):
     """
 
     @abstractmethod
-    def __init__(self, aidge_node: aidge_core.Node, mem_info: List[dict]=None) -> None:
+    def __init__(self, aidge_node: aidge_core.Node,
+                 mem_info: List[dict] = None,
+                 conversion_map: Dict[aidge_core.dtype, str] = data_conversion.datatype_converter_aidge2c) -> None:
         """Create ExportNode and retrieve attributes from ``aidge_node``:
         """
 
@@ -231,8 +255,8 @@ class ExportNode(ABC):
                 self.attributes["in_dformat"][idx] = tensor.dformat()
                 self.attributes["in_format"][idx] = aidge_core.format_as(tensor.dformat())
                 self.attributes["in_dtype"][idx] = tensor.dtype()
-                self.attributes["in_cdtype"][idx] = data_conversion.aidge2c(
-                    tensor.dtype())
+                # self.attributes["in_cdtype"][idx] = data_conversion.aidge2c(tensor.dtype())
+                self.attributes["in_cdtype"][idx] = data_conversion.aidge2export_type(tensor.dtype(), conversion_map)
                 self.attributes["in_chan"][idx] = get_chan(tensor)
                 self.attributes["in_height"][idx] = get_height(tensor)
                 self.attributes["in_width"][idx] = get_width(tensor)
@@ -254,8 +278,8 @@ class ExportNode(ABC):
                 self.attributes["out_dformat"][idx] = tensor.dformat()
                 self.attributes["out_format"][idx] = aidge_core.format_as(tensor.dformat())
                 self.attributes["out_dtype"][idx] = tensor.dtype()
-                self.attributes["out_cdtype"][idx] = data_conversion.aidge2c(
-                    tensor.dtype())
+                # self.attributes["out_cdtype"][idx] = data_conversion.aidge2c(tensor.dtype())
+                self.attributes["out_cdtype"][idx] = data_conversion.aidge2export_type(tensor.dtype(), conversion_map)
                 self.attributes["out_chan"][idx] = get_chan(tensor)
                 self.attributes["out_height"][idx] = get_height(tensor)
                 self.attributes["out_width"][idx] = get_width(tensor)
diff --git a/aidge_core/export_utils/scheduler_export.py b/aidge_core/export_utils/scheduler_export.py
index 90862578f04ecd0bdf5ee0bfd7643e7bc3d0a455..0995e4cea50e1a813f433ce4a7a9e94733a68196 100644
--- a/aidge_core/export_utils/scheduler_export.py
+++ b/aidge_core/export_utils/scheduler_export.py
@@ -109,15 +109,17 @@ def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib =
         is_output:bool = node in graphview.get_output_nodes()
 
         if is_input:
+            flag_not_input = True
             # GraphView.get_inputs_nodes() returns the nodes that have an Input set to None or not in the graph
             # However, some inputs are Optional and thus the node may not be an input of the graph!
-            # So we need to check that all the inputs of the nodes or in the graph or not optional
+            # So we need to check that at least one input of the node is not in the graph and not optional
             # This is what the following code block is checking.
             for idx, node_in in enumerate(node.inputs()):
-                optional:bool = node.get_operator().input_category(idx) == aidge_core.InputCategory.OptionalData
+                optional:bool = node.get_operator().is_optional_input(idx)
                 # Note: node_in is a Tuple(Node, out_idx)
                 in_graph:bool = node_in[0] in graphview.get_nodes()
-                is_input &= (in_graph or not optional)
+                flag_not_input &= (in_graph or optional)
+            is_input = not flag_not_input
 
         # Get operator current specs
         required_specs = op_impl.get_required_spec()
@@ -137,11 +139,12 @@ def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib =
         list_actions += op.forward()
         if is_input:
             for idx, node_in in enumerate(node.inputs()):
-                if node_in[0] not in graphview.get_nodes():
+                if (node.get_operator().get_input(idx) is not None) and (node_in[0] not in graphview.get_nodes()):
                     inputs_name.append(op.attributes["in_name"][idx])
                     inputs_dtype.append(
                         op.attributes["in_cdtype"][idx]
                     )
+
         if is_output:
             for idx in range(len(node.outputs())):
                 outputs_name.append(op.attributes["out_name"][idx])
diff --git a/aidge_core/mem_info.py b/aidge_core/mem_info.py
index c7ca85bbd73bd205850b19616e53fda210749a80..cabc2c72ee973babdf0342ba82057f7ab0769b52 100644
--- a/aidge_core/mem_info.py
+++ b/aidge_core/mem_info.py
@@ -5,6 +5,9 @@ from pathlib import Path
 import aidge_core
 from typing import Tuple, List
 
+import matplotlib.pyplot as plt
+import aidge_core.mem_info
+import numpy as np
 
 # Default memory management, which can be used for development
 def compute_default_mem_info(scheduler: aidge_core.Scheduler) -> Tuple[int, List]:
@@ -41,20 +44,72 @@ def compute_default_mem_info(scheduler: aidge_core.Scheduler) -> Tuple[int, List
             mem_info[node] = [] # No meminfo for producer
     return mem_size, mem_info
 
+def log_meminfo(mem_manager: aidge_core.MemoryManager, path: Path, display_names: bool):
+    """Generate a graph representing the memory allocation of each output.
 
-def _gnuplot_installed():
-    try:
-        # Run gnuplot with the --version flag and capture the output
-        subprocess.run(["gnuplot", "--version"])
-        return True
-    except FileNotFoundError:
-        aidge_core.Log.warn("Gnuplot is not installed.")
-        return False
-    except subprocess.CalledProcessError:
-        aidge_core.Log.warn("Gnuplot command found but failed to run.")
-        return False
+    Blocks with the same color correspond to the same memory plane.
 
-def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder: Path = None, wrapping: bool = False) -> Tuple[int, List[dict]]:
+    :param mem_manager: Memory manager to log
+    :type mem_manager: aidge_core.MemoryManager
+    :param path: Path where to save the figure
+    :type path: Path
+    :param display_names: If True, node names are displayed alongside their block
+    :type display_names: bool
+    """
+
+    max_lifetime = mem_manager.get_max_lifetime()
+
+    # peak_usage in kwords
+    peak_usage = mem_manager.get_peak_usage() / 1024
+
+    # Set figure size 1920x1080 px
+    plt.figure(figsize=(19.20, 10.80))
+    # Same color for all blocks of a given memory plane
+    colors = plt.cm.viridis(np.linspace(0, 1, len(mem_manager.get_planes()) + 1))
+    color_id = 1
+    for node, planes in mem_manager.get_planes().items():
+        for plane in planes:
+            cont_offset    = plane.get_contiguous_offset()
+            cont_size      = plane.get_contiguous_size()
+            allocated      = plane.mem_space.allocated
+            released       = plane.mem_space.released
+            is_released    = released >= 0 and not plane.mem_space.dependencies
+            x_start        = allocated
+            y_start        = cont_offset / 1024.0
+            y_end          = (cont_offset + cont_size) / 1024.0
+            x_end          = max_lifetime if not is_released else released
+
+            plt.fill_betweenx(
+                [y_start, y_end],
+                x_start,
+                x_end + 1,
+                color=colors[color_id % len(colors)]
+            )
+
+            if display_names:
+                # Rotate the label for readability
+                plt.text(x_end, y_end, node.name(), rotation=45)
+        color_id += 1
+
+    plt.xlim(0, max_lifetime + 1)
+    plt.ylim(0, peak_usage)
+    plt.axhline(y=peak_usage, color='red', linestyle='--')
+    plt.text(0, peak_usage, f'Peak usage = {peak_usage} KWords', color='red')
+    plt.xlabel("Time")
+    plt.ylabel("Memory usage (KWords)")
+    plt.title("Memory Usage Over Time")
+    plt.grid(True)
+    ax = plt.gca()
+    ax.spines['top'].set_visible(False)
+    ax.spines['right'].set_visible(False)
+    folder_path = path.parent
+    folder_path.mkdir(parents=True, exist_ok=True)
+    plt.savefig(path)
+    plt.close()
+    aidge_core.Log.notice(f"Generated memory management info at: {path}")
+
+
+def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder: Path = None, wrapping: bool = False, auto_concat: bool = False, display_names: bool = True) -> Tuple[int, List[dict]]:
     """Generates optimized memory information for a computation graph managed by a scheduler.
 
     This function analyzes the memory usage of a computation graph, determining the memory peak
@@ -70,6 +125,11 @@ def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder
     :param wrapping: Boolean flag to enable or disable wrap-around buffer optimization.
                      Defaults to `False`.
     :type wrapping: bool, optional
+    :param auto_concat: Boolean flag to enable or disable auto-concatenation optimization.
+                     Defaults to `False`.
+    :type auto_concat: bool, optional
+    :param display_names: If True, node names are displayed in the memory plot alongside their block, defaults to True
+    :type display_names: bool, optional
     :return: A tuple containing the peak memory size and a list of memory information for each
              scheduled node. The memory information for each node includes details such as size,
              offset, stride, length, count, and optional wrap-around details.
@@ -81,25 +141,19 @@ def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder
     # scheduler.generate_scheduling()
     # Generate the memory manager
     # So far, the Producers are not take in consideration in the meory manager => inc_producers=False
-    mem_manager = scheduler.generate_memory(
-        inc_producers=False, wrap_around_buffer=wrapping)
+    if auto_concat:
+        mem_manager = scheduler.generate_memory_auto_concat(
+            inc_producers=False, wrap_around_buffer=wrapping)
+    else:
+        mem_manager = scheduler.generate_memory(
+            inc_producers=False, wrap_around_buffer=wrapping)
 
     # List of nodes which are connected at the input of the graph (None if input is not connected)
     nodes_at_input = [n[0] for n in scheduler.graph_view().inputs()]
 
     if stats_folder is not None:
-        if _gnuplot_installed():
-            # Use gnuplot to generate the log
-            os.makedirs(str(Path(stats_folder) / "graph"), exist_ok=True)
-            mem_manager.log("memory_info")
-            os.chmod("memory_info_plot.gnu", 0o777)
-            os.system("./memory_info_plot.gnu")
-            shutil.move("memory_info", str(Path(stats_folder) / "graph" / "memory_info"))
-            shutil.move("memory_info_plot.png", str(
-                Path(stats_folder) / "graph" / "memory_info_plot.png"))
-            os.remove("memory_info_plot.gnu")
-        else:
-            aidge_core.Log.warn("Warning: gnuplot is not installed, could not generate stat folder.")
+        log_meminfo(mem_manager, Path(stats_folder) / "memory_info.png", display_names)
+
     # In the export, we currently use an unified memory buffer whose size
     # is determined by the memory peak usage
     mem_size = mem_manager.get_peak_usage()
diff --git a/aidge_core/show_graphview.py b/aidge_core/show_graphview.py
index 14bb6c3e9a4c7be1e1aecee02a04a5dc42e0a5d4..cc4d7bc722fdfb99f77556b6c66265ca2125d142 100644
--- a/aidge_core/show_graphview.py
+++ b/aidge_core/show_graphview.py
@@ -58,20 +58,36 @@ def _create_dict(ordered_nodes : List[aidge_core.Node], write_trainable_params_e
                          'nb_outputs' : node.get_operator().nb_outputs()}
 
             inputs = []
-            for input_idx in range(node.get_operator().nb_inputs()):
-                input_dict = {'dims' : node.get_operator().get_input(input_idx).dims(),
-                              'data_type' : str(node.get_operator().get_input(input_idx).dtype()),
-                              'data_format' : str(node.get_operator().get_input(input_idx).dformat())}
-                inputs.append(input_dict)
+            if node.get_operator().nb_inputs() > 0:
+                for input_idx in range(node.get_operator().nb_inputs()):
+                    if node.get_operator().get_input(input_idx) is not None:
+                        input_dict = {'dims' : node.get_operator().get_input(input_idx).dims(),
+                                    'data_type' : str(node.get_operator().get_input(input_idx).dtype()),
+                                    'data_format' : str(node.get_operator().get_input(input_idx).dformat())}
+
+                    elif node.get_operator().get_input(input_idx) is None:
+                        input_dict = {'dims' : None,
+                                    'data_type' : None,
+                                    'data_format' : None}
+
+                    inputs.append(input_dict)
 
             node_dict['inputs'] = inputs
 
             outputs = []
-            for output_idx in range(node.get_operator().nb_outputs()):
-                output_dict = {'dims' : node.get_operator().get_output(output_idx).dims(),
-                               'data_type' : str(node.get_operator().get_output(output_idx).dtype()),
-                              'data_format' : str(node.get_operator().get_output(output_idx).dformat())}
-                outputs.append(output_dict)
+            if node.get_operator().nb_outputs() > 0:
+                for output_idx in range(node.get_operator().nb_outputs()):
+                    if node.get_operator().get_output(output_idx) is not None:
+                        output_dict = {'dims' : node.get_operator().get_output(output_idx).dims(),
+                                    'data_type' : str(node.get_operator().get_output(output_idx).dtype()),
+                                    'data_format' : str(node.get_operator().get_output(output_idx).dformat())}
+
+                    elif node.get_operator().get_output(output_idx) is None:
+                        output_dict = {'dims' : None,
+                                    'data_type' : None,
+                                    'data_format' : None}
+
+                    outputs.append(output_dict)
 
             node_dict['outputs'] = outputs
 
@@ -199,6 +215,8 @@ def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainabl
     :type params_file_format: str, optional
     """
 
+    json_path = Path(json_path)
+
     if not json_path.suffix:
         if not json_path.is_dir():
             json_path.mkdir(parents=True, exist_ok=True)
diff --git a/aidge_core/unit_tests/test_show_graphview.py b/aidge_core/unit_tests/test_show_graphview.py
index 4c68e93e39a543e96c2b664dbe554660bf37cc91..838a88b2ce5b30c69b18fd61f514af2ec96e0199 100644
--- a/aidge_core/unit_tests/test_show_graphview.py
+++ b/aidge_core/unit_tests/test_show_graphview.py
@@ -28,7 +28,7 @@ def create_gview():
             value = prod_op.get_output(0)
             value.set_backend("cpu")
             tuple_out = node.output(0)[0]
-            
+
             if (tuple_out[0].type() == "Conv" or tuple_out[0].type() == "PaddedConv") and tuple_out[1]==1:
                 # Conv weight
                 aidge_core.xavier_uniform_filler(value)
@@ -45,13 +45,13 @@ def create_gview():
                 pass
 
     # Compile model
-    gview.forward_dims([[1, 1, 28, 28]]) 
+    gview.forward_dims([[1, 1, 28, 28]])
     gview.set_datatype(aidge_core.dtype.float32)
 
     return gview
 
 class test_show_gview(unittest.TestCase):
-    """Test aidge functionality to show GraphView.    
+    """Test aidge functionality to show GraphView.
     """
 
     def setUp(self):
@@ -61,12 +61,14 @@ class test_show_gview(unittest.TestCase):
         pass
 
     def test_gview_to_json(self):
-        
+
         gview = create_gview()
 
-        # Create temporary file to store JSON model description             
-        model_description_file = tempfile.NamedTemporaryFile(mode="w+", suffix='.json')
+        # Create temporary file to store JSON model description
+        model_description_file = tempfile.NamedTemporaryFile(suffix='.json', delete=False)
+        model_description_file.close() # Ensure the file is closed
 
+        # Pass the file path to gview_to_json
         gview_to_json(gview, Path(model_description_file.name))
 
         # Load JSON
@@ -76,15 +78,15 @@ class test_show_gview(unittest.TestCase):
         # Get list of nodes of Aidge graphview
         gview_ordered_nodes = gview.get_ordered_nodes()
 
-        # Iterate over the list of ordered nodes and the corresponding JSON 
+        # Iterate over the list of ordered nodes and the corresponding JSON
         self.assertEqual(len(gview_ordered_nodes), len(model_json['graph']))
 
-        for node_gview, node_json in zip(gview_ordered_nodes, model_json['graph']):   
-                    
+        for node_gview, node_json in zip(gview_ordered_nodes, model_json['graph']):
+
             self.assertEqual(node_gview.get_operator().type(), node_json['optype'])
             self.assertEqual(node_gview.get_operator().nb_inputs(), node_json['nb_inputs'])
             self.assertEqual(node_gview.get_operator().nb_outputs(), node_json['nb_outputs'])
-            
+
             self.assertEqual(node_gview.get_operator().nb_inputs(), len(node_json['inputs']))
             for input_idx in range(node_gview.get_operator().nb_inputs()):
                 self.assertEqual(node_gview.get_operator().get_input(input_idx).dims(), node_json['inputs'][input_idx]['dims'])
@@ -97,7 +99,7 @@ class test_show_gview(unittest.TestCase):
                 self.assertEqual(str(node_gview.get_operator().get_output(output_idx).dtype()), node_json['outputs'][output_idx]['data_type'])
                 self.assertEqual(str(node_gview.get_operator().get_output(output_idx).dformat()), node_json['outputs'][output_idx]['data_format'])
 
-            self.assertEqual(len(node_gview.get_parents()), len(node_json['parents']))                  
+            self.assertEqual(len(node_gview.get_parents()), len(node_json['parents']))
             self.assertEqual(len(node_gview.get_children()), len(node_json['children']))
 
             if not hasattr(node_gview.get_operator(), 'get_micro_graph'):
@@ -109,21 +111,21 @@ class test_show_gview(unittest.TestCase):
                     self.assertIsNone(node_gview.get_operator().attr) and self.assertFalse(node_json['attributes'])
 
             elif hasattr(node_gview.get_operator(), 'get_micro_graph'):
-                
+
                 self.assertEqual(len(node_gview.get_operator().get_micro_graph().get_nodes()), len(node_json['attributes']['micro_graph']))
-                
+
                 for micro_node_gview in node_gview.get_operator().get_micro_graph().get_nodes():
                     for micro_node_json in node_json['attributes']['micro_graph']:
                         if micro_node_gview.get_operator().type() == micro_node_json['optype']:
-                            
+
                             for key, value in micro_node_gview.get_operator().attr.dict().items():
                                 if not type(value).__name__ in dir(builtins):
                                     # Replace original value by its name (str) because value is of a type that could not be written to the JSON
-                                    # Cannot update this dict inplace : micro_node_gview.get_operator().attr.dict().update({key : value.name}) 
+                                    # Cannot update this dict inplace : micro_node_gview.get_operator().attr.dict().update({key : value.name})
                                     temp_mnode_dict = micro_node_gview.get_operator().attr.dict()
                                     temp_mnode_dict.update({key : value.name})
-                                    self.assertDictEqual(temp_mnode_dict, micro_node_json['attributes'])                
-                    
+                                    self.assertDictEqual(temp_mnode_dict, micro_node_json['attributes'])
+
 if __name__ == '__main__':
     unittest.main()
 
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index a06202696951a888284f1e0a50b6c25b677fe7f3..3031fc19b335f6e77bb7999f8b3a2b107e3f5323 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -11,6 +11,7 @@
 
 #ifndef AIDGE_IMPORTS_H_
 #define AIDGE_IMPORTS_H_
+#include "aidge/core_version.h"
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/backend/TensorImpl.hpp"
@@ -20,6 +21,8 @@
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/data/Data.hpp"
+#include "aidge/data/DataFormat.hpp"
+#include "aidge/data/DataType.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Database.hpp"
 #include "aidge/data/DataProvider.hpp"
@@ -29,8 +32,6 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/OpArgs.hpp"
 
-#include "aidge/graphRegex/GraphRegex.hpp"
-
 #include "aidge/filler/Filler.hpp"
 
 #include "aidge/nodeTester/ConditionalInterpreter.hpp"
@@ -92,5 +93,7 @@
 #include "aidge/utils/Random.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/logger/EnumString.hpp"
+#include "aidge/utils/sys_info/CoreVersionInfo.hpp"
 
 #endif /* AIDGE_IMPORTS_H_ */
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 8a6684b22b4c4659353fa5b5dee2b0820c46a11f..9710c028aa6b18c5f5324b898b96e93f1a7912cf 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -12,13 +12,16 @@
 #ifndef AIDGE_BACKEND_OPERATORIMPL_H_
 #define AIDGE_BACKEND_OPERATORIMPL_H_
 
+#include <functional>  // std::function
+#include <memory>
 #include <string>
 #include <vector>
-#include <functional>
+#include <utility>  // std::pair
 
 #include "aidge/utils/Types.h"
 #include "aidge/utils/DynamicAttributes.hpp"
-#include "aidge/data/Data.hpp"
+#include "aidge/data/DataFormat.hpp"
+#include "aidge/data/DataType.hpp"
 #include "aidge/data/Elts.hpp"
 #include "aidge/scheduler/ProdConso.hpp"
 
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 864789c19181b52351fc09a63a787feaed31a216..05674c44d5a97e89bfd6154e956d52e7d22920ff 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -12,15 +12,16 @@
 #ifndef AIDGE_TENSORIMPL_H_
 #define AIDGE_TENSORIMPL_H_
 
-#include <numeric>     // std::accumulate
 #include <cstddef>     // std::size_t
 #include <functional>  // std::multiplies
-#include <vector>
+#include <numeric>     // std::accumulate
 #include <utility>     // std::pair, std::make_pair
+#include <vector>
+#include <string>
 
-#include "aidge/data/Data.hpp"
-#include "aidge/utils/Types.h"
+#include "aidge/data/DataType.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
 
 namespace Aidge {
 /**
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index d04624fc530a21730cc4dc1f4f1ac90a58e6590b..cc0b2e6655e64633c77db09334ee9edee2ad8cb2 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -13,6 +13,7 @@
 #define AIDGE_CPU_DATA_TENSORIMPL_H_
 
 #include "aidge/backend/TensorImpl.hpp"
+#include "aidge/data/DataType.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -55,7 +56,7 @@ public:
         T* dstT = static_cast<T *>(rawPtr(offset));
 
         AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "TensorImpl_cpu<{}>::copy(): overlapping copy is not supported", typeid(T).name());
-        std::copy(srcT, srcT + length, dstT);
+        std::copy_n(srcT, length, dstT);
     }
 
     void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final;
@@ -126,6 +127,20 @@ REGISTRAR(Tensor, {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<int64_t>::crea
 REGISTRAR(Tensor, {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int32_t>::create);
 REGISTRAR(Tensor, {"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
 REGISTRAR(Tensor, {"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Int4}, Aidge::TensorImpl_cpu<int8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::UInt4}, Aidge::TensorImpl_cpu<uint8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Int3}, Aidge::TensorImpl_cpu<int8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::UInt3}, Aidge::TensorImpl_cpu<uint8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Int2}, Aidge::TensorImpl_cpu<int8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::UInt2}, Aidge::TensorImpl_cpu<uint8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Dual_Int4}, Aidge::TensorImpl_cpu<int8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Dual_UInt4}, Aidge::TensorImpl_cpu<uint8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Dual_Int3}, Aidge::TensorImpl_cpu<int8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Dual_UInt3}, Aidge::TensorImpl_cpu<uint8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Quad_Int2}, Aidge::TensorImpl_cpu<int8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Quad_UInt2}, Aidge::TensorImpl_cpu<uint8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Binary}, Aidge::TensorImpl_cpu<int8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Octo_Binary}, Aidge::TensorImpl_cpu<int8_t>::create);
 REGISTRAR(Tensor, {"cpu", DataType::UInt64}, Aidge::TensorImpl_cpu<uint64_t>::create);
 REGISTRAR(Tensor, {"cpu", DataType::UInt32}, Aidge::TensorImpl_cpu<uint32_t>::create);
 REGISTRAR(Tensor, {"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index a34718296e4ccddbfca0b4eb0daf14b08124389a..fac8e7fb4ac0df25caedca1862939444cf7c4391 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -12,83 +12,12 @@
 #ifndef AIDGE_DATA_H_
 #define AIDGE_DATA_H_
 
-#include <cstdint>
-#include <fmt/format.h>
+#include <cstddef>  // std::size_t
 #include <string>
-#include <tuple>
-#include <array>
 
-#include "aidge/data/half.hpp"
-#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
-enum class DataType {
-    Float64,
-    Float32,
-    Float16,
-    BFloat16,
-    Binary,
-    Ternary,
-    Int2,
-    Int3,
-    Int4,
-    Int5,
-    Int6,
-    Int7,
-    Int8,
-    Int16,
-    Int32,
-    Int64,
-    UInt2,
-    UInt3,
-    UInt4,
-    UInt5,
-    UInt6,
-    UInt7,
-    UInt8,
-    UInt16,
-    UInt32,
-    UInt64,
-    Any
-};
-
-bool isDataTypeFloatingPoint(const DataType& type);
-size_t getDataTypeBitWidth(const DataType& type);
-
-enum class DataFormat {
-    Default,
-    NCHW,
-    NHWC,
-    CHWN,
-    NCDHW,
-    NDHWC,
-    CDHWN,
-    Any
-};
-
-using DataFormatTranspose = std::array<size_t, 5>;
-// Permutation arrays dict to obtain DataFormat (same order as DataFormat enum)
-constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
-    // Important: in this array only, dimension index must start at 1, not 0!
-    // (0 is the default value)
-    {},
-    {1, 2, 3, 4},
-    {1, 3, 4, 2},
-    {2, 3, 4, 1},
-    {1, 2, 3, 4, 5},
-    {1, 3, 4, 5, 2},
-    {2, 3, 4, 5, 1}
-}};
-
-/**
- * Get the DataFormatTranspose array to transpose data from src to dst DataFormat.
- * @param src Source DataFormat
- * @param dst Destination DataFormat
- * @return DataFormatTranspose Permutation array to achieve a transposition
- *         from src to dst DataFormat.
-*/
-DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataFormat& dst);
 
 class Data {
 public:
@@ -109,60 +38,11 @@ public:
         return mType;
     }
     virtual ~Data() = default;
-    virtual std::string toString() const = 0;
+    virtual std::string toString(int precision = -1, std::size_t offset = 0) const = 0;
 
 private:
     const std::string mType;
 };
 }
 
-namespace {
-template <typename T> struct NativeType { static const Aidge::DataType type; };
-template <> const Aidge::DataType NativeType<double>::type = Aidge::DataType::Float64;
-template <> const Aidge::DataType NativeType<float>::type = Aidge::DataType::Float32;
-template <> const Aidge::DataType NativeType<half_float::half>::type = Aidge::DataType::Float16;
-template <> const Aidge::DataType NativeType<std::int8_t>::type = Aidge::DataType::Int8;
-template <> const Aidge::DataType NativeType<std::int16_t>::type = Aidge::DataType::Int16;
-template <> const Aidge::DataType NativeType<std::int32_t>::type = Aidge::DataType::Int32;
-template <> const Aidge::DataType NativeType<std::int64_t>::type = Aidge::DataType::Int64;
-template <> const Aidge::DataType NativeType<std::uint8_t>::type = Aidge::DataType::UInt8;
-template <> const Aidge::DataType NativeType<std::uint16_t>::type = Aidge::DataType::UInt16;
-template <> const Aidge::DataType NativeType<std::uint32_t>::type = Aidge::DataType::UInt32;
-template <> const Aidge::DataType NativeType<std::uint64_t>::type = Aidge::DataType::UInt64;
-
-template <>
-const char* const EnumStrings<Aidge::DataType>::data[]
-    = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Ternary",
-       "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16",
-       "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6",
-       "UInt7", "UInt8", "UInt16", "UInt32", "UInt64", "Any"};
-
-template <>
-const char* const EnumStrings<Aidge::DataFormat>::data[]
-    = {"Default", "NCHW", "NHWC", "CHWN", "NCDHW", "NDHWC", "CDHWN", "Any"};
-
-template <Aidge::DataType D> struct cpptype {
-    using type = void; // Placeholder
-};
-template <> struct cpptype<Aidge::DataType::Float16> { using type = half_float::half; };
-template <> struct cpptype<Aidge::DataType::Float32> { using type = float; };
-template <> struct cpptype<Aidge::DataType::Float64> { using type = double; };
-template <> struct cpptype<Aidge::DataType::Int8> { using type = std::int8_t; };
-template <> struct cpptype<Aidge::DataType::Int16> { using type = std::int16_t; };
-template <> struct cpptype<Aidge::DataType::Int32> { using type = std::int32_t; };
-template <> struct cpptype<Aidge::DataType::Int64> { using type = std::int64_t; };
-template <> struct cpptype<Aidge::DataType::UInt8> { using type = std::uint8_t; };
-template <> struct cpptype<Aidge::DataType::UInt16> { using type = std::uint16_t; };
-template <> struct cpptype<Aidge::DataType::UInt32> { using type = std::uint32_t; };
-template <> struct cpptype<Aidge::DataType::UInt64> { using type = std::uint64_t; };
-
-template <Aidge::DataType D> using cpptype_t = typename cpptype<D>::type;
-}
-
-
-namespace Aidge {
-inline auto format_as(DataType dt) { return EnumStrings<Aidge::DataType>::data[static_cast<int>(dt)]; }
-inline auto format_as(DataFormat df) { return EnumStrings<Aidge::DataFormat>::data[static_cast<int>(df)]; }
-}
-
 #endif /* AIDGE_DATA_H_ */
diff --git a/include/aidge/data/DataFormat.hpp b/include/aidge/data/DataFormat.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..77be8680deeda422e6cf41e612eab21ecf80c5e1
--- /dev/null
+++ b/include/aidge/data/DataFormat.hpp
@@ -0,0 +1,83 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_DATA_DATAFORMAT_H_
+#define AIDGE_CORE_DATA_DATAFORMAT_H_
+
+#include <array>
+#include <cstddef>  // std::size_t
+
+#include "aidge/utils/logger/EnumString.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Enumeration of supported tensor data layouts
+ *
+ * Represents different memory layout formats for multi-dimensional tensors:
+ * - N: Batch size
+ * - C: Channels
+ * - H: Height
+ * - W: Width
+ * - D: Depth (for 3D tensors)
+ */
+enum class DataFormat {
+    Default,    ///< Default format, implementation dependent
+    NCHW,      ///< 4D format: [batch][channel][height][width]
+    NHWC,      ///< 4D format: [batch][height][width][channel]
+    CHWN,      ///< 4D format: [channel][height][width][batch]
+    NCDHW,     ///< 5D format: [batch][channel][depth][height][width]
+    NDHWC,     ///< 5D format: [batch][depth][height][width][channel]
+    CDHWN,     ///< 5D format: [channel][depth][height][width][batch]
+    Any        ///< Unspecified format
+};
+
+using DataFormatTranspose = std::array<std::size_t, 5>;
+
+/**
+ * @brief Dictionary of transpose operations between different formats
+ *
+ * Contains permutation arrays to convert between different data formats.
+ * @warning In this array only, dimension index starts at 1
+ * (0 is reserved as default value).
+ */
+constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
+    {},                 // Default
+    {1, 2, 3, 4},      // NCHW
+    {1, 3, 4, 2},      // NHWC
+    {2, 3, 4, 1},      // CHWN
+    {1, 2, 3, 4, 5},   // NCDHW
+    {1, 3, 4, 5, 2},   // NDHWC
+    {2, 3, 4, 5, 1}    // CDHWN
+}};
+
+/**
+ * @brief Get the permutation array for converting between data formats
+ *
+ * @param src Source data format
+ * @param dst Destination data format
+ * @return DataFormatTranspose Permutation array to achieve the format conversion
+ */
+DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataFormat& dst);
+
+} // namespace Aidge
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::DataFormat>::data[]
+    = {"Default", "NCHW", "NHWC", "CHWN", "NCDHW", "NDHWC", "CDHWN", "Any"};
+}
+
+namespace Aidge {
+inline auto format_as(DataFormat df) { return EnumStrings<DataFormat>::data[static_cast<int>(df)]; }
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_DATA_DATAFORMAT_H_ */
diff --git a/include/aidge/data/DataType.hpp b/include/aidge/data/DataType.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..41984f1996d773589c0f69f12e9e4509f1d02d91
--- /dev/null
+++ b/include/aidge/data/DataType.hpp
@@ -0,0 +1,189 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_DATA_DATATYPE_H_
+#define AIDGE_CORE_DATA_DATATYPE_H_
+
+#include <cstddef>  // std::size_t
+#include <cstdint>
+
+#include "aidge/data/half.hpp"
+#include "aidge/utils/logger/EnumString.hpp"
+
+namespace Aidge {
+/**
+ * @brief Enumeration of data types supported by the framework
+ *
+ * Represents the various data types that can be used for computation and storage.
+ * This includes standard types (floating point, integers), quantized types,
+ * and specialized formats for neural network operations.
+ */
+enum class DataType {
+    // Floating point types
+    Float64,    ///< 64-bit floating point (double)
+    Float32,    ///< 32-bit floating point (float)
+    Float16,    ///< 16-bit floating point (half)
+    BFloat16,   ///< 16-bit brain floating point
+
+    // Quantized binary and ternary types
+    Binary,     ///< 1-bit binary values
+    Octo_Binary, ///< 8x1-bit interleaved binary
+    Ternary,    ///< Ternary values (-1,0,1)
+
+    // Quantized integer types (2-bit)
+    Int2,       ///< 2-bit signed integer
+    Quad_Int2,  ///< 4x2-bit interleaved signed integer
+    UInt2,      ///< 2-bit unsigned integer
+    Quad_UInt2, ///< 4x2-bit interleaved unsigned integer
+
+    // Quantized integer types (3-bit)
+    Int3,       ///< 3-bit signed integer
+    Dual_Int3,  ///< 2x3-bit interleaved signed integer
+    UInt3,      ///< 3-bit unsigned integer
+    Dual_UInt3, ///< 2x3-bit interleaved unsigned integer
+
+    // Quantized integer types (4-bit)
+    Int4,       ///< 4-bit signed integer
+    Dual_Int4,  ///< 2x4-bit interleaved signed integer
+    UInt4,      ///< 4-bit unsigned integer
+    Dual_UInt4, ///< 2x4-bit interleaved unsigned integer
+
+    // Standard integer types
+    Int5,       ///< 5-bit signed integer
+    Int6,       ///< 6-bit signed integer
+    Int7,       ///< 7-bit signed integer
+    Int8,       ///< 8-bit signed integer
+    Int16,      ///< 16-bit signed integer
+    Int32,      ///< 32-bit signed integer
+    Int64,      ///< 64-bit signed integer
+
+    UInt5,      ///< 5-bit unsigned integer
+    UInt6,      ///< 6-bit unsigned integer
+    UInt7,      ///< 7-bit unsigned integer
+    UInt8,      ///< 8-bit unsigned integer
+    UInt16,     ///< 16-bit unsigned integer
+    UInt32,     ///< 32-bit unsigned integer
+    UInt64,     ///< 64-bit unsigned integer
+
+    Any ///< Unspecified type
+};
+
+namespace {
+// Type trait for mapping C++ types to Aidge DataType
+template<typename T>
+struct NativeType {
+    static constexpr DataType value = DataType::Any;
+};
+// Helper variable template
+template<typename T>
+constexpr DataType NativeType_v = NativeType<T>::value;
+// Specializations for native types
+template<> struct NativeType<double> { static constexpr DataType value = Aidge::DataType::Float64; };
+template<> struct NativeType<float> { static constexpr DataType value = Aidge::DataType::Float32; };
+template<> struct NativeType<half_float::half> { static constexpr DataType value = Aidge::DataType::Float16; };
+template<> struct NativeType<std::int8_t> { static constexpr DataType value = Aidge::DataType::Int8; };
+template<> struct NativeType<std::int16_t> { static constexpr DataType value = Aidge::DataType::Int16; };
+template<> struct NativeType<std::int32_t> { static constexpr DataType value = Aidge::DataType::Int32; };
+template<> struct NativeType<std::int64_t> { static constexpr DataType value = Aidge::DataType::Int64; };
+template<> struct NativeType<std::uint8_t> { static constexpr DataType value = Aidge::DataType::UInt8; };
+template<> struct NativeType<std::uint16_t> { static constexpr DataType value = Aidge::DataType::UInt16; };
+template<> struct NativeType<std::uint32_t> { static constexpr DataType value = Aidge::DataType::UInt32; };
+template<> struct NativeType<std::uint64_t> { static constexpr DataType value = Aidge::DataType::UInt64; };
+
+
+// Type trait for mapping Aidge DataType to C++ types
+template <DataType D>
+struct cpptype {
+    using type = void; // Placeholder
+};
+// Helper alias template
+template <DataType D>
+using cpptype_t = typename cpptype<D>::type;
+// Specializations for data types
+template <> struct cpptype<DataType::Float16> { using type = half_float::half; };
+template <> struct cpptype<DataType::Float32> { using type = float; };
+template <> struct cpptype<DataType::Float64> { using type = double; };
+template <> struct cpptype<DataType::Int4> { using type = std::int8_t; };
+template <> struct cpptype<DataType::UInt4> { using type = std::int8_t; };
+template <> struct cpptype<DataType::Int3> { using type = std::int8_t; };
+template <> struct cpptype<DataType::UInt3> { using type = std::int8_t; };
+template <> struct cpptype<DataType::Int2> { using type = std::int8_t; };
+template <> struct cpptype<DataType::UInt2> { using type = std::int8_t; };
+template <> struct cpptype<DataType::Dual_Int4> { using type = std::int8_t; };
+template <> struct cpptype<DataType::Dual_UInt4> { using type = std::int8_t; };
+template <> struct cpptype<DataType::Dual_Int3> { using type = std::int8_t; };
+template <> struct cpptype<DataType::Dual_UInt3> { using type = std::int8_t; };
+template <> struct cpptype<DataType::Quad_Int2> { using type = std::int8_t; };
+template <> struct cpptype<DataType::Quad_UInt2> { using type = std::int8_t; };
+template <> struct cpptype<DataType::Binary> { using type = std::int8_t; };
+template <> struct cpptype<DataType::Octo_Binary> { using type = std::int8_t; };
+template <> struct cpptype<DataType::Int8> { using type = std::int8_t; };
+template <> struct cpptype<DataType::Int16> { using type = std::int16_t; };
+template <> struct cpptype<DataType::Int32> { using type = std::int32_t; };
+template <> struct cpptype<DataType::Int64> { using type = std::int64_t; };
+template <> struct cpptype<DataType::UInt8> { using type = std::uint8_t; };
+template <> struct cpptype<DataType::UInt16> { using type = std::uint16_t; };
+template <> struct cpptype<DataType::UInt32> { using type = std::uint32_t; };
+template <> struct cpptype<DataType::UInt64> { using type = std::uint64_t; };
+
+
+// Type trait for mapping DataType to their interleaved variants
+template<DataType D>
+struct WeightInterleavedType {
+    static constexpr DataType value = DataType::Any;
+};
+// Helper variable template
+template<DataType D>
+constexpr DataType WeightInterleavedType_v = WeightInterleavedType<D>::value;
+// Specializations for interleaved types
+template<> struct WeightInterleavedType<DataType::Int4> { static constexpr DataType value = DataType::Dual_Int4; };
+template<> struct WeightInterleavedType<DataType::UInt4> { static constexpr DataType value = DataType::Dual_UInt4; };
+template<> struct WeightInterleavedType<DataType::Int3> { static constexpr DataType value = DataType::Dual_Int3; };
+template<> struct WeightInterleavedType<DataType::UInt3> { static constexpr DataType value = DataType::Dual_UInt3; };
+template<> struct WeightInterleavedType<DataType::Int2> { static constexpr DataType value = DataType::Quad_Int2; };
+template<> struct WeightInterleavedType<DataType::UInt2> { static constexpr DataType value = DataType::Quad_UInt2; };
+template<> struct WeightInterleavedType<DataType::Binary> { static constexpr DataType value = DataType::Octo_Binary; };
+
+}
+
+/**
+ * @brief Check if a data type is floating point
+ * @param type The type to check
+ * @return true if the type is floating point, false otherwise
+ */
+constexpr bool isFloatingPoint(const DataType& type) noexcept {
+    return type == DataType::Float64 ||
+           type == DataType::Float32 ||
+           type == DataType::Float16 ||
+           type == DataType::BFloat16;
+}
+
+std::size_t getDataTypeBitWidth(const DataType& type);
+
+} // namespace Aidge
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::DataType>::data[]
+    = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Octo_Binary",
+       "Ternary", "Int2", "Quad_Int2", "UInt2", "Quad_UInt2", "Int3",
+       "Dual_Int3", "UInt3", "Dual_UInt3", "Int4", "Dual_Int4", "UInt4",
+       "Dual_UInt4", "Int5", "Int6", "Int7", "Int8", "Int16", "Int32", "Int64",
+       "UInt5", "UInt6", "UInt7", "UInt8", "UInt16", "UInt32", "UInt64", "Any"};
+}
+
+namespace Aidge {
+inline auto format_as(DataType dt) {
+    return EnumStrings<DataType>::data[static_cast<int>(dt)];
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_DATA_DATATYPE_H_ */
diff --git a/include/aidge/data/Elts.hpp b/include/aidge/data/Elts.hpp
index 1a5a9e10ea131751ff5616eb2c310068d42ce991..b2df11968e8c688729c93747cdf953d288b26aef 100644
--- a/include/aidge/data/Elts.hpp
+++ b/include/aidge/data/Elts.hpp
@@ -14,6 +14,7 @@
 
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/logger/EnumString.hpp"
 
 namespace Aidge {
 /**
@@ -101,12 +102,12 @@ private:
 template<>
 struct fmt::formatter<Aidge::Elts_t> {
     template<typename ParseContext>
-    inline constexpr auto parse(ParseContext& ctx) {
+    inline constexpr auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {
         return ctx.begin();
     }
 
     template<typename FormatContext>
-    inline auto format(Aidge::Elts_t const& elt, FormatContext& ctx) {
+    inline auto format(const Aidge::Elts_t& elt, FormatContext& ctx) const {
         return fmt::format_to(ctx.out(), "{}:{}", elt.data, elt.token);
     }
 };
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 627a5a4784b4e6546cdfc96b65acbe2a39ee119c..7aa2ed52b95e11598a2975558212b00a85dac598 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -27,6 +27,8 @@
 
 #include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/data/DataType.hpp"
+#include "aidge/data/DataFormat.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -85,11 +87,11 @@ class Tensor : public Data,
              typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
     Tensor(T val)
         : Data(Type),
-          mDataType(NativeType<VT>::type),
+          mDataType(NativeType_v<VT>),
           mDataFormat(DataFormat::Default),
           mDims({}),
           mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<VT>})(0, std::vector<std::size_t>())),
           mSize(1)
     {
         *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
@@ -112,12 +114,12 @@ class Tensor : public Data,
      * @tparam T datatype
      */
     template <typename T>
-    constexpr Tensor(Vector<T> &&arr)
+    Tensor(Vector<T> &&arr)
         : Data(Type),
-          mDataType(NativeType<T>::type),
+          mDataType(NativeType_v<T>),
           mDims({arr.data.size()}),
           mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {arr.data.size()})),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {arr.data.size()})),
           mSize(arr.data.size())
     {
         mImpl->copyFromHost(&arr.data[0], arr.data.size());
@@ -131,11 +133,11 @@ class Tensor : public Data,
     template <typename T, std::size_t SIZE_0>
     constexpr Tensor(Array1D<T, SIZE_0> &&arr)
         : Data(Type),
-          mDataType(NativeType<T>::type),
+          mDataType(NativeType_v<T>),
           mDataFormat(DataFormat::Default),
           mDims({SIZE_0}),
           mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0})),
           mSize(SIZE_0)
     {
         mImpl->copyFromHost(&arr.data[0], SIZE_0);
@@ -150,11 +152,11 @@ class Tensor : public Data,
     template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
     constexpr Tensor(Array2D<T, SIZE_0, SIZE_1> &&arr)
         : Data(Type),
-          mDataType(NativeType<T>::type),
+          mDataType(NativeType_v<T>),
           mDataFormat(DataFormat::Default),
           mDims({SIZE_0, SIZE_1}),
           mStrides({SIZE_1, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1})),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1})),
           mSize(SIZE_0 * SIZE_1) {
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
     }
@@ -169,11 +171,11 @@ class Tensor : public Data,
     template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
     constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr)
         : Data(Type),
-          mDataType(NativeType<T>::type),
+          mDataType(NativeType_v<T>),
           mDataFormat(DataFormat::Default),
           mDims({SIZE_0, SIZE_1, SIZE_2}),
           mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2})),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2})),
           mSize(SIZE_0 * SIZE_1 * SIZE_2) {
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
     }
@@ -189,11 +191,11 @@ class Tensor : public Data,
     template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
     constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr)
         : Data(Type),
-          mDataType(NativeType<T>::type),
+          mDataType(NativeType_v<T>),
           mDataFormat(DataFormat::Default),
           mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
           mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
           mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) {
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
     }
@@ -204,13 +206,13 @@ class Tensor : public Data,
      * Tensor and the initial one.
      * @param other
      */
-    Tensor(const Tensor& other) = default;
+    Tensor(const Tensor& other);
 
     /**
      * @brief Move constructor.
      * @param other
      */
-    Tensor(Tensor&& other) = default;
+    Tensor(Tensor&& other);
 
     /**
      * @brief Copy dimensions, datatype and data from another Tensor.
@@ -219,8 +221,8 @@ class Tensor : public Data,
      * @param other other Tensor object.
      * @return Tensor&
      */
-    Tensor &operator=(const Tensor& other) = default;
-    Tensor &operator=(Tensor&& other) = default;
+    Tensor &operator=(const Tensor& other);
+    Tensor &operator=(Tensor&& other);
 
     template <typename T>
     constexpr Tensor &operator=(Vector<T> &&arr) {
@@ -393,6 +395,13 @@ public:
         return hasImpl() ? getImpl()->backend() : "";
     }
 
+
+    /**
+     * @brief Get the device index.
+     * @return DeviceIdx_t
+     */
+    DeviceIdx_t device() const noexcept { return mImpl ? mImpl->device().second : static_cast<DeviceIdx_t>(0); }
+
     /**
      * @brief Set the backend of the Tensor associated implementation. If there
      * was no previous implementation set, data will be allocated, but it will
@@ -608,7 +617,7 @@ public:
 
     template <typename expectedType>
     const expectedType& get(std::size_t idx) const {
-        AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "Tensor::get<>({}): wrong data type, expected {}, got {}", idx, mDataType, NativeType<expectedType>::type);
+        AIDGE_ASSERT(NativeType_v<expectedType> == mDataType, "Tensor::get<>({}): wrong data type, expected {}, got {}", idx, mDataType, NativeType_v<expectedType>);
         AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "Tensor::get<>({}): can only be used for backends providing a valid host pointer.", idx);
         AIDGE_ASSERT(idx < mSize, "Tensor::get<>({}): idx {} out of range, tensor size {}", idx, mSize);
         return *reinterpret_cast<expectedType *>(mImpl->hostPtr(mImplOffset + idx));
@@ -621,7 +630,7 @@ public:
 
     template <typename expectedType>
     void set(std::size_t idx, expectedType value){
-        AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
+        AIDGE_ASSERT(NativeType_v<expectedType> == mDataType, "wrong data type");
         AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "get() can only be used for backends providing a valid host pointer");
         AIDGE_ASSERT(idx < mSize, "idx out of range");
         expectedType* dataPtr = static_cast<expectedType*>(mImpl->hostPtr(mImplOffset + idx));
@@ -633,7 +642,7 @@ public:
         set<expectedType>(getStorageIdx(coordIdx), value);
     }
 
-    std::string toString() const override;
+    std::string toString(int precision = -1, std::size_t offset = 0) const override;
 
     inline void print() const { fmt::print("{}\n", toString()); }
 
@@ -972,14 +981,34 @@ private:
 
 template<>
 struct fmt::formatter<Aidge::Tensor> {
+    // Stores the precision override parsed from the format string (-1 means no override)
+    int precision_override = -1;
+
     template<typename ParseContext>
-    inline constexpr auto parse(ParseContext& ctx) {
-        return ctx.begin();
+    constexpr auto parse(ParseContext& ctx) {
+        auto it = ctx.begin();
+        if (it != ctx.end() && *it == '.') {
+            ++it;
+            if (it != ctx.end() && *it >= '0' && *it <= '9') {
+                precision_override = 0;
+                do {
+                    precision_override = precision_override * 10 + (*it - '0');
+                    ++it;
+                } while (it != ctx.end() && *it >= '0' && *it <= '9');
+            }
+        }
+
+        if (it != ctx.end() && *it == 'f') {
+            ++it;
+        }
+
+        return it;
     }
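+    // e.g. "{}" keeps precision_override at -1 (default); "{:.3}" or "{:.3f}" set it to 3.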
 
     template<typename FormatContext>
-    inline auto format(Aidge::Tensor const& t, FormatContext& ctx) const {
-        return fmt::format_to(ctx.out(), "{}", t.toString());
+    auto format(Aidge::Tensor const& t, FormatContext& ctx) const {
+        // Use precision_override if specified, otherwise toString will use default
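+        // The offset argument (7) matches the length of the "Tensor(" prefix.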
+        return fmt::format_to(ctx.out(), "Tensor({})", t.toString(precision_override, 7));
     }
 };
 
diff --git a/include/aidge/data/half.hpp b/include/aidge/data/half.hpp
index 1464ac1e092e43059048825bf98d1186314b902c..10b324336f099a6a3de9a291fe9f7f8bb0fa8c7d 100644
--- a/include/aidge/data/half.hpp
+++ b/include/aidge/data/half.hpp
@@ -177,7 +177,6 @@
 #endif
 
 #include <algorithm>
-#include <iostream>
 #include <limits>
 #include <climits>
 #include <cmath>
@@ -192,6 +191,8 @@
 	#include <functional>
 #endif
 
+#include <fmt/format.h>
+
 
 /// Default rounding mode.
 /// This specifies the rounding mode used for all conversions between [half](\ref half_float::half)s and `float`s as well as
@@ -271,7 +272,7 @@ namespace half_float
 	/// ~~~~
 	namespace literal
 	{
-		half operator"" _h(long double);
+		half operator""_h(long double);
 	}
 #endif
 
@@ -1082,7 +1083,7 @@ namespace half_float
 		friend struct std::hash<half>;
 	#endif
 	#if HALF_ENABLE_CPP11_USER_LITERALS
-		friend half literal::operator"" _h(long double);
+		friend half literal::operator""_h(long double);
 	#endif
 
 	public:
@@ -1195,7 +1196,7 @@ namespace half_float
 		/// to rather involved conversions.
 		/// \param value literal value
 		/// \return half with given value (if representable)
-		inline half operator"" _h(long double value) { return half(detail::binary, detail::float2half<half::round_style>(value)); }
+		inline half operator""_h(long double value) { return half(detail::binary, detail::float2half<half::round_style>(value)); }
 	}
 #endif
 
@@ -1228,23 +1229,23 @@ namespace half_float
 			/// \return Half-precision quotient stored in single-precision
 			static expr divides(float x, float y) { return expr(x/y); }
 
-			/// Output implementation.
-			/// \param out stream to write to
-			/// \param arg value to write
-			/// \return reference to stream
-			template<typename charT,typename traits> static std::basic_ostream<charT,traits>& write(std::basic_ostream<charT,traits> &out, float arg) { return out << arg; }
-
-			/// Input implementation.
-			/// \param in stream to read from
-			/// \param arg half to read into
-			/// \return reference to stream
-			template<typename charT,typename traits> static std::basic_istream<charT,traits>& read(std::basic_istream<charT,traits> &in, half &arg)
-			{
-				float f;
-				if(in >> f)
-					arg = f;
-				return in;
-			}
+			// /// Output implementation.
+			// /// \param out stream to write to
+			// /// \param arg value to write
+			// /// \return reference to stream
+			// template<typename charT,typename traits> static std::basic_ostream<charT,traits>& write(std::basic_ostream<charT,traits> &out, float arg) { return out << arg; }
+
+			// /// Input implementation.
+			// /// \param in stream to read from
+			// /// \param arg half to read into
+			// /// \return reference to stream
+			// template<typename charT,typename traits> static std::basic_istream<charT,traits>& read(std::basic_istream<charT,traits> &in, half &arg)
+			// {
+			// 	float f;
+			// 	if(in >> f)
+			// 		arg = f;
+			// 	return in;
+			// }
 
 			/// Modulo implementation.
 			/// \param x first operand
@@ -2193,20 +2194,6 @@ namespace half_float
 		/// \name Input and output
 		/// \{
 
-		/// Output operator.
-		/// \param out output stream to write into
-		/// \param arg half expression to write
-		/// \return reference to output stream
-		template<typename T,typename charT,typename traits> typename enable<std::basic_ostream<charT,traits>&,T>::type
-			operator<<(std::basic_ostream<charT,traits> &out, T arg) { return functions::write(out, arg); }
-
-		/// Input operator.
-		/// \param in input stream to read from
-		/// \param arg half to read into
-		/// \return reference to input stream
-		template<typename charT,typename traits> std::basic_istream<charT,traits>&
-			operator>>(std::basic_istream<charT,traits> &in, half &arg) { return functions::read(in, arg); }
-
 		/// \}
 		/// \name Basic mathematical operations
 		/// \{
@@ -2863,8 +2850,6 @@ namespace half_float
 	using detail::operator-;
 	using detail::operator*;
 	using detail::operator/;
-	using detail::operator<<;
-	using detail::operator>>;
 
 	using detail::abs;
 	using detail::fabs;
@@ -2940,6 +2925,48 @@ namespace half_float
 	using detail::isunordered;
 
 	using detail::half_cast;
+}  // namespace half_float
+
+template <>
+struct fmt::formatter<half_float::half> {
+    // Configuration options
+    int precision = 6;  // Default precision
+    char presentation = 'f';
+
+    // Parse format specifications like {:.2f}
+    constexpr auto parse(format_parse_context& ctx) {
+        auto it = ctx.begin(), end = ctx.end();
+        if (it != end && *it == '.') {
+            ++it;
+            precision = 0;
+            while (it != end && *it >= '0' && *it <= '9') {
+                precision = precision * 10 + (*it - '0');
+                ++it;
+            }
+        }
+        if (it != end && (*it == 'f' || *it == 'e' || *it == 'g')) {
+            presentation = *it++;
+        }
+        if (it != end && *it != '}')
+            throw format_error("invalid format");
+        return it;
+    }
+
+    template <typename FormatContext>
+    auto format(const half_float::half& h, FormatContext& ctx) const {
+        // fmt does not accept a replacement field for the presentation type,
+        // so dispatch on the parsed presentation character explicitly.
+        const float value = static_cast<float>(h);
+        switch (presentation) {
+            case 'e': return fmt::format_to(ctx.out(), "{:.{}e}", value, precision);
+            case 'g': return fmt::format_to(ctx.out(), "{:.{}g}", value, precision);
+            default:  return fmt::format_to(ctx.out(), "{:.{}f}", value, precision);
+        }
+    }
+};
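+// Illustrative usage: fmt::format("{:.2f}", half_float::half(1.5f)) is expected to yield "1.50".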
+
+namespace half_float {
+    template<typename T>
+    static half from_string(const T& str) {
+        float f = std::stof(str);
+        return half(f);
+    }
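+    // e.g. half h = from_string("1.5");  // parses with std::stof, then converts to half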
 }
 
 
diff --git a/include/aidge/data/half_fmt.hpp b/include/aidge/data/half_fmt.hpp
deleted file mode 100644
index 5e2072038c10fffd8db5f7fe93381f002f2119b1..0000000000000000000000000000000000000000
--- a/include/aidge/data/half_fmt.hpp
+++ /dev/null
@@ -1,18 +0,0 @@
-#include "aidge/data/half.hpp"
-#include <fmt/core.h>
-
-// Specialize fmt::formatter for half_float::half
-template <>
-struct fmt::formatter<half_float::half> : fmt::formatter<float> {
-    // Parses the format specifications and stores them in the base formatter
-    template <typename ParseContext>
-    constexpr auto parse(ParseContext& ctx) {
-        return fmt::formatter<float>::parse(ctx);
-    }
-
-    // Formats the half type by first converting it to float
-    template <typename FormatContext>
-    auto format(const half_float::half& value, FormatContext& ctx) const {
-        return fmt::formatter<float>::format(static_cast<float>(value), ctx);
-    }
-};
diff --git a/include/aidge/graph/Connector.hpp b/include/aidge/graph/Connector.hpp
index 599ca7d6defd729b6e6536dcc95f326d345701d9..ec59e1b38c10cbe53eb667b724991ea8e5427a6e 100644
--- a/include/aidge/graph/Connector.hpp
+++ b/include/aidge/graph/Connector.hpp
@@ -11,10 +11,10 @@
 #ifndef AIDGE_CORE_GRAPH_CONNECTOR_H_
 #define AIDGE_CORE_GRAPH_CONNECTOR_H_
 
-#include <cassert>
 #include <memory>
 #include <vector>
 
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -55,7 +55,7 @@ class Connector {
 
    public:
     Connector operator[](IOIndex_t index) {
-        assert((size() > 1) && "Cannot refer a slice of the output.");
+        AIDGE_ASSERT((size() > 1), "Cannot refer a slice of the output.");
         return Connector(mNode, index);
     }
 
@@ -68,7 +68,7 @@ class Connector {
 
    private:
     Connector(std::shared_ptr<Node> node, IOIndex_t index) : mNode(node) {
-        assert((index != gk_IODefaultIndex) && (index < size()) &&
+        AIDGE_ASSERT((index != gk_IODefaultIndex) && (index < size()),
                "Non-valid output index.\n");
         mOutputId = index;
     }
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 76f5dcdfc28e90a3f83435841af21048bcb2a9c0..8a78d8bfc0c2a39e82a483298659a88540e0db2e 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -247,13 +247,51 @@ public:
                  const std::vector<std::vector<DimSize_t>> dims = {});
 
     /**
-     * @brief Compute dimensions of input/output Tensors for each Operator of the
-     * GraphView object's Nodes, by calling Node::forwardDims().
-     * This function verifies the following conditions:
-     * - Every node will forwardDims() regardless of if dims were previously forwarded or not;
-     * - forwadDims() calls are made in node dependencies order, because if dims have changed
-     *   at any point in the graph, it must de propagated correctly to all succeeding nodes;
-     * - It handles cyclic dependencies correctly (currently only induced by the Memorize_Op).
+     * @brief Compute and propagate Tensor dimensions through the GraphView.
+     *
+     * This function computes dimensions of input/output Tensors for each of the
+     * Node's associated Operator in the GraphView by propagating dimensions from
+     * inputs through the entire network.
+     * It handles:
+     * - Dimension propagation in dependency order
+     * - Cyclic dependencies (specifically for Memorize_Op)
+     * - Input dimension validation and setting
+     * - Optional vs mandatory inputs
+     *
+     * @note Dimensions are propagated through every Node, regardless of whether
+     * dims were previously forwarded or not.
+     * @details The algorithm works in several phases:
+     * 1. Input Dimension Setup:
+     *    - Validates/sets provided input dimensions
+     *    - Checks compatibility with existing tensors
+     *
+     * 2. Connection Verification:
+     *    - Ensures all node connections are valid
+     *    - Verifies mandatory inputs are present
+     *
+     * 3. Dimension Propagation:
+     *    - Propagates dimensions through the graph in topological order
+     *    - Detects and handles circular dependencies induced by Memorize_Op
+     *
+     * Example:
+     * @code
+     * auto graph = std::make_shared<GraphView>();
+     * // ... build graph ...
+     *
+     * // Forward with default empty dimensions
+     * bool success = graph->forwardDims();
+     *
+     * // Forward with specific input dimensions
+     * std::vector<std::vector<DimSize_t>> inputDims = {
+     *     {1, 3, 224, 224},  // First input
+     *     {1, 64, 112, 112}  // Second input
+     * };
+     * success = graph->forwardDims(inputDims);
+     * @endcode
+     *
+     * @param dims Vector of dimension vectors for graph inputs. Empty by default.
+     * @param allowDataDependency Whether to allow data-dependent dimension computation. False by default.
+     * @return true if dimension propagation succeeded, false otherwise.
      */
     bool forwardDims(const std::vector<std::vector<DimSize_t>>& dims = {}, bool allowDataDependency = false);
 
@@ -424,21 +462,7 @@ public:
         addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor);
     }
 
-    inline void updateNodeName(const std::shared_ptr<Node>& node, const std::string& newName){
-        if (!newName.empty()) {
-            auto itNew = mNodeRegistry.insert(std::make_pair(newName, node));
-            if (!itNew.second) {
-                Log::notice("Replacing existing node name in graph node name registry: {}", newName);
-                (itNew.first)->second = node;
-            }
-        }
-
-        if (!node->name().empty()) {
-            const auto it = mNodeRegistry.find(node->name());
-            AIDGE_ASSERT(it != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted !", node->name(), name());
-            mNodeRegistry.erase(it);
-        }
-    }
+    void updateNodeName(const std::shared_ptr<Node>& node, const std::string& newName);
 
     /**
      * @brief Include a GraphView content in the current GraphView and link
diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp
index 3b0874580b112f4c219886a78677e6c9801b72b8..c8de86e90989a6313f47b1b06dea401d5ebd6600 100644
--- a/include/aidge/graph/Matching.hpp
+++ b/include/aidge/graph/Matching.hpp
@@ -12,9 +12,12 @@
 #ifndef AIDGE_CORE_GRAPH_MATCHING_H_
 #define AIDGE_CORE_GRAPH_MATCHING_H_
 
+#include <functional>
 #include <map>
 #include <memory>
 #include <set>
+#include <string>
+
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
@@ -43,10 +46,10 @@ public:
         bool singleOutput = true;
         IOIndex_t edgeLeftIdx = 0;
         IOIndex_t edgeRightIdx = 0;
-        NodePtr startNode;
+        std::shared_ptr<Node> startNode;
 
         // For check & debug purpose:
-        size_t depth = 0;
+        std::size_t depth = 0;
         std::set<std::string> anchors;
     };
 
@@ -56,8 +59,8 @@ public:
         // We use graph->rootNode() as the std::set key, which is guaranteed
         // to never change after insertion!
         mutable std::shared_ptr<GraphView> graph;
-        mutable std::map<std::string, std::map<std::string, NodePtr>> anchors;
-        mutable NodePtr startNode;
+        mutable std::map<std::string, std::map<std::string, std::shared_ptr<Node>>> anchors;
+        mutable std::shared_ptr<Node> startNode;
 
         MatchingResult();
 
@@ -66,11 +69,14 @@ public:
         ~MatchingResult() noexcept;
     };
 
+    SinglePassGraphMatching() = delete;
     SinglePassGraphMatching(std::shared_ptr<GraphView> graph) : mGraph(graph) {}
     SinglePassGraphMatching(const SinglePassGraphMatching& other);
-    SinglePassGraphMatching& operator=(const SinglePassGraphMatching& other);
+
     ~SinglePassGraphMatching() noexcept;
 
+    SinglePassGraphMatching& operator=(const SinglePassGraphMatching& other);
+
     /**
      * Matches a query by direct, single pass parse and match.
      * The returned matches are non-ordered and therefore stored in a std::set.
@@ -141,26 +147,26 @@ public:
 
     /**
      * @brief Same as match() but with a mandatory start node.
-     * 
+     *
      * @param startNode Mandatory start node for the query.
      * @param query The query to search.
      * @return MatchingResult MatchingResult struct, with empty graph if query
      * is not found, or the graph corresponding to the query.
      */
-    MatchingResult matchFrom(NodePtr startNode, const std::string& query);
+    MatchingResult matchFrom(std::shared_ptr<Node> startNode, const std::string& query);
 
     /**
      * Filter to keep only the longest disjoint (non-overlapping) matches.
     */
     std::set<MatchingResult> filterLonguestDisjoint(const std::set<MatchingResult>& matches);
 
-    inline void addNodeLambda(const std::string& name, std::function<bool(const NodePtr&)> func) {
+    inline void addNodeLambda(const std::string& name, std::function<bool(const std::shared_ptr<Node>&)> func) {
         mLambda[name] = func;
     }
 
 private:
     std::shared_ptr<GraphView> mGraph;
-    std::map<std::string, std::function<bool(const NodePtr&)>> mLambda;
+    std::map<std::string, std::function<bool(const std::shared_ptr<Node>&)>> mLambda;
 
     /**
      * QUANTIFIER = '?' | '*' | '+' | ('{' [0-9]+ '}')
@@ -205,13 +211,6 @@ private:
     */
     bool matchNode(Context& ctx, std::set<MatchingResult>& matches);
 
-    inline void removeWhiteSpace(std::string& str) {
-        str.erase(str.begin(),
-            std::find_if(str.begin(),
-                        str.end(),
-                        [](char c) { return !std::isspace(c); }));
-    }
-
     struct CompareMatchingResultSize {
         bool operator()(const MatchingResult& lhs, const MatchingResult& rhs) const {
             // Some matches size could be the same
@@ -225,10 +224,8 @@ private:
     };
 };
 
-inline bool operator<(const Aidge::SinglePassGraphMatching::MatchingResult& lhs, const Aidge::SinglePassGraphMatching::MatchingResult& rhs) {
-    // Matches rootNode are guaranteed to be different!
-    return lhs.graph->rootNode() < rhs.graph->rootNode();
-}
+bool operator<(const SinglePassGraphMatching::MatchingResult& lhs, const SinglePassGraphMatching::MatchingResult& rhs);
+
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_GRAPH_MATCHING_H_ */
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index a16bbd63ecf52e8c97d5032c5c90a5f69186f995..9ad7bfb99d5ef0dba26dcb4e1e8d5a4e81dfb3aa 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -12,13 +12,13 @@
 #ifndef AIDGE_CORE_GRAPH_NODE_H_
 #define AIDGE_CORE_GRAPH_NODE_H_
 
-#include <cassert>
+#include <deque>
+#include <functional>
 #include <memory>
 #include <set>
 #include <string>
 #include <vector>
-#include <deque>
-#include <utility>
+#include <utility>     // std::pair
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -27,7 +27,9 @@
 
 #include "aidge/graph/Connector.hpp"
 #include "aidge/operator/Operator.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 #ifdef PYBIND
 namespace py = pybind11;
@@ -131,7 +133,7 @@ public:
    * @brief Name of the Node.
    * @return std::string
    */
-  inline std::string name() const noexcept { return mAttrs->getAttr<std::string>("name"); }
+  std::string name() const noexcept { return mAttrs->getAttr<std::string>("name"); }
 
   /**
    * @brief Set the Node name.
@@ -175,7 +177,7 @@ public:
    * @brief Get the Operator object of the Node.
    * @return std::shared_ptr<Operator>
    */
-  inline std::shared_ptr<Operator> getOperator() const { return (*mOperator)(mAttrs); }
+  inline std::shared_ptr<Operator> getOperator() const { return mOperator; }
 //   inline std::shared_ptr<Operator> getOperator() const { return mOperator; }
 
   ///////////////////////////////////////////////////////
@@ -212,7 +214,7 @@ public:
    * @return std::pair<std::shared_ptr<Node>, IOIndex_t>
    */
   inline std::pair<NodePtr, IOIndex_t> input(const IOIndex_t inID) const {
-    assert((inID != gk_IODefaultIndex) && (inID < nbInputs()) && "Input index out of bound.");
+    AIDGE_ASSERT((inID != gk_IODefaultIndex) && (inID < nbInputs()), "Input index out of bound.");
     return std::pair<NodePtr, IOIndex_t>(mParents[inID], mIdOutParents[inID]);
   }
 
@@ -261,7 +263,7 @@ public:
    * @details [data, data, weight, bias] => 4
    * @return IOIndex_t
    */
-  inline IOIndex_t nbInputs() const noexcept { return getOperator()->nbInputs(); }
+  inline IOIndex_t nbInputs() const noexcept { return mOperator->nbInputs(); }
 
   /**
    * @brief Category of a specific input (Data or Param, optional or not).
@@ -269,7 +271,7 @@ public:
    * @return InputCategory
    */
   inline InputCategory inputCategory(IOIndex_t idx) const {
-    return getOperator()->inputCategory(idx);
+    return mOperator->inputCategory(idx);
   }
 
   /**
@@ -279,7 +281,7 @@ public:
    * @return true if the operator defines it as a back edge
    */
   inline bool parentIsBackEdge(IOIndex_t idx) const {
-    return getOperator()->isBackEdge(idx);
+    return mOperator->isBackEdge(idx);
   }
 
   /**
@@ -292,7 +294,7 @@ public:
    * @brief Getter for the number of Output Tensors of the Node.
    * @return IOIndex_t
    */
-  inline IOIndex_t nbOutputs() const noexcept { return getOperator()->nbOutputs(); }
+  inline IOIndex_t nbOutputs() const noexcept { return mOperator->nbOutputs(); }
 
   IOIndex_t nbValidOutputs() const;
 
@@ -304,15 +306,7 @@ public:
    * @brief Set of pointers to each GraphView containing this Node
    * @return std::set<GraphView>
    */
-  inline std::set<std::shared_ptr<GraphView>> views() const noexcept {
-    std::set<std::shared_ptr<GraphView>> res;
-    for (const auto &v : mViews) {
-      if (auto p = v.lock()) {
-        res.insert(p);
-      }
-    }
-    return res;
-  }
+  std::set<std::shared_ptr<GraphView>> views() const noexcept;
 
   /**
    * @brief Add a GraphView pointer to the list of GraphView containing
@@ -323,7 +317,7 @@ public:
     mViews.insert(std::weak_ptr<GraphView>(graphPtr));
   }
 
-  inline void removeView(const std::shared_ptr<GraphView> &graphPtr) {
+  void removeView(const std::shared_ptr<GraphView> &graphPtr) {
     mViews.erase(graphPtr);
   }
 
@@ -334,8 +328,11 @@ public:
    * Default to 0.
    * @param otherInId ID of the other Node input to connect to the current Node.
    * Default to the first available data input.
+   *
+   * @note The otherNode shared_ptr is passed by reference so that possible dangling
+   * connections can be detected in debug builds using reference counting.
    */
-  void addChild(NodePtr otherNode,
+  void addChild(const NodePtr& otherNode,
                 const IOIndex_t outId = IOIndex_t(0),
                 IOIndex_t otherInId = gk_IODefaultIndex);
 
@@ -368,7 +365,6 @@ public:
    * @return std::shared_ptr<Node>&
    */
   inline NodePtr &getParent(const IOIndex_t inId) {
-    assert(inId != gk_IODefaultIndex);
     return mParents.at(inId);
   }
 
@@ -524,8 +520,11 @@ private:
    * @param otherNode
    * @param outId
    * @param otherInId
+   *
+   * @note The otherNode shared_ptr is passed by reference so that possible dangling
+   * connections can be detected in debug builds using reference counting.
    */
-  void addChildOp(NodePtr otherNode, const IOIndex_t outId,
+  void addChildOp(const NodePtr& otherNode, const IOIndex_t outId,
                   const IOIndex_t otherInId);
 
   /**
diff --git a/include/aidge/graph/StaticAnalysis.hpp b/include/aidge/graph/StaticAnalysis.hpp
index d3fe681749eeb69e4816a38f302d510f1c81381a..cc5532224ebd00f17aefbf5c2620a3ef15cfaa2a 100644
--- a/include/aidge/graph/StaticAnalysis.hpp
+++ b/include/aidge/graph/StaticAnalysis.hpp
@@ -13,13 +13,12 @@
 #ifndef AIDGE_CORE_GRAPH_STATICANALYSIS_H_
 #define AIDGE_CORE_GRAPH_STATICANALYSIS_H_
 
+#include <cstddef>  // std::size_t
 #include <memory>
+#include <string>
 
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/graph/GraphView.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
-
+#include "aidge/graph/GraphView.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/Conv.hpp"
@@ -27,125 +26,131 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/ReduceSum.hpp"
 #include "aidge/operator/Softmax.hpp"
 #include "aidge/operator/MetaOperator.hpp"
+#include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
 /**
  * @brief Base class to compute statistics from an Operator.
- * 
+ *
  */
 class OperatorStats : public Registrable<OperatorStats, std::string, std::function<std::shared_ptr<OperatorStats>(const Operator&)>> {
 public:
+    OperatorStats() = delete;
     OperatorStats(const Operator& op);
-    const Operator& getOperator() const noexcept { return mOp; }
+
+    virtual ~OperatorStats();
+
+    inline const Operator& getOperator() const noexcept { return mOp; }
 
     /**
-     * @brief Get the worst case total number of arithmetic operations for the 
+     * @brief Get the worst case total number of arithmetic operations for the
      * operator data flow. This includes base arithmetic operations: +, -, / and *.
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
      * Example of Operator with only arithmetic operations: Conv.
-     * 
-     * @return size_t Number of arithmetic operations.
+     *
+     * @return std::size_t Number of arithmetic operations.
      */
-    virtual size_t getNbArithmOps() const { return 2 * getNbMACOps(); };
+    virtual std::size_t getNbArithmOps() const { return 2 * getNbMACOps(); };
 
     /**
-     * @brief Get the worst case total number of logic operations for the 
+     * @brief Get the worst case total number of logic operations for the
      * operator data flow. This includes operations like logical shift, or, and...
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
      * Example of Operator with only logic operations: BitShift.
-     * 
-     * @return size_t Number of logic operations.
+     *
+     * @return std::size_t Number of logic operations.
      */
-    virtual size_t getNbLogicOps() const { return 0; };
+    virtual std::size_t getNbLogicOps() const { return 0; };
 
     /**
-     * @brief Get the worst case total number of comparison operations for the 
+     * @brief Get the worst case total number of comparison operations for the
      * operator data flow. This includes operations like <, >, =...
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
      * Example of Operator with only comparison operations: MaxPool.
-     * 
-     * @return size_t Number of comparison operations.
+     *
+     * @return std::size_t Number of comparison operations.
      */
-    virtual size_t getNbCompOps() const { return 0; };
+    virtual std::size_t getNbCompOps() const { return 0; };
 
     /**
      * @brief Get the worst case total number of non-linear (NL) operations for the
      * operator data flow. This includes operations like calls to tanh(), erf(), cos()...
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
      * Example of Operator with only NL operations: Tanh.
      * Non-linear operations are necessarily of floating-point type.
-     * 
-     * @return size_t Number of non-linear (NL) operations.
+     *
+     * @return std::size_t Number of non-linear (NL) operations.
      */
-    virtual size_t getNbNLOps() const { return 0; };
+    virtual std::size_t getNbNLOps() const { return 0; };
 
     /**
      * @brief Get the worst case total number of operations for the operator data flow.
      * Total number of operations = arithmetic ops + logic ops + comp ops + NL ops.
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
-     * 
-     * @return size_t Number of operations.
+     *
+     * @return std::size_t Number of operations.
      */
-    size_t getNbOps() const { return getNbArithmOps() + getNbLogicOps() + getNbCompOps() + getNbNLOps(); };
+    std::size_t getNbOps() const { return getNbArithmOps() + getNbLogicOps() + getNbCompOps() + getNbNLOps(); };
 
     /**
      * @brief Get the worst case total number of INT arithmetic operations for
      * the operator data flow.
      * Such that getNbArithmOps() = getNbArithmIntOps() + getNbArithmFpOps()
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
-     * 
-     * @return size_t Number of INT arithmetic operations.
+     *
+     * @return std::size_t Number of INT arithmetic operations.
      */
-    virtual size_t getNbArithmIntOps() const;
+    virtual std::size_t getNbArithmIntOps() const;
 
     /**
-     * @brief Get the worst case total number of FP arithmetic operations for 
+     * @brief Get the worst case total number of FP arithmetic operations for
      * the operator data flow.
      * Such that getNbArithmOps() = getNbArithmIntOps() + getNbArithmFpOps()
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
-     * 
-     * @return size_t Number of FP arithmetic operations.
+     *
+     * @return std::size_t Number of FP arithmetic operations.
      */
-    size_t getNbArithmFpOps() const { return getNbArithmOps() - getNbArithmIntOps(); };
+    std::size_t getNbArithmFpOps() const { return getNbArithmOps() - getNbArithmIntOps(); };
 
     /**
      * @brief Get the worst case total number of MAC operations for the operator
-     * data flow. MAC operations are included in getNbArithmOps(), with 1 MAC 
+     * data flow. MAC operations are included in getNbArithmOps(), with 1 MAC
      * operation counted as 2 arithmetic operations. MAC can be INT of FP.
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
-     * 
-     * @return size_t Number of MAC operations.
+     *
+     * @return std::size_t Number of MAC operations.
      */
-    virtual size_t getNbMACOps() const { return 0; };
-    virtual ~OperatorStats() = default;
+    virtual std::size_t getNbMACOps() const { return 0; };
 
 protected:
     const Operator &mOp;
@@ -153,16 +158,20 @@ protected:
 
 /**
  * @brief Base class to compute statistics from a GraphView
- * 
+ *
  */
 class StaticAnalysis : public std::enable_shared_from_this<StaticAnalysis> {
 public:
+    StaticAnalysis() = delete;
     StaticAnalysis(std::shared_ptr<GraphView> graph);
-    const std::shared_ptr<GraphView> getGraph() const noexcept { return mGraph; }
+
+    virtual ~StaticAnalysis();
+
+    inline const std::shared_ptr<GraphView> getGraph() const noexcept { return mGraph; }
 
     /**
      * @brief Get the Operator Stats object corresponding to the given node.
-     * 
+     *
      * @param node Node
      * @return std::shared_ptr<OperatorStats> Node's Operator stats
      */
@@ -172,65 +181,67 @@ public:
      * @brief Get the number of parameters associated to a node. This includes
      * all Producers directly connected to the node's inputs as well as all
      * internal Producers (in case of a meta operator).
-     * 
+     *
      * Note: this function does not check if parameters are shared between
      * several nodes or not. This means that simply adding parameters count from
      * several nodes may lead to a higher number of parameters than in reality
      * if some of them are shared.
-     * 
+     *
      * @param node Node
-     * @return size_t Number of parameters
+     * @return std::size_t Number of parameters
      */
-    virtual size_t getNbParams(std::shared_ptr<Node> node) const;
+    virtual std::size_t getNbParams(std::shared_ptr<Node> node) const;
 
     /**
      * @brief Get the total parameters memory size, in bits, associated to a node.
-     * This includes all Producers directly connected to the node's inputs as 
+     * This includes all Producers directly connected to the node's inputs as
      * well as all internal Producers (in case of a meta operator).
-     * 
+     *
      * Note: this function does not check if parameters are shared between
      * several nodes or not. This means that simply adding parameters size from
      * several nodes may lead to a higher parameter size than in reality
      * if some of them are shared.
-     * 
+     *
      * @param node Node
-     * @return size_t Total parameters memory, in bits
+     * @return std::size_t Total parameters memory, in bits
      */
-    virtual size_t getParamsSize(std::shared_ptr<Node> node) const;
-
-    size_t getNbArithmOps() const { return accumulate(&OperatorStats::getNbArithmOps); }
-    size_t getNbLogicOps() const { return accumulate(&OperatorStats::getNbLogicOps); }
-    size_t getNbCompOps() const { return accumulate(&OperatorStats::getNbCompOps); }
-    size_t getNbNLOps() const { return accumulate(&OperatorStats::getNbNLOps); }
-    size_t getNbOps() const { return accumulate(&OperatorStats::getNbOps); }
-    size_t getNbArithmIntOps() const { return accumulate(&OperatorStats::getNbArithmIntOps); }
-    size_t getNbArithmFpOps() const { return accumulate(&OperatorStats::getNbArithmFpOps); }
-    size_t getNbMACOps() const { return accumulate(&OperatorStats::getNbMACOps); }
+    virtual std::size_t getParamsSize(std::shared_ptr<Node> node) const;
+
+    std::size_t getNbArithmOps() const;
+    std::size_t getNbLogicOps() const;
+    std::size_t getNbCompOps() const;
+    std::size_t getNbNLOps() const;
+    std::size_t getNbOps() const;
+    std::size_t getNbArithmIntOps() const;
+    std::size_t getNbArithmFpOps() const;
+    std::size_t getNbMACOps() const;
     virtual void summary(bool incProducers = false) const;
-    virtual ~StaticAnalysis() = default;
 
 protected:
     const std::shared_ptr<GraphView> mGraph;
 
-    size_t accumulate(size_t (OperatorStats::*func)() const) const;
+    std::size_t accumulate(std::size_t (OperatorStats::*func)() const) const;
 };
 
 ////////////////////////////////////////////////////////////////////////////////
 
 class MetaOpStats : public OperatorStats {
 public:
+    MetaOpStats() = delete;
     MetaOpStats(const Operator& op) : OperatorStats(op) {}
 
+    ~MetaOpStats();
+
     static std::unique_ptr<MetaOpStats> create(const Operator& op) {
         return std::make_unique<MetaOpStats>(op);
     }
 
-    size_t getNbArithmOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmOps(); }
-    size_t getNbLogicOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbLogicOps(); }
-    size_t getNbCompOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbCompOps(); }
-    size_t getNbNLOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbNLOps(); }
-    size_t getNbArithmIntOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmIntOps(); }
-    size_t getNbMACOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbMACOps(); }
+    std::size_t getNbArithmOps() const override;
+    std::size_t getNbLogicOps() const override;
+    std::size_t getNbCompOps() const override;
+    std::size_t getNbNLOps() const override;
+    std::size_t getNbArithmIntOps() const override;
+    std::size_t getNbMACOps() const override;
 };
 
 template <class OP>
@@ -242,7 +253,7 @@ public:
         return std::make_unique<ConvStats<OP>>(op);
     }
 
-    size_t getNbMACOps() const override {
+    std::size_t getNbMACOps() const override {
         const OP& op_ = dynamic_cast<const OP&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
 	    const std::size_t weightsSize = op_.getInput(1)->size();
@@ -250,7 +261,7 @@ public:
             = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
                               op_.getOutput(0)->dims().cend(),
                               1,
-                              std::multiplies<size_t>()); // NCHW...
+                              std::multiplies<std::size_t>()); // NCHW...
         const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
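+        // One MAC per weight element for every output spatial position, repeated over the batch.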
         return batchSize * (weightsSize * outputSize);
     }
@@ -271,19 +282,19 @@ public:
         return std::make_unique<MaxPoolingStats<OP>>(op);
     }
 
-    size_t getNbCompOps() const override {
+    std::size_t getNbCompOps() const override {
         const OP& op_ = dynamic_cast<const OP&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
 	    const std::size_t poolSize
             = std::accumulate(op_.kernelDims().cbegin(),
                               op_.kernelDims().cend(),
                               1,
-                              std::multiplies<size_t>());
+                              std::multiplies<std::size_t>());
         const std::size_t outputSize
             = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
                               op_.getOutput(0)->dims().cend(),
                               1,
-                              std::multiplies<size_t>()); // NCHW...
+                              std::multiplies<std::size_t>()); // NCHW...
         const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
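+        // Finding the max of poolSize values takes (poolSize - 1) comparisons per output.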
         return batchSize * ((poolSize - 1) * outputSize);
     }
@@ -302,19 +313,19 @@ public:
         return std::make_unique<AvgPoolingStats<OP>>(op);
     }
 
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const OP& op_ = dynamic_cast<const OP&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
 	    const std::size_t poolSize
             = std::accumulate(op_.kernelDims().cbegin(),
                               op_.kernelDims().cend(),
                               1,
-                              std::multiplies<size_t>());
+                              std::multiplies<std::size_t>());
         const std::size_t outputSize
             = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
                               op_.getOutput(0)->dims().cend(),
                               1,
-                              std::multiplies<size_t>()); // NCHW...
+                              std::multiplies<std::size_t>()); // NCHW...
         const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
         // (poolSize - 1) additions + 1 division for each output
         return batchSize * (poolSize * outputSize);
@@ -334,7 +345,7 @@ public:
         return std::make_unique<FCStats>(op);
     }
 
-    size_t getNbMACOps() const override {
+    std::size_t getNbMACOps() const override {
         const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
 	    const std::size_t weightsSize = op_.getInput(1)->size();
@@ -353,20 +364,20 @@ public:
         return std::make_unique<MatMulStats>(op);
     }
 
-    size_t getNbMACOps() const override {
+    std::size_t getNbMACOps() const override {
         const MatMul_Op& op_ = dynamic_cast<const MatMul_Op&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const size_t n = (op_.getInput(0)->dims().size() > 1)
+        const std::size_t n = (op_.getInput(0)->dims().size() > 1)
             ? op_.getInput(0)->dims().end()[-2] : 1;
-        const size_t k = op_.getInput(0)->dims().back();
-        const size_t m = (op_.getInput(1)->dims().size() > 1)
+        const std::size_t k = op_.getInput(0)->dims().back();
+        const std::size_t m = (op_.getInput(1)->dims().size() > 1)
             ? op_.getInput(1)->dims().back() : 1;
-        const size_t nb = (op_.getInput(0)->dims().size() > 2)
+        const std::size_t nb = (op_.getInput(0)->dims().size() > 2)
             ? std::accumulate(op_.getInput(0)->dims().cbegin(),
                               op_.getInput(0)->dims().cend() - 2,
                               1,
-                              std::multiplies<size_t>())
-            : 1; 
+                              std::multiplies<std::size_t>())
+            : 1;
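+        // (n x k) by (k x m) matrix product, repeated over the nb leading (batch) dimensions.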
 
         return nb * n * m * k;
     }
@@ -382,7 +393,7 @@ public:
         return std::make_unique<ReLUStats>(op);
     }
 
-    size_t getNbCompOps() const override {
+    std::size_t getNbCompOps() const override {
         const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
         return op_.getOutput(0)->size();
@@ -399,14 +410,14 @@ public:
         return std::make_unique<AbsStats>(op);
     }
 
-    size_t getNbCompOps() const override {
+    std::size_t getNbCompOps() const override {
         const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
         return op_.getOutput(0)->size();
     }
 
     // This is in the worst case (all values are negative)
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
         return op_.getOutput(0)->size();
@@ -423,12 +434,12 @@ public:
         return std::make_unique<ReduceMeanStats>(op);
     }
 
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const ReduceMean_Op& op_ = dynamic_cast<const ReduceMean_Op&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const size_t nbIn = op_.getInput(0)->size();
-        const size_t nbOut = op_.getOutput(0)->size();
-        const size_t nbReduce = nbIn / nbOut;
+        const std::size_t nbIn = op_.getInput(0)->size();
+        const std::size_t nbOut = op_.getOutput(0)->size();
+        const std::size_t nbReduce = nbIn / nbOut;
         // (nbReduce - 1) additions + 1 division for each output
         return nbOut * nbReduce;
     }
@@ -444,12 +455,12 @@ public:
         return std::make_unique<ReduceSumStats>(op);
     }
 
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const ReduceSum_Op& op_ = dynamic_cast<const ReduceSum_Op&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const size_t nbIn = op_.getInput(0)->size();
-        const size_t nbOut = op_.getOutput(0)->size();
-        const size_t nbReduce = nbIn / nbOut;
+        const std::size_t nbIn = op_.getInput(0)->size();
+        const std::size_t nbOut = op_.getOutput(0)->size();
+        const std::size_t nbReduce = nbIn / nbOut;
         // (nbReduce - 1) additions for each output
         return nbOut * (nbReduce - 1);
     }
@@ -465,22 +476,22 @@ public:
         return std::make_unique<SoftmaxStats>(op);
     }
 
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const Softmax_Op& op_ = dynamic_cast<const Softmax_Op&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
-        const size_t nbReduce = op_.getInput(0)->dims()[axis];
-        const size_t nbOut = op_.getOutput(0)->size();
+        const std::size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
+        const std::size_t nbReduce = op_.getInput(0)->dims()[axis];
+        const std::size_t nbOut = op_.getOutput(0)->size();
         // nbOut divisions + (nbReduce - 1) additions
         return nbOut + (nbReduce - 1);
     }
 
-    size_t getNbNLOps() const override {
+    std::size_t getNbNLOps() const override {
         const Softmax_Op& op_ = dynamic_cast<const Softmax_Op&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
-        const size_t nbReduce = op_.getInput(0)->dims()[axis];
-        const size_t nbOut = op_.getOutput(0)->size();
+        const std::size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
+        const std::size_t nbReduce = op_.getInput(0)->dims()[axis];
+        const std::size_t nbOut = op_.getOutput(0)->size();
         // nbOut exp + nbReduce exp
         return nbOut + nbReduce;
     }
@@ -515,7 +526,7 @@ public:
         return std::make_unique<ElemWiseOpStats>(op);
     }
 
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
         return op_.getOutput(0)->size();
@@ -535,7 +546,7 @@ public:
         return std::make_unique<ElemWiseLogicOpStats>(op);
     }
 
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
         return op_.getOutput(0)->size();
@@ -552,7 +563,7 @@ public:
         return std::make_unique<ElemWiseNLOpStats>(op);
     }
 
-    size_t getNbNLOps() const override {
+    std::size_t getNbNLOps() const override {
         const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
         return op_.getOutput(0)->size();
diff --git a/include/aidge/graphRegex/GraphFsmInterpreter.hpp b/include/aidge/graphRegex/GraphFsmInterpreter.hpp
deleted file mode 100644
index e2fd43b9e641e8cb4a695e3a3eecf5975610d564..0000000000000000000000000000000000000000
--- a/include/aidge/graphRegex/GraphFsmInterpreter.hpp
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef AIDGE_CORE_GRAPH_FSM_INTERPRETER_H_
-#define AIDGE_CORE_GRAPH_FSM_INTERPRETER_H_
-
-#include <string>
-#include <memory>
-
-#include "aidge/utilsParsing/AstNode.hpp"
-#include "aidge/graphRegex/GraphRegexTypes.hpp"
-#include "aidge/graphRegex/GraphParser.hpp"
-#include "aidge/graphRegex/matchFsm/FsmGraph.hpp"
-
-namespace Aidge {
-
-    class GraphFsmInterpreter
-    {
-    private:
-        /* data */
-        GraphParser mParser;
-        std::size_t mActGroupe;
-        std::map<std::string,std::shared_ptr<ConditionalInterpreter>> mNodesCondition;
-
-        const std::string mGraphMatchExpr;
-    public:
-        GraphFsmInterpreter(const std::string graphMatchExpr,std::vector<std::shared_ptr<ConditionalInterpreter>> & nodesCondition);
-        virtual ~GraphFsmInterpreter() =default;
-
-
-        std::shared_ptr<FsmGraph>  interpret(void);
-
-        
-
-        private:
-
-
-        std::shared_ptr<FsmGraph> visit(std::shared_ptr<AstNode<gRegexTokenTypes>> AstTree);
-
-        /**
-         * @defgroup graphFsmInterpreterF Functions for interpreting AST nodes
-         * @brief For each node type in the AST, define how build the FsmGraph
-         */
-
-
-        /**
-         * @ingroup graphFsmInterpreterF
-         * @brief leaf of fsm make the fsm for test one transition
-         */
-        std::shared_ptr<FsmGraph> keyF(std::shared_ptr<AstNode<gRegexTokenTypes>> AstNode);
-        /**
-         * @ingroup graphFsmInterpreterF
-         * @brief combine two fsm of two expression.
-         */
-        std::shared_ptr<FsmGraph> sepF(std::shared_ptr<FsmGraph> leftFsm,std::shared_ptr<FsmGraph> rigthFsm);
-        /**
-         * @ingroup graphFsmInterpreterF
-         * @brief combine two to make a new that match leftFsm next rigthFsm
-         */
-        std::shared_ptr<FsmGraph> nextF(std::shared_ptr<FsmGraph> leftFsm,std::shared_ptr<FsmGraph> rigthFsm);
-        /**
-         * @ingroup graphFsmInterpreterF
-         * @brief make the fsm match +
-         */
-        std::shared_ptr<FsmGraph> qomF(std::shared_ptr<FsmGraph> fsm);
-        /**
-         * @ingroup graphFsmInterpreterF
-         * @brief  make the fsm match *
-         */
-        std::shared_ptr<FsmGraph> qzmF(std::shared_ptr<FsmGraph> fsm);
-
-    };
-
-
-
-}
-
-
-#endif // AIDGE_CORE_GRAPH_FSM_INTERPRETER_H_
diff --git a/include/aidge/graphRegex/GraphLexer.hpp b/include/aidge/graphRegex/GraphLexer.hpp
deleted file mode 100644
index bd65dfc15d18533676b19e148a98185d3844acbd..0000000000000000000000000000000000000000
--- a/include/aidge/graphRegex/GraphLexer.hpp
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef AIDGE_CORE_GRAPH_LEXER_H_
-#define AIDGE_CORE_GRAPH_LEXER_H_
-
-#include <string>
-#include <memory>
-#include <regex>
-#include <stdexcept> //error
-#include <sstream>
-
-#include "aidge/utilsParsing/ParsingToken.hpp"
-#include "aidge/graphRegex/GraphRegexTypes.hpp"
-
-namespace Aidge {
-
-    class GraphLexer
-    {
-
-    public:
-    GraphLexer( const std::string gRegexExpressions );
-
-    /**
-     * @brief Get the next token on the gRegexExpressions
-     * @return ConditionalToken
-     */
-    std::shared_ptr<ParsingToken<gRegexTokenTypes>> getNextToken(void);
-    /**
-     * @brief Restart at the start of the gRegexExpressions
-     *
-     */
-    void rstPosition(void);
-
-    /**
-     * @brief Test if the string is completely read
-     * @return bool
-     */
-    bool isEnd(void);
-
-
-    const std::string getQuery();
-
-
-    /**
-     * @brief Get the representation of the class
-     * @return string
-     */
-    const std::string rep();
-
-    private:
-
-    /**
-     * @brief Constructs an error message to display the character not understood by the lexer
-     * @return error message
-     */
-    std::runtime_error badTokenError(const std::string& currentChars,std::size_t position);
-
-    /**
-     * @brief The expression of the test to be performed on the nodes
-     */
-    const std::string mRegularExpressions;
-    /**
-     * @brief The lexer's current position in mConditionalExpressions
-     */
-    std::size_t mPosition;
-
-    };
-}
-
-
-
-
-#endif //AIDGE_CORE_GRAPH_LEXER_H_
diff --git a/include/aidge/graphRegex/GraphParser.hpp b/include/aidge/graphRegex/GraphParser.hpp
deleted file mode 100644
index b165891ff1c8a55e565e3520813b707303ddfd1f..0000000000000000000000000000000000000000
--- a/include/aidge/graphRegex/GraphParser.hpp
+++ /dev/null
@@ -1,105 +0,0 @@
-#ifndef AIDGE_CORE_GRAPH_PARSER_H_
-#define AIDGE_CORE_GRAPH_PARSER_H_
-
-
-#include <memory> // for shared_ptr
-#include "aidge/graphRegex/GraphLexer.hpp"
-#include "aidge/utilsParsing/AstNode.hpp"
-#include "aidge/graphRegex/GraphRegexTypes.hpp"
-
-namespace Aidge{
-
-/**
- * @brief this class uses the lexer to create an AST according to a set of gramer rules
- */
-class GraphParser {
-
-public:
-    /**
-     * @brief AST graph creation function
-     * @param gRegexExpressions String representing the logical function to be performed
-     */
-    GraphParser(const std::string gRegexExpressions);
-
-    ~GraphParser() noexcept;
-
-    /**
-     * @brief AST graph creation function
-     * @return The AST tree
-     */
-    std::shared_ptr<AstNode<gRegexTokenTypes>> parse(void);
-
-
-    /**
-     * @brief get the query that be use in the parsing
-     * @return query
-     */
-    const std::string getQuery();
-
-
-private:
-    /**
-     * @brief restart at the start of the ConditionalExpressions for LEXER and restart  mCurrentToken
-     */
-    void rstParser(void);
-
-    //////////////////
-
-    /**
-     * @defgroup ParsingFunctions Function for creating AST
-     * @brief Functions for recursive construction of the AST representing grammar rules
-     */
-
-    /**
-     * @ingroup ParsingFunctions
-     * @brief Token reading and verification function
-     *
-     */
-    void ackToken(gRegexTokenTypes  tokenType);
-
-    //TODO TODO
-    /**
-     * @ingroup ParsingFunctions
-     * @brief Function of grammar rules for key :  KEY(QOM | QZM)? | CKEY
-     * @return AST node
-     */
-    std::shared_ptr<AstNode<gRegexTokenTypes>> constructAstExp(void);
-
-    /**
-    * @ingroup ParsingFunctions
-    * @brief Function of grammar rules for sequence :  seq :exp (NEXT seq)*
-    * @return AST node
-    */
-    std::shared_ptr<AstNode<gRegexTokenTypes>> constructAstSeq(void);
-
-    /**
-    * @ingroup ParsingFunctions
-    * @brief Function of grammar rules for domain : (seq NEXT domain)? | LPAREN domain RPAREN (QOM | QZM) (NEXT domain)?
-    * @return AST node
-    */
-    std::shared_ptr<AstNode<gRegexTokenTypes>> constructAstDomain(void);
-
-    /**
-    * @ingroup ParsingFunctions
-    * @brief Function of grammar rules for multiple exepresion : allExpr: domain (SEP allExpr)*
-    * @return AST node
-    */
-    std::shared_ptr<AstNode<gRegexTokenTypes>> constructAstAllExpr(void);
-
-
-    /**
-    * @brief The actual token in the parce
-    */
-    std::shared_ptr<ParsingToken<gRegexTokenTypes>> mCurrentToken;
-
-    /**
-    * @brief The lexem use
-    */
-    GraphLexer mLexer;
-
-};
-
-
-}
-
-#endif //AIDGE_CORE_GRAPH_PARSER_H_
diff --git a/include/aidge/graphRegex/GraphRegex.hpp b/include/aidge/graphRegex/GraphRegex.hpp
deleted file mode 100644
index f0f8e68e41a09cb54fb7528cb7f6ce065674af02..0000000000000000000000000000000000000000
--- a/include/aidge/graphRegex/GraphRegex.hpp
+++ /dev/null
@@ -1,106 +0,0 @@
-#ifndef AIDGE_CORE_GRAPH_REGEX_H_
-#define AIDGE_CORE_GRAPH_REGEX_H_
-
-#include <string>
-
-#include "aidge/graphRegex/matchFsm/MatchResult.hpp"
-#include "aidge/graphRegex/matchFsm/FsmGraph.hpp"
-#include "aidge/graphRegex/GraphFsmInterpreter.hpp"
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/graph/Node.hpp"
-
-namespace Aidge{
-
-/**
- * @brief type for recipes function use in query and resolve  
- */
-using RecipesFunctionType = std::function<void(std::shared_ptr<MatchSolution>)>;
-
-/**
- * @brief high level interface for graph matching, used to simplify match definition 
- */
-class GraphRegex{
-
-    private:
-
-    //std::vector<std::string> mQuery;
-    std::vector<std::shared_ptr<ConditionalInterpreter>> mAllTest;
-    std::map<std::string, std::function<bool(NodePtr)>> mAllLambda;
-    std::map<std::string,RecipesFunctionType> mQueryRecipe;
-
-    public:
-    GraphRegex(){};
-    virtual ~GraphRegex() = default;
-
-    /**
-     * @brief add a topology query to the match 
-     * @param query the topology query to find 
-    **/
-    //void addQuery(const std::string query);
-
-    /**
-     * @brief add a topology query to the match and a function for recipe 
-     * @param query the topology query to find 
-     * @param f the funct 
-    **/
-    void addQuery(const std::string query,RecipesFunctionType f = nullptr);
-   
-   
-   /**
-     * @brief get all the types of a graph and set it as type key in the query 
-     * @param Reference graph use to get all the node types 
-    **/
-    void setKeyFromGraph(std::shared_ptr<GraphView> ref);
-
-   /**
-     * @brief set a node test manually 
-     * @param key the ref of this test used in the query
-     * @param ConditionalExpressions expression to test the node 
-    **/
-    void setNodeKey(const std::string key, const std::string conditionalExpressions );
-
-    /**
-     * @brief set a specific lambda that can be used in setQueryKey
-     * @param key ref to the lambda to use in the 
-     * @param f expression to test the node ConditionalExpressions
-    **/
-    void setNodeKey(const std::string key,std::function<bool(NodePtr)> f);
-
-    /**
-     *  @brief brief match the queries in the graph 
-     *  @param ref the graph were the queries in search 
-     *  @return the result  
-    */
-    std::set<std::shared_ptr<MatchSolution>> match(std::shared_ptr<GraphView> ref);
-
-    /***
-     *  @brief  match the queries in the graph and applied the recipes function  
-     *  @param ref the graph were the queries in search 
-    */
-    void appliedRecipes(std::shared_ptr<GraphView> ref);
-
-    private:
-
-    void _generateCombinationsStart(const std::set<NodePtr>& elements, std::size_t n, std::size_t index, 
-    std::vector<NodePtr>& current, std::set<std::vector<NodePtr>>& combinations);
- 
-
-
-  void _findLargestCompatibleSet(
-      const std::vector<std::shared_ptr<MatchSolution>>& solutions,
-      std::set<std::shared_ptr<MatchSolution>>& currentSet,
-      std::set<std::shared_ptr<MatchSolution>>& largestSet,
-      size_t currentIndex
-  );
-
-  std::set<std::shared_ptr<MatchSolution>> _findLargestCompatibleSet(
-      const std::vector<std::shared_ptr<MatchSolution>>& solutions
-  );
-
-  void _majConditionalInterpreterLambda();
-
-};
-}
-
-
-#endif //AIDGE_CORE_GRAPH_REGEX_H_
\ No newline at end of file
diff --git a/include/aidge/graphRegex/GraphRegexTypes.hpp b/include/aidge/graphRegex/GraphRegexTypes.hpp
deleted file mode 100644
index 9e35f8f027eb363b71358922ffe0caa4a55fff1d..0000000000000000000000000000000000000000
--- a/include/aidge/graphRegex/GraphRegexTypes.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-
-#ifndef AIDGE_CORE_GREGEX_TOKEN_TYPES_H_
-#define AIDGE_CORE_GREGEX_TOKEN_TYPES_H_
-
-
-namespace Aidge {
-    /**
-     * @brief enum for all types of token use in the of the regex
-     * 7-5 type
-     * 4-0 id
-    */
-    enum class gRegexTokenTypes
-    {
-        STOP,
-        NEXT,   /**< -> */
-
-        QOM,    /**< + */
-        QZM,    /**< * */
-
-        KEY,    /**< [A-Za-z_0-9]+ */
-        CKEY,   /**< [A-Za-z_0-9]+#[0-9]* */
-
-        SEP,    /**< \( */
-        LPAREN, /**< \( */
-        RPAREN, /**< \) */
-    };
-
-}
-#endif //AIDGE_CORE_GREGEX_TOKEN_TYPES_H_
diff --git a/include/aidge/graphRegex/GraphStrInterpreter.hpp b/include/aidge/graphRegex/GraphStrInterpreter.hpp
deleted file mode 100644
index 38e89b3733e1a07062661fa520485f92fbd7f026..0000000000000000000000000000000000000000
--- a/include/aidge/graphRegex/GraphStrInterpreter.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef AIDGE_CORE_GRAPH_FSM_INTERPRETER_H_
-#define AIDGE_CORE_GRAPH_FSM_INTERPRETER_H_
-
-#include <sstream>
-#include <memory>
-#include <algorithm>
-
-#include "aidge/utilsParsing/AstNode.hpp"
-#include "aidge/graphRegex/GraphRegexTypes.hpp"
-#include "aidge/graphRegex/GraphParser.hpp"
-#include "aidge/graphRegex/matchFsm/FsmGraph.hpp"
-
-namespace Aidge {
-
-    class GraphStrInterpreter
-    {
-    private:
-        /* data */
-        GraphParser mParser;
-        std::string mToTest;
-    public:
-        GraphStrInterpreter(const std::string graphMatchExpr);
-        virtual ~GraphStrInterpreter() =default;
-
-
-        std::string interpret(void);
-
-        private:
-
-
-         std::string visit(std::shared_ptr<AstNode<gRegexTokenTypes>> AstTree);
-    };
-
-
-
-}
-
-
-#endif //AIDGE_CORE_GRAPH_FSM_INTERPRETER_H_
diff --git a/include/aidge/graphRegex/matchFsm/FsmEdge.hpp b/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
deleted file mode 100644
index 6397da49478c44ef6050c5bad77f12ba10efaca7..0000000000000000000000000000000000000000
--- a/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
+++ /dev/null
@@ -1,242 +0,0 @@
-#ifndef AIDGE_CORE_FSM_EDGE_H_
-#define AIDGE_CORE_FSM_EDGE_H_
-
-#include <memory>
-#include <set>
-#include <string>
-
-#include "aidge/nodeTester/ConditionalInterpreter.hpp"
-
-
-namespace Aidge{
-
-    class FsmNode;
-    class FsmRunTimeContext;
-
-    struct EdgeTestResult {
-        bool success;
-        std::set<NodePtr> node;
-    };
-
-    /**
-     * @brief virtual class use test the node  on the node to validate
-    */
-    class FsmEdge: public std::enable_shared_from_this<FsmEdge>
-    {
-    private:
-
-        /**
-         * @brief the relative position to this test relative to all the const key
-         * first is common id, second is the relative position
-        */
-        std::map<size_t,int> mRelativePos;
-        /**
-         * @brief the ptr on the source node
-        */
-        std::shared_ptr<FsmNode> mNodeSource;
-        /**
-         * @brief the ptr on the dest node
-        */
-        std::shared_ptr<FsmNode> mNodeDest;
-         /**
-         * @brief the weak ptr
-        */
-        std::weak_ptr<FsmEdge> weakPtr;
-
-    public:
-        FsmEdge(std::shared_ptr<FsmNode>& source,std::shared_ptr<FsmNode>& dest, const std::shared_ptr<ConditionalInterpreter>  toTest);
-
-        virtual  ~FsmEdge(){};
-
-        FsmEdge() : weakPtr(shared_from_this()) {}
-
-
-        /**
-        *  @brief test is the validation of the node, it must be defined for all types of edge
-        * it takes as argument an FSM traversal context and returns a set of next nodes
-        *  @return set of next node or nullptr if not next
-        */
-
-        virtual const EdgeTestResult test(const std::shared_ptr<FsmRunTimeContext> stmContext) =0;
-
-        /**
-        *  @brief test is the edge test a common node
-        *  @return true if is a common
-        */
-        virtual bool isCommon(void);
-        /**
-         * @brief get the Common idx of the common test in this edge (if is a common edge)
-         * @return idx of the common
-        */
-        virtual size_t getCommonIdx(void);
-        /**
-         * @brief get the relative position to the common node define in this edge
-         * @return map
-        */
-        const std::map<size_t,int>& getRelative(void);
-        /**
-         * @brief add new relative position
-        */
-        void updateRelative( const std::map<size_t,int>& relativePos );
-        /**
-         * @brief get source FsmNode
-         * @return FsmNode
-        */
-        std::shared_ptr<FsmNode> getSourceNode(void);
-        /**
-         * @brief set a new source to the edge
-         * @return FsmNode
-        */
-        void reSetSourceNode(const std::shared_ptr<FsmNode>& newSource);
-          /**
-         * @brief get dest FsmNode
-         * @return FsmNode
-        */
-        std::shared_ptr<FsmNode> getDestNode(void);
-        /**
-         * @brief set a new dest to the edge
-         * @return FsmNode
-        */
-        void reSetDestNode(const std::shared_ptr<FsmNode>& newDest);
-        /**
-         * @brief propagate the edge  mRelativePos to the others Edge and recalcul the relative position
-        */
-        void propagateRelativePos(void);
-
-         /**
-         * @brief test to make on the node to validate
-         * @see ConditionalInterpreter
-        */
-        const std::shared_ptr<ConditionalInterpreter>  mToTest;
-
-         /**
-         * @brief update week ptr for the node, TODO best
-        */
-        void updateWeak(void);
-    };
-
-    /**
-     * @brief class specialization for not common node (node that must be match one Unique) transition
-    */
-    class FsmEdgeUnique:public FsmEdge
-    {
-
-        public:
-        FsmEdgeUnique(std::shared_ptr<FsmNode>& source,std::shared_ptr<FsmNode>& dest, const std::shared_ptr<ConditionalInterpreter>  toTest);
-        const EdgeTestResult test(const std::shared_ptr<FsmRunTimeContext> stmContext) override;
-    };
-
-    /**
-     * @brief class specialization for  common node transition
-     * @see FsmEdge
-    */
-    class FsmEdgeCommon:public FsmEdge
-    {
-
-        private:
-        /**
-         * @brief the map that define the relation between the commonKey find by the lexer and a unique id use to refer to the common node
-        */
-        static std::map<std::string,int> mCommonIdxMap;
-        /**
-         * @brief the common id test in this transition
-        */
-        int mCommonIdx;
-        public:
-
-        /**
-         * @brief constructor  common node ,
-         * @details during construction,
-         * the node key found by the lexer is converted to a unique id and the relative positions are updated.
-        */
-        FsmEdgeCommon(std::shared_ptr<FsmNode>& source,std::shared_ptr<FsmNode>& dest, const std::shared_ptr<ConditionalInterpreter>  toTest, const std::string commonKey);
-       // ~FsmEdgeCommon() override {}
-        const EdgeTestResult test(const std::shared_ptr<FsmRunTimeContext> stmContext) override;
-        bool isCommon(void) override;
-
-    };
-
-
-
-    /**
-     * @brief class specialization for ref transition
-     * @see FsmEdge
-    */
-    class FsmEdgeRef:public FsmEdge
-    {
-        private:
-        /**
-         * @brief the id of one common node that we use as an anchor
-        */
-        const int mRefCommonIdx;
-        /**
-         * @brief the delta in terme of child or parent refer to the anchor
-        */
-        const int mdeltaCommonIdx;
-        public:
-        FsmEdgeRef(std::shared_ptr<FsmNode>& source,std::shared_ptr<FsmNode>& dest, const size_t refCommonIdx,const int deltaCommonIdx);
-        //~FsmEdgeRef() override {}
-        const EdgeTestResult test(const std::shared_ptr<FsmRunTimeContext> stmContext) override;
-
-    };
-
-    /**
-     * @brief class specialization for ref empty transition
-     * @see FsmEdge
-    */
-    class FsmEdgeEmpty:public FsmEdge
-    {
-
-        public:
-        FsmEdgeEmpty(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNode> dest);
-        //~FsmEdgeEmpty() override {}
-        const EdgeTestResult test(const std::shared_ptr<FsmRunTimeContext> stmContext) override;
-
-    };
-
-
-    /**
-     * @brief class specialization for ref empty transition
-     * @see FsmEdge
-    */
-    class FsmEdgeNone:public FsmEdge
-    {
-
-        public:
-        FsmEdgeNone(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNode> dest);
-        const EdgeTestResult test(const std::shared_ptr<FsmRunTimeContext> /*stmContext*/) override;
-
-    };
-
-
-
-////////////////////////
-// FACTORY
-////////////////////////
-
-enum class FsmEdgeTypes {
-    EMPTY = 0,
-    REF,
-    COMMON,
-    UNIQUE
-};
-
-
-class FsmEdgeFactory {
-    public:
-    /**
-    * @brief factory for making edge and read the info in the lexeme of the token
-    * @param source source node of the edge
-    * @param dest Dest node of the edge
-    * @param type type of the edge
-    * @param lexeme the additional information to build the edge
-    * @return s prt of the edge
-    */
-    static std::shared_ptr<FsmEdge> make(std::shared_ptr<FsmNode> source, std::shared_ptr<FsmNode> dest,
-    FsmEdgeTypes type,std::map<std::string, std::shared_ptr<ConditionalInterpreter>> allTest,
-    const std::string lexeme = "");
-   };
-
-}
-
-#endif //AIDGE_CORE_FSM_EDGE_H_
diff --git a/include/aidge/graphRegex/matchFsm/FsmGraph.hpp b/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
deleted file mode 100644
index e7402b3f0973e4b9e7053b4d59c9ff63ca6dd496..0000000000000000000000000000000000000000
--- a/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
+++ /dev/null
@@ -1,109 +0,0 @@
-
-#ifndef AIDGE_CORE_FSM_GRAPH_H_
-#define AIDGE_CORE_FSM_GRAPH_H_
-
-#include <set>
-#include <vector>
-#include <memory>
-#include <stdexcept> //error
-
-#include "aidge/graphRegex/matchFsm/FsmNode.hpp"
-#include "aidge/graphRegex/matchFsm/FsmEdge.hpp"
-#include "aidge/graphRegex/matchFsm/MatchResult.hpp"
-namespace Aidge{
-
-
-
-class FsmGraph
-{
-private:
-    /**
-     * @brief all node Origin
-    */
-    std::set<std::size_t> mAllOrigin;
-    std::set<std::shared_ptr<FsmEdge>> mEdges;
-
-
-    const std::string mQuery;
-
-public:
-
-    FsmGraph(const std::string query);
-    virtual ~FsmGraph() = default;
-
-    std::vector<std::shared_ptr<MatchSolution>> test(const std::vector<NodePtr>& StartNodes);
-
-
-
-    const std::set<std::shared_ptr<FsmEdge>>& getEdge(void);
-    /**
-     * @brief add edge in the graph, as FsmEdge know the source and dest FsmNode these nodes are also add to the graph
-    */
-    void addEdge(std::shared_ptr<FsmEdge>& edge);
-
-    /**
-     * @brief get the list of the starting states
-     * @details we need to use a vector because the order of the nodes is important for start node initialization \ref test()
-    */
-    const std::vector<std::shared_ptr<FsmNode>> getStartNodes(void);
-
-    /**
-     * @brief get the set of the valid states
-     * @return set of valid state
-    */
-    const std::set<std::shared_ptr<FsmNode>> getValidNodes(void);
-
-    /**
-     * @brief get the set of all the node in the graph
-     * @return set of all nodes
-    */
-    const std::set<std::shared_ptr<FsmNode>> getNodes(void);
-
-    /**
-     * @brief set a group idx for all the nodes in the graph
-    */
-    void setGroupe(std::size_t groupeIdx);
-
-    /**
-     * @brief make the union between this graph and an input graph
-     * @param fsmGraph graph to union
-    */
-    void unionG(const std::shared_ptr<FsmGraph> fsmGraph);
-
-
-    /**
-     * @brief make the union between this graph and an input graph and merge the valid state to the start state
-     * @param fsmGraph graph to merge
-    */
-    void mergeOneStartOneValid(const std::shared_ptr< FsmGraph> fsmGraph);
-    /**
-     * @brief get the number of sub FSM
-     * @return number of sub Fsm
-    */
-    std::size_t getNbSubFsm(void);
-
-    /**
-     * @brief get the number of start state
-     * @return number of start state
-    */
-    std::size_t getNbStart(void);
-
-    /**
-     * @brief increment the origin of all nodes in the graph
-     * @param incr  value
-    */
-    void incOriginAllNodeBy(std::size_t incr);
-
-    private:
-
-    /**
-     * @brief merge tow node of the graph
-     * @param node
-    */
-    void _mergeNode(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNode> dest);
-
-};
-
-
-}
-#endif //AIDGE_CORE_FSM_GRAPH_H_
diff --git a/include/aidge/graphRegex/matchFsm/FsmNode.hpp b/include/aidge/graphRegex/matchFsm/FsmNode.hpp
deleted file mode 100644
index f4636e0e025d26fa2afae88b6ffca28a511e9509..0000000000000000000000000000000000000000
--- a/include/aidge/graphRegex/matchFsm/FsmNode.hpp
+++ /dev/null
@@ -1,99 +0,0 @@
-#ifndef AIDGE_CORE_FSM_NODE_H_
-#define AIDGE_CORE_FSM_NODE_H_
-
-#include <set>
-#include <vector>
-#include <memory>
-
-//#include "graphRegex/matchFsm/FsmEdge.hpp"
-//#include "graphRegex/matchFsm/FsmRunTimeContext.hpp"
-
-namespace Aidge{
-    // Forward declaration of the class defined in graphRegex/matchFsm/FsmEdge.hpp
-    class FsmEdge;
-    struct EdgeTestResult;
-    class FsmRunTimeContext;
-
-
-    //------------------------------------------------------------------------------
-
-    // MAY BE IN UTILE
-    template <typename T>
-    struct lex_compare {
-        bool operator() (const std::weak_ptr<T> &lhs, const std::weak_ptr<T> &rhs)const {
-            auto lptr = lhs.lock(), rptr = rhs.lock();
-            if (!rptr) return false; // nothing after expired pointer
-            if (!lptr) return true;
-            return lptr < rptr;
-        }
-    };
-
-    /**
-     * @brief is a node in the FSM graph, it's a state in the FSM
-     * @details a state can be and/or :
-     * - a valid state, the match is valid if it stop on this edge
-     * - a start state , the match start on this state
-     * The state is also define by this Origin (is the unique id of it's expretion )
-     * and it's group (for inner expression TODO)
-    */
-    class FsmNode : public std::enable_shared_from_this<FsmNode>
-    {
-    private:
-        /**
-         * @brief the edge of the node
-         * @details the edge have a shared ref to the node so we use weak ref
-        */
-        std::set<std::weak_ptr<FsmEdge>,lex_compare<FsmEdge>> mEdges;
-        /**
-         * @brief the parent of the node
-        */
-        std::set<std::weak_ptr<FsmNode>,lex_compare<FsmNode>> mParents;
-
-        std::size_t mOriginFsm = 0;
-        std::size_t mGroupeFsm = 0;
-
-        bool mIsAValid;
-        bool mIsAStart;
-
-    public:
-        FsmNode(bool isAValid,bool isAStart );
-        virtual ~FsmNode() = default;
-        /**
-         * @brief use to MAG the actual context , and return all the possible new context
-         * @details one input context can generate a multitude of contexts because a graph node
-         *  can have more than one child, and each traversal possibility is a new context.
-         * @param actContext the actual context
-         * @return A vector of all the new context
-        */
-        const std::vector<std::shared_ptr<FsmRunTimeContext>> test( std::shared_ptr<FsmRunTimeContext>);
-
-
-        std::size_t getOrigin(void);
-        void incOrigin(std::size_t inc);
-
-
-        void rmEdge(std::shared_ptr<FsmEdge>);
-        void addEdge(std::shared_ptr<FsmEdge>);
-
-        //const std::set<std::shared_ptr<FsmNode>> getChildNodes(void);
-
-        const std::set<std::weak_ptr<FsmNode>,lex_compare<FsmNode>>& getParentNodes(void);
-        const std::set<std::weak_ptr<FsmEdge>,lex_compare<FsmEdge>>& getEdges(void);
-
-        void setGroupe(std::size_t groupeIdx);
-
-        bool isValid(void);
-        bool isStart(void);
-        void invalid(void);
-        void valid(void);
-        void unStart(void);
-        void start(void);
-
-
-
-        void addParent(std::shared_ptr<FsmNode>);
-        void rmParent(std::shared_ptr<FsmNode>);
-    };
-
-}
-#endif //AIDGE_CORE_FSM_NODE_H_
diff --git a/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp b/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
deleted file mode 100644
index 0b44172be0c3b671043fda884efadb84ba46e215..0000000000000000000000000000000000000000
--- a/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
+++ /dev/null
@@ -1,171 +0,0 @@
-#ifndef AIDGE_CORE_FSM_RUN_TIME_CONTEXT_H_
-#define AIDGE_CORE_FSM_RUN_TIME_CONTEXT_H_
-
-#include <memory>
-#include <vector>
-#include <set>
-#include <algorithm>
-
-#include "aidge/nodeTester/ConditionalInterpreter.hpp"
-#include "aidge/graph/Node.hpp"
-
-
-namespace Aidge{
-
-class FsmNode;
-
-/**
- * @brief a class used to save the execution context of state machines, that is the actual state in the FSM, the actual node in the graph
- * all node that have been Validate,Rejecte or Considered common
-*/
-class FsmRunTimeContext
-{
-private:
-    /**
-     * @brief the list of node rejected for all the context
-    */
-    static std::vector<std::set<NodePtr>> mRejectedNodes;
-    /**
-     * @brief the actual state of this Context (where it's in the FSM graph)
-    */
-    std::shared_ptr<FsmNode> mActState;
-    /**
-     * @brief the actual node of this Context (where it's in the graph)
-    */
-    NodePtr mActOpNode;
-    /**
-     * @brief the map of the node consider as common and the common ID
-     * @details we need to store what node it's consider as common because of the end
-     * resolution of the matching, all node consider as common need to be the same in all context
-    */
-    std::map<NodePtr,std::size_t> mCommonNodes;
-    /**
-     * @brief the map of the node that as been valid in this context , and the test that valid the node
-    */
-    std::map<std::shared_ptr<ConditionalInterpreter>,std::set<NodePtr>> mValidNodes;
-    /**
-     * @brief the index in the rejected node of this context
-    */
-    std::size_t mLocalIdxRejeced;
-public:
-    /**
-     * @brief constructor
-     * @param actState the actual state in the FSM
-     * @param actOpNode the actual node in the graph
-     * @param idxRejeced the idx in the global regected node vector init max() as sentinel value of undefined
-    */
-    FsmRunTimeContext(std::shared_ptr<FsmNode> actState ,NodePtr actOpNode ,std::size_t idxRejeced =std::numeric_limits<std::size_t>::max() );
-    FsmRunTimeContext(std::shared_ptr<FsmRunTimeContext> fsmRunTime);
-    FsmRunTimeContext(std::shared_ptr<FsmRunTimeContext> fsmRunTime,std::shared_ptr<FsmNode> actState ,NodePtr actOpNode );
-
-    virtual ~FsmRunTimeContext()=default;
-
-    /**
-     * @defgroup FsmRunTimeContextRejected Function for managing rejected nodes
-     */
-
-    /**
-     * @ingroup FsmRunTimeContextRejected
-     * @brief Add a node as rejected in this context
-     */
-    void addRejectedNode(NodePtr node);
-
-    /**
-     * @ingroup FsmRunTimeContextRejected
-     * @brief get the rejected nodes of this context
-     */
-    inline std::set<NodePtr> getRejectedNodes(void) const {
-        return mRejectedNodes[mLocalIdxRejeced];
-    }
-
-
-    /**
-     * @defgroup FsmRunTimeContextTest Function for test the context
-     */
-
-    /**
-     * @ingroup FsmRunTimeContextTest
-     * @brief test if the actual state is valid
-     * @return bool
-     */
-    bool isOnValidState(void);
-    /**
-     * @ingroup FsmRunTimeContextTest
-     * @brief test if the node is considered as common in this context
-     * @param node node to test
-     * @return bool
-     */
-    bool isCommonDefined(NodePtr node);
-    /**
-     * @ingroup FsmRunTimeContextTest
-     * @brief test if has already validated in this context
-     * @param node node to test
-     * @return bool
-     */
-    bool isAlreadyValid(NodePtr node);
-    /**
-     * @ingroup FsmRunTimeContextTest
-     * @brief test if this context is compatible with an others
-     * @details to say that two contexts are compatible is to check :
-     *  that the contexts do not validate the same nodes (other than the common ones)
-     *  and that the common ones have the same idx
-     * @param fsmContext the others context
-     * @return bool
-     */
-    bool areCompatible(std::shared_ptr<FsmRunTimeContext> fsmContext);
-    /**
-     * @ingroup FsmRunTimeContextTest
-     * @brief test if this context is strictly equal with an others
-     * @param fsmContext the others context
-     * @return bool
-     */
-    bool areEqual(std::shared_ptr<FsmRunTimeContext> fsmContext);
-
-    /**
-     * @defgroup FsmRunTimeContextSet Function set context
-    */
-
-
-    void setCommon(NodePtr node,std::size_t commonIdx);
-
-
-    void setValid(NodePtr node,std::shared_ptr<ConditionalInterpreter> tag);
-
-    /**
-     * @defgroup FsmRunTimeContextGet Function get context
-     */
-
-
-    /**
-     * @ingroup FsmRunTimeContextGet
-     * @brief get the sub idx state
-     * @return bool
-     */
-    std::size_t getSubStmId(void);
-
-    NodePtr getCommonNodeFromIdx(std::size_t commonIdx);
-    std::size_t getCommonNodeIdx(NodePtr node);
-    std::set<NodePtr> getCommonNodes(void);
-
-    std::map<NodePtr,std::size_t> getCommon(void);
-    std::set<NodePtr> getValidNodes(void);
-
-    std::set<NodePtr> getValidNodesNoCommon(void);
-    std::map<std::shared_ptr<ConditionalInterpreter>,std::set<NodePtr>>& getValid(void);
-
-
-    NodePtr getActNode(void);
-    std::shared_ptr<FsmNode> getActState(void);
-
-
-    /**
-     * @defgroup FsmRunTimeContextMem
-     */
-
-    void rst(void);
-
-
-};
-} // namespace Aidge
-
-#endif // AIDGE_CORE_FSM_RUN_TIME_CONTEXT_H_
diff --git a/include/aidge/graphRegex/matchFsm/MatchResult.hpp b/include/aidge/graphRegex/matchFsm/MatchResult.hpp
deleted file mode 100644
index 7954e932a20940946f444cf7277e2bf359f7f15a..0000000000000000000000000000000000000000
--- a/include/aidge/graphRegex/matchFsm/MatchResult.hpp
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef AIDGE_CORE_MATCH_RESULT_H_
-#define AIDGE_CORE_MATCH_RESULT_H_
-
-#include <cstddef>
-#include <map>
-#include <memory>
-#include <string>
-#include <set>
-#include <vector>
-
-#include "aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp"
-#include "aidge/graph/Node.hpp"
-
-namespace Aidge{
-
-/**
- * @brief contained the result of one match and the associate key , the query and the start node
-*/
-
-class MatchSolution{
-private:
-    std::map<std::string, std::set<NodePtr>> mSolution;
-    const std::string mQueryFrom;
-    const std::vector<NodePtr> mStartNode;
-
-public:
-    MatchSolution() = delete;
-    MatchSolution(std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence,const std::string query,const std::vector<NodePtr> startNode);
-
-    inline const std::set<NodePtr>& at(const std::string& key) {
-        return mSolution[key];
-    }
-    const std::set<NodePtr> getAll();
-    bool areCompatible(std::shared_ptr<MatchSolution> solution);
-
-    inline const std::string& getQuery() const noexcept { return mQueryFrom; }
-    inline const std::vector<NodePtr>& getStartNode() const noexcept { return mStartNode; }
-};
-
-
-/**
- * @brief class that old the result of a matching
- * give access to all node and there tag in the expression
-*/
-class MatchResult
-{
-private:
-    /* data */
-    std::vector<std::shared_ptr<FsmRunTimeContext>> mAllValid;
-
-    /*
-    the Run time of each sub FSM , to have a valid match we need a set of one run time per FSM compatible
-    the id must be continue
-    */
-    std::vector<std::vector<std::shared_ptr<FsmRunTimeContext>>> mIdToRunTime;
-
-    std::vector<std::shared_ptr<MatchSolution>> mSolve;
-
-    std::size_t mNbSubStm;
-
-
-
-public:
-    MatchResult() = delete;
-    MatchResult(std::vector<std::shared_ptr<FsmRunTimeContext>> allValid,
-                std::size_t nbSubStm,
-                const std::string& query,const std::vector<NodePtr>& startNodes);
-
-    /**
-     * @brief get the set of the node match for une expression
-     * @return the set of node of the graph that corresponding to an expression
-    */
-    inline std::shared_ptr<MatchSolution> getBiggerSolution(void) const noexcept {
-        return mSolve.empty() ? nullptr : mSolve[0];
-    }
-
-    inline std::vector<std::shared_ptr<MatchSolution>> getSolutions(void) const noexcept {
-        return mSolve;
-    }
-
-private:
-
-/**
- * @brief recurrent function use to init mSolve in the constructor
- *
- **/
-void _generateCombination( std::size_t idxSubStm, std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence,const std::string& query,const std::vector<NodePtr>& startNodes);
-
-};
-
-
-}
-
-
-#endif //AIDGE_CORE_MATCH_RESULT_H_
diff --git a/include/aidge/nodeTester/ConditionalData.hpp b/include/aidge/nodeTester/ConditionalData.hpp
deleted file mode 100644
index c6c521bd9c3e1a0333bb2a6c38545bb2bf6f3fe6..0000000000000000000000000000000000000000
--- a/include/aidge/nodeTester/ConditionalData.hpp
+++ /dev/null
@@ -1,98 +0,0 @@
-
-#ifndef AIDGE_CORE_CONDITIONAL_DATA_H_
-#define AIDGE_CORE_CONDITIONAL_DATA_H_
-
-#include <vector>
-#include <string>
-#include <stdexcept> //error
-#include <memory>
-#include <map>
-namespace Aidge{
-
-
-
-/////////////////////////
-// The data type in AST Interpretation
-////////////////////////
-
-class BaseConditionalValue {
-public:
-    virtual ~BaseConditionalValue() {}
-};
-
-template <typename T>
-class ConditionalValue : public BaseConditionalValue {
-public:
-    ConditionalValue(const T& data) : value(data) {}
-    T value;
-};
-
-
-struct ConditionalData {
-    /**
-     * @brief generic type to propagate all the different values in the AST interpretation
-    */
-    //void* value;
-    std::unique_ptr<BaseConditionalValue> value;
-    const std::type_info* type =nullptr;
-
-    /////////////////////////////////
-    //
-    ////////////////////////////////
-    /**
-     * @brief set a value
-    */
-    template <typename T>
-    void setValue(const T& newValue) {
-        //make sure that the old value is free
-        deleteValue();
-        value = std::make_unique<ConditionalValue<T>>(newValue);
-        type = &typeid(T);
-    }
-
-    /**
-     * @brief get the actual value
-     * @details recaste the value to the templaited type and checks that the conversion type is compatible with type
-     * @tparam the type of the return value
-     * @return the value
-    */
-    template <typename T>
-    T getValue() const {
-        if (type && *type == typeid(T)) {
-            //const Value<T>* typedValue = dynamic_cast<const Value<T>*>(static_cast<const BaseValue*>(value));
-            const ConditionalValue<T>* typedValue = dynamic_cast<const ConditionalValue<T>*>(value.get());
-            if (typedValue) {
-                return typedValue->value;
-            }
-        }
-        throw std::runtime_error(std::string("DATA ERROR ") + type->name() + " != " + typeid(T).name());
-    }
-    ///////////////////////////////////
-    //
-    ///////////////////////////////////
-    std::string getType() const {
-        return  type ? type->name() : "nullptr";
-    }
-
-
-    template <typename T>
-    bool isTypeEqualTo() const {
-        return (type && *type == typeid(T));
-    }
-
-    void deleteValue() {
-        if (type) {
-            value.reset();
-            type = nullptr;
-        }
-    }
-
-    ~ConditionalData() { // TODO best can we have a list of type supported ?
-       deleteValue();
-    }
-};
-
-}
-
-
-#endif //AIDGE_CORE_CONDITIONAL_DATA_H_
diff --git a/include/aidge/nodeTester/ConditionalInterpreter.hpp b/include/aidge/nodeTester/ConditionalInterpreter.hpp
deleted file mode 100644
index 713a166ec2cea7781ce98c850ecbf587eca58678..0000000000000000000000000000000000000000
--- a/include/aidge/nodeTester/ConditionalInterpreter.hpp
+++ /dev/null
@@ -1,362 +0,0 @@
-
-
-#ifndef AIDGE_CORE_CONDITIONAL_INTERPRETER_H_
-#define AIDGE_CORE_CONDITIONAL_INTERPRETER_H_
-
-#include "aidge/nodeTester/ConditionalParser.hpp"
-#include "aidge/nodeTester/ConditionalData.hpp"
-
-#include <memory> // for shared_ptr
-#include <unordered_map>
-#include <functional>
-#include "aidge/graph/Node.hpp"
-#include <sstream>
-
-
-namespace Aidge{
-
-
-
-//////////////////////////////
-//
-/////////////////////////////
-/**
- * @brief class used to register any lambda function without context,
- * it encapsulates the source lambda in a lambda which takes as argument  std::shared_ptr<ConditionalData> which are any type.
- * @see ConditionalData
- */
-class ConditionalRegisterFunction {
-    //////////////////////////
-    //Safe recaste
-    //////////////////////////
-
-    /**
-     * @brief recast the  std::shared_ptr<ConditionalData> to the argument type of the lambda
-     * @tparam T type of the lambda argument
-     * @see ConditionalData
-     */
-    template <typename T>
-    T safeCastInput( std::shared_ptr<ConditionalData> data) {
-        //cnvertion and type checking
-        if (data->isTypeEqualTo<T>()){
-            return data->getValue<T>();
-        }else{
-            throw std::invalid_argument( "incompatible input type " + data->getType() +" "+ typeid(T).name() );
-        }
-
-    }
-
-
-    /**
-     * @brief recaste the output of the lambda to a   std::shared_ptr<ConditionalData>
-     * @tparam T type of the lambda return
-     * @see ConditionalData
-     */
-    template <typename T>
-     std::shared_ptr<ConditionalData> safeCastOutput(T data) {
-
-        std::shared_ptr<ConditionalData> out = std::make_shared<ConditionalData>();
-        out->setValue<T>(data);
-
-        return out;
-    }
-
-
-
-
-    //////////////////////
-    // get all the type of the function
-    //////////////////////
-
-    /**
-     * @brief Retrieves information about a function's return type and argument types.
-     * @tparam T The function type.
-     */
-    template <typename T>
-    struct function_traits;
-
-
-    /**
-     * @brief Specialization of function_traits for function pointers.
-     * @tparam R The return type of the function.
-     * @tparam Args The argument types of the function.
-     */
-    template <typename R, typename... Args>
-    struct function_traits<R (*)(Args...)> {
-        using return_type = R;
-        static constexpr std::size_t arity = sizeof...(Args);
-
-        template <std::size_t N>
-        struct argument {
-            static_assert(N < arity, "Index out of range.");
-            using type = typename std::tuple_element<N, std::tuple<Args...>>::type;
-        };
-    };
-
-    /**
-     * @brief Specialization of function_traits for std::function types.
-     * @tparam R The return type of the function.
-     * @tparam Args The argument types of the function.
-     */
-    template <typename R, typename... Args>
-    struct function_traits<std::function<R(Args...)>> {
-        using return_type = R;
-        static constexpr std::size_t arity = sizeof...(Args);
-
-        template <std::size_t N>
-        struct argument {
-            static_assert(N < arity, "Index out of range.");
-            using type = typename std::tuple_element<N, std::tuple<Args...>>::type;
-        };
-    };
-
-    /////////////////////
-    //change the function to  std::shared_ptr<ConditionalData>(std::vector< std::shared_ptr<ConditionalData>>)
-    /////////////////////
-
-    /**
-     * @brief Converts a function to a  std::shared_ptr<ConditionalData>(std::vector< std::shared_ptr<ConditionalData>>).
-     * @tparam F The type of the function to convert.
-     * @tparam ParamsIdx The indices of the function parameters.
-     * @param f The function to convert.
-     * @return The pointer to the converted function.
-     */
-    template <class F, std::size_t... ParamsIdx>
-    auto funcPointer(F f, std::index_sequence<ParamsIdx...>) {
-        //wrap the lambda in a new one that as ConditionalData as inputs and output
-    	return [this,f](std::vector< std::shared_ptr<ConditionalData>>  &args) {
-            if (args.size() < sizeof...(ParamsIdx)){
-                std::ostringstream errorMessage;
-                errorMessage << "bad Number of argument: get " << args.size() << " need " << sizeof...(ParamsIdx) << "\n";
-                throw std::runtime_error(errorMessage.str());
-            }
-    		//we used std::vector< std::shared_ptr<ConditionalData>> as a fifo 
-            std::size_t offset = args.size()-sizeof...(ParamsIdx);
-
-    		using FuncTraits = function_traits<decltype(f)>;
-    		using outType = typename FuncTraits::return_type;
-
-    		outType result = f(safeCastInput<typename FuncTraits::template argument<ParamsIdx>::type>(args[offset+ParamsIdx])...);
-
-            //suppress what we used
-            for (size_t i = 0; i < sizeof...(ParamsIdx); ++i) {
-                args.pop_back();
-            }
-    		//typename
-    		return safeCastOutput<outType>(result);
-    	};
-    }
-
-    /**
-     * @brief Converts a function pointer to a  std::shared_ptr<ConditionalData>(std::vector< std::shared_ptr<ConditionalData>>).
-     * @tparam R The return type of the function.
-     * @tparam Params The parameter types of the function.
-     * @param f The function pointer to convert.
-     * @return The pointer to the converted function.
-     */
-    template <class R,class... Params>
-    auto funcPointer(R (*f)(Params...)) {
-    	return funcPointer(f, std::index_sequence_for<Params...>{});
-    }
-
-    /**
-     * @brief Converts a std::function to a  std::shared_ptr<ConditionalData>(std::vector< std::shared_ptr<ConditionalData>>).
-     * @tparam R The return type of the function.
-     * @tparam Params The parameter types of the function.
-     * @param f The function pointer to convert.
-     * @return The pointer to the converted function.
-     */
-    template <class R,class... Params>
-    auto funcPointer(std::function<R(Params...)> f) {
-    	return funcPointer(f, std::index_sequence_for<Params...>{});
-    }
-
-
-    ///////////////////
-    // interface
-    ///////////////////
-
-    public:
-
-     /**
-     * @brief Default constructor
-     */
-    ConditionalRegisterFunction(){}
-
-
-     /**
-     * @brief Inserts a function into the map with the provided key.
-     * @tparam T The function type.
-     * @param key The key to associate with the function.
-     * @param f The function to insert.
-     */
-    template <class T>
-    void insert(const std::string key,T f){
-        mWlambda.insert({ key, funcPointer(f)});
-    }
-
-
-     /**
-     * @brief Runs the function associated with the given key, using the provided vector of input data.
-     * @param key The key of the function to run.
-     * @param data The vector of input data.
-     * @return A pointer to the output ConditionalData object.
-     */
-     std::shared_ptr<ConditionalData> run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & data);
-
-    bool isLambdaRegister(const std::string &key) {
-        if(mWlambda.find(key) != mWlambda.end()){
-            return true;
-        }
-        return false;
-    }
-
-    private:
-    /// @brief map of name and the converted function.
-    std::map<const std::string, std::function< std::shared_ptr<ConditionalData>(std::vector< std::shared_ptr<ConditionalData>>  &)>> mWlambda;
-};
-
-///////////////////
-//AST tree node
-// ////////////////
-/**
- * @brief this class interprets AST to generate a test on a graph node. For each AST node,
- * it generates an interpretation and registers lambda functions that can be used in the test expression.
- * there are two lambda control mechanisms:
- * - A cpp mechanism which allows any lambda to be inserted into the constructor that use templaite
- * - A user mechanism limited to lambda bool(NodePtr)
- * @see ConditionalParser use to get the AST
- */
-class ConditionalInterpreter
-{
-    private:
-
-    /**
-     * @brief the AST generate by the Parser
-     * @see ConditionalParser
-     */
-    std::shared_ptr<AstNode<ConditionalTokenTypes>> mTree;
-    /**
-     * @brief the registry for the lambda function
-     * @see ConditionalRegisterFunction
-    */
-    ConditionalRegisterFunction mLambdaRegister;
-
-
-    std::vector< std::shared_ptr<ConditionalData>> mResolution ;
-
-    // void clearRes(){
-
-    //     for (std::size_t i = 0; i < mResolution.size(); ++i) {
-    //         delete mResolution[i];
-    //     }
-    //     mResolution.clear();
-    // }
-
-    public:
-
-    const std::string mKey;
-
-    /**
-     * @brief Constructor
-     * @param ConditionalExpressions The expression of the test to be performed on the nodes
-     */
-
-    ConditionalInterpreter(const std::string key,const std::string ConditionalExpressions);
-
-    ~ConditionalInterpreter(){}
-
-     /**
-     * @brief get the condition key
-     * @return the key
-    */
-
-    const std::string& getKey();
-
-    /**
-     * @brief Test a node depending of the ConditionalExpressions
-     * @details the AST is visit using \ref visit() with the $ init with the nodeOp
-     * @return bool the match node has the initialized expression
-     * @see visit() This function uses the visit() function to perform the evaluation.
-     */
-    bool test( const NodePtr nodeOp);
-
-    /**
-     * @brief Interface for inserting custom lambda bool(NodePtr) functions in AST interpretation,
-     *         it will be available in the ConditionalExpressions expretion as : key($)
-     * @param key The key that will be used to call the function in the expression
-     * @param f The pointer to function
-     */
-    void insertLambda(const std::string key,std::function<bool(Aidge::NodePtr)> f);
-
-    bool isLambdaRegister(const std::string &key);
-    /////
-
-    private:
-    /**
-     * @brief Recursive AST traversal function, using the for interpreting AST nodes function,
-     * using \ref ASTnodeInterpreterF functions
-     * @param NodeOp The node currently being tested
-     * @param nodes The AST given by the parsing process
-     */
-    std::vector< std::shared_ptr<ConditionalData>> visit(const ASTNodeCh& nodes, const NodePtr NodeOp );
-
-    /**
-     * @defgroup ASTnodeInterpreterF Functions for interpreting AST nodes
-     * @brief For each node type in the AST, function defines the processing to be performed
-     *          they return a  std::vector< std::shared_ptr<ConditionalData>> which corresponds to the value(s) obtained
-     */
-
-    /**
-     * @ingroup ASTnodeInterpreterF
-     * @brief Function that does something.
-     */
-    void fLambda(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node);
-    /**
-     * @ingroup ASTnodeInterpreterF
-     * @brief Converted the lexeme to a int and to  std::shared_ptr<ConditionalData>
-     */
-    void fStrToInteger(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node);
-    /**
-     * @ingroup ASTnodeInterpreterF
-     * @brief Converted the lexeme to a float and to  std::shared_ptr<ConditionalData>
-     */
-    void fStrToFloat(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node);
-    /**
-     * @ingroup ASTnodeInterpreterF
-     * @brief Converted the lexeme to a str and to  std::shared_ptr<ConditionalData>
-     */
-    void fStrToStr(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node);
-
-    /**
-     * @ingroup ASTnodeInterpreterF
-     * @brief makes the == operation between two previously converted  std::shared_ptr<ConditionalData>
-     */
-    void fEq(void);
-       /**
-     * @ingroup ASTnodeInterpreterF
-     * @brief makes the != operation between two previously converted  std::shared_ptr<ConditionalData>
-     */
-    void fNeq(void);
-    /**
-     * @ingroup ASTnodeInterpreterF
-     * @brief makes the && operation between two previously converted  std::shared_ptr<ConditionalData> in bool
-     */
-    void fAnd(void);
-        /**
-     * @ingroup ASTnodeInterpreterF
-     * @brief makes the || operation between two previously converted  std::shared_ptr<ConditionalData> in bool
-     */
-    void fOr(void);
-
-    /**
-     * @ingroup ASTnodeInterpreterF
-     * @brief makes the ! operation
-     */
-    void fNot(void);
-};
-
-
-}
-
-#endif //AIDGE_CORE_CONDITIONAL_INTERPRETER_H_
diff --git a/include/aidge/nodeTester/ConditionalLexer.hpp b/include/aidge/nodeTester/ConditionalLexer.hpp
deleted file mode 100644
index 0cf15d968bb6dae7532a1bcbb6c77b98ba0e42c6..0000000000000000000000000000000000000000
--- a/include/aidge/nodeTester/ConditionalLexer.hpp
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * @file
- * @brief
- * @version file 1.0.0
- * @author vl241552
- * @copyright
- *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
- *  rights reserved.
- */
-
-
-
-#ifndef AIDGE_CORE_CONDITIONAL_LEXER_H_
-#define AIDGE_CORE_CONDITIONAL_LEXER_H_
-
-#include <string>
-#include <regex>
-#include <memory> // for shared_ptr
-
-
-#include <stdexcept> //error
-#include <sstream>
-
-#include "aidge/nodeTester/ConditionalTypes.hpp"
-#include "aidge/utilsParsing/ParsingToken.hpp"
-
-
-namespace Aidge{
-
-
-
-class ConditionalLexer
-{
-
-public:
-ConditionalLexer( const std::string ConditionalExpressions );
-
-/**
- * @brief Get the next token on the ConditionalExpressions
- * @return ParsingToken<ConditionalTokenTypes>
- */
-std::shared_ptr<ParsingToken<ConditionalTokenTypes>> getNextToken(void);
-/**
- * @brief Restart at the start of the ConditionalExpressions
- *
- */
-void rstPosition(void);
-
-/**
- * @brief Test if the string is completely read
- * @return bool
- */
-bool isEnd(void);
-
-
-/**
- * @brief Get the representation of the class
- * @return string
- */
-const std::string rep(){
-   return mConditionalExpressions;
-}
-
-private:
-
-/**
- * @brief Constructs an error message to display the character not understood by the lexer
- * @return error message
- */
-std::runtime_error badTokenError(const std::string& currentChars,std::size_t position);
-
-/**
- * @brief The expression of the test to be performed on the nodes
- */
-const std::string mConditionalExpressions;
-/**
- * @brief The lexer's current position in mConditionalExpressions
- */
-std::size_t mPosition;
-
-};
-
-/////////////////////////////////////
-
-
-}
-
-#endif //AIDGE_CORE_CONDITIONAL_LEXER_H_
diff --git a/include/aidge/nodeTester/ConditionalParser.hpp b/include/aidge/nodeTester/ConditionalParser.hpp
deleted file mode 100644
index 06b0e112cfe9bba6a4f0bf32eb1b793326a357f8..0000000000000000000000000000000000000000
--- a/include/aidge/nodeTester/ConditionalParser.hpp
+++ /dev/null
@@ -1,110 +0,0 @@
-
-
-
-#ifndef AIDGE_CORE_CONDITIONAL_PARSER_H_
-#define AIDGE_CORE_CONDITIONAL_PARSER_H_
-
-
-#include <memory> // for shared_ptr
-#include <map>
-#include <vector>
-
-#include "aidge/nodeTester/ConditionalLexer.hpp"
-#include "aidge/nodeTester/ConditionalTypes.hpp"
-#include "aidge/utilsParsing/ParsingToken.hpp"
-#include "aidge/utilsParsing/AstNode.hpp"
-
-namespace Aidge{
-
-const std::map<ConditionalTokenTypes, std::size_t> ConditionalPrec{
-    {ConditionalTokenTypes::AND,2},
-    {ConditionalTokenTypes::OR,1}
-};
-
-
-
-
-using ASTNodeCh = std::vector<std::shared_ptr<AstNode<ConditionalTokenTypes>>>;
-
-/**
- * @brief this class uses the lexer to create an AST according to a set of gramer rules
- */
-class ConditionalParser {
-
-    public:
-    /**
-     * @brief AST graph creation function
-     * @param ConditionalExpressions String representing the logical function to be performed
-     */
-    ConditionalParser(const std::string ConditionalExpressions);
-
-    ~ConditionalParser() noexcept;
-
-    /**
-     * @brief AST graph creation function
-     * @return The AST tree
-     */
-    std::shared_ptr<AstNode<ConditionalTokenTypes>> parse(void);
-
-
-    private:
-    /**
-     * @brief restart at the start of the ConditionalExpressions for LEXER and restart  mCurrentToken
-     */
-    void rstParser(void);
-
-    //////////////////
-
-    /**
-     * @defgroup ParsingFunctions Function for creating AST
-     * @brief Functions for recursive construction of the AST representing grammar rules
-     */
-
-    /**
-     * @ingroup ParsingFunctions
-     * @brief Token reading and verification function
-     *
-     */
-    void ackToken(ConditionalTokenTypes  tokenType);
-
-    /**
-     * @ingroup ParsingFunctions
-     * @brief Function of grammar rules for values : (KEY|INTEGER|FOAT|STRING|LAMBDA lambda)
-     * @return AST node
-     */
-    std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstVal(void);
-    /**
-    * @ingroup ParsingFunctions
-    * @brief Function of grammar rules for comparison : val (EQ|NEQ) val | LPAREN expr RPAREN
-    * @return AST node
-    */
-    std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstCmpr(void);
-    /**
-    * @ingroup ParsingFunctions
-    * @brief Function of grammar rules for arguments of a lambda : LAMBDA val (ARGSEP val)* RPAREN
-    * @return AST node
-    */
-    std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstLambda(void);
-    /**
-    * @ingroup ParsingFunctions
-    * @brief Function of grammar rules for a expression : cmpr ((AND | OR) cmpr)*
-    * @return AST node
-    */
-    std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstExpr(std::size_t precLimit = 0);
-
-
-    /**
-    * @brief The actual token in the parce
-    */
-    std::shared_ptr<ParsingToken<ConditionalTokenTypes>> mCurrentToken;
-    /**
-    * @brief The lexem use
-    */
-    ConditionalLexer mLexer;
-
-};
-
-
-}
-
-#endif //AIDGE_CORE_CONDITIONAL_PARSER_H_
diff --git a/include/aidge/nodeTester/ConditionalTypes.hpp b/include/aidge/nodeTester/ConditionalTypes.hpp
deleted file mode 100644
index 6cb2edfd78e6b43c4f2dbc89c49cdaa9ea79f7d2..0000000000000000000000000000000000000000
--- a/include/aidge/nodeTester/ConditionalTypes.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-
-
-#ifndef AIDGE_CORE_CONDITIONAL_TYPES_H_
-#define AIDGE_CORE_CONDITIONAL_TYPES_H_
-namespace Aidge{
-    /**
-     * @brief enum for all types of token use in the parsing
-     * 7-5 type
-     * 4-0 id
-    */
-    enum class ConditionalTokenTypes
-    {
-        STOP,
-
-        NOT,     /**< ! */
-        AND,     /**< && */
-        OR,      /**< || */
-
-        EQ,      /**< == */
-        NEQ,     /**< != */
-
-        KEY,     /**< [A-Za-z][A-Za-z0-9_]* */
-        INTEGER, /**< [0-9]+ */
-        FLOAT,   /**< [0-9]+\.[0-9]* */
-        STRING , /**< \'.*\' */
-        BOOL,    /**< true|false */
-        NODE,    /**< \$ */
-        LAMBDA , /**< [A-Za-z][A-Za-z0-9_]*\( */
-
-        ARGSEP,  /**< , */
-        LPAREN,  /**< \( */
-        RPAREN,  /**< \) */
-
-    };
-}
-#endif // AIDGE_CORE_CONDITIONAL_TYPES_H_
diff --git a/include/aidge/operator/Abs.hpp b/include/aidge/operator/Abs.hpp
index f1dc37003fbff9463d041030818ec0534c5ac1fd..d8dc62752ff8dcfc53715b446c101d20f3cfc7ac 100644
--- a/include/aidge/operator/Abs.hpp
+++ b/include/aidge/operator/Abs.hpp
@@ -24,16 +24,32 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of an element-wise Absolute Value (Abs) operation
+ * on an input Tensor.
+ *
+ * For each element x in the input, the function is defined as:
+ * `f(x) = |x|`, where |x| denotes the absolute value of x.
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
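+ *
+ * Usage sketch (illustrative only; assumes this header is included and a
+ * backend implementation is registered for Abs_Op):
+ * @code
+ * // Create a standalone Abs operator; it expects one data input and
+ * // produces one output of identical dimensions.
+ * auto absOp = std::make_shared<Aidge::Abs_Op>();
+ * @endcode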
+ */
 class Abs_Op : public OperatorTensor,
-    public Registrable<Abs_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Abs_Op&)>> {
+    public Registrable<Abs_Op,  // <Op, backend, implementation creation function>
+        std::string,
+        std::function<std::shared_ptr<OperatorImpl>(const Abs_Op&)>> {
 public:
     static const std::string Type;
 
     Abs_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Abs_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Abs_Op(const Abs_Op& op)
         : OperatorTensor(op)
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 827fc0c2732695364aa2393692d7040b8b1a0e9f..fcd154b6e1abe0d3bcfc6e72d8c078b3277f9b47 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -24,16 +24,42 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of an element-wise Add operation on input Tensors,
+ * supporting NumPy broadcasting.
+ *
+ * For each pair of elements x and y from the input Tensors, the function 
+ * is defined as:
+ * `f(x, y) = x + y`
+ *
+ * Broadcasting adjusts shapes of the input Tensors to make them compatible:
+ * - Tensors are aligned from the rightmost dimensions.
+ * - Dimensions are compatible if they are equal, if one of them is 1, or if one of them is missing.
+ *
+ * The output Tensor shape is determined by taking the maximum size along 
+ * each dimension of the input Tensors after broadcasting.
+ *
+ * @example Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+ * @example Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+ *
+ * @see OperatorTensor
+ * @see Registrable
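+ *
+ * Usage sketch (illustrative only; the shapes follow the broadcasting rules
+ * described above):
+ * @code
+ * // Create a standalone Add operator; it takes two data inputs, e.g. of
+ * // shapes (3, 4, 2) and (2), and produces one output of shape (3, 4, 2).
+ * auto addOp = std::make_shared<Aidge::Add_Op>();
+ * @endcode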
+ */
 class Add_Op : public OperatorTensor,
-    public Registrable<Add_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Add_Op&)>> {
+    public Registrable<Add_Op,
+                       std::string,
+                       std::function<std::shared_ptr<OperatorImpl>(const Add_Op&)>>
+{
 public:
     static const std::string Type;
 
     Add_Op();
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Add_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Add_Op(const Add_Op& op);
 
diff --git a/include/aidge/operator/And.hpp b/include/aidge/operator/And.hpp
index e4f04e2fa3ec2a4a01f023b9ab203e6b2ab36e76..32b6684a00a8e1dc9eb938e011f8529554e7de79 100644
--- a/include/aidge/operator/And.hpp
+++ b/include/aidge/operator/And.hpp
@@ -25,23 +25,39 @@
 namespace Aidge {
 
 /**
- * @brief Tensor element-wise logical and operation.
+ * @brief Description of an element-wise And operation on input Tensors,
+ * supporting NumPy broadcasting.
+ *
+ * For each pair of elements x and y from the input Tensors, the function 
+ * is defined as:
+ * `f(x, y) = x && y`
+ *
+ * Broadcasting adjusts shapes of the input Tensors to make them compatible:
+ * - Tensors are aligned from the rightmost dimensions.
+ * - Dimensions are compatible if they are equal, if one of them is 1, or if one of them is missing.
+ *
+ * The output Tensor shape is determined by taking the maximum size along 
+ * each dimension of the input Tensors after broadcasting.
+ * 
+ * Examples:
+ * 1. Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+ * 2. Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+ * 
+ * @see OperatorTensor
+ * @see Registrable
  */
 class And_Op : public OperatorTensor,
     public Registrable<And_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const And_Op&)>> {
 public:
     static const std::string Type;
 
-    /**
-     * @brief Compute element-wise and operation on two given inputs.
-     * @details supports broadcasting of both operands.
-     */
     And_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
-     * but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op And_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     And_Op(const And_Op& op)
         : OperatorTensor(op)
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index 13f63ce98c526f0c57a363ada4e7f50ccdbfb83b..7358899a9e6dfac85299a2cf498b7e6e1ba9e7c2 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -26,15 +26,53 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ArgMaxAttr { Axis, KeepDims, SelectLastIndex };
+
+enum class ArgMaxAttr {
+    /**
+     * @brief Specifies the dimension along which the ArgMax operation is performed.
+     */
+    Axis,
+    /**
+     * @brief Indicates whether reduced dimensions should be kept or removed.
+     */
+    KeepDims,
+    /**
+     * @brief Determines whether to select the first or last index in case of ties.
+     */
+    SelectLastIndex
+};
 
 /**
- * @brief This operator has as purpose to reduce given dimension by replacing with the Max value's index.
-*/
+ * @brief Description of the ArgMax operation on a Tensor.
+ *
+ * The ArgMax operation identifies the index of the maximum value along a specified axis of a Tensor.
+ *
+ * The output of the ArgMax operation can retain the dimensionality of the input Tensor or reduce 
+ * it by removing the specified axis. Additionally, in cases where multiple maximum values exist, 
+ * the user can specify whether to select the first or the last occurrence of the maximum value.
+ *
+ * Attributes:
+ * - `Axis`: The axis along which the ArgMax operation is performed. For example, if the axis is `0`,
+ *   the operation is applied along rows; if it is `1`, it is applied along columns.
+ * - `KeepDims`: A boolean indicating whether to retain the reduced axis as a dimension of size `1` 
+ *   (`true`) or to completely remove it (`false`).
+ * - `SelectLastIndex`: A boolean indicating how to handle ties (multiple maximum values along the axis):
+ *   - If `true`, the last index of the maximum value is selected.
+ *   - If `false`, the first index of the maximum value is selected.
+ *
+ * @example:
+ * - Input Tensor: Shape (2, 3)
+ * - ArgMax(axis=1, keep_dims=false, select_last_index=false):
+ *   Output Tensor: Shape (2)
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class ArgMax_Op : public OperatorTensor,
                 public Registrable<ArgMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ArgMax_Op &)>> {
 
 public:
+    /// The type of the operator as a string.
     static const std::string Type;
 
 private:
@@ -44,20 +82,19 @@ private:
                                         bool>;
     template <ArgMaxAttr e>
     using attr = typename Attributes_::template attr<e>;
+    /// Pointer to the attribute storage.
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
     ArgMax_Op() = delete;
 
     /**
-     * @brief constructor for ArgMax op
-     * @param[in] axis around which perform the operation
-     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axis and 
-     * if false we remove the dimension completely
-     * @param[in] select_last_index in case we have many maximum, if true the last index is returned 
-     * if false the first index is returned. 
+     * @brief Constructs an ArgMax operator.
+     * @param[in] axis The axis along which the ArgMax operation is performed.
+     * @param[in] keep_dims Whether to retain reduced dimensions with size 1 (`true`) or remove them (`false`).
+     * @param[in] select_last_index Whether to select the last occurrence of the maximum value (`true`) or the first (`false`).
      */
-    ArgMax_Op(std::int32_t axis, bool keep_dims, bool select_last_index)
+    ArgMax_Op(std::int32_t axis = 0, bool keep_dims = true, bool select_last_index = false)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ArgMaxAttr::Axis>(axis),
@@ -66,68 +103,104 @@ public:
     {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy constructor for the ArgMax operator.
+     *
+     * Creates a new ArgMax_Op instance by copying attributes and output tensors from another
+     * instance. The input tensors are not copied, so the new instance has no inputs associated.
+     *
+     * @param op The ArgMax_Op instance to copy.
      */
-    ArgMax_Op(const ArgMax_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ArgMax_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    ArgMax_Op(const ArgMax_Op& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::ArgMax_Op
+     * @brief Creates a copy of the current ArgMax operator.
+     * @return A shared pointer to the new ArgMax operator instance.
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ArgMax_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Performs dimension inference for the ArgMax operation.
+     * @param[in] allowDataDependency Whether data dependency is allowed during dimension inference.
+     * @return `true` if dimensions were successfully inferred, `false` otherwise.
+     */
     bool forwardDims(bool allowDataDependency = false) override final;
 
+    /**
+     * @brief Sets the backend for the operator.
+     * @param name The name of the backend.
+     * @param device The device index on which the backend operates (default is 0).
+     */
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+
+    /**
+     * @brief Retrieves a list of available backends for the ArgMax operator.
+     * @return A set of strings representing the available backends.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Gets the attribute storage for the ArgMax operator.
+     * @return A shared pointer to the attribute storage.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Gets a reference to the axis attribute.
+     * @return A reference to the axis attribute.
+     */
     inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::Axis>(); }
+
+    /**
+     * @brief Gets a reference to the keepDims attribute.
+     * @return A reference to the keepDims attribute.
+     */
     inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::KeepDims>(); }
-    inline bool& selectLastIndex() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::SelectLastIndex>(); }
 
+    /**
+     * @brief Gets a reference to the selectLastIndex attribute.
+     * @return A reference to the selectLastIndex attribute.
+     */
+    inline bool& selectLastIndex() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::SelectLastIndex>(); }
 
+    /**
+     * @brief Returns the names of the input tensors for the ArgMax operator.
+     * @return A vector of strings containing the input names.
+     */
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
+
+    /**
+     * @brief Returns the names of the output tensors for the ArgMax operator.
+     * @return A vector of strings containing the output names.
+     */
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
 /**
- * @brief Compute the max value of a Tensor over the provided axes. Dimensions
- * may be reduced by erasing the provided axis or not.
+ * @brief Creates an ArgMax operation node.
  *
- * @param axis Dimension over which data max should be computed.
- * @param keep_dims Whether or not reduced dimensions are to be erased.
- * @param select_last_index Whether to select the last index of max elements in case there are many maximums.
- * By default the first max element index is 
- * @param name Name of the Operator.
- * @return std::shared_ptr<Node> Node containing the Operator.
+ * This function constructs a new Node containing an ArgMax_Op operator with the specified
+ * attributes.
+ *
+ * @param[in] axis The axis along which the ArgMax operation is performed.
+ * @param[in] keep_dims Whether to retain reduced dimensions with size 1 (`true`) or remove them (`false`).
+ * @param[in] select_last_index Whether to select the last occurrence of the maximum value (`true`) or the first (`false`).
+ * @param[in] name The name of the Node (optional).
+ * @return A shared pointer to the newly created Node.
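+ *
+ * Usage sketch (illustrative only, not taken from the Aidge sources):
+ * @code
+ * // ArgMax along axis 1, keeping the reduced dimension and selecting the
+ * // first index in case of ties.
+ * auto argmaxNode = Aidge::ArgMax(1, true, false, "argmax");
+ * @endcode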
  */
-inline std::shared_ptr<Node> ArgMax(std::int32_t axis=0,
-                                    bool keep_dims=true,
-                                    bool select_last_index=false,
-                                    const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<ArgMax_Op>(axis, keep_dims, select_last_index), name);
-
-}
+std::shared_ptr<Node> ArgMax(std::int32_t axis = 0,
+                             bool keep_dims = true,
+                             bool select_last_index = false,
+                             const std::string& name = "");
 
 }  // namespace Aidge
 
+/**
+ * @brief Provides string representations for the ArgMaxAttr enumeration.
+ */
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
diff --git a/include/aidge/operator/Atan.hpp b/include/aidge/operator/Atan.hpp
index f9c7d09e7a49cd8687166006eea75510aeda57ee..6f81ab0a878a0c253b98580ecbefaf2f821a543f 100644
--- a/include/aidge/operator/Atan.hpp
+++ b/include/aidge/operator/Atan.hpp
@@ -24,15 +24,41 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of an element-wise Arc Tangent (Atan) operation
+ * on an input Tensor.
+ *
+ * For each element x in the input, the function is defined as:
+ * `f(x) = atan(x)`, where atan(x) is the arc tangent of x, 
+ * returning the angle in radians whose tangent is x.
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Atan_Op : public OperatorTensor,
-    public Registrable<Atan_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Atan_Op&)>> {
+    public Registrable<Atan_Op,  // <Op, backend, implementation creation function>
+        std::string,
+        std::function<std::shared_ptr<OperatorImpl>(const Atan_Op&)>>
+{
 public:
     static const std::string Type;
 
     Atan_Op();
 
+    /**
+     * @brief Copy-constructor.
+     * @param op Atan_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
+     */
     Atan_Op(const Atan_Op& op);
 
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Atan_Op
+     */
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 54b40907e8b4127b7b96b95b229440d782149c3d..981f71762757795461226f2b052bda7f4bc9cd89 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -24,30 +24,72 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class AvgPoolingAttr { StrideDims, KernelDims };
+/**
+ * @brief Attributes specific to the AvgPooling operation.
+ */
+enum class AvgPoolingAttr {
+    /**
+     * @brief Stride dimensions for sliding the pooling window.
+     * Specifies the step size of the sliding window along each spatial dimension.
+     */
+    StrideDims,
+
+    /**
+     * @brief Kernel dimensions for the pooling operation.
+     * Specifies the size of the pooling window along each spatial dimension.
+     */
+    KernelDims
+};
 
+/**
+ * @brief Class representing an Average Pooling operation.
+ *
+ * The AvgPooling operation computes the average value within sliding windows of specified size
+ * (kernel dimensions) over the input tensor. The stride dimensions determine how the window
+ * moves across the input. This operation is commonly used in neural networks to reduce the spatial
+ * dimensions while preserving features.
+ *
+ * @tparam DIM Number of dimensions for the pooling operation.
+ */
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
                 public Registrable<AvgPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>> {
 
 public:
+    /**
+     * @brief Type identifier for the AvgPooling operation.
+     */
     static const std::string Type;
 
 private:
+    /**
+     * @brief Static attributes representing kernel and stride dimensions.
+     */
     using Attributes_ = StaticAttributes<AvgPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>>;
     template <AvgPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
+
+    /**
+     * @brief Shared pointer to the attributes of the operation.
+     */
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-
+    /**
+     * @brief Default constructor is deleted.
+     */
     AvgPooling_Op() = delete;
 
-
+    /**
+     * @brief Constructs an AvgPooling operation with specified kernel and stride dimensions.
+     * @param kernel_dims Size of the pooling window for each spatial dimension.
+     * @param stride_dims Step size (stride) for sliding the pooling window across the input dimensions.
+     * Defaults to 1 for each dimension.
+     */
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
-                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
+                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
                         attr<AvgPoolingAttr::StrideDims>(stride_dims),
@@ -55,48 +97,107 @@ public:
     {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op AvgPooling_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     AvgPooling_Op(const AvgPooling_Op<DIM>& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::AvgPooling_Op
+     * @brief Clones the operator using its copy-constructor.
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override final;
 
-
+    /**
+     * @brief Calculates the output dimensions based on the input dimensions and operator attributes.
+     * @param allowDataDependency If true, considers data-dependent operations. Defaults to false.
+     * @return True if the dimensions are successfully calculated.
+     */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-
+    /**
+     * @brief Computes the receptive field of the operator.
+     * @param firstEltDims Dimensions of the first element.
+     * @param outputDims Dimensions of the output tensor.
+     * @param outputIdx Index of the output tensor. Defaults to 0.
+     * @return A vector of pairs representing the receptive fields.
+     */
     std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
     computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
-                            const std::vector<DimSize_t>& outputDims,
-                            const IOIndex_t outputIdx = 0) const override final;
-
+                          const std::vector<DimSize_t>& outputDims,
+                          const IOIndex_t outputIdx = 0) const override final;
 
+    /**
+     * @brief Sets the backend for the operation.
+     * @param name Name of the backend.
+     * @param device Device index. Defaults to 0.
+     */
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+
+    /**
+     * @brief Retrieves the available backends for the operation.
+     * @return A set of strings representing the available backends.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Accessor for the operation attributes.
+     * @return Shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Accessor for the stride dimensions.
+     * @return An array representing the stride dimensions.
+     */
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<AvgPoolingAttr::StrideDims>(); }
+
+    /**
+     * @brief Accessor for the kernel dimensions.
+     * @return An array representing the kernel dimensions.
+     */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<AvgPoolingAttr::KernelDims>(); }
 
+    /**
+     * @brief Retrieves the names of the input tensors.
+     * @return A vector of strings representing the input tensors names.
+     */
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
+
+    /**
+     * @brief Retrieves the names of the output tensors.
+     * @return A vector of strings representing the output tensors names.
+     */
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Creates an AvgPooling operator node.
+ * @tparam DIM Number of dimensions for the pooling operation.
+ * @param kernel_dims Size of the pooling window for each spatial dimension.
+ * @param name Name of the operator node. Defaults to an empty string.
+ * @param stride_dims Step size (stride) for sliding the pooling window across the input dimensions. Defaults to 1 for each dimension.
+ * @return A shared pointer to the created operator node.
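+ *
+ * Usage sketch (illustrative only, not taken from the Aidge sources):
+ * @code
+ * // 2D average pooling with a 2x2 window and a stride of 2 in both
+ * // spatial dimensions.
+ * auto poolNode = Aidge::AvgPooling<2>({2, 2}, "pool", {2, 2});
+ * @endcode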
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
-                                           const std::string& name = "",
-                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1));
-
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+                                 const std::string& name = "",
+                                 const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1));
+
+/**
+ * @brief Overload of AvgPooling for C-style arrays.
+ * @tparam DIM Number of dimensions for the pooling operation.
+ * @param kernel_dims C-style array specifying the kernel dimensions.
+ * @param name Name of the operator node. Defaults to an empty string.
+ * @param stride_dims Step size (stride) for sliding the pooling window across the input dimensions. Defaults to 1 for each dimension.
+ * @return A shared pointer to the created operator node.
+ */
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> AvgPooling(
     DimSize_t const (&kernel_dims)[DIM],
@@ -107,12 +208,18 @@ inline std::shared_ptr<Node> AvgPooling(
 }
 }  // namespace Aidge
 
+/**
+ * @brief Explicit template instantiations for AvgPooling_Op with 1 to 4 dimensions.
+ */
 extern template class Aidge::AvgPooling_Op<1>;
 extern template class Aidge::AvgPooling_Op<2>;
 extern template class Aidge::AvgPooling_Op<3>;
 extern template class Aidge::AvgPooling_Op<4>;
 
 namespace {
+/**
+ * @brief String representation of the AvgPooling attributes.
+ */
 template <>
 const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
     "stride_dims",
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 8f33380b29a1509c75721994f16139b9f1a9e20a..ddffaeb027ee6c581ae8e8c9abb06bfaa0d2a4d6 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -24,31 +24,83 @@
 
 namespace Aidge {
 
-enum class BatchNormAttr { Epsilon, Momentum, TrainingMode };
+enum class BatchNormAttr {
+  /**
+   * @brief Epsilon value to avoid division by zero during normalization.
+   *
+   * A small value added to the denominator during normalization to ensure numerical stability.
+   * Commonly used in batch normalization to avoid very small variance values.
+   */
+  Epsilon,
+
+  /**
+   * @brief Momentum factor for the moving average of batch statistics.
+   *
+   * Controls the weighting of past running averages in the batch normalization statistics.
+   * - `0.0`: Full reliance on current batch statistics.
+   * - `1.0`: Complete reliance on the previous moving average.
+   */
+  Momentum,
+
+  /**
+   * @brief Flag indicating whether the operator is in training mode.
+   *
+   * - `true`: Uses the current batch statistics for normalization.
+   * - `false`: Uses moving average statistics accumulated during training.
+   */
+  TrainingMode
+};
 
+/**
+ * @class BatchNorm_Op
+ * @brief Implements the Batch Normalization (BN) operation, a technique used to normalize the inputs of a layer.
+ *
+ * BatchNorm standardizes the inputs to a layer by adjusting and scaling activations based on the batch's mean and variance.
+ * This operator computes the following:
+ *   - Normalization of input features
+ *   - Scaling and shifting of normalized values based on learned parameters
+ *
+ * @example:
+ * - Input shape: (batch_size, num_features) // Batch size, number of features
+ * - Epsilon: 1e-5
+ * - Momentum: 0.1
+ * - TrainingMode: true
+ * - Output shape: (batch_size, num_features)
+ *
+ * @see OperatorTensor
+ * @see Registrable
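+ *
+ * Usage sketch (illustrative only; the 2-D instantiation is assumed to be
+ * available, as for the other spatial operators):
+ * @code
+ * // BatchNorm operator with epsilon = 1e-5, momentum = 0.1, training mode on.
+ * auto bnOp = std::make_shared<Aidge::BatchNorm_Op<2>>(1e-5f, 0.1f, true);
+ * @endcode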
+ */
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
                 public Registrable<BatchNorm_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>> {
+
 public:
     static const std::string Type;
 
 private:
     using Attributes_ = StaticAttributes<BatchNormAttr, float, float, bool>;
+
     template <BatchNormAttr e>
     using attr = typename Attributes_::template attr<e>;
+
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-
     BatchNorm_Op() = delete;
 
+    /**
+     * @brief Constructor for BatchNorm operator.
+     * @param[in] epsilon Small value to add to the denominator for numerical stability.
+     * @param[in] momentum Momentum for the moving average of statistics.
+     * @param[in] trainingMode Flag indicating whether to use current or moving average statistics.
+     */
     constexpr BatchNorm_Op(float epsilon, float momentum, bool trainingMode)
         : OperatorTensor(Type,
                             {InputCategory::Data,
-                                InputCategory::Param,
-                                InputCategory::Param,
-                                InputCategory::Param,
-                                InputCategory::Param},
+                             InputCategory::Param,
+                             InputCategory::Param,
+                             InputCategory::Param,
+                             InputCategory::Param},
                             1),
           mAttributes(std::make_shared<Attributes_>(
             attr<BatchNormAttr::Epsilon>(epsilon),
@@ -68,29 +120,35 @@ public:
      */
     std::shared_ptr<Operator> clone() const override;
 
-    // Data operator[](const char* inputName) override final {
-    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
-    //         (strcmp(inputName, "weight") ? mInputs[1] :
-    //         (strcmp(inputName, "bias") ? mInputs[2] :
-    //         nullptr));
-    //     assert((in!=nullptr) && "No such parameter");
-    //     return *in;
-    // }
-
-
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the epsilon value.
+     */
     inline float& epsilon() const { return mAttributes->template getAttr<BatchNormAttr::Epsilon>(); }
+
+    /**
+     * @brief Get the momentum value.
+     */
     inline float& momentum() const { return mAttributes->template getAttr<BatchNormAttr::Momentum>(); }
+
+    /**
+     * @brief Get whether the operator is in training mode.
+     */
     inline bool& trainingMode() const { return mAttributes->template getAttr<BatchNormAttr::TrainingMode>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "scale", "shift", "mean", "variance"};
     }
+
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
@@ -117,4 +175,4 @@ template <>
 const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum", "training_mode" };
 }
 
-#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
+#endif /* AIDGE_CORE_OPERATOR_BATCHNORM_H_ */
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index bd14bea76937fbfc42cbafa9636df9b55832fa9d..711cf858520c5f85ea0099f5a8ad9ab03940ee9f 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -23,38 +23,70 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/StaticAttributes.hpp"
 
-
 namespace Aidge {
-    enum class BitShiftAttr { BitShiftdirection };
+
+
+enum class BitShiftAttr {
+    /**
+     * @brief Direction of the bitwise shift (left or right).
+     */
+    BitShiftdirection
+};
 
 /**
- * @brief Tensor BitShift Operator
+ * @class BitShift_Op
+ * @brief A tensor operator to perform element-wise bitwise shift operations on tensors.
+ *
+ * The operator takes two inputs:
+ * - **InputTensor**: The tensor whose elements will be shifted.
+ * - **ShiftAmount**: The tensor specifying the shift amount for each element.
+ *
+ * The shift is applied in the direction specified by the attribute `BitShiftdirection`, 
+ * which can either be `left` or `right`.
+ *
+ * @see OperatorTensor
+ * @see Registrable
  */
 class BitShift_Op : public OperatorTensor,
     public Registrable<BitShift_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const BitShift_Op&)>> {
+
 public:
-    enum BitShiftDirection {left,right};
+    /**
+     * @enum BitShiftDirection
+     * @brief Specifies the direction of the bitwise shift.
+     */
+    enum BitShiftDirection { left, right };
+
+    /**
+     * @brief Type identifier for the operator.
+     */
     static const std::string Type;
-private:     
 
-    using Attributes_ = StaticAttributes<BitShiftAttr,BitShiftDirection>;
-    template <BitShiftAttr e> using attr = typename Attributes_::template attr<e>;
+private:
+    using Attributes_ = StaticAttributes<BitShiftAttr, BitShiftDirection>;
+
+    template <BitShiftAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
     const std::shared_ptr<Attributes_> mAttributes;
-public:
 
-    BitShift_Op(BitShiftDirection direction) 
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1),
-    mAttributes(std::make_shared<Attributes_>(
-                attr<BitShiftAttr::BitShiftdirection>(direction))) 
-                {}
+public:
+    /**
+     * @brief Constructor to initialize the `BitShift_Op` with a shift direction.
+     * @param[in] direction The direction of the bitwise shift (left or right).
+     */
+    BitShift_Op(BitShiftDirection direction)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+              attr<BitShiftAttr::BitShiftdirection>(direction))) {}
 
-    /**¨PPPP
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
-     * but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+    /**
+     * @brief Copy-constructor. Copies operator attributes and output tensors but not input tensors.
+     * @param[in] op Operator instance to copy.
      */
     BitShift_Op(const BitShift_Op& op)
-        : OperatorTensor(op),mAttributes(op.mAttributes)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl) {
             SET_IMPL_MACRO(BitShift_Op, *this, op.backend());
@@ -65,61 +97,76 @@ public:
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::BitShift_Op
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override {
         return std::make_shared<BitShift_Op>(*this);
     }
 
     bool forwardDims(bool allowDataDependency = false) override final;
-   
+
     /**
-     * @brief Setter to specify which backend to use
-     * 
-     * @return Boolean
+     * @brief Set the backend to be used for this operator.
+     * @param[in] name Backend name.
+     * @param[in] device Device index (default: 0).
      */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Get the set of available backends for this operator.
+     * @return A set of backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
-    
+
     /**
-     * @brief Getter to retrieve Attributes of the bitshift class
-     * 
-     * @return Attributes
+     * @brief Get the attributes of the operator.
+     * @return A shared pointer to the attributes.
      */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
     /**
-     * @brief Retrieve the direction in which the shift should be applied (right or left)
-     * 
-     * @return BitShiftDirection 
+     * @brief Retrieve the direction of the bitwise shift (left or right).
+     * @return The direction of the bitwise shift.
      */
-    inline BitShiftDirection& direction() const noexcept { return mAttributes ->template getAttr<BitShiftAttr::BitShiftdirection>(); }
-
-    static const std::vector<std::string> getInputsName(){
-        return {"InputTensor", "ShiftAmount"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"OutputTensor"};
+    inline BitShiftDirection& direction() const noexcept {
+        return mAttributes->template getAttr<BitShiftAttr::BitShiftdirection>();
     }
 
+    /**
+     * @brief Get the names of the input tensors.
+     * @return A vector containing the input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
+        return { "InputTensor", "ShiftAmount" };
+    }
 
+    /**
+     * @brief Get the names of the output tensors.
+     * @return A vector containing the output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
+        return { "OutputTensor" };
+    }
 };
+
 /**
- * @brief The bitwise shift operator performs an element-wise operation between the input tensor and the shift tensor in 
-    the direction specified by "direction" 
- * @param[in] direction Direction of the bitshift (Left or Right)
- * @param[in] name Name of the node
- * @return std::shared_ptr<Node> 
+ * @brief Factory function to create a `BitShift` node.
+ * @param[in] direction The direction of the bitwise shift (`left` or `right`).
+ * @param[in] name (Optional) Name of the node.
+ * @return A shared pointer to the created node.
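+ *
+ * Usage sketch (illustrative only, not taken from the Aidge sources):
+ * @code
+ * // Element-wise left shift: the second input provides the shift amounts.
+ * auto shiftNode = Aidge::BitShift(Aidge::BitShift_Op::BitShiftDirection::left, "lshift");
+ * @endcode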
  */
-    inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direction, const std::string& name = "") {
-        return std::make_shared<Node>(std::make_shared<BitShift_Op>(direction), name);
-    }
+inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direction, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<BitShift_Op>(direction), name);
+}
+
 } // namespace Aidge
 
 namespace {
+/**
+ * @brief Specialization of `EnumStrings` for `BitShiftAttr`.
+ */
 template <>
-const char *const EnumStrings<Aidge::BitShiftAttr>::data[] = {"BitShiftdirection"};
-
+const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = { "BitShiftdirection" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 3fa1bb22a0dd9def11e0621b67cbd8395b5344fa..1f934fbc7d12f79db7185a5ff12136513b2fb7df 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -30,22 +30,55 @@ public:
     void forward() override;
 };
 
-enum class CastAttr { TargetType };
+/**
+ * @enum CastAttr
+ * @brief Enum class defining the attributes for the Cast operator.
+ */
+enum class CastAttr {
+    /**
+     * @brief Target data type for the cast operation.
+     */
+    TargetType
+};
 
+/**
+ * @brief Description of the Cast operation to convert a tensor's data type.
+ *
+ * The Cast operator changes the data type of input tensor elements to a specified target type.
+ *
+ * ### Attributes:
+ * - `TargetType`: Specifies the data type to which the tensor elements are cast.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Cast_Op : public OperatorTensor,
     public Registrable<Cast_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Cast_Op&)>> {
+
 public:
+    /**
+     * @brief Type string identifying this operator.
+     */
     static const std::string Type;
 
 private:
     using Attributes_ = StaticAttributes<CastAttr, DataType>;
+
     template <CastAttr e>
     using attr = typename Attributes_::template attr<e>;
+
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    /**
+     * @brief Deleted default constructor.
+     */
     Cast_Op() = delete;
 
+    /**
+     * @brief Constructor for Cast operator.
+     * @param[in] targetType The desired data type to which the tensor will be cast.
+     */
     Cast_Op(const DataType targetType);
 
     /**
@@ -65,31 +98,56 @@ public:
     }
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Cast_Op
+     * @brief Clone the operator using its copy constructor.
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override {
         return std::make_shared<Cast_Op>(*this);
     }
 
+
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Access the attributes of the operator.
+     * @return A shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the target data type for the cast operation.
+     * @return Reference to the target data type.
+     */
     inline DataType& targetType() const { return mAttributes->template getAttr<CastAttr::TargetType>(); }
 
-    static const std::vector<std::string> getInputsName(){
+    /**
+     * @brief Get the input tensor names for the Cast operator.
+     * @return A vector containing the input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+
+    /**
+     * @brief Get the output tensor names for the Cast operator.
+     * @return A vector containing the output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
-
+/**
+ * @brief Factory function to create a Cast node.
+ * @param[in] targetType The desired data type to cast to.
+ * @param[in] name Name of the operator node.
+ * @return A shared pointer to the created Cast node.
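+ *
+ * Usage sketch (illustrative only; DataType::Float32 is assumed to be one of
+ * the available data types):
+ * @code
+ * // Cast the input tensor elements to 32-bit floating point.
+ * auto castNode = Aidge::Cast(Aidge::DataType::Float32, "cast");
+ * @endcode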
+ */
 std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = "");
 
-} // namespace Aidge
+}  // namespace Aidge
 
 namespace {
 template <>
diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
index aa37b50320e9deaa94e83ff7cc3578745b560228..0825b85bbdcbcd9adf90d0dca6cbf0a292f4f94f 100644
--- a/include/aidge/operator/Clip.hpp
+++ b/include/aidge/operator/Clip.hpp
@@ -25,98 +25,152 @@
 
 namespace Aidge {
 
-enum class ClipAttr { Min, Max };
-
+/**
+ * @enum ClipAttr
+ * @brief Enum class defining the attributes for the Clip operator.
+ */
+enum class ClipAttr {
+    Min,  /**< Minimum value for clipping. */
+    Max   /**< Maximum value for clipping. */
+};
 
+/**
+ * @brief Description of the Clip operation to limit tensor values within a specified range.
+ *
+ * The Clip operator ensures tensor elements are within the range `[min, max]`.
+ * - Values less than `min` are set to `min`.
+ * - Values greater than `max` are set to `max`.
+ * 
+ * The input and output Tensors have the same dimensions.
+ *
+ * ### Attributes:
+ * - `Min`: Minimum value for clipping.
+ * - `Max`: Maximum value for clipping.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Clip_Op : public OperatorTensor,
     public Registrable<Clip_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Clip_Op&)>> {
 
 public:
+    /**
+     * @brief Type string identifying this operator.
+     */
     static const std::string Type;
+
 private:
     using Attributes_ = StaticAttributes<ClipAttr, float, float>;
-    template <ClipAttr e> using attr = typename Attributes_::template attr<e>;
+
+    template <ClipAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    /**
+     * @brief Deleted default constructor.
+     */
     Clip_Op() = delete;
-    Clip_Op(float min,float max) : 
-    OperatorTensor(Type, {InputCategory::Data,InputCategory::OptionalData,InputCategory::OptionalData}, 1),
-    mAttributes(std::make_shared<Attributes_>(attr<ClipAttr::Min>(min), attr<ClipAttr::Max>(max)))
-    {}
+
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Constructor for Clip operator.
+     * @param[in] min Minimum value for clipping.
+     * @param[in] max Maximum value for clipping.
+     */
+    Clip_Op(float min, float max)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
+          mAttributes(std::make_shared<Attributes_>(attr<ClipAttr::Min>(min), attr<ClipAttr::Max>(max))) {}
+
+    /**
+     * @brief Copy-constructor. Copies operator attributes and output tensors, but not input tensors.
+     * @param op Clip_Op instance to copy.
      */
     Clip_Op(const Clip_Op& op)
         : OperatorTensor(op),
           mAttributes(op.mAttributes)
-
     {
-        if (op.mImpl){
+        if (op.mImpl) {
             SET_IMPL_MACRO(Clip_Op, *this, op.backend());
-        }else{
+        } else {
             mImpl = nullptr;
         }
     }
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Clip_Op
+     * @brief Clone the operator using its copy constructor.
+     * @return A shared pointer to the cloned operator.
      */
-    std::shared_ptr<Operator> clone() const override 
-    {
+    std::shared_ptr<Operator> clone() const override {
         return std::make_shared<Clip_Op>(*this);
     }
+
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
-    
+
     /**
-     * @brief Setter to specify the backend to use
+     * @brief Setter to specify the backend to use.
      */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    /**
+     * @brief Access the attributes of the operator.
+     * @return A shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
     /**
-    * @brief Getter and Setter for min attribute value
-    * @return float& 
-    */
+     * @brief Getter for the minimum clipping value.
+     * @return Reference to the minimum value.
+     */
     inline float& min() const noexcept { return mAttributes->getAttr<ClipAttr::Min>(); }
+
     /**
-    * @brief Getter and Setter for max attribute value
-    * @return float& 
-    */
+     * @brief Getter for the maximum clipping value.
+     * @return Reference to the maximum value.
+     */
     inline float& max() const noexcept { return mAttributes->getAttr<ClipAttr::Max>(); }
 
     std::set<std::string> getAvailableBackends() const override;
 
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input","min_empty_tensor","max_empty_tensor"};
+    /**
+     * @brief Get the input tensor names for the Clip operator.
+     * @return A vector containing the input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
+        return { "data_input", "min_empty_tensor", "max_empty_tensor" };
     }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
+
+    /**
+     * @brief Get the output tensor names for the Clip operator.
+     * @return A vector containing the output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
+        return { "data_output" };
     }
 };
-    /**
-     * @brief The Clip operator is a tensor operator that performs a clipping operation on tensor elements.
-            This class allows limiting tensor values to a specified range, defined by the min 
-            and max parameters (Tensors of empty shapes). Values outside this range are replaced by the corresponding limit values
-            When ‘min’ is greater than ‘max’, the clip operator sets all the ‘input’ values to the value of ‘max’
-    * @param[in] name Name of the node
-    * @param[in] min Min clip value as attribute
-     * @param[in] max Max clip value as attribute
-    * @return std::shared_ptr<Node> 
-    */
-    std::shared_ptr<Aidge::Node> Clip(
-        const std::string &name = "",
-        float min = std::numeric_limits<float>::lowest(),
-        float max = std::numeric_limits<float>::max()
-        );
 
-}
+/**
+ * @brief Factory function to create a Clip node.
+ * @param[in] name Name of the node.
+ * @param[in] min Minimum clipping value (default: lowest float).
+ * @param[in] max Maximum clipping value (default: maximum float).
+ * @return A shared pointer to the created Clip node.
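+ *
+ * Usage sketch (illustrative only, not taken from the Aidge sources):
+ * @code
+ * // Clamp all input values to the range [0, 6].
+ * auto clipNode = Aidge::Clip("clip", 0.0f, 6.0f);
+ * @endcode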
+ */
+std::shared_ptr<Aidge::Node> Clip(
+    const std::string& name = "",
+    float min = std::numeric_limits<float>::lowest(),
+    float max = std::numeric_limits<float>::max()
+);
+
+} // namespace Aidge
+
 namespace {
+/**
+ * @brief Specialization of EnumStrings for ClipAttr.
+ */
 template <>
-const char* const EnumStrings<Aidge::ClipAttr>::data[]
-    = {"min", "max"};
+const char* const EnumStrings<Aidge::ClipAttr>::data[] = { "min", "max" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CLIP_H_ */
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 98835dd2a4b02e51b50636ee8606382a50ba7b89..83914b6730bde238d5e2e7b4391bd034c8f4d146 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -26,68 +26,170 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+
+/**
+ * @class Concat_OpImpl
+ * @brief Implementation of the Concat operator.
+ *
+ * Since the Concat operation is backend-agnostic, its implementation is located in aidge_core.
+ */
 class Concat_OpImpl : public OperatorImpl {
 public:
+    /**
+     * @brief Constructor for Concat_OpImpl.
+     * @param[in] op Operator instance.
+     * @param[in] backend Name of the backend.
+     */
     Concat_OpImpl(const Operator& op, const std::string& backend = "")
         : OperatorImpl(op, backend)
     {}
+
+    /**
+     * @brief Perform the forward pass of the Concat operator.
+     */
     void forward() override;
 };
 
-enum class ConcatAttr { Axis };
+enum class ConcatAttr {
+    /**
+     * @brief Axis along which to concatenate the input tensors.
+     *
+     * The specified axis determines the dimension along which the input
+     * tensors are joined.
+     */
+    Axis
+};
 
+/**
+ * @class Concat_Op
+ * @brief Implements the Concat operation to concatenate multiple tensors along a specified axis.
+ *
+ * The Concat operation combines multiple input tensors into a single tensor by concatenating them
+ * along a specified axis. The tensors must have the same shape except for the concatenating axis.
+ *
+ * ### Output Shape Calculation
+ * - Input shapes: [(N1, D1, D2, ..., DN), (N2, D1, D2, ..., DN), ...]
+ * - Axis: 0
+ * - Output shape: (N1 + N2, D1, D2, ..., DN)
+ *
+ * @example:
+ * - Input shapes: [(2, 4, 8), (3, 4, 8), (1, 4, 8)]
+ * - Axis: 0
+ * - Output shape: (6, 4, 8)
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Concat_Op : public OperatorTensor,
     public Registrable<Concat_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Concat_Op&)>> {
+
 public:
+    /**
+     * @brief Type identifier for the Concat operator.
+     */
     static const std::string Type;
 
 private:
     using Attributes_ = StaticAttributes<ConcatAttr, std::int32_t>;
+
     template <ConcatAttr e>
     using attr = typename Attributes_::template attr<e>;
+
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    /**
+     * @brief Default constructor is deleted to enforce explicit initialization.
+     */
     Concat_Op() = delete;
 
+    /**
+     * @brief Constructor to initialize the Concat operator.
+     * @param[in] nbIn Number of input tensors.
+     * @param[in] axis Axis along which concatenation is performed.
+     */
     Concat_Op(const IOIndex_t nbIn, const std::int32_t axis);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor. Copies the operator attributes and its output tensors,
+     * but not its input tensors (the new operator has no input associated).
+     * @param[in] op Operator to copy.
      */
     Concat_Op(const Concat_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Concat_Op
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Forward the dimensions of the operator's inputs and outputs.
+     * @param[in] allowDataDependency Allow data dependency during dimension propagation.
+     * @return True if dimensions were forwarded successfully.
+     */
     bool forwardDims(bool allowDataDependency = false) override final;
 
+    /**
+     * @brief Set the backend for the operator.
+     * @param[in] name Backend name.
+     * @param[in] device Device index (default: 0).
+     */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Get the set of available backends for the operator.
+     * @return A set of backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the Concat operator.
+     * @return A shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get or modify the axis along which tensors are concatenated.
+     * @return A reference to the axis attribute.
+     */
     inline std::int32_t& axis() const { return mAttributes->template getAttr<ConcatAttr::Axis>(); }
 
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input_0", "data_input_n"};
+    /**
+     * @brief Get the names of the input tensors.
+     * @return A vector of input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
+        return { "data_input_0", "data_input_n" };
     }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
+
+    /**
+     * @brief Get the names of the output tensors.
+     * @return A vector of output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
+        return { "data_output" };
     }
 };
 
+/**
+ * @brief Factory function to create a Concat node.
+ * @param[in] nbIn Number of input tensors.
+ * @param[in] axis Axis along which concatenation is performed (default: 0).
+ * @param[in] name (Optional) Name of the node.
+ * @return A shared pointer to the created node.
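+ *
+ * Usage sketch (illustrative only, not taken from the Aidge sources):
+ * @code
+ * // Concatenate three input tensors along axis 0.
+ * auto concatNode = Aidge::Concat(3, 0, "concat");
+ * @endcode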
+ */
 std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "");
-}
+
+} // namespace Aidge
 
 namespace {
-    template <>
-    const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
-        "axis"
-    };
+/**
+ * @brief Specialization of EnumStrings for ConcatAttr.
+ */
+template <>
+const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
+    "axis"
+};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONCAT_H_ */
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index cd1a57dd9ac52d2f5cdff3b5ed54c6dd2aeeed34..8984ebd08cb0b1ddfffc885f7d8c2e3df9b23da2 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -30,8 +30,45 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, KernelDims };
 
+/**
+ * @enum ConvAttr
+ * @brief Attributes used for the Convolution operation.
+ */
+enum class ConvAttr {
+    StrideDims,     ///< The stride dimensions.
+    DilationDims,   ///< The dilation dimensions.
+    KernelDims      ///< The kernel dimensions.
+};
+
+/**
+ * @class Conv_Op
+ * @brief Convolution operator for performing a multi-dimensional convolution.
+ * 
+ * The Conv_Op class implements a convolution operator for tensors with customizable 
+ * kernel dimensions, stride, and dilation values. The operator performs a convolution 
+ * operation on the input tensor and produces an output tensor.
+ * 
+ * ### Attributes:
+ * - `strideDims`: Stride for each dimension of the input.
+ * - `dilationDims`: Dilation for each dimension of the input.
+ * - `kernelDims`: Kernel size for each dimension.
+ *
+ * The output shape depends on the kernel size, stride, dilation and padding values:
+ *    Output Dim = floor((Input Dim + 2 * Padding - Dilation * (Kernel Dim - 1) - 1) / Stride) + 1
+ *
+ * @example: Basic 2D Convolution
+ *      - Input shape: (1, 3, 32, 32)
+ *      - Number of output channels (given by the weight tensor): 64
+ *      - Kernel dimensions: {3, 3} (2D kernel)
+ *      - Stride dimensions: {1, 1} (stride of 1 in both height and width)
+ *      - Dilation dimensions: {1, 1} (no dilation)
+ *      - Padding: None
+ *      - Output shape:
+ *         (1, 64, (32 + 2*0 - 1*(3-1) - 1)/1 + 1, (32 + 2*0 - 1*(3-1) - 1)/1 + 1) = (1, 64, 30, 30)
+ *
+ * @see OperatorTensor
+ * @see Registrable
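+ *
+ * Usage sketch (illustrative only; matches the constructor declared below):
+ * @code
+ * // 2D convolution operator with a 3x3 kernel, stride 1 and dilation 1.
+ * auto convOp = std::make_shared<Aidge::Conv_Op<2>>(
+ *     std::array<Aidge::DimSize_t, 2>{3, 3});
+ * @endcode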
+ */
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
                 public Registrable<Conv_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>> {
@@ -44,13 +81,21 @@ private:
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>>;
+
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
+
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
     Conv_Op() = delete;
 
+    /**
+     * @brief Constructor for the Conv_Op operator.
+     * @param[in] kernelDims The dimensions of the kernel.
+     * @param[in] strideDims The stride dimensions (default is an array of ones).
+     * @param[in] dilationDims The dilation dimensions (default is an array of ones).
+     */
     constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                       const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
@@ -62,43 +107,58 @@ public:
     {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op The Conv_Op to copy.
+     * @details This constructor copies the operator attributes and its output tensors, but not the input tensors.
      */
     Conv_Op(const Conv_Op<DIM>& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Conv_Op
+     * @brief Clone the operator using its copy constructor.
+     * @return A shared pointer to the cloned Conv_Op object.
      */
     std::shared_ptr<Operator> clone() const override {
         return std::make_shared<Conv_Op<DIM>>(*this);
     }
 
-    // Data operator[](const char* inputName) override final {
-    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? getInput(0) :
-    //         (strcmp(inputName, "weight") ? getInput(1) :
-    //         (strcmp(inputName, "bias") ? getInput(2) :
-    //         nullptr));
-    //     assert((in!=nullptr) && "No such parameter");
-    //     return *in;
-    // }
-
-    // std::shared_ptr<Conv_Op> clone() const override final {
-
-    // }
 
+    /**
+     * @brief Compute forward dimensions for the operator.
+     * @param allowDataDependency Flag to allow data dependency in dimension calculation.
+     * @return true if the dimensions are computed successfully.
+     */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
+    /**
+     * @brief Calculate the receptive field of the convolution operation.
+     * @param firstEltDims Coordinates of the first element of the output region of interest.
+     * @param outputDims Dimensions of the output region of interest.
+     * @param outputIdx Index of the output tensor.
+     * @return A vector of pairs, one per input, each holding the coordinates of the first
+     * element and the dimensions of the corresponding input receptive field.
+     */
     std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
     computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
                           const std::vector<DimSize_t>& outputDims,
                           const IOIndex_t outputIdx = 0) const override;
 
-
+    /**
+     * @brief Set the backend for the operator.
+     * @param name The name of the backend.
+     * @param device The device index (default is 0).
+     */
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Get the list of available backends for the operator.
+     * @return A set of available backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the number of input channels.
+     * @return The number of input channels.
+     * @throws std::runtime_error If the operator has no associated weight tensor.
+     */
     DimSize_t inChannels() const {
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of input channel imposed.");
@@ -106,6 +166,11 @@ public:
         return getInput(1)->template dims<DIM+2>()[1];
     }
 
+    /**
+     * @brief Get the number of output channels.
+     * @return The number of output channels.
+     * @throws std::runtime_error If the operator has no associated weight tensor.
+     */
     DimSize_t outChannels() const {
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of output channel imposed.");
@@ -113,43 +178,70 @@ public:
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
+    /**
+     * @brief Get the attributes of the operator.
+     * @return A shared pointer to the operator's attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the stride dimensions.
+     * @return The stride dimensions as a reference.
+     */
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvAttr::StrideDims>(); }
+
+    /**
+     * @brief Get the dilation dimensions.
+     * @return The dilation dimensions as a reference.
+     */
     inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvAttr::DilationDims>(); }
-    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvAttr::KernelDims>(); }
 
+    /**
+     * @brief Get the kernel dimensions.
+     * @return The kernel dimensions as a reference.
+     */
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvAttr::KernelDims>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
+
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
 };
 
-
 /**
- * @brief Perform a convolution on the input Tensor.
+ * @brief Create a Conv_Op operator for performing convolution.
  *
- * @tparam DIM Number of dimensions for the feature map.
- * @param inChannels Number of input channels.
- * @param outChannels Number of output channels.
- * @param kernelDims Dimensions of the kernel. Must be the same number of dimensions as the feature map.
- * @param name Name of the operator.
- * @param strideDims Dimensions of the stride attribute. Must be the same number of dimensions as the feature map.
- * @param dilationDims Dimensions of the dilation attribute. Must be the same number of dimensions as the feature map.
- * @return std::shared_ptr<Node> A Node containing the operator.
+ * This function constructs a Convolution operation by specifying the input and output channels,
+ * kernel dimensions, stride, and dilation dimensions.
+ *
+ * @tparam DIM The number of dimensions of the input tensor.
+ * @param[in] inChannels The number of input channels.
+ * @param[in] outChannels The number of output channels.
+ * @param[in] kernelDims The kernel dimensions.
+ * @param[in] name The name of the operator (optional).
+ * @param[in] strideDims The stride dimensions (optional).
+ * @param[in] dilationDims The dilation dimensions (optional).
+ * @param[in] noBias A flag indicating if no bias is used (default is false).
+ * @return A shared pointer to the created Node containing the Conv_Op.
  */
 template <std::array<DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Node> Conv(DimSize_t inChannels,
-                                  DimSize_t outChannels,
-                                  const std::array<DimSize_t, DIM> &kernelDims,
-                                  const std::string& name = "",
-                                  const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                                  bool noBias = false);
-
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+                           DimSize_t outChannels,
+                           const std::array<DimSize_t, DIM> &kernelDims,
+                           const std::string& name = "",
+                           const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                           const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+                           bool noBias = false);
+
+/**
+ * @brief Helper function for Conv with C-style arrays.
+ *
+ * This helper function allows automatic template deduction of the number of dimensions (DIM)
+ * based on the kernel dimensions provided.
+ */
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> Conv(
     DimSize_t inChannels,
@@ -162,6 +254,7 @@ inline std::shared_ptr<Node> Conv(
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
     return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
 }
+
 }  // namespace Aidge
 
 extern template class Aidge::Conv_Op<1>;
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index f0a55a299094add58bd3938e9cca9bbb48e21da8..03e821041981b5d5bc6ca972c5923751588a75eb 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -29,62 +29,120 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims };
+enum class ConvDepthWiseAttr {
+    StrideDims,   ///< The stride dimensions for the convolution.
+    DilationDims, ///< The dilation dimensions for the convolution.
+    KernelDims    ///< The kernel dimensions for the convolution.
+};
 
+/**
+ * @class ConvDepthWise_Op
+ * @brief Depthwise Convolution operator for performing a multi-dimensional depthwise convolution.
+ * 
+ * The ConvDepthWise_Op class implements a depthwise convolution operator for tensors with customizable 
+ * kernel dimensions, stride, and dilation values. It performs a depthwise convolution operation on the 
+ * input tensor and produces an output tensor.
+ * 
+ * ### Attributes:
+ * - strideDims: Stride for each dimension of the input.
+ * - dilationDims: Dilation for each dimension of the input.
+ * - kernelDims: Kernel size for each dimension.
+ *
+ * The output shape depends on the kernel size, stride, dilation and padding values:
+ *    Output Dim = floor((Input Dim + 2 × Padding − Dilation × (Kernel Dim − 1) − 1) / Stride) + 1
+ *
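+ * A minimal construction sketch (illustrative values), using the ConvDepthWise()
+ * factory declared below in this header:
+ * @code
+ * #include "aidge/operator/ConvDepthWise.hpp"
+ * // 32 channels, 3x3 kernel (stride and dilation default to 1)
+ * std::shared_ptr<Aidge::Node> dwConv1 = Aidge::ConvDepthWise(32, {3, 3}, "dwconv1");
+ * @endcode
+ *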
+ * @see OperatorTensor
+ * @see Registrable
+ */
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
                 public Registrable<ConvDepthWise_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>> {
+
 public:
     static const std::string Type;
 
 private:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
-                                             std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, DIM>>;
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>>;
+
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
+
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-
     ConvDepthWise_Op() = delete;
 
-    constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
-                               const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+    /**
+     * @brief Constructor for the ConvDepthWise_Op operator.
+     * @param[in] kernelDims The dimensions of the kernel.
+     * @param[in] strideDims The stride dimensions (default is an array of ones).
+     * @param[in] dilationDims The dilation dimensions (default is an array of ones).
+     */
+    constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernelDims,
+                               const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                               const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
           mAttributes(std::make_shared<Attributes_>(
-            attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
-            attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)))
+            attr<ConvDepthWiseAttr::StrideDims>(strideDims),
+            attr<ConvDepthWiseAttr::DilationDims>(dilationDims),
+            attr<ConvDepthWiseAttr::KernelDims>(kernelDims)))
     {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param[in] op The ConvDepthWise_Op to copy.
+     * @details This constructor copies the operator attributes and its output tensors, but not the input tensors.
      */
     ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::ConvDepthWise_Op
+     * @brief Clone the operator using its copy constructor.
+     * @return A shared pointer to the cloned ConvDepthWise_Op object.
      */
     std::shared_ptr<Operator> clone() const override {
         return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
     }
 
-
+    /**
+     * @brief Compute forward dimensions for the operator.
+     * @param[in] allowDataDependency Flag to allow data dependency in dimension calculation.
+     * @return true if the dimensions are computed successfully.
+     */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
+    /**
+     * @brief Calculate the receptive field of the depthwise convolution operation.
+     * @param firstEltDims Coordinates of the first element of the output region of interest.
+     * @param outputDims Dimensions of the output region of interest.
+     * @param outputIdx Index of the output tensor.
+     * @return A vector of pairs, one per input, each holding the coordinates of the first
+     * element and the dimensions of the corresponding input receptive field.
+     */
     std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
     computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
                           const std::vector<DimSize_t>& outputDims,
                           const IOIndex_t outputIdx = 0) const override;
 
+    /**
+     * @brief Set the backend for the operator.
+     * @param[in] name The name of the backend.
+     * @param[in] device The device index (default is 0).
+     */
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Get the list of available backends for the operator.
+     * @return A set of available backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the number of channels.
+     * @return The number of channels handled by the depthwise convolution (deduced from the weight tensor).
+     * @throws std::runtime_error If the operator has no associated weight tensor.
+     */
     DimSize_t nbChannels() const {
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of channel imposed.");
@@ -92,28 +150,76 @@ public:
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
+    /**
+     * @brief Get the operator's attributes.
+     * @return A shared pointer to the operator's attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the stride dimensions.
+     * @return The stride dimensions as a reference.
+     */
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>(); }
+
+    /**
+     * @brief Get the dilation dimensions.
+     * @return The dilation dimensions as a reference.
+     */
     inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>(); }
+
+    /**
+     * @brief Get the kernel dimensions.
+     * @return The kernel dimensions as a reference.
+     */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>(); }
 
+    /**
+     * @brief Get the names of the inputs.
+     * @return A vector containing the input names.
+     */
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
+
+    /**
+     * @brief Get the names of the outputs.
+     * @return A vector containing the output names.
+     */
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Create a ConvDepthWise_Op operator for performing depthwise convolution.
+ *
+ * This function constructs a Depthwise Convolution operation by specifying the input channels,
+ * kernel dimensions, stride, and dilation dimensions.
+ *
+ * @tparam DIM The number of dimensions of the input tensor.
+ * @param[in] nbChannels The number of input channels.
+ * @param[in] kernelDims The kernel dimensions.
+ * @param[in] name The name of the operator (optional).
+ * @param[in] strideDims The stride dimensions (optional).
+ * @param[in] dilationDims The dilation dimensions (optional).
+ * @param[in] noBias A flag indicating if no bias is used (default is false).
+ * @return A shared pointer to the created Node containing the ConvDepthWise_Op.
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
-                                           const std::array<DimSize_t, DIM> &kernelDims,
-                                           const std::string& name = "",
-                                           const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                           const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                                           bool noBias=false);
-
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+                                    const std::array<DimSize_t, DIM> &kernelDims,
+                                    const std::string& name = "",
+                                    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                                    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+                                    bool noBias = false);
+
+/**
+ * @brief Helper function for ConvDepthWise with C-style arrays.
+ *
+ * This helper function allows automatic template deduction of the number of dimensions (DIM)
+ * based on the kernel dimensions provided.
+ */
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> ConvDepthWise(
     const DimSize_t nbChannels,
@@ -121,10 +227,11 @@ inline std::shared_ptr<Node> ConvDepthWise(
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-    bool noBias=false) {
+    bool noBias = false) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
     return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
 }
+
 }  // namespace Aidge
 
 extern template class Aidge::ConvDepthWise_Op<1>;
@@ -132,8 +239,11 @@ extern template class Aidge::ConvDepthWise_Op<2>;
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"stride_dims", "dilation_dims",
-                                                          "kernel_dims"};
+const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {
+    "stride_dims",
+    "dilation_dims",
+    "kernel_dims"
+};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
index 856cd0e85d1abb47d3c163115bef6cbfb59bb66f..769dad767e90dfe23f67867d72f08f0787d1cdf8 100644
--- a/include/aidge/operator/DepthToSpace.hpp
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -1,4 +1,4 @@
-/********************************************************************************
+/***************************************************************************
  * Copyright (c) 2023 CEA-List
  *
  * This program and the accompanying materials are made available under the
@@ -7,7 +7,7 @@
  *
  * SPDX-License-Identifier: EPL-2.0
  *
- ********************************************************************************/
+ ***************************************************************************/
 
 #ifndef AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
 #define AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
@@ -23,21 +23,67 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+
+/**
+ * @class DepthToSpace_OpImpl
+ * @brief Implementation of the DepthToSpace operation for rearranging data from depth into spatial dimensions.
+ */
 class DepthToSpace_OpImpl : public OperatorImpl {
 public:
-    DepthToSpace_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    /**
+     * @brief Constructor for the DepthToSpace_OpImpl.
+     * @param op Operator containing attributes for DepthToSpace.
+     * @param backend The backend used for computation.
+     */
+    DepthToSpace_OpImpl(const Operator& op, const std::string& backend = "") : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Perform the forward computation for DepthToSpace.
+     */
     void forward() override;
 };
 
-enum class DepthToSpaceAttr { BlockSize, Mode };
-
+/**
+ * @enum DepthToSpaceAttr
+ * @brief Attributes for the DepthToSpace operation.
+ */
+enum class DepthToSpaceAttr {
+    BlockSize, /**< The block size for rearranging depth to spatial dimensions. */
+    Mode       /**< The mode for depth-to-space transformation. */
+};
 
+/**
+ * @class DepthToSpace_Op
+ * @brief Represents the DepthToSpace operation to rearrange data from depth to spatial dimensions.
+ *
+ * This operator splits the depth dimension of a tensor into blocks and distributes them across spatial dimensions.
+ *
+ * ### Output Shape Calculation
+ * Given an input tensor with shape (N, C, H, W):
+ * - Block size (B): The depth dimension (C) must be divisible by B^2.
+ * - Output shape: (N, C / (B^2), H * B, W * B).
+ *
+ * ### Modes
+ * - `DCR`: depth-column-row ordering.
+ * - `CRD`: column-row-depth ordering.
+ *
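+ * A minimal construction sketch (illustrative values), using the DepthToSpace()
+ * factory declared below in this header:
+ * @code
+ * #include "aidge/operator/DepthToSpace.hpp"
+ * // Block size 2: an input of shape (N, 4*C, H, W) becomes (N, C, 2*H, 2*W)
+ * std::shared_ptr<Aidge::Node> d2s1 = Aidge::DepthToSpace(2, Aidge::DepthToSpace_Op::Mode::DCR, "d2s1");
+ * @endcode
+ *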
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class DepthToSpace_Op : public OperatorTensor,
                 public Registrable<DepthToSpace_Op,
                     std::string,
                     std::function<std::shared_ptr<OperatorImpl>(const DepthToSpace_Op &)>> {
 public:
+    /**
+     * @brief The type identifier for the DepthToSpace operator.
+     */
     static const std::string Type;
+
+    /**
+     * @enum Mode
+     * @brief Defines the modes for depth-to-space transformation.
+     */
     enum class Mode { DCR, CRD };
 
 private:
@@ -47,15 +93,20 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-
     DepthToSpace_Op() = delete;
 
+    /**
+     * @brief Constructor for DepthToSpace_Op.
+     * @param blockSize Size of the blocks to split depth into spatial dimensions.
+     * @param mode Depth-to-space transformation mode (DCR or CRD).
+     */
     DepthToSpace_Op(const std::uint32_t blockSize, const Mode mode = Mode::CRD);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
-     * but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor.
      * @param op Operator to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     DepthToSpace_Op(const DepthToSpace_Op& op);
 
@@ -67,21 +118,61 @@ public:
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
+    /**
+     * @brief Set the backend for this operator.
+     * @param name Backend name.
+     * @param device Device index for the backend.
+     */
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+
+    /**
+     * @brief Get the available backends for this operator.
+     * @return A set of strings representing available backends.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     * @return Shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the block size for the DepthToSpace operation.
+     * @return Block size.
+     */
     inline std::uint32_t& blockSize() const { return mAttributes->template getAttr<DepthToSpaceAttr::BlockSize>(); }
+
+    /**
+     * @brief Get the mode of the DepthToSpace operation.
+     * @return Depth-to-space mode.
+     */
     inline Mode& mode() const { return mAttributes->template getAttr<DepthToSpaceAttr::Mode>(); }
 
+    /**
+     * @brief Get the input tensor names.
+     * @return A vector of input tensor names.
+     */
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
+
+    /**
+     * @brief Get the output tensor names.
+     * @return A vector of output tensor names.
+     */
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Create a DepthToSpace node.
+ * @param blockSize Size of the blocks to split depth into spatial dimensions.
+ * @param mode Depth-to-space transformation mode (DCR or CRD).
+ * @param name Name of the operator.
+ * @return Shared pointer to the created DepthToSpace node.
+ */
 std::shared_ptr<Node> DepthToSpace(const std::uint32_t blockSize,
                                     const DepthToSpace_Op::Mode mode = DepthToSpace_Op::Mode::CRD,
                                     const std::string& name = "");
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 5ed9e789deab71b107a6071ab11452c3cf73fa9d..5dec988145874af9312d3ac8133c850c490bf0e7 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -24,6 +24,28 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of an element-wise Divide operation on input Tensors,
+ * supporting NumPy broadcasting.
+ *
+ * For each pair of elements x and y from the input Tensors, the function 
+ * is defined as:
+ * `f(x, y) = x / y`
+ *
+ * Broadcasting adjusts shapes of the input Tensors to make them compatible:
+ * - Tensors are aligned from the rightmost dimensions.
+ * - Two dimensions are compatible if they are equal, if one of them is 1, or if one of them is missing.
+ *
+ * The output Tensor shape is determined by taking the maximum size along 
+ * each dimension of the input Tensors after broadcasting.
+ * 
+ * Examples:
+ * 1. Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+ * 2. Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+ * 
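+ * A sketch of the broadcasting rule described above (illustrative helper, not part
+ * of the Aidge API):
+ * @code
+ * #include <algorithm>
+ * #include <cstddef>
+ * #include <vector>
+ *
+ * // Right-align the two shapes and take the larger extent of each pair of dims;
+ * // a missing dimension or a dimension of 1 broadcasts to the other one.
+ * std::vector<std::size_t> broadcastShape(std::vector<std::size_t> a,
+ *                                         std::vector<std::size_t> b) {
+ *     if (a.size() < b.size()) { std::swap(a, b); }
+ *     std::vector<std::size_t> out(a);
+ *     const std::size_t offset = a.size() - b.size();
+ *     for (std::size_t i = 0; i < b.size(); ++i) {
+ *         out[offset + i] = std::max(a[offset + i], b[i]); // assumes dims are compatible
+ *     }
+ *     return out;
+ * }
+ * // broadcastShape({3, 4, 2}, {2})       -> {3, 4, 2}
+ * // broadcastShape({1, 5, 3}, {2, 1, 3}) -> {2, 5, 3}
+ * @endcode
+ *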
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Div_Op : public OperatorTensor,
     public Registrable<Div_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Div_Op&)>> {
 
@@ -33,8 +55,10 @@ public:
     Div_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Div_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Div_Op(const Div_Op& op)
         : OperatorTensor(op)
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index 88a4bfd29e7d27e7eaea00d967e0ba631354d253..c17e8075fbb48bb14f075d323b24b4539a69cf9a 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -24,16 +24,34 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of an element-wise Error Function (Erf) operation
+ * on an input Tensor.
+ *
+ * For each element x in the input, the function is defined as:
+ * `f(x) = erf(x)`, where erf(x) is the Gauss error function, given by:
+ * `erf(x) = (2 / sqrt(pi)) * integral from 0 to x of exp(-t^2) dt`
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Erf_Op : public OperatorTensor,
-    public Registrable<Erf_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Erf_Op&)>> {
+    public Registrable<Erf_Op,  // <Op, backend, implementation creation function>
+        std::string,
+        std::function<std::shared_ptr<OperatorImpl>(const Erf_Op&)>>
+{
 public:
     static const std::string Type;
 
     Erf_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Erf_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Erf_Op(const Erf_Op& op);
 
diff --git a/include/aidge/operator/Expand.hpp b/include/aidge/operator/Expand.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..da87cfb562efe650b434f73e486430da72ad0780
--- /dev/null
+++ b/include/aidge/operator/Expand.hpp
@@ -0,0 +1,119 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_EXPAND_H_
+#define AIDGE_CORE_OPERATOR_EXPAND_H_
+
+#include <cstdlib>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/**
+ * @brief Operator that broadcasts an input tensor to a larger provided shape.
+ * @details This operator takes an input tensor and expands (broadcasts) it to
+ * a target shape while following ONNX broadcasting semantics. Broadcasting
+ * allows a tensor to be repeated along dimensions to match the desired output
+ * dimensions.
+ *
+ * input#0 : Tensor to be broadcasted
+ * input#1 : Expand shape
+ *
+ *  @example
+ *    input#0 = [[[[2, 1, 3]]]] => dims = [1, 1, 3, 1]
+ *    input#1 = [2, 1, 1]       => converted to [1,2,1,1]
+ *    output = [[[[2, 1, 3], [2, 1, 3]]]] => dims = [1, 2, 3, 1]
+ *
+ * @see https://onnx.ai/onnx/repo-docs/Broadcasting.html for detailed ONNX
+ * broadcasting rules
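+ *
+ * A minimal construction sketch (illustrative), using the Expand() factory declared
+ * below in this header; the target shape is provided through input #1 at graph level:
+ * @code
+ * #include "aidge/operator/Expand.hpp"
+ * std::shared_ptr<Aidge::Node> expand1 = Aidge::Expand("expand1");
+ * // input #0: tensor to broadcast, input #1: target shape
+ * @endcode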
+ */
+class Expand_Op
+    : public OperatorTensor,
+      public Registrable<
+          Expand_Op,
+          std::string,
+          std::function<std::shared_ptr<OperatorImpl>(const Expand_Op &)>> {
+  public:
+    static const std::string Type;
+
+    /**
+     * @brief Default constructor.
+     * @details See the Expand_Op class documentation above for the broadcasting
+     * semantics and an input/output example.
+     */
+    Expand_Op()
+        : OperatorTensor(Type,
+                         {InputCategory::Data, InputCategory::Param},
+                         1) {}
+
+    Expand_Op(const Expand_Op &op);
+
+    std::shared_ptr<Operator> clone() const override;
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string &name,
+                    DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data", "shape"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"output"};
+    }
+};
+
+/**
+ * @brief Create an Expand node that broadcasts its first input (the data tensor)
+ * to the shape given by its second input, following ONNX broadcasting semantics.
+ *
+ * @param[in] name (Optional) Name of the node.
+ * @return A shared pointer to the created Node.
+ *
+ * @see Expand_Op for the detailed broadcasting behaviour and an example.
+ * @see https://onnx.ai/onnx/repo-docs/Broadcasting.html
+ */
+std::shared_ptr<Node> Expand(const std::string &name = "");
+
+} // namespace Aidge
+
+#endif // AIDGE_CORE_OPERATOR_EXPAND_H_
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 592ba4e2b796ba1aede24a737e296ddf1e285499..393e640d60934059a9c216a9335a7018388fe9da 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -24,20 +24,58 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
+
+/**
+ * @brief Description of a Fully Connected (FC) operation on an input Tensor.
+ *
+ * The Fully Connected (FC) operation applies a linear transformation to the input Tensor
+ * by multiplying it with a weight matrix and optionally adding a bias vector: 
+ * - If `bias` is included:
+ *   f(x) = x × weights^T + bias
+ * - If `bias` is omitted:
+ *   f(x) = x × weights^T
+ *
+ * Attributes:
+ * - `inChannels`: The number of input features (or channels). Determined from the dimensions
+ *   of the weight Tensor. This represents the size of the input vector.
+ * - `outChannels`: The number of output features (or channels). Determined from the dimensions
+ *   of the weight Tensor. This represents the size of the output vector.
+ * - `noBias`: A boolean value indicating whether the bias vector is omitted in the operation.
+ *
+ * @example:
+ * - Input Tensor: Shape (64, 128)  // Batch size of 64, 128 input features
+ * - Weights: Shape (256, 128)      // 128 input channels, 256 output channels
+ * - Bias: Shape (256)              // Bias vector (optional)
+ * - Output Tensor: Shape (64, 256) // Batch size of 64, 256 output features
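+ *
+ * A minimal construction sketch (illustrative values), using the FC() factory declared
+ * below in this header:
+ * @code
+ * #include "aidge/operator/FC.hpp"
+ * // 128 input features, 256 output features, with bias
+ * std::shared_ptr<Aidge::Node> fc1 = Aidge::FC(128, 256, false, "fc1");
+ * @endcode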
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
                                  std::function<std::shared_ptr<OperatorImpl>(const FC_Op &)>> {
 public:
+    /**
+     * @brief Static type identifier for the FC operator.
+     */
     static const std::string Type;
 
+    /**
+     * @brief Default constructor for the FC operator.
+     *
+     * Initializes the operator with a type identifier and input categories.
+     */
     FC_Op()
     : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1)
     {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy constructor.
+     *
+     * Copies the attributes and output tensor(s) of the operator, but does not
+     * copy input tensors. The new operator instance has no associated inputs.
+     * 
+     * @param op The `FC_Op` instance to copy.
      */
     FC_Op(const FC_Op& op)
         : OperatorTensor(op)
@@ -50,18 +88,56 @@ public:
     }
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::FC_Op
+     * @brief Clones the operator using its copy constructor.
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override final;
 
+    /**
+     * @brief Associates an input tensor with the operator.
+     *
+     * This method links the specified input tensor to the operator's input slot.
+     *
+     * @param[in] inputIdx Index of the input to associate.
+     * @param[in] data Shared pointer to the input tensor data.
+     */
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
+    /**
+     * @brief Computes the output dimensions during the forward pass.
+     *
+     * Calculates the dimensions of the output tensor based on the input dimensions,
+     * weights, and optional parameters.
+     *
+     * @param[in] allowDataDependency Flag to allow data-dependent dimension computation.
+     * @return Boolean indicating the success of the dimension computation.
+     */
     bool forwardDims(bool allowDataDependency = false) override final;
 
+    /**
+     * @brief Sets the backend for the operator.
+     *
+     * Configures the backend used for computation.
+     *
+     * @param[in] name Name of the backend.
+     * @param[in] device Index of the target device (default is 0).
+     */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Retrieves the available backends for the operator.
+     * @return A set of backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Gets the number of input channels for the FC operator.
+     *
+     * Retrieves the number of input channels from the weight tensor.
+     *
+     * @return Number of input channels.
+     * @throws std::runtime_error if no weight tensor is associated with the operator.
+     */
     DimSize_t inChannels() const {
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Fully Connected (FC) operator has no weight Tensor associated so no specific number of input channel imposed.");
@@ -69,6 +145,14 @@ public:
         return getInput(1)->template dims<2>()[1];
     }
 
+    /**
+     * @brief Gets the number of output channels for the FC operator.
+     *
+     * Retrieves the number of output channels from the weight tensor.
+     *
+     * @return Number of output channels.
+     * @throws std::runtime_error if no weight tensor is associated with the operator.
+     */
     DimSize_t outChannels() const {
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Fully Connected (FC) operator has no weight Tensor associated so no specific number of output channel imposed.");
@@ -76,14 +160,34 @@ public:
         return getInput(1)->template dims<2>()[0];
     }
 
+    /**
+     * @brief Retrieves the input tensor names for the FC operator.
+     * @return A vector of input tensor names: `{"data_input", "weight", "bias"}`.
+     */
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "weight", "bias"};
     }
+
+    /**
+     * @brief Retrieves the output tensor names for the FC operator.
+     * @return A vector of output tensor names: `{"data_output"}`.
+     */
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Creates a Fully Connected operation node.
+ *
+ * Constructs an FC operator node with the specified input and output channels.
+ *
+ * @param[in] inChannels Number of input channels.
+ * @param[in] outChannels Number of output channels.
+ * @param[in] noBias Flag indicating whether to use a bias term (default is `false`).
+ * @param[in] name Name of the operator (optional).
+ * @return A shared pointer to the Node containing the FC operator.
+ */
 std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "");
 
 } // namespace Aidge
diff --git a/include/aidge/operator/Flatten.hpp b/include/aidge/operator/Flatten.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a7f5c6435f17eefceae6c82655599096229c3b3c
--- /dev/null
+++ b/include/aidge/operator/Flatten.hpp
@@ -0,0 +1,179 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_FLATTEN_H_
+#define AIDGE_CORE_OPERATOR_FLATTEN_H_
+
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/**
+ * @brief Implementation of the Flatten operation.
+ *
+ * Since the Flatten operation is backend-agnostic, its implementation is provided directly in aidge_core.
+ */
+class Flatten_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for Flatten operator implementation.
+     * @param op Operator instance.
+     * @param backend Optional. Name of the backend.
+     */
+    Flatten_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Compute the forward pass of the Flatten operation.
+     */
+    void forward() override;
+};
+
+/**
+ * @enum FlattenAttr
+ * @brief Defines attributes for the Flatten operator.
+ */
+enum class FlattenAttr {
+    /**
+     * @brief The axis at which to flatten the input tensor.
+     */
+    Axis
+};
+
+/**
+ * @brief Description of the Flatten operation that reshapes a tensor into a 2D matrix.
+ *
+ * The Flatten operation rearranges a tensor such that the specified axis is
+ * the start of the second dimension, flattening all dimensions before it
+ * into the first dimension and all dimensions after it into the second.
+ *
+ * @example:
+ * - Input shape: (2, 3, 4, 5)
+ * - Axis: 2
+ * - Output shape: (6, 20)
+ *
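+ * A minimal construction sketch (illustrative values), using the Flatten() factory
+ * declared below in this header:
+ * @code
+ * #include "aidge/operator/Flatten.hpp"
+ * // Flatten everything after the batch dimension (axis = 1)
+ * std::shared_ptr<Aidge::Node> flatten1 = Aidge::Flatten(1, "flatten1");
+ * @endcode
+ *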
+ * @see OperatorTensor
+ * @see Registrable
+ */
+class Flatten_Op : public OperatorTensor,
+                   public Registrable<Flatten_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Flatten_Op&)>> {
+
+public:
+    /**
+     * @brief The type identifier for the Flatten operator.
+     */
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<FlattenAttr, std::int64_t>;
+    template <FlattenAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    /**
+     * @brief Deleted default constructor.
+     */
+    Flatten_Op() = delete;
+
+    /**
+     * @brief Constructor for the Flatten operator.
+     * @param axis The axis at which to flatten the tensor.
+     */
+    Flatten_Op(std::int64_t axis = 1);
+
+    /**
+     * @brief Copy-constructor. Copies the operator attributes and its output tensor(s), but not its input tensors.
+     * @param[in] op Operator to copy.
+     */
+    Flatten_Op(const Flatten_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @return A shared pointer to the cloned operator.
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    /**
+     * @brief Compute the forward dimensions.
+     * @param[in] allowDataDependency Whether to allow data dependency in computation.
+     * @return True if successful, false otherwise.
+     */
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    /**
+     * @brief Set the backend for the operator.
+     * @param[in] name The name of the backend.
+     * @param[in] device Optional. The device index.
+     */
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    /**
+     * @brief Get the set of available backends for the operator.
+     * @return A set of backend names.
+     */
+    std::set<std::string> getAvailableBackends() const override;
+
+    /**
+     * @brief Get the attributes of the operator.
+     * @return A shared pointer to the attributes.
+     */
+    std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the axis at which the tensor is flattened.
+     * @return A reference to the axis attribute.
+     */
+    inline std::int64_t& axis() const { return mAttributes->template getAttr<FlattenAttr::Axis>(); }
+
+    /**
+     * @brief Get the names of the input tensors.
+     * @return A vector of input tensor names.
+     */
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+
+    /**
+     * @brief Get the names of the output tensors.
+     * @return A vector of output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+/**
+ * @brief Create a Flatten node.
+ *
+ * Initializes a Flatten node that reshapes a tensor into a 2D matrix based
+ * on the specified axis.
+ *
+ * @param[in] axis The axis at which to flatten the tensor.
+ * @param[in] name Optional. The name of the node.
+ * @return A shared pointer to a Node representing the Flatten operation.
+ */
+std::shared_ptr<Node> Flatten(std::int64_t axis = 1,
+                            const std::string &name = "");
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::FlattenAttr>::data[] = { "axis" };
+}
+
+#endif /* AIDGE_CORE_OPERATOR_FLATTEN_H_ */
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index 517d63adc59ed848c53852697ab9f8511dfc2a2a..3b5b9449d82c1dac315573e2820b1dda7c6fb7bf 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -30,7 +30,69 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class FoldAttr { OutputDims, StrideDims, DilationDims, KernelDims };
+
+/**
+ * @enum FoldAttr
+ * @brief Enumeration for the attributes of the Fold operation.
+ */
+enum class FoldAttr {
+    /**
+     * @brief Output dimensions of the fold operation.
+     *
+     * Specifies the shape of the output tensor after applying the fold operation.
+     */
+    OutputDims,
+
+    /**
+     * @brief Stride dimensions used during the fold operation.
+     *
+     * Strides are the step sizes in each dimension during the fold operation.
+     */
+    StrideDims,
+
+    /**
+     * @brief Dilation dimensions for the fold operation.
+     *
+     * Dilation is the spacing between elements in the kernel during the fold.
+     */
+    DilationDims,
+
+    /**
+     * @brief Kernel dimensions used for the fold operation.
+     *
+     * Specifies the size of the kernel or filter applied during the fold.
+     */
+    KernelDims
+};
+
+/**
+ * @class Fold_Op
+ * @brief Implements the Fold operation, which recombines sliding kernel-sized blocks into a full tensor.
+ *
+ * The Fold operation is the counterpart of Unfold: it gathers kernel-sized sliding
+ * blocks and places them back into a tensor with the specified output dimensions.
+ * It is typically used, together with Unfold, to express convolution-like operations
+ * as matrix multiplications.
+ *
+ * The output shape depends on the `OutputDims`, `StrideDims`, `DilationDims`, and `KernelDims` attributes:
+ *     Let the input tensor be of shape `(batch_size, channels, height, width)` (assuming 2D spatial dims).
+ *     - The number of output channels is the input channels divided by the product of kernel dimensions:
+ *      output channels = input channels / (product of kernelDims)
+ *     - The output height and width are calculated as:
+ *       output height (out_h) = floor((input height - kernel height) / stride height) + 1
+ *       output width (out_w) = floor((input width - kernel width) / stride width) + 1
+ *      - The exact output shape will depend on these calculations for each spatial dimension (height, width) and the number of output channels.
+ *         
+ * @example:
+ *  - Input shape: (1, 144, 32, 32)  // Batch size: 1, Channels: 144 (= 16 × 3 × 3), Height: 32, Width: 32
+ *  - Kernel dimensions: (3, 3)  // 3x3 kernel
+ *  - Stride: (1, 1)  // Stride of 1
+ *  - Dilation: (1, 1)  // No dilation
+ *  - Output shape: (1, 16, 30, 30)  // 144 / (3×3) = 16 channels; spatial dims follow the formula above
+ *
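+ * A minimal construction sketch (illustrative values), using the Fold() factory
+ * declared below in this header:
+ * @code
+ * #include "aidge/operator/Fold.hpp"
+ * // Fold 3x3 blocks back into a 32x32 spatial output
+ * std::shared_ptr<Aidge::Node> fold1 = Aidge::Fold({32, 32}, {3, 3}, "fold1");
+ * @endcode
+ *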
+ * @see OperatorTensor
+ * @see Registrable
+ */
 
 template <DimIdx_t DIM>
 class Fold_Op : public OperatorTensor,
@@ -45,16 +107,24 @@ private:
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>>;
+
     template <FoldAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
     Fold_Op() = delete;
 
-    constexpr Fold_Op(const std::array<DimSize_t, DIM> &outputDims,
-                      const std::array<DimSize_t, DIM> &kernelDims,
-                      const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+    /**
+     * @brief Constructor for Fold_Op operator.
+     * @param[in] outputDims Output dimensions of the folded tensor.
+     * @param[in] kernelDims Kernel size for the fold operation.
+     * @param[in] strideDims Stride dimensions for the fold operation.
+     * @param[in] dilationDims Dilation dimensions for the fold operation.
+     */
+    constexpr Fold_Op(const std::array<DimSize_t, DIM>& outputDims,
+                      const std::array<DimSize_t, DIM>& kernelDims,
+                      const std::array<DimSize_t, DIM>& strideDims = create_array<DimSize_t,DIM>(1),
+                      const std::array<DimSize_t, DIM>& dilationDims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<FoldAttr::OutputDims>(outputDims),
@@ -63,61 +133,123 @@ public:
             attr<FoldAttr::KernelDims>(kernelDims))) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
-     * input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Fold_Op operator to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
-    Fold_Op(const Fold_Op<DIM> &op);
+    Fold_Op(const Fold_Op<DIM>& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Fold_Op
+     * @brief Clone the operator using its copy constructor.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Compute forward dimensions for the operator.
+     * @param allowDataDependency Flag to allow data dependency in dimension calculation.
+     * @return true if the dimensions are computed successfully.
+     */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    /**
+     * @brief Set the backend for the operator.
+     * @param name Name of the backend.
+     * @param device Index of the device.
+     */
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Get the available backends for this operator.
+     * @return A set of available backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     * @return Shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the output dimensions for the fold operation.
+     * @return Output dimensions of the folded tensor.
+     */
     inline std::array<DimSize_t, DIM>& outputDims() const { return mAttributes->template getAttr<FoldAttr::OutputDims>(); }
+
+    /**
+     * @brief Get the stride dimensions for the fold operation.
+     * @return Stride dimensions.
+     */
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<FoldAttr::StrideDims>(); }
+
+    /**
+     * @brief Get the dilation dimensions for the fold operation.
+     * @return Dilation dimensions.
+     */
     inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<FoldAttr::DilationDims>(); }
+
+    /**
+     * @brief Get the kernel dimensions for the fold operation.
+     * @return Kernel dimensions.
+     */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<FoldAttr::KernelDims>(); }
 
+    /**
+     * @brief Get the input names for the Fold operation.
+     * @return List of input names.
+     */
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
+
+    /**
+     * @brief Get the output names for the Fold operation.
+     * @return List of output names.
+     */
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Create a Fold operation node.
+ * 
+ * This function creates a Fold operation node that applies a fold transformation
+ * to a tensor based on the specified attributes.
+ *
+ * @param[in] outputDims Output dimensions for the fold operation.
+ * @param[in] kernelDims Kernel dimensions.
+ * @param[in] name Name of the operator.
+ * @param[in] strideDims Stride dimensions for the fold operation.
+ * @param[in] dilationDims Dilation dimensions for the fold operation.
+ * @return A shared pointer to the created Node.
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
-std::shared_ptr<Node> Fold(const std::array<DimSize_t, DIM> &outputDims,
-                                  const std::array<DimSize_t, DIM> &kernelDims,
-                                  const std::string& name = "",
-                                  const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
+std::shared_ptr<Node> Fold(const std::array<DimSize_t, DIM>& outputDims,
+                           const std::array<DimSize_t, DIM>& kernelDims,
+                           const std::string& name = "",
+                           const std::array<DimSize_t, DIM>& strideDims = create_array<DimSize_t,DIM>(1),
+                           const std::array<DimSize_t, DIM>& dilationDims = create_array<DimSize_t,DIM>(1));
 
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> Fold(
     DimSize_t const (&outputDims)[DIM],
     DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
-    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Fold, not supported");
+    const std::array<DimSize_t, DIM>& strideDims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM>& dilationDims = create_array<DimSize_t,DIM>(1)) {
+    static_assert(DIM <= MaxDim, "Too many kernel dimensions required by Fold, not supported");
     return Fold(to_array(outputDims), to_array(kernelDims), name, strideDims, dilationDims);
 }
-}  // namespace Aidge
 
 extern template class Aidge::Fold_Op<2>;
 
+}  // namespace Aidge
+
 namespace {
 template <>
-const char *const EnumStrings<Aidge::FoldAttr>::data[] = {
+const char* const EnumStrings<Aidge::FoldAttr>::data[] = {
     "output_dims",
     "stride_dims",
     "dilation_dims",
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 80dcdd67883529c710b142b6b547d4b02e85cd44..dc3e1a814248a2b742d813f679eea22c1954e1a9 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -25,70 +25,184 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+
+/**
+ * @class Gather_OpImpl
+ * @brief Backend implementation for the Gather operation.
+ *
+ * The Gather operation selects elements from the input tensor based on specified indices
+ * and an axis, producing a tensor with a gathered shape.
+ */
 class Gather_OpImpl : public OperatorImpl {
 public:
-    Gather_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Gather_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Execute the Gather operation.
+     */
     void forward() override;
 };
 
-enum class GatherAttr { Axis, Indices, GatheredShape };
+enum class GatherAttr {
+    /**
+     * @brief Axis along which to gather elements.
+     */
+    Axis,
+
+    /**
+     * @brief Indices specifying which elements to gather.
+     */
+    Indices,
+
+    /**
+     * @brief Shape of the resulting gathered tensor.
+     */
+    GatheredShape
+};
 
+/**
+ * @brief Description for the Gather operation on an input tensor.
+ *
+ * The Gather operation extracts elements from an input tensor along a specified axis
+ * using a set of indices, resulting in a gathered tensor with a specified shape.
+ *
+ * The shape of the output tensor depends on the gathered shape attribute, the axis,
+ * and the indices.
+ *
+ * @see OperatorTensor
+ * @see Registrable
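+ *
+ * Worked example of the gathering rule (plain C++ sketch, not the Aidge backend code):
+ * @code{.cpp}
+ * // axis = 0, indices = {0, 2}: keep rows 0 and 2 of a (3, 2) input -> (2, 2) output.
+ * std::vector<std::vector<int>> input = {{1, 2}, {3, 4}, {5, 6}};
+ * std::vector<std::int64_t> indices = {0, 2};
+ * std::vector<std::vector<int>> output;
+ * for (auto i : indices) { output.push_back(input[i]); }
+ * // output == {{1, 2}, {5, 6}}
+ * @endcode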
+ */
 class Gather_Op : public OperatorTensor,
-                public Registrable<Gather_Op,
-                                   std::string,
-                                   std::function<std::shared_ptr<OperatorImpl>(const Gather_Op&)>> {
+                  public Registrable<Gather_Op,
+                                     std::string,
+                                     std::function<std::shared_ptr<OperatorImpl>(const Gather_Op&)>> {
 public:
     static const std::string Type;
 
     using Attributes_ = StaticAttributes<GatherAttr,
-                                            std::int8_t,
-                                            std::vector<int64_t>,
-                                            std::vector<DimSize_t>>;
+                                          std::int8_t,
+                                          std::vector<int64_t>,
+                                          std::vector<DimSize_t>>;
+
 private:
     template <GatherAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-
+    /**
+     * @brief Default constructor is deleted.
+     */
     Gather_Op() = delete;
 
+    /**
+     * @brief Constructor for the Gather operator.
+     * @param[in] axis The axis along which to gather elements.
+     * @param[in] indices A vector specifying which elements to gather.
+     * @param[in] gatheredShape The shape of the resulting gathered tensor.
+     */
     Gather_Op(std::int8_t axis,
               const std::vector<int64_t>& indices,
               const std::vector<DimSize_t>& gatheredShape);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy constructor.
+     * @param[in] op The Gather operator to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not its input tensors.
+     *          The new operator has no associated input tensors.
      */
     Gather_Op(const Gather_Op& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Gather_Op
+     * @brief Clone the operator using its copy constructor.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Check if dimensions have been forwarded.
+     * @return True if dimensions have been forwarded, false otherwise.
+     */
     bool dimsForwarded() const override final;
+
+    /**
+     * @brief Forward the dimensions.
+     * @param[in] allowDataDependency Whether to allow data dependency during dimension forwarding.
+     * @return True if successful, false otherwise.
+     */
     bool forwardDims(bool allowDataDependency = false) override final;
 
+    /**
+     * @brief Set the backend for the operator.
+     * @param name The name of the backend.
+     * @param device Optional. The device index.
+     */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Get the available backends for the operator.
+     * @return A set of available backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     * @return A shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::int8_t& axis() const { return mAttributes -> getAttr<GatherAttr::Axis>(); }
-    inline std::vector<int64_t>& indices() const { return mAttributes -> getAttr<GatherAttr::Indices>(); }
-    inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes -> getAttr<GatherAttr::GatheredShape>(); }
 
-    static const std::vector<std::string> getInputsName(){
+    /**
+     * @brief Get the axis along which elements are gathered.
+     * @return The axis attribute.
+     */
+    inline std::int8_t& axis() const { return mAttributes->getAttr<GatherAttr::Axis>(); }
+
+    /**
+     * @brief Get the indices specifying which elements to gather.
+     * @return The indices attribute.
+     */
+    inline std::vector<int64_t>& indices() const { return mAttributes->getAttr<GatherAttr::Indices>(); }
+
+    /**
+     * @brief Get the shape of the gathered tensor.
+     * @return The gathered shape attribute.
+     */
+    inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes->getAttr<GatherAttr::GatheredShape>(); }
+
+    /**
+     * @brief Get the input tensor names.
+     * @return A vector of input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
         return {"data_input", "indices"};
     }
-    static const std::vector<std::string> getOutputsName(){
+
+    /**
+     * @brief Get the output tensor names.
+     * @return A vector of output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
-std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int64_t>& indices = {}, const std::vector<DimSize_t>& gatheredShape = {}, const std::string& name = "");
+/**
+ * @brief Create a Gather node.
+ *
+ * Initializes a Gather node that extracts elements from an input tensor
+ * along a specified axis using a set of indices.
+ *
+ * @param[in] axis The axis along which to gather elements. Default is 0.
+ * @param[in] indices A vector specifying which elements to gather. Default is an empty vector.
+ * @param[in] gatheredShape The shape of the resulting gathered tensor. Default is an empty vector.
+ * @param[in] name Optional. The name of the node.
+ * @return A shared pointer to a Node representing the Gather operation.
+ */
+std::shared_ptr<Node> Gather(std::int8_t axis = 0,
+                             const std::vector<int64_t>& indices = {},
+                             const std::vector<DimSize_t>& gatheredShape = {},
+                             const std::string& name = "");
+
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 327f4f7c3d43b5194f23cfaed8674ee0b47bd6a2..023f197e3f862eedc3a846b4451c22676d8ed073 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -22,91 +22,185 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-
 namespace Aidge {
+
+/**
+ * @class GenericOperator_Op
+ * @brief Represents a generic operator used as a fallback when the requested operator is not natively supported by Aidge.
+ *
+ * This class enables the import and simulation of unknown or custom operators. 
+ * It provides a flexible structure for defining operators dynamically with attributes, inputs, and outputs.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class GenericOperator_Op
     : public OperatorTensor,
       public Registrable<GenericOperator_Op, std::array<std::string, 2>, std::function<std::shared_ptr<OperatorImpl>(const GenericOperator_Op &)>> {
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
+    /// Function to compute output dimensions based on input dimensions.
     ComputeDimsFunc mForwardDims;
 
+    /// Dynamic attributes associated with the operator.
     const std::shared_ptr<DynamicAttributes> mAttributes;
 
 public:
+
+    /**
+     * @brief Constructs a generic operator with input categories and output count.
+     * @param[in] type Type of the operator (e.g., the operator name or key).
+     * @param[in] inputsCategory Vector specifying the categories of inputs.
+     * @param[in] nbOut Number of output tensors produced by the operator.
+     */
     GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut);
 
+    /**
+     * @brief Constructs a generic operator with specified data, parameters, and output counts.
+     * @param[in] type Type of the operator.
+     * @param[in] nbData Number of data inputs.
+     * @param[in] nbParam Number of parameter inputs.
+     * @param[in] nbOut Number of output tensors.
+     */
     GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param[in] op Operator to copy.
+     * @details Copies the operator attributes and its output tensor(s), but does not associate input tensors with the new operator.
      */
     GenericOperator_Op(const GenericOperator_Op& op);
 
+    /**
+     * @brief Destructor.
+     */
     ~GenericOperator_Op() noexcept;
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::GenericOperator_Op
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
 public:
+
+    /**
+     * @brief Computes the output dimensions of the operator.
+     * @param[in] allowDataDependency If true, allows computations based on data dependencies.
+     * @return True if dimensions are successfully computed, otherwise false.
+     */
     bool forwardDims(bool allowDataDependency = false) override final;
 
+    /**
+     * @brief Sets the backend for the operator.
+     * @param[in] name Backend name.
+     * @param[in] device Device index for the backend.
+     */
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override { return std::set<std::string>(); };
+
+    /**
+     * @brief Retrieves the available backends for the operator.
+     * @return A set of available backend names.
+     */
+    std::set<std::string> getAvailableBackends() const override { return std::set<std::string>(); }
+
+    /**
+     * @brief Retrieves the attributes of the operator.
+     * @return A shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
+    /**
+     * @brief Retrieves a specific attribute by name.
+     * @tparam T Expected type of the attribute.
+     * @param[in] name Name of the attribute.
+     * @return Reference to the attribute value.
+     */
     template <class T>
-    inline T& getAttr(const std::string& name)
-    { return mAttributes -> template getAttr<T>(name); }
+    inline T& getAttr(const std::string& name) { return mAttributes->template getAttr<T>(name); }
+
+    /**
+     * @brief Retrieves a specific attribute by name (const version).
+     * @tparam T Expected type of the attribute.
+     * @param[in] name Name of the attribute.
+     * @return Value of the attribute.
+     */
     template <class T>
-    inline T getAttr(const std::string& name) const
-    { return mAttributes -> template getAttr<T>(name); }
+    inline T getAttr(const std::string& name) const { return mAttributes->template getAttr<T>(name); }
 
-    ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
-    ///\tparam T expected Attribute type
-    ///\param name Attribute name
-    ///\param value Attribute value
+    /**
+     * @brief Adds a new attribute or asserts if it already exists.
+     * @tparam T Expected type of the attribute.
+     * @param[in] name Name of the attribute.
+     * @param[in] value Value to set for the attribute.
+     */
     template <class T>
-    inline void addAttr(const std::string& name, const T& value) const
-    { mAttributes -> template addAttr<T>(name, value); }
+    inline void addAttr(const std::string& name, const T& value) const { mAttributes->template addAttr<T>(name, value); }
+
+    /**
+     * @brief Sets multiple attributes at once.
+     * @param[in] attrs A map of attribute names to values.
+     */
+    inline void setAttrs(const std::map<std::string, future_std::any>& attrs) {
+        *mAttributes = attrs;
+    }
 
     // Helper functions that can be used with setForwardDims():
+    /**
+     * @brief Identity function for forward dimension computation.
+     */
     static const ComputeDimsFunc Identity;
+
+    /**
+     * @brief Function to copy input dimensions to output dimensions.
+     * @param[in] inputIdx Index of the input whose dimensions are to be copied.
+     * @param[in] nbOutputs Number of outputs to generate.
+     * @return A ComputeDimsFunc instance.
+     */
     static const ComputeDimsFunc InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs);
-    inline void setForwardDims(ComputeDimsFunc func) {
-        mForwardDims = func;
-    }
+
+    /**
+     * @brief Sets the forward dimension computation function.
+     * @param[in] func The function to compute output dimensions.
+     */
+    inline void setForwardDims(ComputeDimsFunc func) { mForwardDims = func; }
 };
 
 /**
- * @brief Fictive custom operator not associated with any implementation.
- * Allows to import unknown operators and simulate new ones.
- * @param type Type of the fictive operator.
- * @param inputCategory List inputs with their category
- * @param nbOut Number of output data.
- * @param name (optional) name of the Operator.
- * @return std::shared_ptr<Node> Node associated with the Generic Operator.
+ * @brief Creates a generic operator with input categories and output count.
+ * @param[in] type Type of the operator.
+ * @param[in] inputCategory List of input categories.
+ * @param[in] nbOut Number of output tensors.
+ * @param[in] name Optional name for the operator.
+ * @return A shared pointer to the created operator node.
  */
 std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector<InputCategory>& inputCategory, IOIndex_t nbOut,
                                              const std::string& name = "");
 
 /**
- * @brief Fictive custom operator not associated with any implementation.
- * Allows to import unknown operators and simulate new ones.
- * @param type Type of the fictive operator.
- * @param nbData Number of input data.
- * @param nbParam Number of parameters.
- * @param nbOut Number of output data.
- * @param name (optional) name of the Operator.
- * @return std::shared_ptr<Node> Node associated with the Generic Operator.
+ * @brief Creates a generic operator with specified input and output counts.
+ * @param[in] type Type of the operator.
+ * @param[in] nbData Number of input data tensors.
+ * @param[in] nbParam Number of parameter tensors.
+ * @param[in] nbOut Number of output tensors.
+ * @param[in] name Optional name for the operator.
+ * @return A shared pointer to the created operator node.
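+ *
+ * Minimal usage sketch (the operator type and node name are arbitrary examples):
+ * @code{.cpp}
+ * // Placeholder node for an unsupported "MyCustomOp" with one data input,
+ * // one parameter input and one output.
+ * auto node = Aidge::GenericOperator("MyCustomOp", 1, 1, 1, "my_custom_op");
+ * @endcode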
  */
 std::shared_ptr<Node> GenericOperator(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
                                              const std::string& name = "");
+
+/**
+ * @brief Creates a generic operator based on another operator.
+ * @param[in] type Type of the generic operator.
+ * @param[in] op Existing operator to derive from.
+ * @param[in] name Optional name for the operator.
+ * @return A shared pointer to the created operator node.
+ */
+std::shared_ptr<Node> GenericOperator(const std::string& type,
+                                      std::shared_ptr<OperatorTensor> op,
+                                      const std::string& name = "");
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ */
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index ef440e8c697ff221aa8df42e459de7ac697e8a0c..0cfc16ccafcb79b041d3b964fd51c756fec63060 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -25,9 +25,25 @@
 namespace Aidge {
 
 /**
- * @brief Description for the tensor data structure.
- * @details Sets the properties of the tensor without actually containing any
- * data. Contains a pointer to an actual contiguous implementation of data.
+ * @brief Description of a Global Average Pooling operation on an input Tensor.
+ *
+ * Global Average Pooling computes the average value of each feature map (channel) 
+ * in the input Tensor across all spatial dimensions (height and width).
+ *
+ * For each channel in the input Tensor, the function is defined as:
+ * `f(x) = (sum(x)) / N`, where `x` represents the spatial elements in that channel,
+ * and `N` is the total number of spatial elements (height * width) in the input.
+ *
+ * The output Tensor retains the batch and channel dimensions, but the spatial dimensions 
+ * are reduced to 1. Specifically:
+ * - If the input has shape `(N, C, H, W)`, where:
+ *   - `N` is the batch size
+ *   - `C` is the number of channels
+ *   - `H` and `W` are the spatial dimensions
+ * - Then the output shape will be `(N, C, 1, 1)`.
+ *
+ * @see OperatorTensor
+ * @see Registrable
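+ *
+ * Worked example of the per-channel reduction (standalone sketch, not the backend code):
+ * @code{.cpp}
+ * // One channel with 2x2 spatial elements: the average of {1, 2, 3, 4} is 2.5,
+ * // so an input of shape (1, 1, 2, 2) becomes an output of shape (1, 1, 1, 1).
+ * std::vector<float> channel = {1.f, 2.f, 3.f, 4.f};
+ * float avg = std::accumulate(channel.begin(), channel.end(), 0.f) / channel.size();  // 2.5f
+ * @endcode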
  */
 class GlobalAveragePooling_Op
     : public OperatorTensor,
@@ -35,25 +51,35 @@ class GlobalAveragePooling_Op
                          std::function<std::shared_ptr<OperatorImpl>(
                              const GlobalAveragePooling_Op &)>> {
 public:
-  static const std::string Type;
+	static const std::string Type;
 
-  GlobalAveragePooling_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+	GlobalAveragePooling_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
-  GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op);
+	/**
+	 * @brief Copy-constructor.
+	 * @param op GlobalAveragePooling_Op to copy.
+	 * @details Copies the operator attributes and its output tensor(s), but not
+	 * its input tensors. The new operator has no associated input.
+	 */
+	GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op);
 
-    std::shared_ptr<Operator> clone() const override;
+	/**
+	 * @brief Clone the operator using its copy-constructor.
+	 * @see Operator::GlobalAveragePooling_Op
+	 */
+	std::shared_ptr<Operator> clone() const override;
 
-    bool forwardDims(bool allowDataDependency = false) override final;
+	bool forwardDims(bool allowDataDependency = false) override final;
 
-  void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
+	void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+	std::set<std::string> getAvailableBackends() const override;
 
-  static const std::vector<std::string> getInputsName() {
-    return {"data_input"};
-  }
-  static const std::vector<std::string> getOutputsName() {
-    return {"data_output"};
-  }
+	static const std::vector<std::string> getInputsName() {
+		return {"data_input"};
+	}
+	static const std::vector<std::string> getOutputsName() {
+		return {"data_output"};
+	}
 };
 
 std::shared_ptr<Node> GlobalAveragePooling(const std::string &name = "");
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index dc2b2059e75711572e0f7fa94cc6ccb9f58c970b..999f7bba1c7399ee6e813e7fa4297e27b7b7ae58 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -19,21 +19,62 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/logger/EnumString.hpp"
 
 namespace Aidge {
+enum class GridSampleAttr {
+	Mode,			// Specifies the interpolation mode (e.g., Linear, Nearest, Cubic).
+	PaddingMode,	// Specifies how to handle out-of-boundary grid values.
+	AlignCorners	// Determines whether grid values are normalized to align with the image corners.
+};
 
-enum class GridSampleAttr { Mode, PaddingMode, AlignCorners };
-
+/**
+ * @class GridSample_Op
+ * @brief Implements the GridSample operation for sampling input data using a grid.
+ *
+ * GridSample is used to perform spatial transformation on an input tensor by
+ * sampling its values based on a grid tensor. It supports different
+ * interpolation and padding modes for flexibility.
+ *
+ * ### Attributes:
+ * - **Mode**: Determines the interpolation technique to use.
+ *   - `Linear`: Bilinear interpolation.
+ *   - `Nearest`: Nearest neighbor interpolation.
+ *   - `Cubic`: Bicubic interpolation.
+ * - **PaddingMode**: Specifies how to handle grid values outside the input boundaries.
+ *   - `Zeros`: Pads with zeros.
+ *   - `Border`: Extends the edge values.
+ *   - `Reflection`: Mirrors the input tensor.
+ * - **AlignCorners**: Aligns grid points with the corners of the input image.
+ *   - `true`: Aligns grid values to the image corners.
+ *   - `false`: Normalizes grid values in the range [-1, 1].
+ *
+ * ### Example Usage:
+ * - Input shape: (N, C, H, W) // Batch size N, Channels C, Height H, Width W.
+ * - Grid shape: (N, H_out, W_out, 2) // Grid specifying output coordinates.
+ * - Output shape: (N, C, H_out, W_out).
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class GridSample_Op : public OperatorTensor,
 	public Registrable<GridSample_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const GridSample_Op&)>> {
 
 public:
 	static const std::string Type;
 
+	/**
+	 * @enum Mode
+	 * @brief Interpolation modes available for GridSample.
+	 */
 	enum class Mode { Linear, Nearest, Cubic };
+
+	/**
+	 * @enum PaddingMode
+	 * @brief Padding modes available for GridSample.
+	 */
 	enum class PaddingMode { Zeros, Border, Reflection };
 
 private:
@@ -43,36 +84,103 @@ private:
 	const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-
+	/**
+	 * @brief Constructs a GridSample operator with specified attributes.
+	 * @param[in] mode Interpolation mode (e.g., Linear, Nearest, Cubic).
+	 * @param[in] paddingMode Padding mode (e.g., Zeros, Border, Reflection).
+	 * @param[in] alignCorners Whether to align grid values with input corners.
+	 */
 	GridSample_Op(Mode mode = Mode::Linear,
-			PaddingMode paddingMode = PaddingMode::Zeros,
-			bool alignCorners = false);
+				PaddingMode paddingMode = PaddingMode::Zeros,
+				bool alignCorners = false);
 
+	/**
+	 * @brief Copy-constructor. Copies the operator attributes and its output tensor(s).
+	 * @param[in] other Operator to copy.
+	 */
 	GridSample_Op(const GridSample_Op& other);
-	~GridSample_Op() noexcept;
 
-public:
+	/**
+	 * @brief Destructor.
+	 */
+	~GridSample_Op() noexcept;
 
+	/**
+	 * @brief Clone the operator using its copy-constructor.
+	 * @return A shared pointer to the cloned operator.
+	 */
 	std::shared_ptr<Operator> clone() const override;
 
+	/**
+	 * @brief Computes and forwards the output dimensions from the input dimensions.
+	 * @param allowDataDependencies Whether data-dependent dimension computation is allowed.
+	 * @return True if dimensions are forwarded successfully.
+	 */
 	bool forwardDims(bool /*allowDataDependencies*/ = false) override final;
 
+	/**
+	 * @brief Sets the backend for execution.
+	 * @param name Backend name.
+	 * @param device Device index.
+	 */
 	void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+	/**
+	 * @brief Retrieves the available backends.
+	 * @return A set of available backend names.
+	 */
     std::set<std::string> getAvailableBackends() const override;
 
+	/**
+	 * @brief Retrieves the operator's attributes.
+	 * @return Shared pointer to the attributes.
+	 */
 	inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+	/**
+	 * @brief Gets the interpolation mode.
+	 * @return Current interpolation mode.
+	 */
 	inline Mode mode() const { return mAttributes->template getAttr<GridSampleAttr::Mode>(); }
+
+	/**
+	 * @brief Gets the padding mode.
+	 * @return Current padding mode.
+	 */
 	inline PaddingMode paddingMode() const { return mAttributes->template getAttr<GridSampleAttr::PaddingMode>(); }
+
+	/**
+	 * @brief Checks if grid values align with input corners.
+	 * @return True if corners are aligned.
+	 */
 	inline bool alignCorners() const { return mAttributes->template getAttr<GridSampleAttr::AlignCorners>(); }
 
+	/**
+	 * @brief Retrieves the input names for GridSample.
+	 * @return Vector of input tensor names.
+	 */
 	static const std::vector<std::string> getInputsName() {
 		return {"data_input", "grid_field"};
 	}
+
+	/**
+	 * @brief Retrieves the output names for GridSample.
+	 * @return Vector of output tensor names.
+	 */
 	static const std::vector<std::string> getOutputsName() {
 		return {"data_output"};
 	}
 };
 
+/**
+ * @brief Creates a GridSample operator node.
+ *
+ * @param[in] mode Interpolation mode.
+ * @param[in] paddingMode Padding mode.
+ * @param[in] alignCorners Whether to align grid points with corners.
+ * @param[in] name Name of the operator.
+ * @return Shared pointer to the GridSample node.
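+ *
+ * Minimal usage sketch (parameters not shown are assumed to keep their defaults):
+ * @code{.cpp}
+ * auto node = Aidge::GridSample(Aidge::GridSample_Op::Mode::Linear,
+ *                               Aidge::GridSample_Op::PaddingMode::Border);
+ * @endcode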
+ */
 std::shared_ptr<Node> GridSample(
                         typename GridSample_Op::Mode mode = GridSample_Op::Mode::Linear,
                         typename GridSample_Op::PaddingMode paddingMode = GridSample_Op::PaddingMode::Zeros,
@@ -81,7 +189,6 @@ std::shared_ptr<Node> GridSample(
 
 } // namespace Aidge
 
-
 namespace {
 template <>
 const char* const EnumStrings<Aidge::GridSampleAttr>::data[] = {
diff --git a/include/aidge/operator/Heaviside.hpp b/include/aidge/operator/Heaviside.hpp
index 65c7f341a1c5aa12eeada2165e459cc1c933e327..94eaa400a52c8e3e9ffcf40c4896aee423fb6ed9 100644
--- a/include/aidge/operator/Heaviside.hpp
+++ b/include/aidge/operator/Heaviside.hpp
@@ -25,61 +25,102 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class HeavisideAttr { 
+enum class HeavisideAttr {
     /**
      * @brief The value used in the output tensor when the input is 0.
      */
-    Value 
+    Value
 };
 
+/**
+ * @class Heaviside_Op
+ * @brief Implements the Heaviside step function operation.
+ *
+ * The Heaviside step function computes element-wise output based on the input values:
+ *
+ * if x < 0: f(x) = 0
+ * if x = 0: f(x) = Value
+ * if x > 0: f(x) = 1
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
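+ *
+ * Element-wise rule as a standalone sketch (illustration only, not the backend code):
+ * @code{.cpp}
+ * float heaviside(float x, float value) {
+ *     return (x < 0.f) ? 0.f : (x > 0.f) ? 1.f : value;
+ * }
+ * // heaviside(-2.f, 0.5f) == 0.f, heaviside(0.f, 0.5f) == 0.5f, heaviside(3.f, 0.5f) == 1.f
+ * @endcode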
+ */
 class Heaviside_Op
     : public OperatorTensor,
-      public Registrable<
-          Heaviside_Op,
-          std::string,
-          std::function<std::shared_ptr<OperatorImpl>(const Heaviside_Op &)>> {
-  private:
+      public Registrable<Heaviside_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Heaviside_Op &)>> {
+
+private:
     using Attributes_ = StaticAttributes<HeavisideAttr, float>;
+
     template <HeavisideAttr e>
     using attr = typename Attributes_::template attr<e>;
+
     const std::shared_ptr<Attributes_> mAttributes;
 
-  public:
+public:
     static const std::string Type;
 
-    /*
-     * Compute the Heaviside step function for each element of the first input.
-     * Heaviside step function is defined as :
-     *
-     * \f[
-     * heaviside(input, values) = \begin{cases}
-     *     0      & \text{if input }  < 0 \\
-     *     values & \text{if input }  = 0 \\
-     *     1      & \text{if input }  > 0
-     * \end{cases}
-     * \f]
-     * */
+    /**
+     * @brief Constructor for the Heaviside operator.
+     * @param[in] value The value to use in the output tensor when the input is 0.
+     */
     Heaviside_Op(float value);
 
+    /**
+     * @brief Copy constructor.
+     * @param op The Heaviside_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not its input tensors.
+     *          The new operator has no associated input.
+     */
     Heaviside_Op(const Heaviside_Op &op);
 
+    /**
+     * @brief Clone the operator using its copy constructor.
+     */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Set the backend for this operator.
+     * @param name The backend name.
+     * @param device The device index (default is 0).
+     */
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
+    /**
+     * @brief Get the set of available backends.
+     * @return A set of backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the input names required by this operator.
+     * @return A vector containing the input names.
+     */
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "data_values"};
     }
 
+    /**
+     * @brief Get the output names generated by this operator.
+     * @return A vector containing the output names.
+     */
     static const std::vector<std::string> getOutputsName() {
         return {"output"};
     }
 
+    /**
+     * @brief Get the attributes of the operator.
+     */
     inline std::shared_ptr<Attributes> attributes() const override {
         return mAttributes;
     }
+
+    /**
+     * @brief Get the value used for inputs equal to zero.
+     * @return A reference to the value attribute.
+     */
     inline float &value() const {
         return mAttributes->template getAttr<HeavisideAttr::Value>();
     }
@@ -88,18 +129,22 @@ class Heaviside_Op
 /**
  * @brief Create a Heaviside node.
  *
- * Initializes a Heaviside node that computes the Heaviside step function for each element 
+ * Initializes a Heaviside node that computes the Heaviside step function for each element
  * of the input tensor, using the specified value for inputs equal to zero.
  *
  * @param value The value used in the output tensor when the input is 0.
  * @param name  Optional. The name of the node.
- * 
+ *
  * @return A shared pointer to a Node representing the Heaviside operation.
  */
 std::shared_ptr<Node> Heaviside(float value, const std::string &name = "");
+
 } // namespace Aidge
 
 namespace {
+/**
+ * @brief Define string representations for Heaviside attributes.
+ */
 template <>
 const char *const EnumStrings<Aidge::HeavisideAttr>::data[] = {"value"};
 }
diff --git a/include/aidge/operator/ILayerNorm.hpp b/include/aidge/operator/ILayerNorm.hpp
index f660cc64eb65770cc6cf5335d9c070b155d03c0f..dc90b762260cb4498d15aa18024684b98b162175 100644
--- a/include/aidge/operator/ILayerNorm.hpp
+++ b/include/aidge/operator/ILayerNorm.hpp
@@ -27,55 +27,98 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of the Layer Normalization operation on an input tensor.
+ *
+ * Layer Normalization normalizes the inputs across the features of a given tensor:
+ * `f(x) = weight * (x - mean(x)) / sqrt(var(x) + epsilon) + bias`,
+ * where `mean(x)` and `var(x)` are computed over the feature dimension, and
+ * `weight` and `bias` are the learnable per-feature parameters provided as inputs.
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
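+ *
+ * Standalone float sketch of the normalization math (illustration only; the epsilon
+ * value is an arbitrary example, not the operator's actual setting):
+ * @code{.cpp}
+ * std::vector<float> x = {1.f, 2.f, 3.f, 4.f};
+ * const float eps = 1e-5f;
+ * float mean = std::accumulate(x.begin(), x.end(), 0.f) / x.size();
+ * float var = 0.f;
+ * for (float v : x) { var += (v - mean) * (v - mean); }
+ * var /= x.size();
+ * for (float& v : x) { v = (v - mean) / std::sqrt(var + eps); }  // then scale by weight, add bias
+ * @endcode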
+ */
 class ILayerNorm_Op : public OperatorTensor,
     public Registrable<ILayerNorm_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ILayerNorm_Op&)>> {
+
 public:
     static const std::string Type;
 
+    /**
+     * @brief Default constructor.
+     */
     ILayerNorm_Op()
     : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::Param}, 1)
     {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param[in] op ILayerNorm_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not its input tensors. 
+     * The new operator has no associated input.
      */
     ILayerNorm_Op(const ILayerNorm_Op& op)
         : OperatorTensor(op)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(ILayerNorm_Op, *this, op.backend());
-        }else{
+        } else {
             mImpl = nullptr;
         }
     }
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::ILayerNorm_Op
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override {
         return std::make_shared<ILayerNorm_Op>(*this);
     }
 
+    /**
+     * @brief Associates an input tensor with the operator.
+     * @param inputIdx The index of the input.
+     * @param data The input data to associate.
+     */
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
+    /**
+     * @brief Propagates the dimensions from the input tensors to the output tensors.
+     * @param allowDataDependency Whether to allow dimension propagation based on data.
+     * @return True if propagation is successful, false otherwise.
+     */
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Gets the names of the input tensors.
+     * @return A vector containing the names of input tensors.
+     */
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
+
+    /**
+     * @brief Gets the names of the output tensors.
+     * @return A vector containing the names of output tensors.
+     */
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Constructs a Layer Normalization operation node.
+ * @param[in] name Optional name for the operator.
+ * @return A shared pointer to a Node containing the ILayerNorm_Op operator.
+ */
 inline std::shared_ptr<Node> ILayerNorm(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<ILayerNorm_Op>(), name);
 }
-}
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_ILAYERNORM_H_ */
diff --git a/include/aidge/operator/LRN.hpp b/include/aidge/operator/LRN.hpp
index 669564d4a8fb0da2fac4f67e164fc08653131472..369da5f97edf1927ec3f255b7ace35520212e031 100644
--- a/include/aidge/operator/LRN.hpp
+++ b/include/aidge/operator/LRN.hpp
@@ -24,14 +24,49 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class LRNAttr { Alpha, Beta, Bias, Size };
+enum class LRNAttr {
+    Alpha,  ///< Scale factor for normalization.
+    Beta,   ///< Exponent applied to the normalization term.
+    Bias,   ///< Constant bias added to the normalization term.
+    Size    ///< Number of channels to normalize over.
+};
 
+/**
+ * @brief Description of a Local Response Normalization (LRN) operation on an input Tensor.
+ *
+ * LRN is a normalization technique that applies across channels in a local region 
+ * to enhance generalization and promote competition between neurons. It is commonly 
+ * used in Convolutional Neural Networks (CNNs).
+ *
+ * For each element x in the input Tensor, the function is defined as:
+ * `f(x) = x / (bias + alpha * sum(x_i^2))^beta`, where:
+ * - `x` is the current element being normalized.
+ * - The summation `sum(x_i^2)` is taken over a local region of `size` channels 
+ *   surrounding `x` (both before and after the current channel, if available).
+ * - `bias`, `alpha`, and `beta` are scalar hyperparameters controlling the 
+ *   normalization behavior.
+ *
+ * Parameters:
+ * - `size`: The number of channels in the local region over which normalization is performed.
+ * - `bias`: A scalar added to the normalization term to ensure numerical stability.
+ * - `alpha`: A scaling factor for the squared sum of elements in the local region.
+ * - `beta`: The exponent applied to the normalization term.
+ *
+ * The input and output Tensors have the same shape. If the input Tensor has shape `(N, C, H, W)`, 
+ * the output Tensor will also have shape `(N, C, H, W)`.
+ *
+ * @see OperatorTensor
+ * @see Registrable
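+ *
+ * Standalone sketch of the formula for a single element (illustration only,
+ * using the notation above):
+ * @code{.cpp}
+ * // x: value being normalized; sqSum: sum of squares over the local channel window.
+ * float lrn(float x, float sqSum, float alpha, float beta, float bias) {
+ *     return x / std::pow(bias + alpha * sqSum, beta);
+ * }
+ * @endcode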
+ */
 class LRN_Op : public OperatorTensor,
                 public Registrable<LRN_Op,
                                    std::string,
                                    std::function<std::shared_ptr<OperatorImpl>(const LRN_Op&)>> {
 
 public:
+    /**
+     * @brief Static type string for the LRN operator.
+     */
     static const std::string Type;
 
 private:
@@ -40,43 +75,106 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    /**
+     * @brief Deleted default constructor.
+     */
     LRN_Op() = delete;
 
+    /**
+     * @brief Constructor for the LRN operator.
+     * @param[in] size Normalization size (span across adjacent channels).
+     */
     LRN_Op(std::int32_t size);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param[in] op LRN_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     LRN_Op(const LRN_Op& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::LRN_Op
+     * @brief Clone the operator using its copy constructor.
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Set the backend for the LRN operator.
+     * @param[in] name Name of the backend.
+     * @param[in] device Device index (optional).
+     */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    /**
+     * @brief Get the available backends for the LRN operator.
+     * @return A set of backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     * @return A shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline float& alpha() const noexcept { return mAttributes -> getAttr<LRNAttr::Alpha>(); }
-    inline float& beta() const noexcept { return mAttributes -> getAttr<LRNAttr::Beta>(); }
-    inline float& bias() const noexcept { return mAttributes -> getAttr<LRNAttr::Bias>(); }
-    inline std::int32_t& size() const noexcept { return mAttributes -> getAttr<LRNAttr::Size>(); }
 
-    static const std::vector<std::string> getInputsName(){
+    /**
+     * @brief Get or modify the `alpha` attribute.
+     * @return Reference to the `alpha` attribute.
+     */
+    inline float& alpha() const noexcept { return mAttributes->getAttr<LRNAttr::Alpha>(); }
+
+    /**
+     * @brief Get or modify the `beta` attribute.
+     * @return Reference to the `beta` attribute.
+     */
+    inline float& beta() const noexcept { return mAttributes->getAttr<LRNAttr::Beta>(); }
+
+    /**
+     * @brief Get or modify the `bias` attribute.
+     * @return Reference to the `bias` attribute.
+     */
+    inline float& bias() const noexcept { return mAttributes->getAttr<LRNAttr::Bias>(); }
+
+    /**
+     * @brief Get or modify the `size` attribute.
+     * @return Reference to the `size` attribute.
+     */
+    inline std::int32_t& size() const noexcept { return mAttributes->getAttr<LRNAttr::Size>(); }
+
+    /**
+     * @brief Get the input tensor names for the LRN operator.
+     * @return A vector of input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+
+    /**
+     * @brief Get the output tensor names for the LRN operator.
+     * @return A vector of output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Create an LRN operation node.
+ *
+ * @param[in] size Normalization size (must be an odd positive integer).
+ * @param[in] name Name of the operator (optional).
+ * @return A shared pointer to the Node containing the LRN operator.
+ */
 std::shared_ptr<Node> LRN(std::int32_t size, const std::string& name = "");
+
 } // namespace Aidge
 
 namespace {
+/**
+ * @brief EnumStrings specialization for LRNAttr.
+ */
 template <>
 const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size"};
 }
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 179eb90b39bb5d527781289b9b233d3a29d14494..46730d0269dfeff1dd7cc47fe5ae01597b28dcdf 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -25,11 +25,29 @@
 
 namespace Aidge {
 enum class LeakyReLUAttr {
+    /**
+     * @brief Slope for the negative input values.
+     */
     NegativeSlope
 };
 
+/**
+ * @class LeakyReLU_Op
+ * @brief Implements the LeakyReLU activation function.
+ *
+ * The LeakyReLU operation introduces a small, non-zero gradient for negative input values,
+ * defined as:
+ *      - x < 0: f(x) = negativeSlope * x
+ *      - x >= 0: f(x) = x
+ *
+ * The output tensor has the same shape as the input tensor.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class LeakyReLU_Op : public OperatorTensor,
     public Registrable<LeakyReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>> {
+
 public:
     static const std::string Type;
 
@@ -40,8 +58,15 @@ private:
 
 public:
 
+    /**
+     * @brief Default constructor is deleted.
+     */
     LeakyReLU_Op() = delete;
 
+    /**
+     * @brief Constructor for LeakyReLU operator.
+     * @param[in] negativeSlope The slope for negative input values.
+     */
     LeakyReLU_Op(float negativeSlope)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(
@@ -50,31 +75,55 @@ public:
     {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param[in] op LeakyReLU_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not its input tensors. 
+     * The new operator has no associated input.
      */
     LeakyReLU_Op(const LeakyReLU_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::LeakyReLU_Op
      */
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the negative slope value.
+     */
     inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<LeakyReLUAttr::NegativeSlope>(); }
 
+    /**
+     * @brief Get the names of the input tensors.
+     * @return A vector containing the names of input tensors.
+     */
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
+
+    /**
+     * @brief Get the names of the output tensors.
+     * @return A vector containing the names of output tensors.
+     */
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Apply the LeakyReLU activation function to a tensor.
+ *
+ * @param[in] negativeSlope Slope for the negative input values.
+ * @param[in] name Name of the Operator.
+ * @return std::shared_ptr<Node> Node containing the Operator.
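+ *
+ * Minimal usage sketch (the slope value and node name are arbitrary examples):
+ * @code{.cpp}
+ * auto node = Aidge::LeakyReLU(0.01f, "leaky_relu");
+ * @endcode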
+ */
 std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "");
 }
 
@@ -84,4 +133,4 @@ const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
     = {"negative_slope"};
 }
 
-#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
+#endif /* AIDGE_CORE_OPERATOR_LEAKYRELU_H_ */
diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Ln.hpp
index 22fc51664b89bcdeb5970b0cc92beafdde52e43f..4a78db4391e0f69023b0de8c843264352c9ca3fb 100755
--- a/include/aidge/operator/Ln.hpp
+++ b/include/aidge/operator/Ln.hpp
@@ -25,16 +25,33 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of an element-wise Natural Logarithm (Ln) operation
+ * on an input Tensor.
+ *
+ * For each element x in the input, the function is defined as:
+ * `f(x) = ln(x)`, where ln(x) is the natural logarithm of x (logarithm to the base e).
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Ln_Op : public OperatorTensor,
-    public Registrable<Ln_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Ln_Op&)>> {
+    public Registrable<Ln_Op,  // <Op, backend, implementation creation function>
+        std::string,
+        std::function<std::unique_ptr<OperatorImpl>(const Ln_Op&)>>
+{
 public:
     static const std::string Type;
 
     Ln_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Ln_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Ln_Op(const Ln_Op& op);
 
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index bf6ab84c7373962e71434050427c9b6ecae3b034..0313815ee2d2dd568f7dc9b9b496e234349e657f 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -23,6 +23,34 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of a Matrix Multiplication (MatMul) operation on input Tensors.
+ *
+ * Performs matrix multiplication on two input Tensors A and B:
+ * - If both inputs are 2D matrices, the function is defined as:
+ *   `C = A @ B`, where `@` represents matrix multiplication.
+ * - For higher-dimensional Tensors, it performs batched matrix multiplication:
+ *   The last two dimensions of A and B are used for matrix multiplication, 
+ *   while the remaining dimensions are broadcast according to NumPy broadcasting rules.
+ *
+ * Requirements:
+ * - The last dimension of A (its number of columns) must match the second-to-last
+ *   dimension of B (its number of rows).
+ *
+ * The output Tensor shape is determined as:
+ * - For 2D matrices A (m x n) and B (n x p), the output shape is (m x p).
+ * - For higher-dimensional Tensors, broadcasting rules are applied to the batch dimensions,
+ *   and the resulting shape is `[broadcasted_batch_dims, m, p]`.
+ *
+ * Example:
+ * - Input A shape: (2, 3, 4), Input B shape: (4, 5).
+ * - B has no batch dimension, so it is broadcast against A's batch dimension (2).
+ * - The last two dimensions are multiplied: (3, 4) @ (4, 5) = (3, 5).
+ * - Output shape: (2, 3, 5).
+ *
+ * @see OperatorTensor
+ * @see Registrable
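+ *
+ * Worked 2-D example (standalone sketch, not the backend implementation):
+ * @code{.cpp}
+ * // A is 2x3, B is 3x2 -> C is 2x2, with C[i][j] = sum_k A[i][k] * B[k][j].
+ * float A[2][3] = {{1, 2, 3}, {4, 5, 6}};
+ * float B[3][2] = {{1, 0}, {0, 1}, {1, 1}};
+ * float C[2][2] = {};
+ * for (int i = 0; i < 2; ++i)
+ *     for (int j = 0; j < 2; ++j)
+ *         for (int k = 0; k < 3; ++k)
+ *             C[i][j] += A[i][k] * B[k][j];
+ * // C == {{4, 5}, {10, 11}}
+ * @endcode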
+ */
 class MatMul_Op : public OperatorTensor,
               public Registrable<MatMul_Op,
                                  std::string,
@@ -33,8 +61,10 @@ public:
     MatMul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op MatMul_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     MatMul_Op(const MatMul_Op& op);
 
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 0cc43a6fbe50849b169a59d048962668d3e4666c..8503b1be15df24056d61885af27d0d1778990015 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -29,72 +29,192 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
+/**
+ * @enum MaxPoolingAttr
+ * @brief Attributes defining the configuration of a MaxPooling operation.
+ */
+enum class MaxPoolingAttr {
+  /**
+   * @brief Stride dimensions for sliding the pooling window across the input dimensions.
+   * The stride specifies how much the window moves after each operation.
+   * Must be positive integers.
+   */
+  StrideDims,
+
+  /**
+   * @brief Kernel dimensions specifying the size of the pooling window for each spatial dimension.
+   * For example, common kernel dimensions include 2x2 or 3x3.
+   * Must be positive integers.
+   */
+  KernelDims,
+
+  /**
+   * @brief Flag indicating whether to use ceil or floor when calculating output size.
+   * - `true`: Use `ceil` for output size calculation.
+   * - `false`: Use `floor` for output size calculation.
+   */
+  CeilMode,
+};
+
+/**
+ * @class MaxPooling_Op
+ * @tparam DIM Dimensionality of the input tensor (e.g., 1D, 2D, 3D).
+ * @brief Implements the MaxPooling operation over a specified input tensor.
+ *
+ * MaxPooling reduces spatial dimensions by applying a max filter over a sliding window.
+ * The resulting output tensor contains the maximum value within each window.
+ *
+ * ### Output Shape Calculation
+ * - If `CeilMode` is false:
+ *   `output_size = floor((input_size - kernel_size) / stride + 1)`
+ * - If `CeilMode` is true:
+ *   `output_size = ceil((input_size - kernel_size) / stride + 1)`
+ *
+ * ### Example usage:
+ * - Input shape: (1, 3, 32, 32) // Batch size 1, 3 channels, 32x32 spatial dimensions
+ * - KernelDims: (2, 2)
+ * - StrideDims: (2, 2)
+ * - CeilMode: false
+ * - Output shape: (1, 3, 16, 16)
+ *
+ * @see OperatorTensor
+ * @see Registrable
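+ *
+ * Standalone sketch of the output-size rule (illustration only):
+ * @code{.cpp}
+ * // Matches the example above: floor((32 - 2) / 2) + 1 = 16.
+ * std::size_t outSize(std::size_t in, std::size_t kernel, std::size_t stride, bool ceilMode) {
+ *     const float s = static_cast<float>(in - kernel) / static_cast<float>(stride);
+ *     return static_cast<std::size_t>(ceilMode ? std::ceil(s) : std::floor(s)) + 1;
+ * }
+ * @endcode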
+ */
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
-                public Registrable<MaxPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>> {
+                public Registrable<MaxPooling_Op<DIM>,
+                                   std::string,
+                                   std::function<std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>>
+{
 public:
-    static const std::string Type;
+    static const std::string Type; ///< Static identifier for this operator type.
 
     using Attributes_ = StaticAttributes<MaxPoolingAttr,
-                                             std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, DIM>,
-                                             bool>;
+                                         std::array<DimSize_t, DIM>,
+                                         std::array<DimSize_t, DIM>,
+                                         bool>;
 
 private:
     template <MaxPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
-    const std::shared_ptr<Attributes_> mAttributes;
+    const std::shared_ptr<Attributes_> mAttributes; ///< Shared pointer to operator attributes.
 
 public:
-    MaxPooling_Op() = delete;
+    MaxPooling_Op() = delete; ///< Deleted default constructor.
 
+    /**
+     * @brief Constructor.
+     * @param[in] kernel_dims Size of the pooling window for each spatial dimension.
+     * @param[in] stride_dims Step size (stride) for sliding the pooling window across input dimensions.
+     * @param[in] ceil_mode Indicates whether to use ceil or floor for output size calculation.
+     */
     MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
-                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                            bool ceil_mode = false);
+                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                  bool ceil_mode = false);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op MaxPooling_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     MaxPooling_Op(const MaxPooling_Op<DIM>& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::MaxPooling_Op
+     * @brief Clones the operator using the copy constructor.
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Computes output tensor dimensions based on input dimensions and operator attributes.
+     * @param[in] allowDataDependency If true, dimensions may depend on input data; otherwise, strictly attribute-based.
+     * @return Boolean indicating success of the operation.
+     */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
+    /**
+     * @brief Sets the backend implementation for this operator.
+     * @param[in] name Name of the backend.
+     * @param[in] device Device index where the backend will run (default: 0).
+     */
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Retrieves the list of available backend implementations for this operator.
+     * @return A set of available backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Accessor for operator attributes.
+     * @return A shared pointer to the attributes object.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Accessor for stride dimensions.
+     * @return An array representing stride dimensions.
+     */
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
+
+    /**
+     * @brief Accessor for kernel dimensions.
+     * @return An array representing kernel dimensions.
+     */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<MaxPoolingAttr::KernelDims>(); }
+
+    /**
+     * @brief Accessor for ceil mode flag.
+     * @return Boolean value indicating whether ceil mode is enabled.
+     */
     inline bool& ceilMode() const { return mAttributes->template getAttr<MaxPoolingAttr::CeilMode>(); }
 
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
+    /**
+     * @brief Retrieves the names of the input tensors.
+     * @return A vector of input tensor names.
+     */
+    static const std::vector<std::string> getInputsName(){ return {"data_input"}; }
+
+    /**
+     * @brief Retrieves the names of the output tensors.
+     * @return A vector of output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName(){ return {"data_output"}; }
 };
 
+/**
+ * @brief Explicit template instantiation declarations for 1D, 2D, and 3D MaxPooling operations.
+ */
 extern template class Aidge::MaxPooling_Op<1>;
 extern template class Aidge::MaxPooling_Op<2>;
 extern template class Aidge::MaxPooling_Op<3>;
 
+/**
+ * @brief Factory function for creating MaxPooling operations.
+ * @tparam DIM Dimensionality of the pooling operation (e.g., 1D, 2D, 3D).
+ * @param[in] kernel_dims Kernel dimensions specifying the size of the pooling window.
+ * @param[in] name Optional name for the operation.
+ * @param[in] stride_dims Stride dimensions specifying the step size for the pooling window.
+ * @param[in] ceil_mode Indicates whether to use ceil mode for output size calculation.
+ * @return A shared pointer to a Node representing the MaxPooling operation.
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
-                                           const std::string& name = "",
-                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                           bool ceil_mode=false);
-
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+                                 const std::string& name = "",
+                                 const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                 bool ceil_mode=false);
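+
+// Illustrative usage sketch (not part of the declared API; names are arbitrary):
+//
+//   const std::array<DimSize_t, 2> kernel{3, 3};
+//   auto pool = MaxPooling(kernel, "pool1", {2, 2});  // 3x3 window, stride 2, floor mode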
+
+/**
+ * @brief Helper function for creating MaxPooling operations with C-style array input.
+ * @tparam DIM Dimensionality of the pooling operation.
+ * @param[in] kernel_dims C-style array of kernel dimensions.
+ * @param[in] name Optional name for the operation.
+ * @param[in] stride_dims Stride dimensions specifying the step size for the pooling window.
+ * @param[in] ceil_mode Indicates whether to use ceil mode for output size calculation.
+ * @return A shared pointer to a Node representing the MaxPooling operation.
+ */
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> MaxPooling(
     DimSize_t const (&kernel_dims)[DIM],
@@ -104,9 +224,13 @@ inline std::shared_ptr<Node> MaxPooling(
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
     return MaxPooling(to_array(kernel_dims), name, stride_dims, ceil_mode);
 }
+
 }  // namespace Aidge
 
 namespace {
+/**
+ * @brief String representations of MaxPooling attributes for debugging and logging.
+ */
 template <>
 const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"};
 }
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 2b05b5fffed98a7df99a450a5f99c88efa2f7288..deefc007772ccaa8744c42aa47dbb5f2d5a2c7f5 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -25,23 +25,119 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+/**
+ * @class Memorize_ProdConso
+ * @brief Implements the producer-consumer principle for the `Memorize` operator.
+ *
+ * The `Memorize_ProdConso` class defines the logic for managing data dependencies during
+ * the forward process of the `Memorize` operator.
+ *
+ * This class ensures that:
+ * - All data produced by the `Memorize` operator is properly consumed.
+ * - All required outputs are correctly filled during the forward pass.
+ *
+ * It also calculates data and memory requirements specific to the `Memorize` operator.
+ */
 class Memorize_ProdConso : public ProdConso {
 public:
+    /**
+     * @brief Constructor for the `Memorize_ProdConso` class.
+     * @param[in] op The operator instance for which producer-consumer relationships are managed.
+     *
+     * @details:
+     * - The provided `Operator` instance is used to initialize the base `ProdConso` class.
+     * - This operator will determine the specific requirements for data production
+     *   and consumption during the forward process.
+     */
     Memorize_ProdConso(const Operator& op): ProdConso(op) {}
+
+    /**
+     * @brief Get the number of data elements required from an input tensor for forward computation.
+     * @param[in] inputIdx The index of the input tensor.
+     * @return The number of required elements (`Elts_t`).
+     *
+     * @details:
+     * - For each input tensor identified by `inputIdx`, this method calculates the
+     *   minimum amount of data needed by the `Memorize` operator to perform its forward step.
+     */
     Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
-    Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+
+    /**
+     * @brief Compute the memory requirements for an output tensor.
+     * @param[in] outputIdx The index of the output tensor.
+     * @param[in] inputsSize A vector containing the dimensions of the input tensors.
+     * @return The memory required (`Elts_t`) for the specified output tensor.
+     *
+     * @details:
+     * - This method evaluates how much memory is needed for the `outputIdx` tensor
+     *   based on the input tensor dimensions and the attributes of the `Memorize` operator.
+     * - Memory requirements are influenced by factors such as sequence length and
+     *   the forward step configuration.
+     */
+    Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+
+    /**
+     * @brief Update the producer-consumer relationships for the `Memorize` operator.
+     * @details:
+     * - This method ensures that all data produced by the `Memorize` operator is
+     *   appropriately consumed by downstream operators in the computational graph.
+     * - It also verifies that all required outputs are filled during the forward pass,
+     *   maintaining consistency in the data flow.
+     * - This step is crucial for ensuring correctness in recurrent computations and
+     *   maintaining dependencies in the graph.
+     */
     void updateConsummerProducer() override;
 };
 
+/**
+ * @brief Implementation of the Memorize operation.
+ *
+ * Since the Memorize operation is backend-agnostic, its implementation is located in aidge_core.
+ */
 class Memorize_OpImpl : public OperatorImpl {
 public:
+    /**
+     * @brief Constructs a Memorize_OpImpl object.
+     * @param[in] op The operator to be implemented.
+     * @param[in] backend The backend used for execution.
+     */
     Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Get the Producer Consumer object of the operator.
+     * @return A shared pointer to the ProdConso object.
+     */
     std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Memorize_ProdConso>(mOp); };
+
+    /**
+     * @brief Executes the forward pass for the Memorize operation.
+     */
     void forward() override;
 };
 
-enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep };
+enum class MemorizeAttr {
+    ScheduleStep,   // Defines the step interval for scheduling memory updates.
+    ForwardStep,    // Tracks the current step in the forward pass.
+    EndStep         // The final step for which memory updates will occur.
+};
 
+/**
+ * @class Memorize_Op
+ * @brief The Memorize Operator is responsible for storing a tensor's state over a defined
+ * number of iterations and providing the stored value as output at each iteration.
+ *
+ * Memorize operators are used in models with recurrent structures or feedback loops, such as LSTMs.
+ * They maintain an internal state that is updated and accessed sequentially.
+ * - **Inputs**:
+ *   - `data_input`: The main data input for the operator.
+ *   - `data_input_init`: The initial value used to initialize the internal memory.
+ * - **Outputs**:
+ *   - `data_output`: The current output of the internal memory.
+ *   - `data_output_rec`: The recurrent output that feeds back into the memory for subsequent steps.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Memorize_Op : public OperatorTensor,
     public Registrable<Memorize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Memorize_Op&)>> {
 public:
@@ -56,46 +152,109 @@ private:
 public:
     Memorize_Op() = delete;
 
+    /**
+     * @brief Construct a new Memorize Operator.
+     * @param endStep Defines the number of steps the operator will retain memory.
+     */
     Memorize_Op(const std::uint32_t endStep);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
-     * but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copies the operator's attributes and its output tensors.
+     * The new operator has no input tensors associated initially.
      * @param op Operator to copy.
      */
     Memorize_Op(const Memorize_Op& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Memorize_Op
+     * @brief Clone the operator by creating a copy of it.
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Assign a specific backend and device for computation.
+     * @param name Name of the backend.
+     * @param device The device index (default is 0).
+     */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    /**
+     * @brief Get the list of available backends compatible with this operator.
+     * @return A set of strings representing backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Perform dimension inference for the operator, optionally allowing
+     * data dependency during the process.
+     * @param allowDataDependency Flag indicating whether data dependency is allowed.
+     * @return True if dimensions were successfully inferred.
+     */
     bool forwardDims(bool allowDataDependency = false) override final;
+
+    /**
+     * @brief Check if dimensions have been forwarded successfully.
+     * @return True if dimensions are forwarded.
+     */
     bool dimsForwarded() const override;
+
     void updateConsummerProducer() override;
     void forward() override;
 
+    /**
+     * @brief Access the operator's attributes.
+     * @return A shared pointer to the operator's attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get or set the scheduling step for the operator.
+     * @return A reference to the scheduling step.
+     */
     inline std::uint32_t& scheduleStep() const { return mAttributes->template getAttr<MemorizeAttr::ScheduleStep>(); }
+
+    /**
+     * @brief Get or set the forward step counter for the operator.
+     * @return A reference to the forward step counter.
+     */
     inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<MemorizeAttr::ForwardStep>(); }
+
+    /**
+     * @brief Get or set the end step defining the memory duration.
+     * @return A reference to the end step value.
+     */
     inline std::uint32_t& endStep() const { return mAttributes->template getAttr<MemorizeAttr::EndStep>(); }
 
+    /**
+     * @brief Retrieve the names of the operator's input tensors.
+     * @return A vector of strings representing input tensor names.
+     */
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "data_input_init"};
     }
+
+    /**
+     * @brief Retrieve the names of the operator's output tensors.
+     * @return A vector of strings representing output tensor names.
+     */
     static const std::vector<std::string> getOutputsName(){
         return {"data_output", "data_output_rec"};
     }
 };
 
+/**
+ * @brief Create a Memorize operator node.
+ * @param endStep The step duration for memory storage.
+ * @param name The optional name for the node.
+ * @return A shared pointer to the newly created Memorize operator node.
+ */
 std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "");
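+
+// Illustrative usage sketch (names are arbitrary): a memory cell that retains its
+// state for 10 forward steps, typically wired inside a recurrent micro-graph.
+//
+//   auto mem = Memorize(10, "mem_cell");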
 }  // namespace Aidge
 
 namespace {
+/**
+ * @brief String representations of the Memorize operator's attributes.
+ */
 template <>
 const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = {
     "schedule_step",
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 47eb6cf97e238f820b8ef2d1f3296040e73aa43f..f7f1cdfd5bc0d799adc87bd7b7e1be999363627f 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -27,54 +27,111 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+
+/**
+ * @class MetaOperator_Op
+ * @brief Represents a meta-operator, which is a composition of multiple operators.
+ * 
+ * A meta-operator encapsulates a micro-graph of operations, facilitating modularity
+ * and reusability. It extends the functionality of `OperatorTensor` and provides
+ * features such as cloning, dynamic input association, and custom backend support.
+ */
 class MetaOperator_Op : public OperatorTensor,
                 public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::function<std::shared_ptr<OperatorImpl>(const MetaOperator_Op &)>> {
 public:
-    // outputs shared with micro-graph output Tensors
-    // Micro-graph handling:
-    std::shared_ptr<GraphView> mGraph; // Meta operator micro-graph
+    // The micro-graph defining the internal operations of this meta-operator
+    std::shared_ptr<GraphView> mGraph;
+
+    // Scheduler for sequential execution of the micro-graph
     std::shared_ptr<SequentialScheduler> mScheduler;
+
+    // Weak reference to the parent node for scheduling purposes
     std::weak_ptr<Node> mUpperNode;
 
 private:
+    // Dynamic attributes specific to this meta-operator
     const std::shared_ptr<DynamicAttributes> mAttributes = std::make_shared<DynamicAttributes>();
 
 public:
+    /**
+     * @brief Constructor for MetaOperator_Op.
+     * 
+     * @param type The type of the meta-operator.
+     * @param graph The micro-graph defining the meta-operator.
+     * @param forcedInputsCategory Optional input categories to override default behavior.
+     */
     MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph, const std::vector<InputCategory>& forcedInputsCategory = {});
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy constructor.
+     * 
+     * Copies the operator's attributes and output tensors, but not its input tensors.
+     * 
+     * @param op The operator to copy.
      */
     MetaOperator_Op(const MetaOperator_Op& op)
         : OperatorTensor(op),
-          mGraph(op.mGraph->clone())
+          mGraph(op.mGraph->clone()) // Clone the micro-graph for isolation
     {}
 
     /**
-     * Set the node that should be used for the scheduling.
-    */
+     * @brief Set the node for scheduling.
+     * 
+     * @param node The node to be used as the upper node in the scheduling hierarchy.
+     */
     inline void setUpperNode(std::shared_ptr<Node> node) {
         mUpperNode = node;
     }
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::MetaOperator_Op
+     * @brief Clone this meta-operator.
+     * 
+     * Uses the copy constructor to create a new instance with identical attributes.
+     * 
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Retrieve the micro-graph defining the meta-operator.
+     * 
+     * @return A shared pointer to the micro-graph.
+     */
     inline const std::shared_ptr<GraphView>& getMicroGraph() const noexcept {
         return mGraph;
     }
 
+    /**
+     * @brief Retrieve the scheduler for the micro-graph.
+     * 
+     * @return A shared pointer to the scheduler.
+     */
     inline const std::shared_ptr<SequentialScheduler>& getMicroGraphScheduler() const noexcept {
         return mScheduler;
     }
 
+    /**
+     * @brief Associate an input tensor to the operator.
+     * 
+     * @param inputIdx Index of the input tensor.
+     * @param data Shared pointer to the data tensor.
+     */
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
+
+    /**
+     * @brief Set an input tensor for the operator.
+     * 
+     * @param inputIdx Index of the input tensor.
+     * @param data Shared pointer to the data tensor.
+     */
     void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
+    /**
+     * @brief Forward the dimensions through the micro-graph.
+     * 
+     * @param allowDataDependency If true, allows data-dependent operations during forwarding.
+     * @return True if the operation succeeded, false otherwise.
+     */
     bool forwardDims(bool allowDataDependency = false) override final {
         if (inputsAssociated()) {
             // Forward dims of micro-graph
@@ -83,11 +140,35 @@ public:
         return false;
     }
 
+    /**
+     * @brief Retrieve the backend for the operator.
+     * 
+     * @return The name of the backend.
+     */
     std::string backend() const noexcept override;
 
+    /**
+     * @brief Set the backend for the operator.
+     * 
+     * @param name The name of the backend.
+     * @param device The device index.
+     */
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Get the available backends for the operator.
+     * 
+     * @return A set of available backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Set the data type for the operator.
+     * 
+     * This propagates the data type change to the micro-graph.
+     * 
+     * @param datatype The new data type.
+     */
     void setDataType(const DataType &datatype) const override {
         // The micro-graph should always be set to the right data type, since it
         // shares input/output tensors.
@@ -95,6 +176,11 @@ public:
         mGraph->setDataType(datatype);
     }
 
+    /**
+     * @brief Retrieve the dynamic attributes of the operator.
+     * 
+     * @return A shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
     Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
@@ -103,21 +189,53 @@ public:
     Elts_t getNbConsumedData(IOIndex_t inputIdx) const override;
     Elts_t getNbProducedData(IOIndex_t outputIdx) const override;
 
+    /**
+     * @brief Update the consumer-producer relationships for the operator.
+     */
     void updateConsummerProducer() override;
+
+    /**
+     * @brief Reset the consumer-producer relationships.
+     */
     void resetConsummerProducer() override;
+
+    /**
+     * @brief Perform the forward pass for the operator.
+     */
     void forward() override;
+
+    /**
+     * @brief Perform the backward pass for the operator.
+     * 
+     * @note Currently not implemented.
+     */
     void backward() override {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented yet for a MetaOperator");
     }
 
+    /**
+     * @brief Check if the operator is atomic.
+     * 
+     * @return False, as meta-operators are inherently non-atomic.
+     */
     inline bool isAtomic() const noexcept override final { return false; }
 
 };
 
+/**
+ * @brief Helper function to create a MetaOperator node.
+ * 
+ * @param type The type of the meta-operator.
+ * @param graph The micro-graph defining the meta-operator.
+ * @param forcedInputsCategory Optional input categories to override default behavior.
+ * @param name Optional name for the operator.
+ * @return A shared pointer to the created Node.
+ */
 std::shared_ptr<Node> MetaOperator(const char *type,
                                   const std::shared_ptr<GraphView>& graph,
                                   const std::vector<InputCategory>& forcedInputsCategory = {},
                                   const std::string& name = "");
+
 }  // namespace Aidge
 
-#endif /* MetaOperator_H_ */
+#endif /* AIDGE_CORE_OPERATOR_METAOPERATOR_H_ */
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 481a7795e24acd006acfc66a0fccde1b8da747e7..5bb184b808e0a9d685879e53554ff3be500f5717 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -19,29 +19,58 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/OpArgs.hpp" // Sequential
-#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/AvgPooling.hpp"
-#include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Pad.hpp"
 #include "aidge/operator/Sigmoid.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-
-
+/**
+ * @brief Creates a padded convolution operation (Conv2D/Conv3D).
+ *
+ * This function creates a padded convolution operation that applies padding before the convolution operation.
+ * It uses various parameters like the number of input/output channels, kernel dimensions, stride, padding, etc.
+ *
+ * @param[in] in_channels The number of input channels.
+ * @param[in] out_channels The number of output channels.
+ * @param[in] kernel_dims The dimensions of the convolution kernel.
+ * @param[in] name Optional name for the operation.
+ * @param[in] stride_dims The stride dimensions for the convolution operation (default is 1).
+ * @param[in] padding_dims The padding dimensions to apply before convolution (default is 0).
+ * @param[in] dilation_dims Dilation factor for convolution (default is 1).
+ * @param[in] no_bias Whether to disable the bias (default is false).
+ * @return A shared pointer to the Node representing the padded convolution operation.
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
-extern std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
-                                  DimSize_t out_channels,
-                                  const std::array<DimSize_t, DIM> &kernel_dims,
-                                  const std::string& name = "",
-                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false);
+extern std::shared_ptr<Node>
+PaddedConv(DimSize_t in_channels,
+           DimSize_t out_channels,
+           const std::array<DimSize_t, DIM> &kernel_dims,
+           const std::string &name = "",
+           const std::array<DimSize_t, DIM> &stride_dims =
+               create_array<DimSize_t, DIM>(1),
+           const std::array<DimSize_t, 2 * DIM> &padding_dims =
+               create_array<DimSize_t, 2 * DIM>(0),
+           const std::array<DimSize_t, DIM> &dilation_dims =
+               create_array<DimSize_t, DIM>(1),
+           bool no_bias = false);
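+
+// Illustrative usage sketch (names and sizes are arbitrary): a 3x3 convolution from
+// 3 to 16 channels, stride 1, with 1 pixel of padding on every side.
+//
+//   const std::array<DimSize_t, 2> kernel{3, 3};
+//   auto conv = PaddedConv(3, 16, kernel, "conv1", {1, 1}, {1, 1, 1, 1});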
 
+/**
+ * @brief Creates a padded convolution operation as a MetaOperator.
+ *
+ * This function creates a graph-based MetaOperator representing a padded convolution operation (Conv2D/Conv3D).
+ *
+ * @param[in] kernel_dims The dimensions of the convolution kernel.
+ * @param[in] stride_dims The stride dimensions for the convolution operation (default is 1).
+ * @param[in] padding_dims The padding dimensions to apply before convolution (default is 0).
+ * @param[in] dilation_dims Dilation factor for convolution (default is 1).
+ * @return A shared pointer to the MetaOperator_Op representing the padded convolution operation.
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
 extern std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
@@ -49,29 +78,61 @@ extern std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
                                   const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1));
 
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+// Helper function with C-style array for kernel_dims, enabling automatic template DIM deduction.
 template <DimSize_t DIM>
-extern std::shared_ptr<Node> PaddedConv(
-    DimSize_t in_channels,
-    DimSize_t out_channels,
-    DimSize_t const (&kernel_dims)[DIM],
-    const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-    bool no_bias = false);
+extern std::shared_ptr<Node>
+PaddedConv(DimSize_t in_channels,
+           DimSize_t out_channels,
+           DimSize_t const (&kernel_dims)[DIM],
+           const std::string &name = "",
+           const std::array<DimSize_t, DIM> &stride_dims =
+               create_array<DimSize_t, DIM>(1),
+           const std::array<DimSize_t, 2 * DIM> &padding_dims =
+               create_array<DimSize_t, 2 * DIM>(0),
+           const std::array<DimSize_t, DIM> &dilation_dims =
+               create_array<DimSize_t, DIM>(1),
+           bool no_bias = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 
+/**
+ * @brief Creates a padded depthwise convolution operation (Conv2DDepthwise/Conv3DDepthwise).
+ *
+ * This function creates a depthwise convolution operation with padding, where each input channel has its own filter.
+ *
+ * @param[in] nb_channels The number of input/output channels (same for depthwise convolution).
+ * @param[in] kernel_dims The dimensions of the convolution kernel.
+ * @param[in] name Optional name for the operation.
+ * @param[in] stride_dims The stride dimensions for the convolution operation (default is 1).
+ * @param[in] padding_dims The padding dimensions to apply before convolution (default is 0).
+ * @param[in] dilation_dims Dilation factor for convolution (default is 1).
+ * @param[in] no_bias Whether to disable the bias (default is false).
+ * @return A shared pointer to the Node representing the padded depthwise convolution operation.
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
-std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
-                                  const std::array<DimSize_t, DIM> &kernel_dims,
-                                  const std::string& name = "",
-                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false);
+std::shared_ptr<Node>
+PaddedConvDepthWise(const DimSize_t nb_channels,
+                    const std::array<DimSize_t, DIM> &kernel_dims,
+                    const std::string &name = "",
+                    const std::array<DimSize_t, DIM> &stride_dims =
+                        create_array<DimSize_t, DIM>(1),
+                    const std::array<DimSize_t, 2 * DIM> &padding_dims =
+                        create_array<DimSize_t, 2 * DIM>(0),
+                    const std::array<DimSize_t, DIM> &dilation_dims =
+                        create_array<DimSize_t, DIM>(1),
+                    bool no_bias = false);
 
+/**
+ * @brief Creates a padded depthwise convolution operation as a MetaOperator.
+ *
+ * This function creates a graph-based MetaOperator representing a padded depthwise convolution operation.
+ *
+ * @param[in] kernel_dims The dimensions of the convolution kernel.
+ * @param[in] stride_dims The stride dimensions for the convolution operation (default is 1).
+ * @param[in] padding_dims The padding dimensions to apply before convolution (default is 0).
+ * @param[in] dilation_dims Dilation factor for convolution (default is 1).
+ * @return A shared pointer to the MetaOperator_Op representing the padded depthwise convolution operation.
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
 std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
@@ -79,48 +140,84 @@ std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
                                   const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1));
 
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+// Helper function for depthwise convolution, with C-style array to automatically deduce DIM.
 template <DimSize_t DIM>
-inline std::shared_ptr<Node> PaddedConvDepthWise(
-    const DimSize_t nb_channels,
-    DimSize_t const (&kernel_dims)[DIM],
-    const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-    bool no_bias = false);
+inline std::shared_ptr<Node>
+PaddedConvDepthWise(const DimSize_t nb_channels,
+                    DimSize_t const (&kernel_dims)[DIM],
+                    const std::string &name = "",
+                    const std::array<DimSize_t, DIM> &stride_dims =
+                        create_array<DimSize_t, DIM>(1),
+                    const std::array<DimSize_t, 2 * DIM> &padding_dims =
+                        create_array<DimSize_t, 2 * DIM>(0),
+                    const std::array<DimSize_t, DIM> &dilation_dims =
+                        create_array<DimSize_t, DIM>(1),
+                    bool no_bias = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 
-
+/**
+ * @brief Creates a padded average pooling operation.
+ *
+ * This function creates an average pooling operation with padding before pooling.
+ *
+ * @param[in] kernel_dims The dimensions of the pooling window.
+ * @param[in] name Optional name for the operation.
+ * @param[in] stride_dims The stride dimensions for pooling (default is 1).
+ * @param[in] padding_dims Padding dimensions before pooling (default is 0).
+ * @return A shared pointer to the Node representing the padded average pooling operation.
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
 extern std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
 
-
+/**
+ * @brief Creates a padded average pooling operation as a MetaOperator.
+ *
+ * This function creates a graph-based MetaOperator representing a padded average pooling operation.
+ *
+ * @param[in] kernel_dims The dimensions of the pooling window.
+ * @param[in] stride_dims The stride dimensions for pooling (default is 1).
+ * @param[in] padding_dims Padding dimensions before pooling (default is 0).
+ * @return A shared pointer to the MetaOperator_Op representing the padded average pooling operation.
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
 extern std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
 
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+// Helper function for average pooling with C-style array for kernel_dims, enabling automatic DIM deduction.
 template <DimSize_t DIM>
-extern std::shared_ptr<Node> PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
-    const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
+extern std::shared_ptr<Node>
+PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
+                 const std::string &name = "",
+                 const std::array<DimSize_t, DIM> &stride_dims =
+                     create_array<DimSize_t, DIM>(1),
+                 const std::array<DimSize_t, 2 * DIM> &padding_dims =
+                     create_array<DimSize_t, 2 * DIM>(0));
 
 ////////////////////////////////////////////////////////////////////////////////
 
+/**
+ * @brief Creates a padded max pooling operation.
+ *
+ * This function creates a max pooling operation with padding before pooling.
+ *
+ * @param[in] kernel_dims The dimensions of the pooling window.
+ * @param[in] name Optional name for the operation.
+ * @param[in] stride_dims The stride dimensions for pooling (default is 1).
+ * @param[in] padding_dims Padding dimensions before pooling (default is 0).
+ * @param[in] ceil_mode Whether to use ceiling mode for pooling (default is false).
+ * @return A shared pointer to the Node representing the padded max pooling operation.
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  bool ceil_mode = false)
-{
+                                  bool ceil_mode = false) {
     auto graph = Sequential({
         Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
         MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, ceil_mode)
@@ -129,42 +226,74 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &
     return MetaOperator(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph, {}, name);
 }
 
+/**
+ * @brief Creates a padded max pooling operation as a MetaOperator.
+ *
+ * This function creates a graph-based MetaOperator representing a padded max pooling operation.
+ *
+ * @param[in] kernel_dims The dimensions of the pooling window.
+ * @param[in] stride_dims The stride dimensions for pooling (default is 1).
+ * @param[in] padding_dims Padding dimensions before pooling (default is 0).
+ * @param[in] ceil_mode Whether to use ceiling mode for pooling (default is false).
+ * @return A shared pointer to the MetaOperator_Op representing the padded max pooling operation.
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<MetaOperator_Op> PaddedMaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  bool ceil_mode = false)
-{
+                                  bool ceil_mode = false) {
     auto graph = Sequential({
         Pad<DIM>(padding_dims, ""),
         MaxPooling(kernel_dims, "", stride_dims, ceil_mode)
     });
-
     return std::make_shared<MetaOperator_Op>(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph);
 }
 
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+// Helper function for max pooling with C-style array to automatically deduce DIM.
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> PaddedMaxPooling(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-    bool ceil_mode= false)
-{
+    bool ceil_mode= false) {
     return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims, ceil_mode);
 }
 
-////////////////////////////////////////////////////////////////////////////////
-
+/**
+ * @brief Creates an LSTM (Long Short-Term Memory) operator.
+ *
+ * This function creates an LSTM operation which is a popular recurrent neural network (RNN) layer for sequence processing.
+ *
+ * @param[in] in_channels The number of input channels.
+ * @param[in] hidden_channels The number of hidden channels in the LSTM.
+ * @param[in] seq_length The length of the input sequence.
+ * @param[in] noBias Whether to disable the bias (default is false).
+ * @param[in] name Optional name for the operation.
+ * @return A shared pointer to the Node representing the LSTM operation.
+ */
 std::shared_ptr<Node> LSTM(DimSize_t in_channels,
                            DimSize_t hidden_channels,
                            DimSize_t seq_length,
                            bool noBias = false,
-                           const std::string& name = "");
+                           const std::string &name = "");
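+
+// Illustrative usage sketch (names and sizes are arbitrary): an LSTM over sequences
+// of length 20, mapping 32 input channels to 64 hidden channels.
+//
+//   auto lstm = LSTM(32, 64, 20, false, "lstm1");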
 
+/**
+ * @brief Creates an LSTM (Long Short-Term Memory) operation as a MetaOperator.
+ *
+ * This function creates an LSTM operation as a MetaOperator for use in graph-based computation.
+ *
+ * @param[in] seq_length The length of the input sequence.
+ * @return A shared pointer to the MetaOperator_Op representing the LSTM operation.
+ */
 std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length);
 
-}  // namespace Aidge
+std::shared_ptr<MetaOperator_Op> LeakyOp();
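+
+/**
+ * @brief Creates a Leaky meta-operator node.
+ * @param[in] nbTimeSteps Number of time steps.
+ * @param[in] beta Decay factor applied at each time step.
+ * @param[in] threshold Threshold value (default is 1.0).
+ * @param[in] name Optional name for the operation.
+ * @return A shared pointer to the Node representing the Leaky operation.
+ */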
+std::shared_ptr<Node> Leaky(const int nbTimeSteps,
+                            const float beta,
+                            const float threshold = 1.0,
+                            const std::string &name = "");
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index 49d92cd12f68a0b23530039c1df70ced9b2d2080..caea7a646f9ec013c29ad42e82f3f368caf553aa 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -30,6 +30,18 @@ public:
     void forward() override;
 };
 
+/**
+ * @brief Description of a Move operation that copies the input Tensor to the output Tensor.
+ *
+ * For each element x in the input, the function is defined as:
+ * `f(x) = x`
+ *
+ * This operation copies the input Tensor to the output Tensor without modification.
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Move_Op : public OperatorTensor,
     public Registrable<Move_Op, std::tuple<std::string, std::string>, std::function<std::unique_ptr<OperatorImpl>(const Move_Op&)>> {
 public:
@@ -38,8 +50,10 @@ public:
     Move_Op();
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Move_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Move_Op(const Move_Op& op);
 
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index bfe4fcb0de1cb7dda4a0ea8fc7b99638bc813f47..913fa05b4dadab05addaa2837bfbd25d25711716 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -25,7 +25,26 @@
 namespace Aidge {
 
 /**
- * @brief Tensor element-wise multiplication.
+ * @brief Description of an element-wise Multiplication operation on input Tensors,
+ * supporting NumPy broadcasting.
+ *
+ * For each pair of elements x and y from the input Tensors, the function 
+ * is defined as:
+ * `f(x, y) = x * y`
+ *
+ * Broadcasting adjusts shapes of the input Tensors to make them compatible:
+ * - Tensors are aligned from the rightmost dimensions.
+ * - Dimensions are compatible if they are equal, if one of them is 1, or if one of them is missing.
+ *
+ * The output Tensor shape is determined by taking the maximum size along 
+ * each dimension of the input Tensors after broadcasting.
+ * 
+ * Examples:
+ * 1. Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+ * 2. Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+ * 
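+ * A minimal sketch of the shape rule described above (illustrative only; not the
+ * actual implementation used to compute output dimensions):
+ * @code
+ * std::vector<std::size_t> broadcastShape(std::vector<std::size_t> a,
+ *                                         std::vector<std::size_t> b) {
+ *     if (a.size() < b.size()) std::swap(a, b);  // make `a` the longer shape
+ *     std::vector<std::size_t> out(a);
+ *     for (std::size_t i = 0; i < b.size(); ++i) {
+ *         // aligned from the right; each pair must be equal or contain a 1
+ *         out[a.size() - 1 - i] = std::max(a[a.size() - 1 - i], b[b.size() - 1 - i]);
+ *     }
+ *     return out;
+ * }
+ * @endcode
+ *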
+ * @see OperatorTensor
+ * @see Registrable
  */
 class Mul_Op : public OperatorTensor,
     public Registrable<Mul_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Mul_Op&)>> {
@@ -35,9 +54,10 @@ public:
     Mul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
-     * but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Mul_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Mul_Op(const Mul_Op& op);
 
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index e9988b4421b785a91ec170796be49c0c8df52142..40899ffa7668298f3e90e09b9a30ed9f438d89b2 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -12,64 +12,110 @@
 #ifndef AIDGE_CORE_OPERATOR_OPERATOR_H_
 #define AIDGE_CORE_OPERATOR_OPERATOR_H_
 
+#include <cstddef>
 #include <memory>
 #include <string>
 #include <vector>
 #include <utility>
-#include <cstddef>
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
 #include <fmt/format.h>
 #endif
 
-
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Types.h"
 
-
 #ifdef PYBIND
 namespace py = pybind11;
 #endif
 namespace Aidge {
 
+/**
+ * @enum OperatorType
+ * @brief Specifies the type of the operator.
+ */
 enum class OperatorType {
-    Data,
-    Tensor
+    Data,   /**< Indicates a data operator. */
+    Tensor  /**< Indicates a tensor operator. */
 };
 
+/**
+ * @enum InputCategory
+ * @brief Describes the category of an input for an operator.
+ */
 enum class InputCategory {
-    Data,
-    Param,
-    OptionalData,
-    OptionalParam
+    Data,          /**< Regular data input. */
+    Param,         /**< Parameter input. */
+    OptionalData,  /**< Optional data input. */
+    OptionalParam  /**< Optional parameter input. */
 };
 
+/**
+ * @class Operator
+ * @brief Base class for all operator types in the Aidge framework.
+ *
+ * The `Operator` class provides a foundation for implementing various operator types.
+ * Derived classes must implement specific behaviors for computation, attributes,
+ * and input/output handling.
+ */
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
-    std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
-    std::shared_ptr<DynamicAttributes> mInheritedAttrs;
+    /** @brief Implementation of the operator. */
+    std::shared_ptr<OperatorImpl> mImpl;
+
+    /** @brief Attributes of the associated Node.
+     *
+     * Defaults to an empty vector on copy construction, because two Operators
+     * cannot be associated with the same Node.
+     */
+    std::vector<std::shared_ptr<DynamicAttributes>> mInheritedAttrs{};
 
 private:
+    /** @brief Type of the operator (e.g., "Add", "AveragePool"). */
     std::string mType;
+
+    /** @brief Specifies whether the operator works on data or tensors. */
     const OperatorType mOperatorType;
+
+    /** @brief Categories for each input. */
     const std::vector<InputCategory> mInputsCategory;
+
+    /** @brief Number of outputs generated by the operator. */
     const IOIndex_t mNbOut;
+
+    /** @brief Set of back edge input indexes for recurring operators. */
     std::set<IOIndex_t> mBackEdges;
 
 public:
+    /** @brief Deleted default constructor. */
     Operator() = delete;
-    Operator(const std::string& type, const std::vector<InputCategory>& inputsCategory, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
-    : mType(type),
-      mOperatorType(operatorType),
-      mInputsCategory(inputsCategory),
-      mNbOut(nbOut)
+
+    /**
+     * @brief Constructs an Operator instance.
+     *
+     * @param[in] type           The type of operator (e.g., "Add", "AveragePool").
+     * @param[in] inputsCategory Categories of each input.
+     * @param[in] nbOut          Number of outputs.
+     * @param[in] operatorType   Type of operator (Data or Tensor).
+     */
+    Operator(const std::string& type, const std::vector<InputCategory>& inputsCategory,
+             const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
+        : mType(type),
+          mOperatorType(operatorType),
+          mInputsCategory(inputsCategory),
+          mNbOut(nbOut)
     {
         // ctor
     }
 
+    /**
+     * @brief Copy constructor.
+     *
+     * @param[in] op The operator to copy.
+     */
     Operator(const Operator& op):
         std::enable_shared_from_this<Operator>(),
         mOperatorType(op.mOperatorType),
@@ -82,57 +128,115 @@ public:
         // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
         // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
     }
-    std::shared_ptr<Operator> operator()(std::shared_ptr<DynamicAttributes> attrs) {
-        mInheritedAttrs = attrs;
-        return shared_from_this();
-    }
+    // std::shared_ptr<Operator> operator()(std::shared_ptr<DynamicAttributes> attrs) {
+    //     mInheritedAttrs = attrs;
+    //     return shared_from_this();
+    // }
 
+    /** @brief Virtual destructor. */
     virtual ~Operator() noexcept;
 
 public:
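+    /** @brief Registers the attributes of a Node that becomes associated with this Operator. */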
+    void setInheritedAttrs(std::shared_ptr<DynamicAttributes>& attr) {
+        mInheritedAttrs.push_back(attr);
+    }
+
+    /**
+     * @brief Creates a clone of the current operator.
+     *
+     * Derived classes must implement this method to provide a deep copy of the operator.
+     *
+     * @return A shared pointer to the cloned operator.
+     */
     virtual std::shared_ptr<Operator> clone() const = 0;
 
+    /** @brief Returns the attributes of the operator. */
     virtual std::shared_ptr<Attributes> attributes() const { return nullptr; };
-    virtual std::shared_ptr<DynamicAttributes> inheritedAttributes() const { return mInheritedAttrs; };
+
     /**
-     * @brief Set the specified input with a shallow copy.
-     * @param inputIdx Index of the input to set.
-     * @param data Data to copy.
+     * @brief Get the currently associated Node's attributes.
+     * @return Shared pointer to the Attributes of the associated Node.
+     *
+     * If no Node has been associated with the Operator, returns a `nullptr`.
+     * @note As Operators have only been tested with a single associated Node,
+     * only attributes of the first associated Node are returned. This should be
+     * updated.
+     */
+    virtual std::shared_ptr<DynamicAttributes> inheritedAttributes() const {
+        return mInheritedAttrs.empty() ? nullptr : mInheritedAttrs[0];
+    }
+
+    /**
+     * @brief Associates a shallow copy of the specified input data with the operator.
+     *
+     * Derived classes must implement this method.
+     *
+     * @param[in] inputIdx Index of the input to associate.
+     * @param[in] data     Data to associate.
      */
     virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
+
+    /** @brief Resets the specified input. */
     virtual void resetInput(const IOIndex_t inputIdx) = 0;
 
     /**
-     * @brief Set the specified input value by performing a deep copy of the given data.
-     * The pointer itself is not changed, thus keeping the current connections.
-     * @param inputIdx Index of the input to set.
-     * @param data Data to copy.
+     * @brief Sets the specified input with a deep copy of the given data.
+     *
+     * Derived classes must implement this method.
+     *
+     * @param[in] inputIdx Index of the input to set.
+     * @param[in] data     Data to set.
      */
     virtual void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
+
+    /** @brief Retrieves the raw input data for the specified index. */
     virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0;
-        /**
-     * @brief Set the specified output value by performing a deep copy of the given data.
-     * The pointer itself is not changed, thus keeping the current connections.
-     * @param inputIdx Index of the input to set.
+
+    /**
+     * @brief Sets the specified output with a deep copy of the given data.
+     *
+     * Derived classes must implement this method.
+     *
+     * @param[in] outputIdx Index of the output to set.
+     * @param[in] data      Data to set.
      */
     virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const = 0;
-    virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
 
+    /** @brief Retrieves the raw output data for the specified index. */
+    virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
 
-///////////////////////////////////////////////////////
-//        IMPLEMENTATION
-///////////////////////////////////////////////////////
-    virtual std::string backend() const noexcept {
-        return mImpl ? mImpl->backend() : "";
-    }
+    /**
+     * @brief Returns the backend implementation name.
+     *
+     * @return The name of the backend implementation.
+     */
+    virtual std::string backend() const noexcept { return mImpl ? mImpl->backend() : ""; }
 
+    /**
+     * @brief Sets the backend implementation.
+     *
+     * @param[in] name   Name of the backend.
+     * @param[in] device Device index.
+     */
     virtual void setBackend(const std::string& name, DeviceIdx_t device = 0) = 0;
+
+    /**
+     * @brief Sets the backend implementation for multiple devices.
+     *
+     * @param[in] backends A vector of backend and device index pairs.
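+     *
+     * Example (illustrative; available backend names depend on the backend libraries
+     * loaded, and `myOp` is a hypothetical shared_ptr to a concrete Operator):
+     * @code
+     * myOp->setBackend({{"cpu", 0}, {"cuda", 0}});
+     * @endcode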
+     */
     void setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends);
+
     virtual void setDataType(const DataType& dataType) const = 0;
     virtual void setDataFormat(const DataFormat& dataFormat) const = 0;
-
+    /**
+     * @brief Returns the available backend implementations.
+     *
+     * Derived classes must implement this method.
+     *
+     * @return A set of available backend names.
+     */
     virtual std::set<std::string> getAvailableBackends() const = 0;
-
     /**
      * @brief Set a new OperatorImpl to the Operator
      *
@@ -183,52 +287,66 @@ public:
     virtual void forward();
 
     virtual void backward();
+    /**
+     * @brief Returns the type of the operator.
+     *
+     * @return The operator type as a string.
+     */
+    inline std::string type() const noexcept { return mType; }
 
-///////////////////////////////////////////////////////
-//        INNER
-///////////////////////////////////////////////////////
-
-    inline std::string type() const noexcept {
-        return mType;
-    }
+    /**
+     * @brief Returns the type of the operator (Data or Tensor).
+     *
+     * @return The operator type as an `OperatorType` enum value.
+     */
+    inline OperatorType operatorType() const noexcept { return mOperatorType; }
 
-    inline OperatorType operatorType() const noexcept{
-        return mOperatorType;
-    }
+    /** @brief Returns the categories of all inputs. */
+    inline std::vector<InputCategory> inputCategory() const { return mInputsCategory; }
 
+    /** @brief Returns the category of a specific input. */
     inline InputCategory inputCategory(IOIndex_t idx) const {
         // AIDGE_ASSERT(idx < mInputsCategory.size(), "Input #{} out of range (number of inputs is {})", idx, mInputsCategory.size());
         return mInputsCategory.at(idx);
     }
 
+    inline bool isOptionalInput(std::size_t inputIdx) const {
+        return static_cast<int>(mInputsCategory[inputIdx]) > 1;
+    }
+
     virtual inline bool isAtomic() const noexcept { return true; }
 
-    inline IOIndex_t nbInputs() const noexcept { return mInputsCategory.size(); };
-    inline IOIndex_t nbOutputs() const noexcept { return mNbOut; };
+    /** @brief Returns the number of inputs. */
+    inline IOIndex_t nbInputs() const noexcept { return mInputsCategory.size(); }
+
+    /** @brief Returns the number of outputs. */
+    inline IOIndex_t nbOutputs() const noexcept { return mNbOut; }
 
     /**
-     * @brief Set the back edge input indexes for recurting operators.
-     * Any recurring operators should specify it's back edges, otherwise
-     * the interpretation of the data flow graph may not be possible.
+     * @brief Sets the back edge input indexes for recurring operators.
+     *
+     * @param[in] backEdges A set of input indexes representing back edges.
      */
     inline void setBackEdges(const std::set<IOIndex_t>& backEdges) { mBackEdges = backEdges; }
 
     /**
-     * @brief Returns whether the given input index is a back edge.
-     * @return true if the input index is in the back edge set
+     * @brief Checks if a given input index is a back edge.
+     *
+     * @param[in] inputIdx Index of the input to check.
+     * @return True if the input index is a back edge, false otherwise.
      */
-    inline bool isBackEdge(IOIndex_t inputIdx) const {
-        return mBackEdges.find(inputIdx) != mBackEdges.end();
-    }
+    inline bool isBackEdge(IOIndex_t inputIdx) const { return mBackEdges.find(inputIdx) != mBackEdges.end(); }
 
-    static const std::vector<std::string> getInputsName() {
-        return {};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {};
-    }
+    /** @brief Returns the names of the operator's inputs (empty by default; redefined by derived operators). */
+    static const std::vector<std::string> getInputsName() { return {}; }
+
+    /** @brief Returns the names of the operator's outputs (empty by default; redefined by derived operators). */
+    static const std::vector<std::string> getOutputsName() { return {}; }
 
 #ifdef PYBIND
+    /**
+     * @brief Returns a string representation of the operator (for Python bindings).
+     */
     std::string repr() const {
         return fmt::format("Operator(type = '{}', nb_in = {}, nb_out = {}, attr = {}, backend = {})",
                     type(),
@@ -239,6 +357,7 @@ public:
     }
 #endif
 };
-} // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_OPERATOR_H_ */
+}  // namespace Aidge
+
+#endif  // AIDGE_CORE_OPERATOR_OPERATOR_H_
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 19e2f13e4ff39fee181c6ad0cf2fbab510f22c3e..a515ecb5ba4843c1338f526d05cdef327bfd1ba0 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -23,18 +23,36 @@
 namespace Aidge {
 
 class Tensor;
+
+/**
+ * @class OperatorTensor
+ * @brief Base class for all operators that work with tensor inputs and outputs.
+ *
+ * The `OperatorTensor` class provides an abstraction for operations on tensors
+ * with features such as input/output management, dimension handling, and
+ * receptive field computation.
+ *
+ * @see Operator
+ */
 class OperatorTensor : public Operator {
     /* TODO: Add an attribute specifying the type of Data used by the Operator.
      * The same way ``Type`` attribute specifies the type of Operator. Hence this
      * attribute could be checked in the forwardDims function to assert Operators
-     * being used work with Tensors and cast them to OpertorTensor instead of
+     * being used work with Tensors and cast them to OperatorTensor instead of
      * Operator.
      */
     /* TODO: Maybe change type attribute of Data object by an enum instead of an
      * array of char. Faster comparisons.
      */
 protected:
+    /**
+     * @brief Inputs of the operator represented as tensors.
+     */
     std::vector<std::shared_ptr<Tensor>> mInputs;
+
+    /**
+     * @brief Outputs of the operator represented as tensors.
+     */
     std::vector<std::shared_ptr<Tensor>> mOutputs;
 
 public:
@@ -43,7 +61,7 @@ public:
     /**
      * @brief Operator tensor constructor. This function is not meant to be called directly but by a derived class constructor
      * every operator class derive from this class.
-     * 
+     *
 	 * @param[in] type     : type of operator (i.e. "Add", "AveragePool",...)
 	 * @param[in] inputsCategory : describes the type of each input.
 	 * @param[in] nbOut    : Number of tensors this operator will output
@@ -51,63 +69,142 @@ public:
     OperatorTensor(const std::string& type, const std::vector<InputCategory>& inputsCategory,
                    const IOIndex_t nbOut);
 
+    /**
+     * @brief Copy constructor.
+     * @param[in] other Another `OperatorTensor` instance to copy.
+     */
     OperatorTensor(const OperatorTensor& other);
 
+    /**
+     * @brief Destructor for the OperatorTensor class.
+     */
     ~OperatorTensor();
 
 public:
     ///////////////////////////////////////////////////
+    /**
+     * @brief Associates an input tensor to the operator.
+     * @param[in] inputIdx Index of the input to associate.
+     * @param[in] data     Shared pointer to the data to associate.
+     */
     virtual void associateInput(const IOIndex_t inputIdx,
                                 const std::shared_ptr<Data>& data) override;
+
+    /**
+     * @brief Resets the input tensor at a given index.
+     * @param[in] inputIdx Index of the input to reset.
+     */
     void resetInput(const IOIndex_t inputIdx) override final;
     ///////////////////////////////////////////////////
 
     ///////////////////////////////////////////////////
     // Tensor access
-    // input management
+
+    /**
+     * @brief Sets an input tensor for the operator.
+     * @param[in] inputIdx Index of the input to set.
+     * @param[in] data     Shared pointer to the data to set.
+     */
     void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override;
+
+    /**
+     * @brief Retrieves an input tensor.
+     * @param[in] inputIdx Index of the input to retrieve.
+     * @return Shared pointer to the input tensor.
+     */
     const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const;
+
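+    /** @brief Returns the vector of all input tensors. */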
+    virtual const std::vector<std::shared_ptr<Tensor>>& getInputs() const;
+
+    /**
+     * @brief Retrieves a raw input tensor.
+     * @param[in] inputIdx Index of the input to retrieve.
+     * @return Shared pointer to the raw input tensor.
+     */
     std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final;
 
-    // output management
+    /**
+     * @brief Sets an output tensor for the operator.
+     * @param[in] outputIdx Index of the output to set.
+     * @param[in] data      Shared pointer to the data to set.
+     */
     void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const override;
+
+    /**
+     * @brief Retrieves an output tensor.
+     * @param[in] outputIdx Index of the output to retrieve.
+     * @return Shared pointer to the output tensor.
+     */
     virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const;
+
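+    /** @brief Returns the vector of all output tensors. */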
+    virtual const std::vector<std::shared_ptr<Tensor>>& getOutputs() const;
+
+    /**
+     * @brief Retrieves a raw output tensor.
+     * @param[in] outputIdx Index of the output to retrieve.
+     * @return Shared pointer to the raw output tensor.
+     */
     std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final;
     ///////////////////////////////////////////////////
 
     ///////////////////////////////////////////////////
     // Tensor dimensions
+
     /**
-     * @brief For a given output feature area, compute the associated receptive
-     * field for each data input.
-     * @param firstIdx First index of the output feature.
-     * @param outputDims Size of output feature.
-     * @param outputIdx Index of the output. Default 0.
-     * @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
-     * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
+     * @brief Computes the receptive field for a given output feature area.
+     *
+     * @param[in] firstIdx    First index of the output feature.
+     * @param[in] outputDims  Dimensions of the output feature.
+     * @param[in] outputIdx   Index of the output (default is 0).
+     *
+     * @return Vector of pairs containing, for each data input tensor, the first index and dimensions of the feature area.
      */
     virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
 
-	/**
-	 * @brief Will compute the dimensions of operator's output tensor given the input sizes
- 	 *        If the output dimensions cannot be computed because it depends on some undefined inputs then forwardDims will return false and enter in TOKEN mode for subsequent tensors.
- 	 *        - TOKEN mode means that forwarddims will only ensure that all inputs and outputs of the graph the node is within are connected.
- 	 * @param[in] allowDataDependency if set to true, this means that this operator output dimensions depends on the dimensions of optional parameter tensors.
- 	 * @return true if dims have been properly forwarded. false otherwise. If set to false, then forwardDims will enter in token mode.
- 	 *      
+    /**
+     * @brief Computes the dimensions of the operator's output tensor based on input sizes.
+     *
+     * - If the output dimensions depend on undefined inputs, this function returns false and enters TOKEN mode.
+     * - TOKEN mode ensures that all inputs and outputs of the graph the node belongs to are connected.
+     *
+     * @param[in] allowDataDependency Flag to indicate if output dimensions depend on optional parameter tensors.
+     * @return True if dimensions are successfully computed, false otherwise.
      */
     virtual bool forwardDims(bool allowDataDependency = false);
+
+    /**
+     * @brief Checks if dimensions have been successfully forwarded.
+     * @return True if dimensions are forwarded, false otherwise.
+     */
     virtual bool dimsForwarded() const;
     ///////////////////////////////////////////////////
 
+    /**
+     * @brief Sets the data type of the operator's tensors.
+     * @param dataType Data type to set.
+     */
     virtual void setDataType(const DataType& dataType) const override;
+
+    /**
+     * @brief Sets the data format of the operator's tensors.
+     * @param dataFormat Data format to set.
+     */
     virtual void setDataFormat(const DataFormat& dataFormat) const override;
 
+    /**
+     * @brief Executes the forward operation for the operator.
+     */
     virtual void forward() override;
 
 protected:
+    /**
+     * @brief Checks if all inputs are associated.
+     * @param checkNonEmpty Flag to indicate if non-empty check is required.
+     * @return True if inputs are associated, false otherwise.
+     */
     bool inputsAssociated(bool checkNonEmpty = true) const;
 };
+
 }  // namespace Aidge
 
-#endif  // AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_
\ No newline at end of file
+#endif  // AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 181a4e88a0fe30e2a86c44adda2195d7e6f5293d..c1ed3500cf3dc8c36c611f2d5b41744e881c299e 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -25,106 +25,255 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
+
+/**
+ * @enum PadAttr
+ * @brief Attributes for the Pad operator.
+ */
+enum class PadAttr {
+    BeginEndBorders, ///< Specifies the padding sizes for the beginning and end of each dimension.
+    BorderType,      ///< Type of border handling during padding.
+    BorderValue      ///< Value to be used for constant padding.
+};
+
+/**
+ * @enum PadBorderType
+ * @brief Types of border handling available for padding.
+ */
 enum class PadBorderType {
-    /** @brief all out of bound values will be set to a given value.*/
-    Constant,
-    Edge,
-    Reflect,
-    Wrap,
-    /** @brief all out of bound values will be set to 0.*/
-    Zero,
+    Constant, ///< Out-of-bound values set to a constant value.
+    Edge,     ///< Values are replicated from the nearest edge.
+    Reflect,  ///< Values are mirrored around the edges.
+    Wrap,     ///< Values wrap around the tensor dimensions.
+    Zero      ///< All out-of-bound values are set to 0.
 };
 
+/**
+ * @class Pad_Op
+ * @brief Implementation of the Pad operator.
+ *
+ * The Pad operator adds padding to the input tensor along specified dimensions.
+ * Padding can be customized by specifying the border type and border value.
+ *
+ * @tparam DIM The number of dimensions for the padding operation.
+ *
+ * Padding is added along each spatial dimension (the batch and channel dimensions are left
+ * unchanged), using a configurable border handling technique (e.g., constant value, edge
+ * replication, reflection or wrapping).
+ *
+ * ### Output Tensor Shape:
+ * If the input tensor has a shape `[B, C, d1, d2, ..., dN]`, where `B` is the batch size, 
+ * `C` is the number of channels, and `[d1, d2, ..., dN]` are the spatial dimensions, 
+ * and the padding is defined by `beginEndTuples = {b1, e1, b2, e2, ..., bN, eN}`, 
+ * the output tensor shape will be:
+ * 
+ * `[B, C, d1 + b1 + e1, d2 + b2 + e2, ..., dN + bN + eN]`.
+ * 
+ * The padding values `b_i` and `e_i` specify the number of elements to add before and after 
+ * the corresponding spatial dimension `d_i`. Batch size and channel count remain unchanged.
+ *
+ * @example Constant Padding:
+ *    - beginEndTuples = [1, 1, 2, 2]
+ *    - Input tensor shape: `[B, C, 3, 4]` (batch and channel are fixed)
+ *    - Output tensor shape: `[B, C, 3 + 1 + 1, 4 + 2 + 2] = [B, C, 5, 8]`
+ *    - Padding values: Zero (0.0).
+ *
+ * @example Edge Padding:
+ *    - padding = [2, 2, 3, 3]
+ *    - Input tensor shape: `[B, C, 5, 5]`
+ *    - Output tensor shape: `[B, C, 5 + 2 + 2, 5 + 3 + 3] = [B, C, 9, 11]`
+ *    - Padding values replicate the edge values of the tensor.
+ *
+ * @example Reflect Padding:
+ *    - padding = [1, 1, 2, 2, 0, 0]
+ *    - Input tensor shape: `[B, C, 4, 5, 6]`
+ *    - Output tensor shape: `[B, C, 4 + 1 + 1, 5 + 2 + 2, 6 + 0 + 0] = [B, C, 6, 9, 6]`
+ *    - Padding values mirror the existing tensor values.
+ *
+ * This operator is commonly used for image processing, extending spatial dimensions while maintaining 
+ * batch and channel consistency, or aligning tensor dimensions in machine learning workflows.
+ */
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
-                public Registrable<Pad_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>> {
+               public Registrable<Pad_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pad_Op<DIM>&)>> {
 public:
+    /**
+     * @brief Static string indicating the type of the operator.
+     */
     static const std::string Type;
 
 private:
     using Attributes_ = StaticAttributes<PadAttr,
-                                            std::array<DimSize_t, 2*DIM>,
-                                            PadBorderType,
-                                            double>;
+                                         std::array<DimSize_t, 2 * DIM>, // Padding for start and end of each dimension.
+                                         PadBorderType,                  // Border handling type.
+                                         double                          // Border value for constant padding.
+                                         >;
     template <PadAttr e>
     using attr = typename Attributes_::template attr<e>;
-    const std::shared_ptr<Attributes_> mAttributes;
 
-public:
+    const std::shared_ptr<Attributes_> mAttributes; ///< Holds operator attributes.
 
+public:
+    /**
+     * @brief Deleted default constructor.
+     */
     Pad_Op() = delete;
 
-    constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
+    /**
+     * @brief Constructor for the Pad operator.
+     * @param beginEndTuples Array specifying padding for the beginning and end of each dimension.
+     * @param borderType Type of border handling (default is constant).
+     * @param borderValue Value to use for constant padding (default is 0.0).
+     */
+    constexpr Pad_Op(const std::array<DimSize_t, 2 * DIM>& beginEndTuples,
                      PadBorderType borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
-            attr<PadAttr::BeginEndBorders>(beginEndTuples),
-            attr<PadAttr::BorderType>(borderType),
-            attr<PadAttr::BorderValue>(borderValue))) {}
+              attr<PadAttr::BeginEndBorders>(beginEndTuples),
+              attr<PadAttr::BorderType>(borderType),
+              attr<PadAttr::BorderValue>(borderValue))) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor.
      * @param op Operator to copy.
+     * @details Copies operator attributes and its output tensors, but not its input tensors. The new operator has no associated input.
      */
     Pad_Op(const Pad_Op& op)
         : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {}
+          mAttributes(op.mAttributes) {}
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Pad_Op
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
-
+    /**
+     * @brief Compute output dimensions during the forward pass.
+     * @param[in] allowDataDependency Flag indicating whether to allow data-dependent dimensions.
+     * @return True if dimensions are computed successfully.
+     */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    /**
+     * @brief Set the backend for the Pad operator.
+     * @param name Name of the backend.
+     * @param device Device index (optional).
+     */
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Get the available backends for the Pad operator.
+     * @return A set of available backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     * @return A shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<PadAttr::BeginEndBorders>(); }
-    inline PadBorderType& borderType() const noexcept { return mAttributes->template getAttr<PadAttr::BorderType>(); }
-    inline double& borderValue() const noexcept { return mAttributes->template getAttr<PadAttr::BorderValue>(); }
 
-    static const std::vector<std::string> getInputsName(){
+    /**
+     * @brief Get or modify the padding values for each dimension.
+     * @return Reference to the padding values.
+     */
+    inline std::array<DimSize_t, 2 * DIM>& beginEndBorders() const noexcept {
+        return mAttributes->template getAttr<PadAttr::BeginEndBorders>();
+    }
+
+    /**
+     * @brief Get or modify the border type for padding.
+     * @return Reference to the border type.
+     */
+    inline PadBorderType& borderType() const noexcept {
+        return mAttributes->template getAttr<PadAttr::BorderType>();
+    }
+
+    /**
+     * @brief Get or modify the border value for constant padding.
+     * @return Reference to the border value.
+     */
+    inline double& borderValue() const noexcept {
+        return mAttributes->template getAttr<PadAttr::BorderValue>();
+    }
+
+    /**
+     * @brief Get the input tensor names.
+     * @return Vector of input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+
+    /**
+     * @brief Get the output tensor names.
+     * @return Vector of output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Create a Pad operation node.
+ *
+ * @tparam DIM Number of dimensions for the operation.
+ * @param[in] beginEndTuples Array specifying padding for the beginning and end of each dimension.
+ * @param[in] name Name of the operator (optional).
+ * @param[in] borderType Type of border handling (optional, default is constant).
+ * @param[in] borderValue Value for constant padding (optional, default is 0.0).
+ * @return A shared pointer to the Node containing the Pad operator.
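+ *
+ * Minimal usage sketch (node name and padding values are illustrative only):
+ * @code
+ * // 2-D padding: 1 element before/after the first spatial dim,
+ * // 2 elements before/after the second, with constant (zero) padding.
+ * auto pad = Aidge::Pad<2>({1, 1, 2, 2}, "pad0", Aidge::PadBorderType::Constant, 0.0);
+ * @endcode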
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
-std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
-                        const std::string& name = "",
-                        PadBorderType borderType = PadBorderType::Constant,
-                        double borderValue = 0.0);
+std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2 * DIM>& beginEndTuples,
+                          const std::string& name = "",
+                          PadBorderType borderType = PadBorderType::Constant,
+                          double borderValue = 0.0);
 
-// helper with C-style array instead of std::array for beginEndTuples to allow automatic template DIM deduction
+// Helper overload with C-style array for automatic DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> Pad(
-    DimSize_t const (&beginEndTuples)[2*DIM],
+    DimSize_t const (&beginEndTuples)[2 * DIM],
     const std::string& name = "",
     PadBorderType borderType = PadBorderType::Constant,
-    double borderValue = 0.0)
-{
+    double borderValue = 0.0) {
     return Pad<DIM>(to_array(beginEndTuples), name, borderType, borderValue);
 }
+
 }  // namespace Aidge
 
+// Explicit instantiations for common dimensions
 extern template class Aidge::Pad_Op<1>;
 extern template class Aidge::Pad_Op<2>;
 
 namespace {
+
+/**
+ * @brief EnumStrings specialization for PadAttr.
+ */
 template <>
-const char *const EnumStrings<Aidge::PadAttr>::data[] = {"begin_end_borders", "border_type", "border_value"};
+const char* const EnumStrings<Aidge::PadAttr>::data[] = {
+    "begin_end_borders",
+    "border_type",
+    "border_value"
+};
 
+/**
+ * @brief EnumStrings specialization for PadBorderType.
+ */
 template <>
-const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Edge", "Reflect", "Wrap"};
-}
+const char* const EnumStrings<Aidge::PadBorderType>::data[] = {
+    "Constant",
+    "Edge",
+    "Reflect",
+    "Wrap",
+    "Zero"
+};
+
+}  // namespace
 
 #endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index d5898b3630721b036b3acb916e6dec87455009f7..0624286f7c8a8a84dd5aac5eae16c019b7a9e88b 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -24,21 +24,91 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+
+/**
+ * @class Pop_ProdConso
+ * @brief Implements the producer-consumer principle for the `Pop` operator.
+ *
+ * The `Pop_ProdConso` class defines the logic for managing data dependencies during
+ * the forward process of the `Pop` operator.
+ *
+ * This class ensures that:
+ * - All data consumed by the `Pop` operator is correctly handled.
+ * - The operator respects memory and data requirements during the forward computation.
+ */
 class Pop_ProdConso : public ProdConso {
 public:
+    /**
+     * @brief Constructor for the `Pop_ProdConso` class.
+     * @param[in] op The operator instance for which producer-consumer relationships are managed.
+     *
+     * @details:
+     * - The provided `Operator` instance is used to initialize the base `ProdConso` class.
+     * - This operator determines specific requirements for data consumption during the forward process.
+     */
     Pop_ProdConso(const Operator& op): ProdConso(op) {}
+
+    /**
+     * @brief Get the number of data elements required from an input tensor for forward computation.
+     * @param[in] inputIdx The index of the input tensor.
+     * @return The number of required elements (`Elts_t`).
+     *
+     * @details:
+     * - For each input tensor identified by `inputIdx`, this method calculates the
+     *   minimum amount of data needed by the `Pop` operator to perform its forward step.
+     */
     Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
 };
 
+/**
+ * @class Pop_OpImpl
+ * @brief Implementation of the `Pop` operation.
+ *
+ * The `Pop_OpImpl` class defines the backend-agnostic logic for executing
+ * the forward pass of the `Pop` operator.
+ */
 class Pop_OpImpl : public OperatorImpl {
 public:
+    /**
+     * @brief Constructs a `Pop_OpImpl` object.
+     * @param[in] op The operator to be implemented.
+     * @param[in] backend The backend used for execution (optional).
+     */
     Pop_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Pop_ProdConso>(mOp); };
+
+    /**
+     * @brief Get the Producer Consumer object of the operator.
+     * @return A shared pointer to the `Pop_ProdConso` object.
+     */
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Pop_ProdConso>(mOp); }
+
+    /**
+     * @brief Executes the forward pass for the `Pop` operation.
+     */
     void forward() override;
 };
 
-enum class PopAttr { ForwardStep };
+/**
+ * @enum PopAttr
+ * @brief Attributes specific to the `Pop` operator.
+ */
+enum class PopAttr {
+    ForwardStep     ///< Tracks the current step in the forward pass.
+};
 
+/**
+ * @class Pop_Op
+ * @brief The `Pop` operator is responsible for removing and outputting elements from a data structure.
+ *
+ * `Pop` operators are used in models that manipulate sequential data or stacks.
+ * - **Inputs**:
+ *   - `data_input`: The main data input from which elements are removed.
+ * - **Outputs**:
+ *   - `data_output`: The output after performing the `Pop` operation.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Pop_Op : public OperatorTensor,
     public Registrable<Pop_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Pop_Op&)>> {
 public:
@@ -50,46 +120,102 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    /**
+     * @brief Default constructor for the `Pop` operator.
+     */
     Pop_Op();
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copies the operator's attributes and its output tensors.
+     * The new operator has no input tensors associated initially.
      * @param op Operator to copy.
      */
     Pop_Op(const Pop_Op& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Pop_Op
+     * @brief Clone the operator by creating a copy of it.
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Assign a specific backend and device for computation.
+     * @param name Name of the backend.
+     * @param device The device index (default is 0).
+     */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    /**
+     * @brief Get the list of available backends compatible with this operator.
+     * @return A set of strings representing backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Perform dimension inference for the operator, optionally allowing
+     * data dependency during the process.
+     * @param allowDataDependency Flag indicating whether data dependency is allowed.
+     * @return True if dimensions were successfully inferred.
+     */
     bool forwardDims(bool allowDataDependency = false) override final;
+
+    /**
+     * @brief Update the producer-consumer relationships for the `Pop` operator.
+     * @details:
+     * - Ensures all required outputs are properly handled during the forward pass.
+     */
     void updateConsummerProducer() override;
+
+    /**
+     * @brief Executes the forward pass for the `Pop` operation.
+     */
     void forward() override;
 
+    /**
+     * @brief Access the operator's attributes.
+     * @return A shared pointer to the operator's attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get or set the forward step counter for the operator.
+     * @return A reference to the forward step counter.
+     */
     inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<PopAttr::ForwardStep>(); }
 
-    static const std::vector<std::string> getInputsName(){
+    /**
+     * @brief Retrieve the names of the operator's input tensors.
+     * @return A vector of strings representing input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+
+    /**
+     * @brief Retrieve the names of the operator's output tensors.
+     * @return A vector of strings representing output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Create a `Pop` operator node.
+ * @param name The optional name for the node.
+ * @return A shared pointer to the newly created `Pop` operator node.
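+ *
+ * Minimal usage sketch (node name is illustrative only):
+ * @code
+ * auto pop = Aidge::Pop("pop0");
+ * @endcode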
+ */
 std::shared_ptr<Node> Pop(const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
+/**
+ * @brief String representations of the `Pop` operator's attributes.
+ */
 template <>
 const char *const EnumStrings<Aidge::PopAttr>::data[] = {
     "forward_step"
 };
 }
 
-#endif /* AIDGE_CORE_OPERATOR_POP_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_POP_H_ */
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index f6762dd33088f486184bdfd0a5b8dbdbd0c641da..5d0afc79d75f81d926a86a60cb73506347ddbd79 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -24,6 +24,28 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of an element-wise Pow (power) operation on input Tensors,
+ * supporting NumPy broadcasting.
+ *
+ * For each pair of elements x and y from the input Tensors, the function 
+ * is defined as:
+ * `f(x, y) = x^y`
+ *
+ * Broadcasting adjusts shapes of the input Tensors to make them compatible:
+ * - Tensors are aligned from the rightmost dimensions.
+ * - Dimensions are compatible if they are equal, if one of them is 1, or if one of them is missing.
+ *
+ * The output Tensor shape is determined by taking the maximum size along 
+ * each dimension of the input Tensors after broadcasting.
+ * 
+ * Examples:
+ * 1. Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+ * 2. Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+ * 
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Pow_Op : public OperatorTensor,
     public Registrable<Pow_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pow_Op&)>> {
 public:
@@ -32,8 +54,10 @@ public:
     Pow_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Pow_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Pow_Op(const Pow_Op& op)
         : OperatorTensor(op)
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 115ddcb5549b1c0daa01b3ab67946655cda7287c..1d6b965820e90dcd9b54c61795358c5332d77efc 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -12,9 +12,13 @@
 #ifndef AIDGE_CORE_OPERATOR_PRODUCER_H_
 #define AIDGE_CORE_OPERATOR_PRODUCER_H_
 
-#include <cstddef>
 #include <array>
+#include <cstddef>
+#include <functional>
 #include <memory>
+#include <set>
+#include <string>
+#include <stdexcept>
 #include <vector>
 
 #include "aidge/utils/Types.h"
@@ -26,12 +30,40 @@
 
 namespace Aidge {
 
+/**
+ * @enum ProdAttr
+ * @brief Attributes specific to the `Producer_Op` class.
+ */
 enum class ProdAttr { Constant };
 
+/**
+ * @class Producer_Op
+ * @brief Represents an operator that stores a tensor in memory and provides it as an output.
+ * 
+ * The `Producer_Op` class is a specialized operator designed to store a tensor in memory 
+ * and return it as an output tensor. It is typically used to store parameters or input 
+ * values for a computational graph. A `Producer_Op` does not have any input data, parameters, 
+ * or attributes, making it a fundamental building block for constant or initialized values 
+ * within the graph.
+ * 
+ * Key characteristics of a `Producer_Op`:
+ * - No inputs: The operator does not accept any input tensors.
+ * - A single `Constant` attribute: indicates whether the stored tensor should be treated as a constant value.
+ * - Stores and returns a tensor: The stored tensor is accessible as the operator's output.
+ * 
+ * This operator is useful for scenarios where fixed or pre-initialized tensors need to 
+ * be introduced into a graph, such as weights, biases, or constant values.
+ * 
+ * @see OperatorTensor
+ * @see Registrable
+ */
+
 class Producer_Op
     : public OperatorTensor,
-      public Registrable<Producer_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(
-                                          const Producer_Op &)>> {
+      public Registrable<Producer_Op,
+                         std::string,
+                         std::function<std::shared_ptr<OperatorImpl>(const Producer_Op&)>>
+{
 public:
     static const std::string Type;
 
@@ -43,82 +75,178 @@ private:
 public:
     Producer_Op() = delete;
 
+    /**
+     * @brief Constructs a `Producer_Op` object with specific dimensions.
+     * 
+     * @tparam DIM The number of dimensions for the tensor.
+     * @param[in] dims Array defining the dimensions of the tensor.
+     * @param[in] constant Indicates whether the tensor is constant.
+     */
     template <std::size_t DIM>
-    Producer_Op(const std::array<DimSize_t, DIM>& dims,
-                bool constant = false);
+    Producer_Op(const std::array<DimSize_t, DIM>& dims, bool constant = false);
 
     /**
-     * @brief Construct a new Producer_Op object from a Tensor.
-     *
-     * @param tensor Tensor to set in the Prducer.
-     * @param constant Whether the Producer should be considered constant.
+     * @brief Constructs a `Producer_Op` object from an existing tensor.
+     * 
+     * @param[in] tensor A shared pointer to the tensor to be produced.
+     * @param[in] constant Indicates whether the tensor should be constant.
      */
     Producer_Op(const std::shared_ptr<Tensor> tensor, bool constant = false);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
-     * but not its input tensors (the new operator has no input associated).
-     * @param op OperatorTensor to copy.
+     * @brief Copy constructor.
+     * 
+     * Copies the attributes and output tensors of the operator. 
+     * Input tensors are not copied, and the new operator will have no associated inputs.
+     * 
+     * @param[in] op The `Producer_Op` object to copy.
      */
     Producer_Op(const Producer_Op& op);
 
 public:
     /**
-     * @brief Conversion operator from Producer to Tensor.
-     *
-     * @return std::shared_ptr<Tensor>
+     * @brief Conversion operator to retrieve the output tensor.
+     * 
+     * @return A shared pointer to the output tensor.
      */
     operator std::shared_ptr<Tensor>() const { return mOutputs[0]; }
 
-public:
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Producer_Op(const Producer_Op&)
+     * @brief Clones the operator using the copy constructor.
+     * 
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
-    void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
-    }
-
-    inline bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
-
-    inline bool dimsForwarded() const noexcept override final { return true; }
-
-
+    /**
+     * @brief Retrieves the dimensions of the output tensor.
+     * 
+     * @return A vector containing the dimensions of the output tensor.
+     */
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
+    /**
+     * @brief Sets the backend for the operator's execution.
+     * 
+     * @param[in] name The name of the backend.
+     * @param[in] device The device index (default is 0).
+     */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Retrieves the list of available backends for this operator.
+     * 
+     * @return A set containing the names of available backends.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Retrieves the operator's attributes.
+     * 
+     * @return A shared pointer to the operator's attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
 
-    static const std::vector<std::string> getInputsName(){
-        return {};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
+    /**
+     * @brief Retrieves the constant attribute.
+     * 
+     * @return A reference to the constant attribute.
+     */
+    inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
 
+    /**
+     * @brief Performs the forward operation for the operator.
+     * 
+     * Generates the output tensor based on the defined attributes and configuration.
+     */
     void forward() override final;
 
+    /**
+     * @brief Placeholder for the backward operation.
+     * 
+     * This function logs a debug message, as `Producer_Op` typically does not support backpropagation.
+     */
     void backward() override final {
         Log::debug("Basic Producer backward() function.");
     }
 
+    /**
+     * @brief Associates an input tensor with the operator.
+     * 
+     * This operation is not supported by `Producer_Op` as it does not take inputs.
+     * 
+     * @param[in] inputIdx The index of the input.
+     * @param[in] data A shared pointer to the data to associate.
+     * 
+     * @throws std::runtime_error Always throws, as inputs are not supported.
+     */
+    void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
+    }
+
+    /**
+     * @brief Forwards dimensions; this is a no-op for `Producer_Op`, whose output tensor is already defined.
+     *
+     * @return Always true for `Producer_Op`.
+     */
+    inline bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
+
+    /**
+     * @brief Confirms that dimensions have been forwarded.
+     * 
+     * @return Always true for `Producer_Op`.
+     */
+    inline bool dimsForwarded() const noexcept override final { return true; }
+
+    /**
+     * @brief Retrieves the names of the inputs for the operator.
+     * 
+     * @return An empty vector, as `Producer_Op` takes no inputs.
+     */
+    static const std::vector<std::string> getInputsName() { return {}; }
+
+    /**
+     * @brief Retrieves the names of the outputs for the operator.
+     * 
+     * @return A vector containing the output name "data_output".
+     */
+    static const std::vector<std::string> getOutputsName() { return {"data_output"}; }
+
+    /**
+     * @brief Sets the output tensor for the operator.
+     * 
+     * @param[in] outputIdx Index of the output to set.
+     * @param[in] data A shared pointer to the data.
+     */
     void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const override;
 };
 
-template <std::array<DimSize_t, 1>::size_type DIM>
-std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "", bool constant = false);
+/**
+ * @brief Helper function to create a producer node with specified dimensions.
+ * 
+ * @tparam DIM The number of dimensions.
+ * @param[in] dims Array defining the dimensions of the tensor.
+ * @param[in] name Optional name for the node.
+ * @param[in] constant Indicates whether the tensor should be constant.
+ * 
+ * @return A shared pointer to the created node.
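+ *
+ * Minimal usage sketch (node name and dimensions are illustrative only):
+ * @code
+ * // 2x3 constant Producer, e.g. to hold a weight tensor.
+ * auto weights = Aidge::Producer(std::array<Aidge::DimSize_t, 2>{2, 3}, "weights0", true);
+ * @endcode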
+ */
+template <std::size_t DIM>
+std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM>& dims, const std::string& name = "", bool constant = false);
 
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+/**
+ * @brief Helper function with a C-style array for dimension deduction.
+ * 
+ * @param[in] dims C-style array defining the tensor dimensions.
+ * @param[in] name Optional name for the node.
+ * @param[in] constant Indicates whether the tensor should be constant.
+ * 
+ * @return A shared pointer to the created node.
+ */
 template <std::size_t DIM>
 inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::string& name = "", bool constant = false) {
-  return Producer(to_array(dims), name, constant);
+    return Producer(to_array(dims), name, constant);
 }
-
 std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "", bool constant = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
@@ -127,17 +255,29 @@ std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode,
             const std::array<DimSize_t, DIM>& dims,
             const std::string& extension);
 
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+/**
+ * @brief Adds a producer node to another node with a C-style array.
+ * 
+ * @param[in] otherNode The node to associate with the producer.
+ * @param[in] inputIdx The input index.
+ * @param[in] dims C-style array defining the tensor dimensions.
+ * @param[in] extension An extension string for the producer.
+ * 
+ * @return A shared pointer to the updated node.
+ */
 template <std::size_t DIM>
 std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
     return addProducer(otherNode, inputIdx, to_array(dims), extension);
 }
+
 } // namespace Aidge
 
 namespace {
+/**
+ * @brief Enum string representation for `ProdAttr`.
+ */
 template <>
-const char *const EnumStrings<Aidge::ProdAttr>::data[] = {
-    "constant"
-};
+const char* const EnumStrings<Aidge::ProdAttr>::data[] = {"constant"};
 }
+
 #endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 5d5895a8fb279f1efa5c6321614199f44402b83a..6aded36383fbb8c96cc29af12ce5a6020cbd8f32 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -26,11 +26,59 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ReduceMeanAttr { Axes, KeepDims, NoopWithEmptyAxes };
+enum class ReduceMeanAttr {
+  /**
+   * @brief Axes over which the mean operation is performed.
+   *
+   * Axes are specified as a vector of integers, each representing a dimension
+   * of the input tensor to be reduced.
+   */
+  Axes,
+
+  /**
+   * @brief Flag indicating whether to keep reduced dimensions.
+   *
+   * - `true`: Retain reduced dimensions with size 1.
+   * - `false`: Completely remove reduced dimensions.
+   */
+  KeepDims,
+
+  /**
+   * @brief Flag indicating behavior when axes are empty.
+   *
+   * - `true`: No operation is performed if axes are empty.
+   * - `false`: Reduction is performed over all axes if none are specified.
+   */
+  NoopWithEmptyAxes
+};
 
 /**
- * @brief This operator has as purpose to reduce given axes by replacing with the mean value.
-*/
+ * @class ReduceMean_Op
+ * @brief Implements the ReduceMean operation to compute the mean of a tensor along specified axes.
+ *
+ * ReduceMean reduces specified axes of a tensor by computing the mean value.
+ * The resulting output tensor dimensions depend on the provided attributes.
+ *
+ * Output Shape Calculation
+ * - If `KeepDims` is true:
+ *   Reduced dimensions are retained with size 1.
+ * - If `KeepDims` is false:
+ *   Reduced dimensions are completely removed.
+ *
+ * @example:
+ * - Input shape: (2, 3, 4, 5) // Batch size 2, 3 channels, 4x5 spatial dimensions
+ * - Axes: {2, 3}
+ * - KeepDims: true
+ * - Output shape: (2, 3, 1, 1)
+ * @example:
+ * - Input shape: (2, 3, 4, 5)
+ * - Axes: {1, 2}
+ * - KeepDims: false
+ * - Output shape: (2, 5)
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class ReduceMean_Op : public OperatorTensor,
                 public Registrable<ReduceMean_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>> {
 
@@ -42,49 +90,77 @@ private:
                                             std::vector<std::int32_t>,
                                             bool,
                                             bool>;
+
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
+
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
     ReduceMean_Op() = delete;
 
     /**
-     * @brief constructor for ReduceMean op
+     * @brief constructor for ReduceMean operator
      * @param[in] axes around which perform the operation
-     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and 
+     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and
      * if false we remove the dimension completely
      * @param[in] noop_with_empty_axes used when no axes are provided, if set to true, the operator does nothing
      * and if false, we reduce on all axes
      */
-    ReduceMean_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes);
+    ReduceMean_Op(const std::vector<std::int32_t>& axes, bool keep_dims = true, bool noop_with_empty_axes = false);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param[in] op ReduceMean_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     ReduceMean_Op(const ReduceMean_Op& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::ReduceMean_Op
+     * @brief Clone the operator using its copy constructor.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Compute the output dimensions during the forward pass.
+     * @param[in] allowDataDependency Whether to allow data-dependent dimensions.
+     * @return Boolean indicating whether dimension computation was successful.
+     */
     bool forwardDims(bool allowDataDependency = false) override final;
 
+    /**
+     * @brief Set the backend for the ReduceMean operator.
+     * @param[in] name Name of the backend.
+     * @param[in] device Device index (optional).
+     */
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the axes over which the mean is computed.
+     */
     inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::Axes>(); }
+
+    /**
+     * @brief Get whether reduced dimensions are retained.
+     */
     inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::KeepDims>(); }
-    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::NoopWithEmptyAxes>(); }
 
+    /**
+     * @brief Get the behavior when axes are empty.
+     */
+    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::NoopWithEmptyAxes>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
+
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
@@ -93,38 +169,30 @@ public:
 };
 
 /**
- * @brief Compute the mean value of a Tensor over the provided axes. Dimensions
- * may be reduced by erasing the provided axes or not.
+ * @brief Compute the mean value of a Tensor over the specified axes.
  *
- * @param axes Dimensions over which data mean should be computed.
- * @param keep_dims Whether or not reduced dimensions are to be erased.
- * @param name Name of the Operator.
+ * Dimensions may be reduced by erasing the specified axes or retaining them with size 1.
+ *
+ * @param[in] axes Dimensions over which data mean should be computed.
+ * @param[in] keep_dims Whether or not reduced dimensions are to be retained.
+ * @param[in] noop_with_empty_axes Behavior when no axes are specified.
+ * @param[in] name Name of the Operator.
  * @return std::shared_ptr<Node> Node containing the Operator.
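+ *
+ * Minimal usage sketch (node name and axes are illustrative only):
+ * @code
+ * // Mean over the two spatial axes of an NCHW tensor, keeping the reduced dims:
+ * auto rm = Aidge::ReduceMean({2, 3}, true, false, "reducemean0");
+ * @endcode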
  */
-
-
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
-// template <DimSize_t DIM>
-// inline std::shared_ptr<Node> ReduceMean(
-//     std::int32_t const (&axes)[DIM],
-//     DimSize_t keep_dims = 1,
-//     const std::string& name = "") {
-//     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
-//     return ReduceMean(to_array(axes), keep_dims, name);
-// }
-
-// template <DimIdx_t DIM>
-// const std::string ReduceMean_Op::Type = "ReduceMean";
 std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
-                                        bool keep_dims=true,
-                                        bool noop_with_empty_axes=false,
+                                        bool keep_dims = true,
+                                        bool noop_with_empty_axes = false,
                                         const std::string& name = "");
 
 }  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
+const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {
+    "axes",
+    "keep_dims",
+    "noop_with_empty_axes"
+};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index bae03cb7d2e3ac855537eb22e54bf706ec0e0b4a..5a3674b21bca674a966d39dc103cde60a4964071 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -26,12 +26,59 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ReduceSumAttr { Axes, KeepDims, NoopWithEmptyAxes };
-
+enum class ReduceSumAttr {
+  /**
+   * @brief Axes over which the sum operation is performed.
+   *
+   * Axes are specified as a vector of integers, each representing a dimension
+   * of the input tensor to be reduced.
+   */
+  Axes,
+
+  /**
+   * @brief Flag indicating whether to keep reduced dimensions.
+   *
+   * - `true`: Retain reduced dimensions with size 1.
+   * - `false`: Completely remove reduced dimensions.
+   */
+  KeepDims,
+
+  /**
+   * @brief Flag indicating behavior when axes are empty.
+   *
+   * - `true`: No operation is performed if axes are empty.
+   * - `false`: Reduction is performed over all axes if none are specified.
+   */
+  NoopWithEmptyAxes
+};
 
 /**
- * @brief This operator has as purpose to reduce given axes by replacing with the sum value.
-*/
+ * @class ReduceSum_Op
+ * @brief Implements the ReduceSum operation to compute the sum of a tensor along specified axes.
+ *
+ * ReduceSum reduces specified axes of a tensor by computing the sum value.
+ * The resulting output tensor dimensions depend on the provided attributes.
+ *
+ * ### Output Shape Calculation
+ * - If `KeepDims` is true:
+ *   Reduced dimensions are retained with size 1.
+ * - If `KeepDims` is false:
+ *   Reduced dimensions are completely removed.
+ *
+ * @example:
+ * - Input shape: (2, 3, 4, 5) // Batch size 2, 3 channels, 4x5 spatial dimensions
+ * - Axes: {2, 3}
+ * - KeepDims: true
+ * - Output shape: (2, 3, 1, 1)
+ * @example:
+ * - Input shape: (2, 3, 4, 5)
+ * - Axes: {1, 2}
+ * - KeepDims: false
+ * - Output shape: (2, 5)
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class ReduceSum_Op : public OperatorTensor,
                 public Registrable<ReduceSum_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceSum_Op &)>> {
 
@@ -67,8 +114,10 @@ public:
     {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op ReduceSum_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     ReduceSum_Op(const ReduceSum_Op& op)
         : OperatorTensor(op),
@@ -94,9 +143,24 @@ public:
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the axes over which the sum is computed.
+     */
     inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::Axes>(); }
+
+    /**
+     * @brief Get whether reduced dimensions are retained.
+     */
     inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::KeepDims>(); }
+
+    /**
+     * @brief Get the behavior when axes are empty.
+     */
     inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::NoopWithEmptyAxes>(); }
 
 
@@ -109,11 +173,13 @@ public:
 };
 
 /**
- * @brief Compute the sum value of a Tensor over the provided axes. Dimensions
- * may be reduced by erasing the provided axes or not.
+ * @brief Compute the sum value of a Tensor over the specified axes.
+ *
+ * Dimensions may be reduced by erasing the specified axes or retaining them with size 1.
  *
  * @param axes Dimensions over which data sum should be computed.
- * @param keep_dims Whether or not reduced dimensions are to be erased.
+ * @param keep_dims Whether or not reduced dimensions are to be retained.
+ * @param noop_with_empty_axes Behavior when no axes are specified.
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> Node containing the Operator.
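+ *
+ * Minimal usage sketch, assuming the ReduceSum factory mirrors ReduceMean
+ * (axes, keep_dims, noop_with_empty_axes, name); values are illustrative only:
+ * @code
+ * // Sum over axes 1 and 2 without keeping the reduced dimensions:
+ * auto rs = Aidge::ReduceSum({1, 2}, false, false, "reducesum0");
+ * @endcode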
  */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 721b964d3ff4cd87121d43e8719a8fde1445761b..c170ad79e202158f78681855001ff46ba8167261 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -23,70 +23,181 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+/**
+ * @brief Implementation of the Reshape operator.
+ * @note This operator implementation is agnostic to the backend and is located here instead of in aidge_backend.
+ */
 class Reshape_OpImpl : public OperatorImpl {
 public:
-    Reshape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    /**
+     * @brief Constructor for Reshape_OpImpl.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
+    Reshape_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Perform the forward operation for the reshape.
+     */
     void forward() override;
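+
+    /**
+     * @brief Perform the backward operation for the reshape.
+     */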
+    void backward() override;
 };
 
-enum class ReshapeAttr { Shape, AllowZero };
+/**
+ * @enum ReshapeAttr
+ * @brief Enumeration of attributes specific to the Reshape operator.
+ */
+enum class ReshapeAttr {
+    /**
+     * @brief The target shape for the output tensor.
+     */
+    Shape,
+    
+    /**
+     * @brief Whether zeros in the shape attribute are allowed.
+     * 
+     * When true, zeros in the target shape retain the corresponding dimension size from the input tensor.
+     */
+    AllowZero
+};
 
+/**
+ * @brief Description of Reshape operator that adjusts the shape of the input tensor.
+ *
+ * This operator reshapes the input tensor according to the specified target shape.
+ * If the target shape is not compatible with the input tensor's total number of elements,
+ * the operation will fail. A dimension of -1 is inferred from the remaining elements, and,
+ * unless the `AllowZero` attribute is true, zeros in the target shape are replaced by the
+ * corresponding dimension sizes of the input tensor.
+ *
+ * @example Input: Tensor of dimensions `[2, 3]` with `Shape = {3, 2}` results in a tensor with dimensions `[3, 2]`.
+ * @example Input: Tensor of dimensions `[4, 3]` with `Shape = {0, -1}` and `AllowZero = false` results in a tensor with dimensions `[4, 3]`.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Reshape_Op : public OperatorTensor,
                    public Registrable<Reshape_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Reshape_Op&)>> {
 
 public:
+    /**
+     * @brief Static type string for the Reshape operator.
+     */
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ReshapeAttr,
-                                            std::vector<std::int64_t>,
-                                            bool>;
+    using Attributes_ = StaticAttributes<ReshapeAttr, std::vector<std::int64_t>, bool>;
     template <ReshapeAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    /**
+     * @brief Deleted default constructor.
+     */
     Reshape_Op() = delete;
 
+    /**
+     * @brief Constructor for the Reshape operator.
+     * @param[in] shape Target shape for the output tensor.
+     * @param[in] allowzero Whether explicit zeros in the target shape are kept as-is instead of copying the corresponding input dimensions.
+     */
     Reshape_Op(const std::vector<std::int64_t>& shape = {}, bool allowzero = false);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param[in] op Reshape_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Reshape_Op(const Reshape_Op& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Reshape_Op
+     * @brief Clone the operator using its copy constructor.
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Check whether the dimensions have been forwarded successfully.
+     * @return True if dimensions were successfully forwarded.
+     */
     bool dimsForwarded() const override final;
+
+    /**
+     * @brief Compute the output dimensions during the forward pass.
+     * @param[in] allowDataDependency Whether to allow data-dependent dimensions.
+     * @return Boolean indicating whether dimension computation was successful.
+     */
     bool forwardDims(bool allowDataDependency = false) override final;
 
+    /**
+     * @brief Set the backend for the Reshape operator.
+     * @param[in] name Name of the backend.
+     * @param[in] device Device index (optional).
+     */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    /**
+     * @brief Get the available backends for the Reshape operator.
+     * @return A set of backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     * @return A shared pointer to the attributes.
+     */
     std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get or modify the target shape for the output tensor.
+     * @return Reference to the shape attribute.
+     */
     inline std::vector<std::int64_t>& shape() const { return mAttributes->template getAttr<ReshapeAttr::Shape>(); }
+
+    /**
+     * @brief Get or modify the AllowZero attribute.
+     * @return Reference to the AllowZero attribute.
+     */
     inline bool& allowZero() const { return mAttributes->template getAttr<ReshapeAttr::AllowZero>(); }
 
-    static const std::vector<std::string> getInputsName(){
+    /**
+     * @brief Get the input tensor names for the Reshape operator.
+     * @return A vector of input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+
+    /**
+     * @brief Get the output tensor names for the Reshape operator.
+     * @return A vector of output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Create a Reshape operation node.
+ *
+ * @param[in] shape Target shape for the output tensor (optional).
+ * @param[in] allowzero Whether explicit zeros in the target shape are kept as-is instead of copying the corresponding input dimensions.
+ * @param[in] name Name of the operator (optional).
+ * @return A shared pointer to the Node containing the Reshape operator.
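+ *
+ * A minimal usage sketch (the node name is illustrative):
+ * @code{.cpp}
+ * // Reshape a [2, 3] tensor into a [3, 2] tensor.
+ * auto reshape = Reshape({3, 2}, false, "reshape_node");
+ * @endcode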
+ */
 std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
-                            bool allowzero = false,
-                            const std::string &name = "");
+                              bool allowzero = false,
+                              const std::string& name = "");
+
 }  // namespace Aidge
 
 namespace {
+/**
+ * @brief EnumStrings specialization for ReshapeAttr.
+ */
 template <>
-const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = { "shape", "allow_zero" };
+const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = {"shape", "allow_zero"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_RESHAPE_H_ */
diff --git a/include/aidge/operator/Round.hpp b/include/aidge/operator/Round.hpp
index 00352421d193eff543f1351b57f8db54ac742393..3a5bb0859392670e1c6710b115bef72c3c52074a 100644
--- a/include/aidge/operator/Round.hpp
+++ b/include/aidge/operator/Round.hpp
@@ -24,20 +24,33 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of an element-wise Round operation on an input Tensor.
+ *
+ * For each element x in the input, the function is defined as:
+ * `f(x) = round(x)`, where round(x) rounds x to the nearest integer value. 
+ * Halfway cases are rounded away from zero.
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Round_Op : public OperatorTensor,
-                public Registrable<Round_Op,
+                public Registrable<Round_Op,  // <Op, backend, implementation creation function>
                                 std::string,
-                                std::function<std::shared_ptr<OperatorImpl>(const Round_Op&)>> {
-
-
+                                std::function<std::shared_ptr<OperatorImpl>(const Round_Op&)>>
+{
 public:
     static const std::string Type;
 
     Round_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Round_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Round_Op(const Round_Op& op);
 
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 9465667babb0978e33de3cf9f155d9b2e9d495b4..b33fb584165efd7773c85249d713953e8303dc6b 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -23,17 +23,55 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-//Caution: This operator is now deprecated and should no longer be used. 
-//It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
+// Caution: This operator is now deprecated and should no longer be used. 
+// It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
 
 namespace Aidge {
 enum class ScalingAttr {
-    ScalingFactor, QuantizedNbBits, IsOutputUnsigned
+    /**
+     * @brief Scaling factor applied to the input tensor.
+     *
+     * This floating-point value is used to scale the input tensor.
+     */
+    ScalingFactor,
+
+    /**
+     * @brief Number of quantization bits.
+     *
+     * Specifies the bit-width used for quantization. 
+     * For example, a value of `8` represents 8-bit quantization.
+     */
+    QuantizedNbBits,
+
+    /**
+     * @brief Indicates whether the output is unsigned.
+     *
+     * - `true`: The quantized output values are unsigned integers.
+     * - `false`: The quantized output values are signed integers.
+     */
+    IsOutputUnsigned
 };
 
+/**
+ * @brief Description of a scaling operation to scale and quantize input tensors.
+ *
+ * The `Scaling_Op` class applies a scaling factor to the input tensor, quantizes 
+ * the scaled values to a specified bit-width, and outputs either signed or unsigned integers 
+ * based on the configuration.
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * ### Deprecation Notice
+ * This operator is deprecated and has been replaced by the `Quantizer` MetaOperator.
+ * It is retained for backward compatibility and should not be used in new implementations.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Scaling_Op
     : public OperatorTensor,
       public Registrable<Scaling_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Scaling_Op&)>> {
+
 public:
     static const std::string Type;
 
@@ -45,44 +83,71 @@ private:
 public:
     Scaling_Op() = delete;
 
+    /**
+     * @brief Constructor for the Scaling operator.
+     * @param[in] scalingFactor Scaling factor to be applied to the input tensor.
+     * @param[in] nbBits Number of bits for quantization.
+     * @param[in] isOutputUnsigned Flag indicating whether the output should be unsigned.
+     */
     Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param[in] op Scaling_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not its input tensors. 
+     * The new operator has no associated input.
      */
     Scaling_Op(const Scaling_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Scaling_Op
      */
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the scaling factor.
+     */
     inline float& scalingFactor() const noexcept { return mAttributes -> getAttr<ScalingAttr::ScalingFactor>(); }
+
+    /**
+     * @brief Get the number of quantization bits.
+     */
     inline std::size_t& quantizedNbBits() const noexcept { return mAttributes -> getAttr<ScalingAttr::QuantizedNbBits>(); }
+
+    /**
+     * @brief Check if the output is unsigned.
+     */
     inline bool& isOutputUnsigned() const noexcept { return mAttributes -> getAttr<ScalingAttr::IsOutputUnsigned>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
+
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
-/*
-inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
-}
-*/
+/**
+ * @brief Apply a scaling and quantization operation on a tensor.
+ *
+ * @param[in] scalingFactor Scaling factor to apply to the input tensor.
+ * @param[in] quantizedNbBits Number of bits for quantization.
+ * @param[in] isOutputUnsigned Whether the quantized output should be unsigned.
+ * @param[in] name Name of the Operator.
+ * @return std::shared_ptr<Node> Node containing the Operator.
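+ *
+ * A minimal usage sketch (the node name is illustrative; prefer the Quantizer
+ * MetaOperator in new code since this operator is deprecated):
+ * @code{.cpp}
+ * // Scale by 0.5 and quantize to unsigned 8-bit values.
+ * auto scaling = Scaling(0.5f, 8, true, "scaling_node");
+ * @endcode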
+ */
 std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
-                                     std::size_t quantizedNbBits=8,
-                                     bool isOutputUnsigned=true,
+                                     std::size_t quantizedNbBits = 8,
+                                     bool isOutputUnsigned = true,
                                      const std::string& name = "");
 } // namespace Aidge
 
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index cfd43fa0dd5a064ee21eafc2d0f50c12ad6e3272..609e354d57c2632f7ae5df53baf42c04843c8383 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_CORE_OPERATOR_SHAPE_H_
 #define AIDGE_CORE_OPERATOR_SHAPE_H_
 
-#include <cstdint>  // std::int64_t
+#include <cstdint>  // For fixed-width integer types (std::int64_t)
 #include <memory>
 #include <string>
 #include <vector>
@@ -25,20 +25,62 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+
+/**
+ * @class Shape_OpImpl
+ * @brief Backend-agnostic implementation of the Shape operator.
+ *
+ * This implementation is responsible for extracting and returning the shape
+ * of the input tensor. Backend-specific implementations can extend this behavior.
+ */
 class Shape_OpImpl : public OperatorImpl {
 public:
+    /**
+     * @brief Constructor for the Shape_OpImpl class.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
     Shape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Perform the forward operation to compute the shape of the tensor.
+     */
     void forward() override;
 };
 
-enum class ShapeAttr { Start, End };
+/**
+ * @enum ShapeAttr
+ * @brief Enumeration of attributes specific to the Shape operator.
+ */
+enum class ShapeAttr {
+    /**
+     * @brief Start index of the slice of dimensions to return.
+     */
+    Start,
+    /**
+     * @brief End index of the slice of dimensions to return (exclusive).
+     */
+    End
+};
 
+/**
+ * @brief Description of the operation of extracting the shape of a tensor.
+ *
+ * The Shape operator extracts the shape (dimensions) of a tensor as an output tensor.
+ * It allows optional slicing of the dimensions by specifying a start and end index.
+ *
+ * @example Input: Tensor with shape `[4, 5, 6, 7]`, `start=1`, `end=3` -> Output: `[5, 6]`
+ * @example Input: Tensor with shape `[4, 5, 6]`, `start=0`, `end=-1` (default) -> Output: `[4, 5, 6]`
+ */
 class Shape_Op : public OperatorTensor,
                 public Registrable<Shape_Op,
                                    std::string,
                                    std::function<std::shared_ptr<OperatorImpl>(const Shape_Op&)>> {
 
 public:
+    /**
+     * @brief Static type string for the Shape operator.
+     */
     static const std::string Type;
 
 private:
@@ -47,43 +89,98 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    Shape_Op() = delete;
-
-    Shape_Op(const std::int64_t start, const std::int64_t end);
+    /**
+     * @brief Constructor for the Shape operator.
+     * @param[in] start Start index for slicing dimensions.
+     * @param[in] end End index (exclusive) for slicing dimensions.
+     */
+    Shape_Op(const std::int64_t start = 0, const std::int64_t end = -1);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Shape_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Shape_Op(const Shape_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Shape_Op
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Compute the output dimensions during the forward pass.
+     * @param allowDataDependency Whether to allow data-dependent dimensions.
+     * @return Boolean indicating whether dimension computation was successful.
+     */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
+    /**
+     * @brief Set the backend for the Shape operator.
+     * @param[in] name Name of the backend.
+     * @param[in] device Device index (optional).
+     */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Get the available backends for the Shape operator.
+     * @return A set of backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     * @return A shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::int64_t& start() const noexcept { return mAttributes -> getAttr<ShapeAttr::Start>(); }
-    inline std::int64_t& end() const noexcept { return mAttributes -> getAttr<ShapeAttr::End>(); }
 
-    static const std::vector<std::string> getInputsName(){
+    /**
+     * @brief Get or modify the start index for slicing dimensions.
+     * @return Reference to the start index attribute.
+     */
+    inline std::int64_t& start() const noexcept { return mAttributes->getAttr<ShapeAttr::Start>(); }
+
+    /**
+     * @brief Get or modify the end index for slicing dimensions.
+     * @return Reference to the end index attribute.
+     */
+    inline std::int64_t& end() const noexcept { return mAttributes->getAttr<ShapeAttr::End>(); }
+
+    /**
+     * @brief Get the input tensor names for the Shape operator.
+     * @return A vector of input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+
+    /**
+     * @brief Get the output tensor names for the Shape operator.
+     * @return A vector of output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Create a Shape operation node.
+ *
+ * @param[in] start Start index for slicing dimensions (default is 0).
+ * @param[in] end End index (exclusive) for slicing dimensions (default is -1, meaning all dimensions).
+ * @param[in] name Name of the operator (optional).
+ * @return A shared pointer to the Node containing the Shape operator.
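+ *
+ * A minimal usage sketch (the node name is illustrative):
+ * @code{.cpp}
+ * // For an input of shape [4, 5, 6, 7], outputs the sub-shape [5, 6].
+ * auto shape = Shape(1, 3, "shape_node");
+ * @endcode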
+ */
 std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = "");
+
 } // namespace Aidge
 
 namespace {
+/**
+ * @brief EnumStrings specialization for ShapeAttr.
+ */
 template <>
 const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"start", "end"};
 }
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
index 30f1d71e0a56d92a70830a5def81040e0c5a186c..2375f845f2b05ceadfa4463eba00fc0c9eeb0302 100644
--- a/include/aidge/operator/ShiftGELU.hpp
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -27,16 +27,44 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of an element-wise Shifted Gaussian Error Linear Unit (ShiftGELU) operation
+ * on an input Tensor.
+ *
+ * The ShiftGELU operation applies a quantization and scaling-based approximation of the GELU activation.
+ * For each element `x` in the input tensor, the operation can be summarized as:
+ *
+ * `f(x) = scale_factor * quantize(x / SF) * shift(x)`
+ *
+ * Where:
+ * - `quantize` is a rounding operation to represent the input in a lower precision format.
+ * - `SF` is the scaling factor for quantization.
+ * - `shift(x)` is a transformation using exponent shifting and clamping to approximate smooth non-linear behavior.
+ * - `scale_factor` adjusts the final output to match the desired numerical range for quantized tensors.
+ *
+ * This approach replaces computationally expensive operations like `tanh` with efficient approximations, enabling
+ * smooth activation behavior while optimizing for low-precision neural network inference.
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class ShiftGELU_Op : public OperatorTensor,
-    public Registrable<ShiftGELU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)>> {
+    public Registrable<ShiftGELU_Op,  // <Op, backend, implementation creation function>
+        std::string,
+        std::function<std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)>>
+{
 public:
     static const std::string Type;
 
     ShiftGELU_Op();
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op ShiftGELU_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     ShiftGELU_Op(const ShiftGELU_Op& op);
 
@@ -46,7 +74,11 @@ public:
      */
     std::shared_ptr<Operator> clone() const override;
 
-
+    /**
+     * @brief Set the backend for the ShiftGELU operator.
+     * @param[in] name Name of the backend.
+     * @param[in] device Device index (optional).
+     */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
 
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
index 9fbd81aedef1eb640a7ce805d745297edb640560..fc76e300527ca8b6f7a7097a8f7b2c11155fee56 100644
--- a/include/aidge/operator/ShiftMax.hpp
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -27,6 +27,30 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of an element-wise Shifted Maximum (ShiftMax) operation on an input Tensor.
+ *
+ * The ShiftMax operation computes a shifted and quantized version of the input tensor's values,
+ * designed to approximate the maximum function while optimizing for quantized tensor computations.
+ * For each element `x` in the input tensor, the operation can be summarized as:
+ *
+ * `f(x) = scale_factor * quantize(shift(x - max_val))`
+ *
+ * Where:
+ * - `max_val` is the maximum value of the quantized tensor within a given dimension.
+ * - `quantize` represents a rounding operation to convert values into a lower precision format.
+ * - `shift` applies exponent shifting and clamping to the adjusted values to mimic smooth scaling.
+ * - `scale_factor` adjusts the final output to match the desired numerical range for quantized tensors.
+ *
+ * The algorithm ensures efficient computation by using integer arithmetic and avoids precision loss by
+ * scaling the outputs with a dynamic factor based on the tensor's sum.
+ *
+ * The input and output Tensors have the same dimensions, and this operation is particularly suited for
+ * neural networks requiring efficient inference with quantized inputs.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class ShiftMax_Op : public OperatorTensor,
     public Registrable<ShiftMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftMax_Op&)>> {
 public:
@@ -35,8 +59,10 @@ public:
     ShiftMax_Op();
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op ShiftMax_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     ShiftMax_Op(const ShiftMax_Op& op);
 
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index ceef45bf0a12315354c8b7bf378c2da834b867b1..0208a7f600e95ea27f8af3da8417c85126a29f00 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -25,15 +25,39 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of an element-wise Sigmoid operation on an input Tensor.
+ *
+ * For each element x in the input, the function is defined as:
+ * `f(x) = 1 / (1 + exp(-x))`
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Sigmoid_Op : public OperatorTensor,
-    public Registrable<Sigmoid_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Sigmoid_Op&)>> {
+    public Registrable<Sigmoid_Op,  // <Op, backend, implementation creation function>
+        std::string,
+        std::function<std::shared_ptr<OperatorImpl>(const Sigmoid_Op&)>>
+{
 public:
     static const std::string Type;
 
     Sigmoid_Op();
 
+    /**
+     * @brief Copy-constructor.
+     * @param op Sigmoid_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
+     */
     Sigmoid_Op(const Sigmoid_Op& op);
 
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Sigmoid_Op
+     */
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 055e6fd1d8917ae015b88a223f1f8701fd9dce59..d32bc4fe2ec9846d4467316cace8f76e008dba2d 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -25,77 +25,202 @@
 
 namespace Aidge {
 
-enum class SliceAttr { Starts, Ends, Axes, Steps };
+/**
+ * @brief Implementation of the Slice operation.
+ *
+ * Since the Slice operation is backend-agnostic, its implementation is located in aidge_core.
+ */
+class Slice_OpImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructs a Slice_OpImpl object.
+     * @param[in] op The operator to be implemented.
+     * @param[in] backend The backend used for execution.
+     */
+    Slice_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Executes the forward pass for the Slice operation.
+     */
+    void forward() override;
+};
+
+/**
+ * @enum SliceAttr
+ * @brief Attributes for the Slice operation.
+ */
+enum class SliceAttr {
+    /**
+     * @brief Starting indices for the slice along each axis.
+     *
+     * Specifies the start position of the slice for each axis.
+     * @details If an index is negative, the size of the corresponding input dimension is added to it.
+     * The result is then clamped: an index below 0 is forced to 0, and an index above the
+     * dimension size is forced to the dimension size.
+     */
+    Starts,
+
+    /**
+     * @brief Ending indices for the slice along each axis.
+     *
+     * Specifies the end position (exclusive) of the slice for each axis.
+     * @details If an index is negative, the size of the corresponding input dimension is added to it.
+     * The result is then clamped: an index below 0 is forced to 0, and an index above the
+     * dimension size is forced to the dimension size.
+     */
+    Ends,
+
+    /**
+     * @brief Axes along which the slice operation is performed.
+     *
+     * Specifies which dimensions of the input tensor are affected by the slice.
+     */
+    Axes,
+
+    /**
+     * @brief Steps to move between each slice along each axis.
+     *
+     * Specifies the step size for slicing along each axis.
+     */
+    Steps
+};
+
+/**
+ * @class Slice_Op
+ * @brief Implements the Slice operation for extracting sub-tensors.
+ *
+ * The Slice operation extracts a sub-tensor from the input tensor using specified start and end indices,
+ * axes, and steps. The resulting tensor dimensions depend on the provided attributes.
+ *
+ * - The output shape is determined by the `Starts`, `Ends`, `Axes`, and `Steps` attributes.
+ * - Each dimension in the output tensor corresponds to the range defined for the respective axis.
+ *
+ * @example:
+ * - Input shape: (4, 5, 6)
+ * - Starts: {1, 0, 2}
+ * - Ends: {3, 5, 6}
+ * - Axes: {0, 1, 2}
+ * - Steps: {1, 1, 1}
+ * - Output shape: (2, 5, 4)
+ *
+ * @details If a start or end index is out of bounds, it is clamped to the dimension size of the given axis.
+ * @example:
+ * - Input: [0, 1, 2, 3]
+ * - Starts: {1}
+ * - Ends: {1000}
+ * - Axes: {0}
+ * - Output: [1, 2, 3]
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
+class Slice_Op : public OperatorTensor,
+                public Registrable<Slice_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Slice_Op &)>> {
 
-class Slice_Op
-    : public OperatorTensor,
-      public Registrable<Slice_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Slice_Op &)>> {
 public:
     static const std::string Type;
 
-private:
+    /**
+     * @brief Defines static attributes for the Slice operator.
+     */
     using Attributes_ = StaticAttributes<SliceAttr,
-                                            std::vector<std::int64_t>,
-                                            std::vector<std::int64_t>,
-                                            std::vector<std::int8_t>,
-                                            std::vector<std::int64_t>>;
-    template <SliceAttr e> using attr = typename Attributes_::template attr<e>;
+                                            std::vector<std::int64_t>, // Starts
+                                            std::vector<std::int64_t>, // Ends
+                                            std::vector<std::int8_t>,  // Axes
+                                            std::vector<std::int64_t>>; // Steps
+
+private:
+    template <SliceAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
     Slice_Op() = delete;
 
+    /**
+     * @brief Constructor for Slice operator.
+     * @param[in] starts Starting indices for the slice along each axis.
+     * @param[in] ends Ending indices (exclusive) for the slice along each axis.
+     * @param[in] axes Axes along which the slice operation is performed.
+     * @param[in] steps Step sizes for slicing along each axis.
+     */
     Slice_Op(const std::vector<std::int64_t>& starts,
-            const std::vector<std::int64_t>& ends,
-            const std::vector<std::int8_t>& axes,
-            const std::vector<std::int64_t>& steps);
-
+             const std::vector<std::int64_t>& ends,
+             const std::vector<std::int8_t>& axes,
+             const std::vector<std::int64_t>& steps);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
-     * input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param[in] op Operator to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Slice_Op(const Slice_Op &op);
 
-public:
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Slice_Op
      */
     std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
+
     bool forwardDims(bool allowDataDependency = true) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the starting indices for the slice.
+     */
     inline std::vector<std::int64_t>& starts() const noexcept { return mAttributes -> getAttr<SliceAttr::Starts>(); }
+
+    /**
+     * @brief Get the ending indices for the slice.
+     */
     inline std::vector<std::int64_t>& ends() const noexcept { return mAttributes -> getAttr<SliceAttr::Ends>(); }
+
+    /**
+     * @brief Get the axes along which the slice is performed.
+     */
     inline std::vector<std::int8_t>& axes() const noexcept { return mAttributes -> getAttr<SliceAttr::Axes>(); }
+
+    /**
+     * @brief Get the steps for the slice operation.
+     */
     inline std::vector<std::int64_t>& steps() const noexcept { return mAttributes -> getAttr<SliceAttr::Steps>(); }
 
-    static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName() {
         return {"data_input", "starts", "ends", "axes", "steps"};
     }
-    static const std::vector<std::string> getOutputsName(){
+
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
 };
 
 /**
  * @brief Extract a sub-Tensor from a bigger original Tensor.
- * @param name Name of the Operator.
+ *
+ * @param[in] starts Starting indices for the slice.
+ * @param[in] ends Ending indices (exclusive) for the slice.
+ * @param[in] axes Axes along which the slice operation is performed.
+ * @param[in] steps Step sizes for slicing along each axis.
+ * @param[in] name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
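+ *
+ * A minimal usage sketch (the node name is illustrative):
+ * @code{.cpp}
+ * // From a (4, 5, 6) input, keep indices 1-2 of axis 0 and 2-5 of axis 2, yielding a (2, 5, 4) output.
+ * auto slice = Slice({1, 0, 2}, {3, 5, 6}, {0, 1, 2}, {1, 1, 1}, "slice_node");
+ * @endcode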
  */
 std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
-                                   const std::vector<std::int64_t>& ends = {},
-                                   const std::vector<std::int8_t>& axes = {},
-                                   const std::vector<std::int64_t>& steps = {},
-                                   const std::string &name = "");
+                            const std::vector<std::int64_t>& ends = {},
+                            const std::vector<std::int8_t>& axes = {},
+                            const std::vector<std::int64_t>& steps = {},
+                            const std::string &name = "");
+
 }  // namespace Aidge
 
 namespace {
@@ -103,4 +228,4 @@ template <>
 const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "starts", "ends", "axes", "steps" };
 }
 
-#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
+#endif /* AIDGE_CORE_OPERATOR_SLICE_H_ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 72ea56dd6293e416ddcca12ac38fd57d76071354..290132690149d4087f2ef89f9f3d0b9af631fff4 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -24,14 +24,39 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class SoftmaxAttr { Axis };
+enum class SoftmaxAttr {
+    /**
+     * @brief Axis along which the softmax operation is applied.
+     *
+     * Determines the dimension in the input tensor over which the softmax
+     * operation will compute normalized exponential values.
+     */
+    Axis
+};
 
+/**
+ * @brief Description of a Softmax operation on input Tensor along a specified axis.
+ *
+ * The Softmax operation normalizes the input tensor values along a specified axis
+ * into a probability distribution where the sum of values is equal to 1.
+ *
+ * The Softmax function for a given input `x_i` is defined as:
+ * `softmax(x_i) = exp(x_i) / sum(exp(x_j))`, where the sum is taken over all elements in the specified dimension.
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Softmax_Op : public OperatorTensor,
-                public Registrable<Softmax_Op,
-                                   std::string,
-                                   std::function<std::shared_ptr<OperatorImpl>(const Softmax_Op&)>> {
+                   public Registrable<Softmax_Op,
+                                      std::string,
+                                      std::function<std::shared_ptr<OperatorImpl>(const Softmax_Op&)>> {
 
 public:
+    /**
+     * @brief Static type string for the Softmax operator.
+     */
     static const std::string Type;
 
 private:
@@ -40,43 +65,90 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    /**
+     * @brief Deleted default constructor.
+     */
     Softmax_Op() = delete;
 
+    /**
+     * @brief Constructor for the Softmax operator.
+     * @param[in] axis Axis along which the softmax operation is applied.
+     */
     Softmax_Op(std::int32_t axis);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Softmax_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Softmax_Op(const Softmax_Op& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Softmax_Op
+     * @brief Clone the operator using its copy constructor.
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Set the backend for the Softmax operator.
+     * @param[in] name Name of the backend.
+     * @param[in] device Device index (optional).
+     */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    /**
+     * @brief Get the available backends for the Softmax operator.
+     * @return A set of backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     * @return A shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
-    inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<SoftmaxAttr::Axis>(); }
+    /**
+     * @brief Get the axis along which the softmax operation is applied.
+     * @return Reference to the axis attribute.
+     */
+    inline std::int32_t& axis() const noexcept { return mAttributes->getAttr<SoftmaxAttr::Axis>(); }
 
-    static const std::vector<std::string> getInputsName(){
+    /**
+     * @brief Get the input names for the Softmax operator.
+     * @return A vector of input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+
+    /**
+     * @brief Get the output names for the Softmax operator.
+     * @return A vector of output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Create a Softmax operation node.
+ *
+ * @param[in] axis Axis along which the softmax operation is applied.
+ * @param[in] name Name of the operator (optional).
+ * @return A shared pointer to the Node containing the Softmax operator.
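+ *
+ * A minimal usage sketch (the node name is illustrative):
+ * @code{.cpp}
+ * // Normalize values along axis 1 into a probability distribution.
+ * auto softmax = Softmax(1, "softmax_node");
+ * @endcode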
+ */
 std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "");
+
 } // namespace Aidge
 
 namespace {
+/**
+ * @brief EnumStrings specialization for SoftmaxAttr.
+ */
 template <>
-const char *const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"axis"};
+const char* const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"axis"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 5a5652388c3622bf8a46792b3c58e00c79de22f3..3c6b52d3c39ade1f73c0391f02964d0bba74148b 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -24,19 +24,78 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+
+/**
+ * @class Split_OpImpl
+ * @brief Implementation of the Split operation.
+ *
+ * Since the Split operation is backend-agnostic, its implementation is located in aidge_core.
+ */
 class Split_OpImpl : public OperatorImpl {
 public:
-    Split_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    /**
+     * @brief Constructor for the Split operator implementation.
+     * @param[in] op Operator to be implemented.
+     * @param[in] backend Name of the backend.
+     */
+    Split_OpImpl(const Operator& op, const std::string& backend = "") : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Executes the forward pass for the Split operation.
+     */
     void forward() override;
 };
 
-enum class SplitAttr { Axis, Split };
+/**
+ * @enum SplitAttr
+ * @brief Enumeration of Split operator attributes.
+ */
+enum class SplitAttr {
+    /**
+     * @brief Axis along which to split the input tensor.
+     *
+     * The specified axis determines the direction of splitting.
+     */
+    Axis,
+
+    /**
+     * @brief Sizes of each output tensor after splitting.
+     *
+     * If specified, the sum of the split sizes must match the size of the input
+     * tensor along the specified axis.
+     */
+    Split
+};
 
-class Split_Op
-    : public OperatorTensor,
-      public Registrable<Split_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Split_Op &)>> {
+/**
+ * @class Split_Op
+ * @brief Implements the Split operation to divide a tensor into multiple sub-tensors along a specified axis.
+ *
+ * The Split operation divides the input tensor into several output tensors along
+ * a specified axis. The size of each output tensor can be defined by the Split
+ * attribute. If no split sizes are provided, the input is divided evenly.
+ *
+ * ### Output Shape Calculation
+ * - Input shape: (N, 5, D2, ..., DN)
+ * - Axis: 1
+ * - Split: {2, 3}
+ * - Output shapes: [(N, 2, D2, ..., DN), (N, 3, D2, ..., DN)]
+ *
+ * @example:
+ * - Input shape: (6, 4, 8)
+ * - Axis: 0
+ * - Split: {2, 2, 2}
+ * - Output shapes: [(2, 4, 8), (2, 4, 8), (2, 4, 8)]
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
+class Split_Op : public OperatorTensor,
+                 public Registrable<Split_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Split_Op &)>> {
 
 public:
+    /**
+     * @brief Static type string for the Split operator.
+     */
     static const std::string Type;
 
 private:
@@ -47,56 +106,97 @@ private:
 public:
     Split_Op() = delete;
 
-    Split_Op( std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split);
-
+    /**
+     * @brief Constructor for the Split operator.
+     * @param[in] axis Axis along which to split the input tensor.
+     * @param[in] nbOutputs Number of output tensors to create.
+     * @param[in] split Sizes of each split (optional).
+     */
+    Split_Op(std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
-     * input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Split_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
-    Split_Op(const Split_Op &op);
+    Split_Op(const Split_Op& op);
 
-public:
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Split_Op
+     * @brief Clone the operator using its copy constructor.
      */
     std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    /**
+     * @brief Set the backend for the Split operator.
+     * @param[in] name Backend name.
+     * @param[in] device Device index (optional).
+     */
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Get the available backends for the Split operator.
+     * @return A set of backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Get the axis along which the input tensor is split.
+     */
     inline std::int8_t& axis() const { return mAttributes->template getAttr<SplitAttr::Axis>(); }
+
+    /**
+     * @brief Get the sizes of each split.
+     */
     inline std::vector<DimSize_t>& split() const { return mAttributes->template getAttr<SplitAttr::Split>(); }
 
-    static const std::vector<std::string> getInputsName(){
+    /**
+     * @brief Get the input names for the Split operator.
+     * @return A vector of input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
         return {"data_input", "split"};
     }
-    static const std::vector<std::string> getOutputsName(){
+
+    /**
+     * @brief Get the output names for the Split operator.
+     * @return A vector of output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output_0", "data_output_n"};
     }
-
 };
 
 /**
- * @brief Extract a sub-Tensor from a bigger original Tensor.
- * @param name Name of the Operator.
- * @return std::shared_ptr<Node> A Node containing the Operator.
+ * @brief Split a tensor into multiple sub-tensors along a specified axis.
+ *
+ * @param[in] nbOutput Number of output tensors to create.
+ * @param[in] axis Axis along which to split the input tensor.
+ * @param[in] split Sizes of each split (optional).
+ * @param[in] name Name of the Operator.
+ * @return std::shared_ptr<Node> Node containing the Operator.
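+ *
+ * A minimal usage sketch (the node name is illustrative):
+ * @code{.cpp}
+ * // Split a (6, 4, 8) tensor along axis 0 into three (2, 4, 8) tensors.
+ * auto split = Split(3, 0, {2, 2, 2}, "split_node");
+ * @endcode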
  */
 std::shared_ptr<Node> Split(DimSize_t nbOutput,
-                                   std::int8_t axis = 0,
-                                   const std::vector<DimSize_t>& split = {},
-                                   const std::string &name = "");
+                            std::int8_t axis = 0,
+                            const std::vector<DimSize_t>& split = {},
+                            const std::string& name = "");
+
 }  // namespace Aidge
 
 namespace {
+/**
+ * @brief EnumStrings specialization for SplitAttr.
+ */
 template <>
-const char *const EnumStrings<Aidge::SplitAttr>::data[] = { "axis", "split" };
+const char* const EnumStrings<Aidge::SplitAttr>::data[] = {"axis", "split"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SPLIT_H_ */
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 4858cdcd164d6be0582ddabe67c780461a9667aa..8b9eb1b787f6cc74c97fb25c1b3c1f7d5daf7123 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -23,8 +23,20 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of an element-wise Square Root (Sqrt) operation
+ * on an input Tensor.
+ *
+ * For each element x in the input, the function is defined as:
+ * `f(x) = sqrt(x)`, where sqrt(x) is the square root of x.
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Sqrt_Op : public OperatorTensor,
-                public Registrable<Sqrt_Op,
+                public Registrable<Sqrt_Op,  // <Op, backend, implementation creation function>
                                 std::string,
                                 std::function<std::shared_ptr<OperatorImpl>(const Sqrt_Op&)>> {
 public:
@@ -33,8 +45,10 @@ public:
     Sqrt_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Sqrt_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Sqrt_Op(const Sqrt_Op& op);
 
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
index 21633e451961148deda8a3f39afd1965b3002d2e..71e4e780a1f1c2993ff05f09ef6aa92aab1986ee 100644
--- a/include/aidge/operator/Stack.hpp
+++ b/include/aidge/operator/Stack.hpp
@@ -1,3 +1,14 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
 #ifndef AIDGE_CORE_OPERATOR_STACK_H_
 #define AIDGE_CORE_OPERATOR_STACK_H_
 
@@ -12,96 +23,212 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+/**
+ * @class StackProdConso
+ * @brief Implements the producer-consumer principle for the `Stack` operator.
+ *
+ * The `StackProdConso` class defines the logic for managing data dependencies
+ * during the forward process of the `Stack` operator. It ensures proper allocation
+ * and consumption of resources required for stacking operations.
+ */
 class StackProdConso : public ProdConso {
-  public:
-    StackProdConso(const Operator &op) : ProdConso(op) {}
-    Elts_t getRequiredMemory(
-        const IOIndex_t outputIdx,
-        const std::vector<DimSize_t> &inputsSize) const override final;
+public:
+    /**
+     * @brief Constructor for the `StackProdConso` class.
+     * @param[in] op The operator instance for which producer-consumer relationships are managed.
+     */
+    StackProdConso(const Operator& op) : ProdConso(op) {}
+
+    /**
+     * @brief Compute the memory requirements for an output tensor.
+     * @param[in] outputIdx The index of the output tensor.
+     * @param[in] inputsSize A vector containing the dimensions of the input tensors.
+     * @return The memory required (`Elts_t`) for the specified output tensor.
+     *
+     * @details:
+     * - This method calculates how much memory is needed to store the stacked tensor.
+     * - Memory requirements depend on the number and size of the input tensors.
+     */
+    Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+
+    /**
+     * @brief Reset producer-consumer relationships for the `Stack` operator.
+     *
+     * @details:
+     * - This method clears and reinitializes the producer-consumer relationships,
+     *   ensuring proper data flow and allocation for the stacking operation.
+     */
     void resetConsummerProducer() override;
 };
 
+/**
+ * @class StackOpImpl
+ * @brief Backend-specific implementation of the `Stack` operator.
+ *
+ * The `StackOpImpl` class handles the execution of the `Stack` operation, including
+ * forward computation and backend-specific optimizations.
+ */
 class StackOpImpl : public OperatorImpl {
-  public:
-    StackOpImpl(const Operator &op, const std::string &backend = "")
-        : OperatorImpl(op, backend) {}
+public:
+    /**
+     * @brief Constructs a StackOpImpl object.
+     * @param[in] op The operator to be implemented.
+     * @param[in] backend The backend used for execution.
+     */
+    StackOpImpl(const Operator& op, const std::string& backend = "") : OperatorImpl(op, backend) {}
 
+    /**
+     * @brief Get the Producer Consumer object of the operator.
+     * @return A shared pointer to the ProdConso object.
+     */
     std::shared_ptr<ProdConso> getProdConso() const override {
         return std::make_shared<StackProdConso>(mOp);
-    };
+    }
+
+    /**
+     * @brief Executes the forward pass for the Stack operation.
+     */
     void forward() override;
 };
 
-enum class StackAttr { ForwardStep, MaxElements };
-
-class StackOp
-    : public OperatorTensor,
-      public Registrable<
-          StackOp,
-          std::string,
-          std::function<std::unique_ptr<OperatorImpl>(const StackOp &)>> {
+enum class StackAttr {
+    ForwardStep,   // Tracks the current step in the forward pass.
+    MaxElements    // Maximum number of elements that can be stacked.
+};
 
-  private:
-    using Attributes_ =
-        StaticAttributes<StackAttr, std::uint32_t, std::uint32_t>;
+/**
+ * @class StackOp
+ * @brief The `Stack` operator performs a stacking operation over a sequence of input tensors.
+ *
+ * The `Stack` operator is used to aggregate multiple tensors into a single stacked tensor along
+ * a new dimension. It supports models requiring tensor aggregation or hierarchical data representation.
+ *
+ * - **Inputs**:
+ *   - `data_input`: The main data input for the operator.
+ *   - `max_elements`: An optional maximum limit on the number of elements to be stacked.
+ * - **Outputs**:
+ *   - `data_output`: The resulting stacked tensor.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
+class StackOp : public OperatorTensor,
+    public Registrable<StackOp, std::string, std::function<std::unique_ptr<OperatorImpl>(const StackOp&)>> {
+private:
+    using Attributes_ = StaticAttributes<StackAttr, std::uint32_t, std::uint32_t>;
     template <StackAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
-  public:
+public:
     static const std::string s_type;
 
+    /**
+     * @brief Constructs a new Stack Operator.
+     * @param maxElements The maximum number of elements to stack.
+     */
     StackOp(std::uint32_t maxElements = 0);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output
-     * tensor(s), but not its input tensors (the new operator has no input
-     * associated).
+     * @brief Copy-constructor. Copies the operator's attributes and its output tensors.
+     * The new operator has no input tensors associated initially.
      * @param op Operator to copy.
      */
-    StackOp(const StackOp &op);
+    StackOp(const StackOp& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::StackOp
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
-    void setBackend(const std::string &name,
-                    DeviceIdx_t device = 0) override final;
+    /**
+     * @brief Assign a specific backend and device for computation.
+     * @param name Name of the backend.
+     * @param device The device index (default is 0).
+     */
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
+    /**
+     * @brief Get the list of available backends compatible with this operator.
+     * @return A set of strings representing backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Check if dimensions have been forwarded successfully.
+     * @return True if dimensions are forwarded.
+     */
     bool dimsForwarded() const override final;
+
+    /**
+     * @brief Perform dimension inference for the operator, optionally allowing
+     * data dependency during the process.
+     * @param allowDataDependency Flag indicating whether data dependency is allowed.
+     * @return True if dimensions were successfully inferred.
+     */
     bool forwardDims(bool allowDataDependency = false) override final;
+
+    /**
+     * @brief Executes the forward pass for the `Stack` operation.
+     */
     void forward() override;
 
+    /**
+     * @brief Access the operator's attributes.
+     * @return A shared pointer to the operator's attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override {
         return mAttributes;
     }
 
-    inline std::uint32_t &maxElements() const {
+    /**
+     * @brief Get or set the maximum number of elements that can be stacked.
+     * @return A reference to the maximum elements attribute.
+     */
+    inline std::uint32_t& maxElements() const {
         return mAttributes->template getAttr<StackAttr::MaxElements>();
     }
 
-    inline std::uint32_t &forwardStep() const {
+    /**
+     * @brief Get or set the forward step counter for the operator.
+     * @return A reference to the forward step counter.
+     */
+    inline std::uint32_t& forwardStep() const {
         return mAttributes->template getAttr<StackAttr::ForwardStep>();
     }
 
+    /**
+     * @brief Retrieve the names of the operator's input tensors.
+     * @return A vector of strings representing input tensor names.
+     */
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "max_elements"};
     }
+
+    /**
+     * @brief Retrieve the names of the operator's output tensors.
+     * @return A vector of strings representing output tensor names.
+     */
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
-std::shared_ptr<Node> Stack(std::uint32_t maxElements = 0,
-                            const std::string &name = "");
-} // namespace Aidge
+/**
+ * @brief Create a Stack operator node.
+ * @param maxElements The maximum number of elements to stack.
+ * @param name The optional name for the node.
+ * @return A shared pointer to the newly created Stack operator node.
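+ *
+ * A minimal usage sketch (the node name is illustrative):
+ * @code
+ * auto stack = Stack(16, "stack_op");  // stack at most 16 elements
+ * @endcode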
+ */
+std::shared_ptr<Node> Stack(std::uint32_t maxElements = 0, const std::string& name = "");
+}  // namespace Aidge
 
 namespace {
+/**
+ * @brief String representations of the Stack operator's attributes.
+ */
 template <>
-const char *const EnumStrings<Aidge::StackAttr>::data[] = {"max_elements"};
+const char *const EnumStrings<Aidge::StackAttr>::data[] = {"forward_step", "max_elements"};
 }
 
-#endif
+#endif /* AIDGE_CORE_OPERATOR_STACK_H_ */
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 170baf6fd0f38668f64cbd36044c856fae261737..73cce3f554f3ea7bbd802ee239de49077a676823 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -24,6 +24,28 @@
 
 namespace Aidge {
 
+/**
+ * @brief Description of an element-wise Subtract operation on input Tensors,
+ * supporting NumPy broadcasting.
+ *
+ * For each pair of elements x and y from the input Tensors, the function 
+ * is defined as:
+ * `f(x, y) = x - y`
+ *
+ * Broadcasting adjusts shapes of the input Tensors to make them compatible:
+ * - Tensors are aligned from the rightmost dimensions.
+ * - Dimensions are compatible if they are equal, if one of them is 1, or if one of them is missing.
+ *
+ * The output Tensor shape is determined by taking the maximum size along 
+ * each dimension of the input Tensors after broadcasting.
+ * 
+ * Examples:
+ * 1. Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+ * 2. Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+ * 
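+ * Worked example for case 1 above (indices are illustrative):
+ * @code
+ * // A has shape (3, 4, 2) and B has shape (2): B is broadcast along the
+ * // last dimension, so for every valid (i, j, k):
+ * //   out[i][j][k] = A[i][j][k] - B[k]
+ * @endcode
+ *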
+ * @see OperatorTensor
+ * @see Registrable
+ */
 class Sub_Op : public OperatorTensor,
     public Registrable<Sub_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Sub_Op&)>> {
 public:
@@ -33,8 +55,10 @@ public:
     Sub_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Sub_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Sub_Op(const Sub_Op& op);
 
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index f1a30e3f08ce3886cc1ca39a55a3b23979a47860..71b1511d962318eb784c23611dda1f346281d7ae 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -23,16 +23,36 @@
 
 namespace Aidge {
 
-class Tanh_Op : public OperatorTensor,
-    public Registrable<Tanh_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Tanh_Op&)>> {
+/**
+ * @brief Description of an element-wise Hyperbolic Tangent (Tanh) operation
+ * on an input Tensor.
+ *
+ * For each element x in the input, the function is defined as:
+ * `f(x) = tanh(x)`, where tanh(x) is the hyperbolic tangent of x, 
+ * computed as:
+ * `tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))`
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
+class Tanh_Op : 
+    public OperatorTensor,
+    public Registrable<Tanh_Op,  // <Op, backend, implementation creation function>
+        std::string,
+        std::function<std::unique_ptr<OperatorImpl>(const Tanh_Op&)>>
+{
 public:
     static const std::string Type;
 
     Tanh_Op();
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Tanh_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Tanh_Op(const Tanh_Op& op);
 
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index c6341e934ea415cb23a7d4ce201351a0825e6081..ab3b18e51d54e97d22d6c9006803d7e804064783 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -32,85 +32,158 @@ namespace Aidge {
  */
 class TransposeImpl : public OperatorImpl {
 public:
+    /**
+     * @brief Constructor for TransposeImpl.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
     TransposeImpl(const Operator& op, const std::string& backend = "")
         : OperatorImpl(op, backend)
     {}
+
+    /**
+     * @brief Perform the forward operation for the transpose.
+     */
     void forward() override;
 };
 
+/**
+ * @enum TransposeAttr
+ * @brief Enumeration of attributes specific to the Transpose operator.
+ */
 enum class TransposeAttr {
-  /**
-   * @brief order of the output dims from the input dims. If left empty,
-   * the dimensions of input will be reversed.
-   */
+    /**
+     * @brief Order of the output dimensions relative to the input dimensions.
+     * 
+     * If this attribute is empty, the dimensions of the input tensor will
+     * be reversed.
+     */
     OutputDimsOrder
 };
 
 /**
- * @brief This operator has as purpose to transpose the axes of a given tensor.
- * input#0 : Tensor to transpose
- * @example Calling transpose() on a tensor of dimensions [1, 2, 3] with OutputDimsOrder=(1,0,2) result
- * in a tensor of dim [2, 1, 3].
- * @example Calling transpose() on a tensor of dimensions [1,2,3,4] with an empty OutputDimsOrder vector
- * will result in a tensor of dim [4,3,2,1].
+ * @brief Describes the operation of transposing the axes of a given tensor.
+ *
+ * The Transpose operator rearranges the axes of an input tensor according to a specified
+ * permutation order. If no order is provided, it reverses the axes.
+ *
+ * @example Input: Tensor of dimensions `[1, 2, 3]` with `OutputDimsOrder = {1, 0, 2}` results in
+ *   a tensor with dimensions `[2, 1, 3]`.
+ * @example Input: Tensor of dimensions `[1, 2, 3, 4]` with an empty `OutputDimsOrder` results in
+ *   a tensor with dimensions `[4, 3, 2, 1]`.
+ *
+ * @see OperatorTensor
+ * @see Registrable
  */
 class Transpose_Op : public OperatorTensor,
                 public Registrable<Transpose_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Transpose_Op&)>> {
 
 public:
+    /**
+     * @brief Static type string for the Transpose operator.
+     */
     static const std::string Type;
 
-
 private:
     using Attributes_ = StaticAttributes<TransposeAttr, std::vector<DimSize_t>>;
     template <TransposeAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    /**
+     * @brief Deleted default constructor.
+     */
     Transpose_Op() = delete;
 
-  /**
-   * @brief constructor for Transpose op
-   * @param[in] outputDimsOrder axes permutation order. By default axes are reversed.
-   */
+    /**
+     * @brief Constructor for the Transpose operator.
+     * @param[in] outputDimsOrder Axes permutation order. If empty, axes are reversed.
+     */
     Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Transpose_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     Transpose_Op(const Transpose_Op& op);
 
     /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Transpose_Op
+     * @brief Clone the operator using its copy constructor.
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Compute the output dimensions during the forward pass.
+     * @param allowDataDependency Whether to allow data-dependent dimensions.
+     * @return Boolean indicating whether dimension computation was successful.
+     */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
+    /**
+     * @brief Set the backend for the Transpose operator.
+     * @param[in] name Name of the backend.
+     * @param[in] device Device index (optional).
+     */
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Get the available backends for the Transpose operator.
+     * @return A set of backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     * @return A shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
     /**
-     * @brief axes new order, if left empty, axes will be reversed.
+     * @brief Get or modify the axes permutation order.
+     * @return Reference to the permutation order attribute.
+     *
+     * If left empty, axes will be reversed.
      */
-    inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes -> getAttr<TransposeAttr::OutputDimsOrder>(); }
+    inline std::vector<DimSize_t>& outputDimsOrder() const noexcept {
+        return mAttributes->getAttr<TransposeAttr::OutputDimsOrder>();
+    }
 
-    static const std::vector<std::string> getInputsName(){
+    /**
+     * @brief Get the input tensor names for the Transpose operator.
+     * @return A vector of input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+
+    /**
+     * @brief Get the output tensor names for the Transpose operator.
+     * @return A vector of output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Create a Transpose operation node.
+ *
+ * @param[in] outputDimsOrder Axes permutation order (optional).
+ * @param[in] name Name of the operator (optional).
+ * @return A shared pointer to the Node containing the Transpose operator.
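+ *
+ * A minimal usage sketch (node names are illustrative):
+ * @code
+ * auto transpose = Transpose({1, 0, 2}, "transpose_op");  // swap the first two axes
+ * auto reversed  = Transpose({}, "reverse_axes");         // empty order: reverse all axes
+ * @endcode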
+ */
 std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder = {},
                                 const std::string& name = "");
+
 }  // namespace Aidge
 
 namespace {
+/**
+ * @brief EnumStrings specialization for TransposeAttr.
+ */
 template <>
 const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"output_dims_order"};
 }
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index 09a689528a6814eca6bb56ef326e2da527f14843..333413b1d8d6530b14469ec1f451e9acfeead286 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -13,10 +13,10 @@
 #define AIDGE_CORE_OPERATOR_UNFOLD_H_
 
 #include <array>
-#include <cmath>    // std::floor
-#include <cstddef>  // std::size_t
+#include <cmath>
+#include <cstddef>
 #include <string>
-#include <utility>  // std::pair
+#include <utility>
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
@@ -25,95 +25,216 @@
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
-#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
+#include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+/**
+ * @brief Implementation of the Unfold operator.
+ * @tparam DIM Number of dimensions in the operation.
+ */
 template <DimIdx_t DIM>
 class Unfold_OpImpl : public OperatorImpl {
 public:
-    Unfold_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    /**
+     * @brief Constructor for Unfold_OpImpl.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
+    Unfold_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend) {}
+
+    /**
+     * @brief Perform the forward operation for the unfold.
+     */
     void forward() override;
 };
 
-enum class UnfoldAttr { StrideDims, DilationDims, KernelDims };
+/**
+ * @enum UnfoldAttr
+ * @brief Enumeration of attributes specific to the Unfold operator.
+ */
+enum class UnfoldAttr {
+    /**
+     * @brief Stride dimensions for the unfolding operation.
+     */
+    StrideDims,
+
+    /**
+     * @brief Dilation dimensions for the unfolding operation.
+     */
+    DilationDims,
+
+    /**
+     * @brief Kernel dimensions for the unfolding operation.
+     */
+    KernelDims
+};
 
+/**
+ * @brief Describes the operation of unfolding a tensor into sliding blocks.
+ * 
+ * The Unfold operator extracts sliding blocks from the input tensor along
+ * specified dimensions, controlled by stride, dilation, and kernel size.
+ * 
+ * @tparam DIM Number of dimensions involved in the operation.
+ *
+ * @example Input: Tensor of dimensions `[1, 3, 32, 32]`, with `KernelDims = {3, 3}`,
+ *   `StrideDims = {1, 1}`, and `DilationDims = {1, 1}`: the operator extracts every
+ *   overlapping 3x3 sliding block from the input feature map.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
 template <DimIdx_t DIM>
 class Unfold_Op : public OperatorTensor,
-                public Registrable<Unfold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM> &)>> {
-
+                  public Registrable<Unfold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM>&)>> {
 public:
+    /**
+     * @brief Static type string for the Unfold operator.
+     */
     static const std::string Type;
 
 private:
     using Attributes_ = StaticAttributes<UnfoldAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>>;
+                                         std::array<DimSize_t, DIM>,
+                                         std::array<DimSize_t, DIM>,
+                                         std::array<DimSize_t, DIM>>;
     template <UnfoldAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    /**
+     * @brief Deleted default constructor.
+     */
     Unfold_Op() = delete;
 
-    Unfold_Op(const std::array<DimSize_t, DIM> &kernelDims,
-            const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-            const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
+    /**
+     * @brief Constructor for the Unfold operator.
+     * @param[in] kernelDims Size of the sliding window for each dimension.
+     * @param[in] strideDims Step size for moving the window (optional).
+     * @param[in] dilationDims Spacing between elements in the kernel (optional).
+     */
+    Unfold_Op(const std::array<DimSize_t, DIM>& kernelDims,
+              const std::array<DimSize_t, DIM>& strideDims = create_array<DimSize_t, DIM>(1),
+              const std::array<DimSize_t, DIM>& dilationDims = create_array<DimSize_t, DIM>(1));
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
-     * input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op Unfold_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
-    Unfold_Op(const Unfold_Op<DIM> &op);
+    Unfold_Op(const Unfold_Op<DIM>& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Unfold_Op
+     * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
+    /**
+     * @brief Compute the output dimensions during the forward pass.
+     * @param[in] allowDataDependency Whether to allow data-dependent dimensions.
+     * @return Boolean indicating whether dimension computation was successful.
+     */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    /**
+     * @brief Set the backend for the Unfold operator.
+     * @param[in] name Name of the backend.
+     * @param[in] device Device index (optional).
+     */
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    /**
+     * @brief Get the available backends for the Unfold operator.
+     * @return A set of backend names.
+     */
     std::set<std::string> getAvailableBackends() const override;
 
+    /**
+     * @brief Get the attributes of the operator.
+     * @return A shared pointer to the attributes.
+     */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<UnfoldAttr::StrideDims>(); }
-    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<UnfoldAttr::DilationDims>(); }
-    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<UnfoldAttr::KernelDims>(); }
 
-    static const std::vector<std::string> getInputsName(){
+    /**
+     * @brief Get or modify the stride dimensions.
+     * @return Reference to the stride dimensions attribute.
+     */
+    inline std::array<DimSize_t, DIM>& strideDims() const {
+        return mAttributes->template getAttr<UnfoldAttr::StrideDims>();
+    }
+
+    /**
+     * @brief Get or modify the dilation dimensions.
+     * @return Reference to the dilation dimensions attribute.
+     */
+    inline std::array<DimSize_t, DIM>& dilationDims() const {
+        return mAttributes->template getAttr<UnfoldAttr::DilationDims>();
+    }
+
+    /**
+     * @brief Get or modify the kernel dimensions.
+     * @return Reference to the kernel dimensions attribute.
+     */
+    inline std::array<DimSize_t, DIM>& kernelDims() const {
+        return mAttributes->template getAttr<UnfoldAttr::KernelDims>();
+    }
+
+    /**
+     * @brief Get the input tensor names for the Unfold operator.
+     * @return A vector of input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+
+    /**
+     * @brief Get the output tensor names for the Unfold operator.
+     * @return A vector of output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
+/**
+ * @brief Create an Unfold operation node.
+ *
+ * @param[in] kernelDims Size of the sliding window.
+ * @param[in] name Name of the operator (optional).
+ * @param[in] strideDims Step size for moving the window (optional).
+ * @param[in] dilationDims Spacing between elements in the kernel (optional).
+ * @return A shared pointer to the Node containing the Unfold operator.
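+ *
+ * A minimal usage sketch (the node name is illustrative):
+ * @code
+ * auto unfold = Unfold({3, 3}, "unfold_op");  // 3x3 sliding window, default stride and dilation
+ * @endcode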
+ */
 template <std::array<DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Node> Unfold(const std::array<DimSize_t, DIM> &kernelDims,
-                                  const std::string& name = "",
-                                  const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
+                            const std::string& name = "",
+                            const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                            const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
 
 template <DimSize_t DIM>
-inline std::shared_ptr<Node> Unfold(
-    DimSize_t const (&kernelDims)[DIM],
-    const std::string& name = "",
-    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
+inline std::shared_ptr<Node> Unfold(DimSize_t const (&kernelDims)[DIM],
+                                    const std::string& name = "",
+                                    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                                    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+    static_assert(DIM <= MaxDim, "Too many kernel dimensions required by Unfold, not supported");
     return Unfold(to_array(kernelDims), name, strideDims, dilationDims);
 }
+
 }  // namespace Aidge
 
 extern template class Aidge::Unfold_Op<2>;
 
 namespace {
+/**
+ * @brief EnumStrings specialization for UnfoldAttr.
+ */
 template <>
-const char *const EnumStrings<Aidge::UnfoldAttr>::data[] = {
+const char* const EnumStrings<Aidge::UnfoldAttr>::data[] = {
     "stride_dims",
     "dilation_dims",
     "kernel_dims"
diff --git a/include/aidge/operator/WeightInterleaving.hpp b/include/aidge/operator/WeightInterleaving.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..315bb3e2dd163f23949ac09719a20c335e03c265
--- /dev/null
+++ b/include/aidge/operator/WeightInterleaving.hpp
@@ -0,0 +1,96 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_WEIGHTINTERLEAVING_H_
+#define AIDGE_CORE_OPERATOR_WEIGHTINTERLEAVING_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+
+namespace Aidge {
+
+/**
+ * @brief The WeightInterleaving operator compresses the last dimension of a tensor by packing
+ * low-bitwidth values (e.g., 2, 3, or 4 bits) into fewer bytes.
+ *
+ * The operator reduces the size of the last dimension based on the bitwidth (`nb_bits`), 
+ * packing multiple values into each byte. For example, 4-bit values result in a halved last dimension, 
+ * while 2-bit values reduce it by a factor of 4.
+ * 
+ * The output tensor has the same shape as the input, except for the compressed last dimension.
+ *
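+ * Shape example, following the description above (values are illustrative):
+ * @code
+ * // Input  weights: (64, 3, 3, 8) holding 4-bit values
+ * // Output weights: (64, 3, 3, 4) -- last dimension halved, two values packed per byte
+ * @endcode
+ *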
+ * @see OperatorTensor
+ * @see Registrable
+ */
+class WeightInterleaving_Op :
+    public OperatorTensor,
+    public Registrable<WeightInterleaving_Op,  // <Op, backend, implementation creation function>
+        std::string,
+        std::function<std::shared_ptr<OperatorImpl>(const WeightInterleaving_Op&)>>
+{
+public:
+    static const std::string Type;
+
+    WeightInterleaving_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor.
+     * @param op WeightInterleaving_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
+     */
+    WeightInterleaving_Op(const WeightInterleaving_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @return A shared pointer to the cloned operator.
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+
+    /**
+     * @brief Calculates the required size of the 8-bit `compactData` vector.
+     * 
+     * This function determines the minimum number of bytes needed in `compactData`
+     * to store `dataSize` elements compacted to `nb_bits` bits each.
+     * 
+     * @param dataSize The total number of elements in the input data array.
+     * @param nb_bits The number of bits to use for each compacted element (from 1 to 7).
+     * @return std::size_t The required size in bytes for `compactData`.
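+     *
+     * Illustrative expectation, derived from the class description rather than the
+     * implementation: with 4-bit values the last dimension is halved, so
+     * @code
+     * WeightInterleaving_Op op;
+     * const std::size_t bytes = op.compactDataSize(10, 4);  // expected to be 5
+     * @endcode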
+     */
+    std::size_t compactDataSize(std::size_t dataSize, std::uint8_t nb_bits);
+
+};
+
+std::shared_ptr<Node> WeightInterleaving(const std::string& name = "");
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_WEIGHTINTERLEAVING_H_ */
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index 86c722b158657633d4509c1181b1f18201d0d514..0a3f5dc4d0ea00ddb5c8d0b8885269c882f7f705 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -124,6 +124,12 @@ void explicitCastMove(std::shared_ptr<GraphView> graphView);
 */
 void explicitTranspose(std::shared_ptr<GraphView> graphView);
 
+/**
+ * Replace a single meta operator by its micro graph.
+ * @param node Meta operator node to expand.
+ * @return true if the node is indeed a meta operator and could be expanded.
+ */
+bool expandMetaOp(std::shared_ptr<Node> node);
+
 /**
  * Flatten the graph by replacing the meta operators by their micro graph.
  * @param recursive If true, recursively replace meta operators until there is
@@ -180,6 +186,24 @@ size_t convToMatMul(std::shared_ptr<GraphView> graph);
  */
 void adaptToBackend(std::shared_ptr<GraphView> graph);
 
+
+/**
+ * @brief Create a GenericOp from an Operator and replace it.
+ *
+ * @param node Node whose Operator will be replaced by a generic Operator.
+ */
+void toGenericOp(std::shared_ptr<Node> node);
+
+/**
+ * @brief The passed node is expected to contain an operator whose input at index 1 holds
+ *        weights of type Int4, Int3, Int2 or binary.
+ *        This recipe only performs memory transformations on the weight tensor:
+ *        first, it permutes the dimensions to match the NHWC data format;
+ *        second, it compacts the last dimension of the weights (channel dimension) into 8-bit packed values.
+ *
+ * @param node Node whose weights will be interleaved.
+ */
+void applyWeightInterleaving(std::shared_ptr<Node> node);
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_RECIPES_H_ */
diff --git a/include/aidge/scheduler/ProdConso.hpp b/include/aidge/scheduler/ProdConso.hpp
index cfc83cbf91cb7eeef2a3bbb0a4c5017a2480fe9b..bc42cb36c309952dbfc872d0722e1111aae6d3b7 100644
--- a/include/aidge/scheduler/ProdConso.hpp
+++ b/include/aidge/scheduler/ProdConso.hpp
@@ -12,6 +12,7 @@
 #ifndef AIDGE_SCHEDULER_PRODCONSO_H_
 #define AIDGE_SCHEDULER_PRODCONSO_H_
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -33,6 +34,10 @@ public:
         return std::make_unique<ProdConso>(op, true);
     }
 
+    const Operator& getOperator() const noexcept {
+        return mOp;
+    }
+
     /**
      * @brief Minimum amount of data from a specific input required by the
      * implementation to be run.
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 28ecde6d9319ae05be20f591cc9a6a4e2a29acc0..db9b903cc9f90ef3d84252f34867b23cfe13d237 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -90,6 +90,12 @@ public:
         NotConnected
     };
 
+    enum class EarlyLateSort {
+        Default,
+        AsSoonAsPossible,
+        AsLateAsPossible
+    };
+
     /**
      * @struct PriorProducersConsumers
      * @brief Manages producer-consumer relationships for nodes.
@@ -124,9 +130,10 @@ public:
     /**
      * @brief Get the static scheduling order of nodes.
      * @param step The step of the static schedule to retrieve (default is 0).
+     * @param sorting Sorting mode.
      * @return Vector of shared pointers to Nodes in their scheduled order.
      */
-    std::vector<std::shared_ptr<Node>> getStaticScheduling(std::size_t step = 0) const;
+    std::vector<std::shared_ptr<Node>> getStaticScheduling(std::size_t step = 0, EarlyLateSort sorting = EarlyLateSort::Default) const;
 
     /**
      * @brief Get the GraphView associated with this Scheduler.
@@ -156,6 +163,15 @@ public:
     */
     MemoryManager generateMemory(bool incProducers = false, bool wrapAroundBuffer = false) const;
 
+    /**
+     * Generate the memory layout for the current static scheduling, with auto-
+     * concatenation: the Concat operator is replaced by direct allocation
+     * when possible.
+     * @param incProducers If true, include the producers in the memory layout.
+     * @param wrapAroundBuffer If true, allow wrapping in memory planes.
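+     *
+     * Usage sketch (assumes a Scheduler instance whose static scheduling has
+     * already been generated):
+     * @code
+     * MemoryManager mem = scheduler.generateMemoryAutoConcat(false, true);
+     * @endcode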
+     */
+    MemoryManager generateMemoryAutoConcat(bool incProducers = false, bool wrapAroundBuffer = false) const;
+
     /**
      * @brief Connect input tensors to the data input of the GraphView.
      * In case of multiple data input tensors, they are mapped to producers in
@@ -171,6 +187,7 @@ public:
      * @param fileName Name of the file to save the diagram (without extension).
      */
     void saveStaticSchedulingDiagram(const std::string& fileName) const;
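+
+    /**
+     * @brief Save in a Mermaid file the static scheduling with repetitive
+     * sequences factorized (behaviour inferred from the method name and from
+     * getFactorizedScheduling below).
+     * @param fileName Name of the file to save the diagram (without extension).
+     * @param minRepeat Minimum number of repetitions required to factorize a sequence.
+     */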
+    void saveFactorizedStaticSchedulingDiagram(const std::string& fileName, size_t minRepeat = 2) const;
 
     /**
      * @brief Save in a Mermaid file the order of layers execution.
@@ -217,6 +234,18 @@ protected:
      */
     void generateEarlyLateScheduling(std::vector<StaticSchedulingElement*>& schedule) const;
 
+    /**
+     * @brief Get the factorized scheduling, by identifying repetitive sequences
+     * in the scheduling.
+     *
+     * @param schedule Vector of pointers to the StaticSchedulingElements to be processed.
+     * @param minRepeat Minimum number of repetitions required to factorize a sequence.
+     * @return Vector containing the repetitive sequences, in order. The second
+     * element of the pair is the number of repetitions.
+     */
+    std::vector<std::pair<std::vector<StaticSchedulingElement*>, size_t>>
+        getFactorizedScheduling(const std::vector<StaticSchedulingElement*>& schedule, size_t minRepeat = 2) const;
+
 private:
     /**
      * @brief Summarize the consumer state of a node for debugging purposes.
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index f73de2ea31681a60c85229102a9b2ebbce4d3c3e..e25485fe059ddbf2d5eb72e3aa1c79457633a467 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -26,14 +26,6 @@
 namespace py = pybind11;
 #endif
 
-namespace {
-// This is the type that will hold all the strings. Each enumerate type will
-// declare its own specialization.
-template <typename T> struct EnumStrings {
-    static const char* const data[];
-};
-}
-
 namespace Aidge {
 template<class T, std::size_t N>
 constexpr std::size_t size(T (&)[N]) { return N; }
diff --git a/include/aidge/utils/Directories.hpp b/include/aidge/utils/Directories.hpp
index ca49e1b57cc5d01f9f0ff7fe8dc85520697c6821..783783946ff5bdae5214cc41f6a1f029fae6e09c 100644
--- a/include/aidge/utils/Directories.hpp
+++ b/include/aidge/utils/Directories.hpp
@@ -8,29 +8,31 @@
  * SPDX-License-Identifier: EPL-2.0
  *
  ********************************************************************************/
-
-
 #ifndef AIDGE_DIRECTORIES_H_
 #define AIDGE_DIRECTORIES_H_
 
-
 #include <algorithm>
 #include <errno.h>
-#include <iostream>
-#include <sstream> // std::stringstream
-#include <string>  // std::string
+#include <string>
+// #include <string_view> available in c++-17
+#include <vector>
+
+#include <fmt/core.h>
+#include <fmt/format.h>
 #include <sys/stat.h>
+
 #ifndef _S_ISTYPE
 #define _S_ISTYPE(mode, mask)  (((mode) & _S_IFMT) == (mask))
 #endif
+
 #ifndef S_ISREG
 #define S_ISREG(mode) _S_ISTYPE((mode), _S_IFREG)
 #endif
+
 #ifndef S_ISDIR
 #define S_ISDIR(mode) _S_ISTYPE((mode), _S_IFDIR)
 #endif
 
-
 #ifdef WIN32
 #include <direct.h>
 #else
@@ -39,7 +41,6 @@
 #endif
 
 namespace Aidge {
-
     bool isNotValidFilePath(int c) {
         return (iscntrl(c)
             || c == '<'
@@ -58,37 +59,56 @@ namespace Aidge {
         return filePath;
     }
 
-
-    bool createDirectories(const std::string& dirName)
-    {
-        std::stringstream path(dirName);
-        std::string dir;
-        std::string pathToDir("");
+    // std::string_view could be used here once the project moves to C++17:
+    // bool createDirectories(std::string_view dirName) {
+    bool createDirectories(const std::string& dirName) {
+        std::string currentPath;
+        std::string remaining = dirName;
+        size_t pos = 0;
         int status = 0;
 
-        while (std::getline(path, dir, '/') && status == 0) {
-            pathToDir += dir + '/';
+        while ((pos = remaining.find('/')) != std::string::npos && status == 0) {
+            currentPath += fmt::format("{}/", remaining.substr(0, pos));
+            remaining = remaining.substr(pos + 1);
+
             struct stat fileStat;
-            if (stat(pathToDir.c_str(), &fileStat) != 0) {
+            if (stat(currentPath.c_str(), &fileStat) != 0) {
                 // Directory does not exist
-    #ifdef WIN32
-                status = _mkdir(pathToDir.c_str());
-    #else
-    #if defined(S_IRWXU)
-                status = mkdir(pathToDir.c_str(), S_IRWXU | S_IRWXG | S_IRWXO);
-    #else
-                status = mkdir(pathToDir.c_str());
-    #endif
-    #endif
+#ifdef WIN32
+                status = _mkdir(currentPath.c_str());
+#else
+#if defined(S_IRWXU)
+                status = mkdir(currentPath.c_str(), S_IRWXU | S_IRWXG | S_IRWXO);
+#else
+                status = mkdir(currentPath.c_str());
+#endif
+#endif
             } else if (!S_ISDIR(fileStat.st_mode)) {
                 status = -1;
             }
         }
-        return (status == 0 || errno == EEXIST);
-    }
 
+        // Handle the last component if any
+        if (!remaining.empty() && status == 0) {
+            currentPath += fmt::format("{}/", remaining);
+            struct stat fileStat;
+            if (stat(currentPath.c_str(), &fileStat) != 0) {
+#ifdef WIN32
+                status = _mkdir(currentPath.c_str());
+#else
+#if defined(S_IRWXU)
+                status = mkdir(currentPath.c_str(), S_IRWXU | S_IRWXG | S_IRWXO);
+#else
+                status = mkdir(currentPath.c_str());
+#endif
+#endif
+            } else if (!S_ISDIR(fileStat.st_mode)) {
+                status = -1;
+            }
+        }
 
+        return (status == 0 || errno == EEXIST);
+    }
 }
 
 #endif //AIDGE_DIRECTORIES_H_
-
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 3ecd4da393eaac9881d008e27989a52e883ecb6a..6ac76c138e2a835f8e74c5ede26e449c537d61d2 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -34,6 +34,23 @@ namespace py = pybind11;
 
 
 namespace Aidge {
+#if __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
+#define AIDGE_DYNATTR_HAVE_CPP17
+#endif
+
+#if defined(AIDGE_DYNATTR_HAVE_CPP17) || defined(__cpp_lib_void_t)
+using std::void_t;
+#else
+template <typename...>
+using void_t = void;
+#endif
+
+// Detection idiom to check if a type T has a less-than operator
+template <typename T, typename = void>
+struct has_less_than_operator : std::false_type {};
+
+template <typename T>
+struct has_less_than_operator<T, void_t<decltype(std::declval<T>() < std::declval<T>())>> : std::true_type {};
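+
+// For example, has_less_than_operator<int>::value is true, while a type that does not
+// provide operator< makes the decltype expression ill-formed and falls back to the
+// std::false_type primary template (classic detection idiom).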
 
 ///\todo store also a fix-sized code that indicates the type
 ///\todo managing complex types or excluding non-trivial, non-aggregate types
@@ -41,6 +58,10 @@ class DynamicAttributes : public Attributes {
 public:
     DynamicAttributes() = default;
     DynamicAttributes(const std::map<std::string, future_std::any>& attrs): mAttrs(attrs) {}
+    DynamicAttributes& operator=(const std::map<std::string, future_std::any>& attrs) {
+        mAttrs = attrs;
+        return *this;
+    }
 
     /**
      * \brief Returning an Attribute identified by its name
@@ -340,6 +361,14 @@ public:
         }
     };
 
+    template<typename T>
+    static inline typename std::enable_if<!has_less_than_operator<T>::value, void>::type makeTypeConditionallyAvailable() {}
+
+    template<typename T>
+    static typename std::enable_if<has_less_than_operator<T>::value, void>::type makeTypeConditionallyAvailable() {
+        mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+    }
+
     // Stores typed utils functions for each attribute type ever used
     static std::map<std::type_index, std::unique_ptr<AnyUtils_>> mAnyUtils;
 };
@@ -359,7 +388,7 @@ struct DynamicAttributes::AnyUtils<py::object> : public DynamicAttributes::AnyUt
 
     size_t hash(const future_std::any& attr) const override final {
         // Here we are mixing Python and C++ hashes... if both are
-        // well implemented, this should not increase the collision 
+        // well implemented, this should not increase the collision
         // probability for the same number of stored hashes.
         return py::hash(future_std::any_cast<py::object>(attr));
     }
@@ -403,6 +432,30 @@ namespace std {
             return seed;
         }
     };
+
+    // Special case for std::array
+    template <typename T, std::size_t N>
+    struct hash<std::array<T, N>> {
+        std::size_t operator()(const std::array<T, N>& iterable) const {
+            std::size_t seed = 0;
+            for (const auto& v : iterable) {
+                // Combine the hash of each array element into the seed
+                Aidge::hash_combine(seed, std::hash<T>()(v));
+            }
+            return seed;
+        }
+    };
+
+    // Specialization of std::hash for std::pair<T1, T2>
+    template <typename T1, typename T2>
+    struct hash<std::pair<T1, T2>> {
+        std::size_t operator()(const std::pair<T1, T2>& p) const {
+            std::size_t seed = 0;
+            Aidge::hash_combine(seed, std::hash<T1>()(p.first));
+            Aidge::hash_combine(seed, std::hash<T2>()(p.second));
+            return seed;
+        }
+    };
 }
 
 namespace future_std {
diff --git a/include/aidge/utils/ErrorHandling.hpp b/include/aidge/utils/ErrorHandling.hpp
index f6a9aefe24a420e261a100041578dd751c4a1ee2..ddbe95557f1a8791cf53fb3eefc36ce0c4ab45a8 100644
--- a/include/aidge/utils/ErrorHandling.hpp
+++ b/include/aidge/utils/ErrorHandling.hpp
@@ -13,7 +13,6 @@
 #ifndef AIDGE_ERRORHANDLING_H_
 #define AIDGE_ERRORHANDLING_H_
 
-#include <memory>
 #include <cassert>
 
 #include <fmt/format.h>
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index d6851f1e42233f9d8af88d10da9046f73f94b8c4..394d65906d1b616f8b68e7ecf7d06a5f7f26c555 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -9,20 +9,28 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_LOG_H_
-#define AIDGE_LOG_H_
+#ifndef AIDGE_CORE_UTILS_LOG_H_
+#define AIDGE_CORE_UTILS_LOG_H_
 
 #include <memory>
+#include <string>
 #include <vector>
 
 #include <fmt/format.h>
-#include <fmt/ranges.h>
+#include <fmt/color.h>
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#endif
 
-#include "aidge/data/half_fmt.hpp"
+#include "aidge/utils/logger/EnumString.hpp"
 
-#include "aidge/utils/Attributes.hpp"
 
 namespace Aidge {
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
+
 /**
  * Helper to define a context anywhere, hiding the scoped variable name
  * which has no relevance.
@@ -30,169 +38,243 @@ namespace Aidge {
 #define AIDGE_LOG_CONTEXT(...)                                                \
     const Log::Context logContext_##__LINE__(__VA_ARGS__)
 
-template <class U> static void discard_args(U parg) {
-    (void)parg;
-}
-template <class U, class... Us> static void discard_args(U parg, Us... pargs) {
-    (void)parg;
-    discard_args(pargs...);
-}
-
 /**
- * Aidge logging class, for displaying and file logging of events.
+ * @brief Logging utility class for development and debugging
+ *
+ * The Log class provides a static interface for logging messages at different
+ * severity levels. It supports both console output (with optional colors) and
+ * file output. The logging behavior can be configured through environment
+ * variables or programmatically.
+ *
+ * Environment variables:
+ * - AIDGE_LOGLEVEL_CONSOLE: Set console output level (Debug,Info,Notice,Warn,Error,Fatal)
+ * - AIDGE_LOGLEVEL_FILE: Set file output level
+ * - AIDGE_LOG_COLOR: Enable/disable colored output ("off"/"OFF"/"0" to disable)
+ * - AIDGE_LOG_FILE: Set log file path
+ *
+ * @note Console colors are enabled by default
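+ *
+ * Programmatic configuration sketch (values are illustrative):
+ * @code
+ * Log::setConsoleLevel(Log::Notice);  // hide Debug and Info on the console
+ * Log::setFileLevel(Log::Debug);      // keep everything in the log file
+ * Log::setFileName("aidge.log");
+ * Log::warn("Something looks off: {}", 42);
+ * @endcode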
  */
 class Log {
-  public:
-    enum Level { Debug = 0, Info, Notice, Warn, Error, Fatal };
+public:
+    /**
+     * @brief Log severity levels in ascending order
+     */
+    enum Level {
+        Debug = 0,    ///< Detailed information for debugging
+        Info,         ///< General information about program execution
+        Notice,       ///< Normal but significant conditions
+        Warn,         ///< Warning messages for potentially problematic situations
+        Error,        ///< Error messages for serious problems
+        Fatal         ///< Critical errors that lead to program termination
+    };
 
+    /**
+     * @brief RAII class for managing logging contexts
+     *
+     * This class allows adding temporary context information to logs. When an instance
+     * is created, it adds a context message that will be displayed with all logs until
+     * the instance is destroyed.
+     *
+     * Example:
+     * @code
+     * {
+     *     Log::Context ctx("Processing file: {}", filename);
+     *     Log::info("Starting process"); // Will show context
+     *     // Context automatically removed when ctx is destroyed
+     * }
+     * @endcode
+     */
     class Context {
-      public:
-        template <typename... Args> Context(Args &&...args) {
+    public:
+        /**
+         * @brief Create a new logging context
+         * @param args Format string and arguments for the context message
+         */
+        template <typename... Args>
+        Context(Args&&... args) {
             Log::mContext.push_back(fmt::format(std::forward<Args>(args)...));
         }
 
+        /**
+         * @brief Destroy the context, removing it from the active contexts
+         */
         ~Context() {
             Log::mContext.pop_back();
         }
     };
 
     /**
-     * Detailed messages for debugging purposes, providing information helpful
-     * for developers to trace and identify issues.
-     * Detailed insights of what is happening in an operation, not useful for
-     * the end-user. The operation is performed nominally.
-     * @note This level is disabled at compile time for Release, therefore
-     * inducing no runtime overhead for Release.
+     * @brief Log a debug message
+     * @param args Format string and arguments for the message
+     * @note Debug messages are only compiled in debug builds
      */
-    template <typename... Args> static void debug(Args &&...args) {
 #ifndef NDEBUG
-        // only when compiled in Debug
+    template <typename... Args>
+    static void debug(Args&&... args) {
         log(Debug, fmt::format(std::forward<Args>(args)...));
+    }
 #else
-        discard_args(&args...);
-#endif
+    template <typename... Args>
+    static void debug(Args&&... /*args*/) {
+        // Debug logging disabled in release builds
     }
+#endif
 
     /**
-     * Messages that provide a record of the normal operation, about
-     * the application's state, progress, or important events.
-     * Reports normal start, end and key steps in an operation. The operation
-     * is performed nominally.
+     * @brief Log an info message
+     * @param args Format string and arguments for the message
      */
-    template <typename... Args> static void info(Args &&...args) {
+    template <typename... Args>
+    static void info(Args&&... args) {
         log(Info, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
-     * Applies to normal but significant conditions that may require
-     * monitoring, like unusual or normal fallback events. Reports specific
-     * paths in an operation. The operation can still be performed normally.
+     * @brief Log a notice message
+     * @param args Format string and arguments for the message
      */
-    template <typename... Args> static void notice(Args &&...args) {
+    template <typename... Args>
+    static void notice(Args&&... args) {
         log(Notice, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
-     * Indicates potential issues or situations that may lead to errors but do
-     * not necessarily cause immediate problems.
-     * Some specific steps of the operation could not be performed, but it can
-     * still provide an exploitable result.
+     * @brief Log a warning message
+     * @param args Format string and arguments for the message
      */
-    template <typename... Args> static void warn(Args &&...args) {
+    template <typename... Args>
+    static void warn(Args&&... args) {
         log(Warn, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
-     * Signifies a problem or unexpected condition that the application can
-     * recover from, but attention is needed to prevent further issues.
-     * The operation could not be performed, but it does not prevent potential
-     * further operations.
+     * @brief Log an error message
+     * @param args Format string and arguments for the message
      */
-    template <typename... Args> static void error(Args &&...args) {
+    template <typename... Args>
+    static void error(Args&&... args) {
         log(Error, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
-     * Represents a critical error or condition that leads to the termination
-     * of the application, indicating a severe and unrecoverable problem. The
-     * operation could not be performed and any further operation is
-     * impossible.
+     * @brief Log a fatal error message
+     * @param args Format string and arguments for the message
      */
-    template <typename... Args> static void fatal(Args &&...args) {
+    template <typename... Args>
+    static void fatal(Args&&... args) {
         log(Fatal, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
-     * Set the minimum log level displayed in the console.
+     * @brief Set the minimum level for console output
+     * @param level Minimum level to display
      */
     static void setConsoleLevel(Level level) {
         mConsoleLevel = level;
+        // Note: In python each module has a compiled version of Log.
+        // This means each static instance is separated and does not communicate.
+        // To allow the communication between the different modules, we use PyCapsule.
+        // https://docs.python.org/3/extending/extending.html#providing-a-c-api-for-an-extension-module
+#ifdef PYBIND
+#define _CRT_SECURE_NO_WARNINGS
+        if (Py_IsInitialized()){
+            // Note: Setting mConsoleLevel and not level is important
+            // to avoid garbage collection of the pointer.
+            py::set_shared_data("consoleLevel", &mConsoleLevel);
+        }
+#endif // PYBIND
+    }
+
+    static Level getConsoleLevel() {
+#ifdef PYBIND
+#define _CRT_SECURE_NO_WARNINGS
+        if (Py_IsInitialized()){
+            auto shared_data = reinterpret_cast<Level *>(py::get_shared_data("consoleLevel"));
+            if (shared_data)
+                mConsoleLevel = *shared_data;
+        }
+#endif // PYBIND
+        return mConsoleLevel;
     }
 
     /**
-     * Set or disable colors on console.
-     * Initial value should be assumed true.
+     * @brief Enable or disable colored console output
+     * @param enabled True to enable colors, false to disable
      */
-    static void setConsoleColor(bool enabled) {
-        mConsoleColor = enabled;
-    }
+    static void setConsoleColor(bool enabled) noexcept { mConsoleColor = enabled; }
 
     /**
-     * Set the minimum log level saved in the log file.
+     * @brief Set the minimum level for file output
+     * @param level Minimum level to write to file
      */
-    constexpr static void setFileLevel(Level level) {
-        mFileLevel = level;
-    }
+    static void setFileLevel(Level level) noexcept { mFileLevel = level; }
 
     /**
-     * Set the log file name.
-     * Close the current log file and open the one with the new file name.
-     * If empty, stop logging into a file.
+     * @brief Set the log file path
+     * @param fileName Path to log file (empty to disable file logging)
+     * @throw std::runtime_error if file cannot be opened
      */
-    static void setFileName(const std::string &fileName) {
-        if (fileName != mFileName) {
-            mFileName = fileName;
-            mFile.release();
-
-            if (!fileName.empty()) {
-                initFile(fileName);
-            }
-        }
-    }
+    static void setFileName(const std::string& fileName);
 
     /**
-     * @struct fcloseDeleter
-     * @brief Custom deleter to prevent compiler warnings when using
-     * std::unique_ptr with FILE*.
-     *
-     * Using function pointers with attributes (e.g., __attribute__((nonnull)))
-     * as deleters can trigger warnings like -Wignored-attributes. By defining a
-     * custom deleter struct, these attributes are preserved, eliminating the
-     * warnings.
+     * @brief Set the precision format for floating point numbers.
+     * @param precision Number of digits displayed to the right of the decimal point.
      */
-    struct fcloseDeleter {
-        void operator()(FILE *f) const noexcept {
-            std::fclose(f);
+    static void setPrecision(int precision) noexcept {
+        if (precision < 0) {
+            Log::notice("Impossible to set precision to {}. Must be a positive number.", precision);
+            return;
         }
-    };
+        mFloatingPointPrecision = precision;
+#ifdef PYBIND
+#define _CRT_SECURE_NO_WARNINGS
+        if (Py_IsInitialized()){
+            // Note: Setting mFloatingPointPrecision is important
+            // to avoid garbage collection of the pointer.
+            py::set_shared_data("floatingPointPrecision", &mFloatingPointPrecision);
+        }
+#endif // PYBIND
+    }
+    static int getPrecision() noexcept {
+        return mFloatingPointPrecision;
+    }
 
 private:
     static void log(Level level, const std::string& msg);
     static void initFile(const std::string& fileName);
 
-    static Level mConsoleLevel;
-    static bool mConsoleColor;
-    static Level mFileLevel;
-    static std::string mFileName;
-    static std::unique_ptr<FILE, fcloseDeleter> mFile;
-    static std::vector<std::string> mContext;
+    struct fcloseDeleter {
+        void operator()(FILE* f) const noexcept { std::fclose(f); }
+    };
+
+    static Level mConsoleLevel;        ///< Minimum level for console output
+    static bool mConsoleColor;         ///< Whether to use colored output
+    static Level mFileLevel;           ///< Minimum level for file output
+    static std::string mFileName;      ///< Path to log file
+    static std::unique_ptr<FILE, fcloseDeleter> mFile;  ///< File handle
+    static std::vector<std::string> mContext;  ///< Stack of active contexts
+
+    static int mFloatingPointPrecision;
 };
+
 } // namespace Aidge
 
+// Formatter specialization for Log::Level
+template <>
+struct fmt::formatter<Aidge::Log::Level> : formatter<const char*> {
+    template <typename FormatContext>
+    auto format(const Aidge::Log::Level& level, FormatContext& ctx) const {
+        const char* name = EnumStrings<Aidge::Log::Level>::data[static_cast<int>(level)];
+        return formatter<const char*>::format(name, ctx);
+    }
+};
+
 namespace {
 template <>
-const char *const EnumStrings<Aidge::Log::Level>::data[] =
-    {"Debug", "Info", "Notice", "Warn", "Error", "Fatal"};
+const char* const EnumStrings<Aidge::Log::Level>::data[] = {
+    "DEBUG", "INFO", "NOTICE", "WARNING", "ERROR", "FATAL"
+};
 }
 
-#endif // AIDGE_LOG_H_
+#endif // AIDGE_CORE_UTILS_LOG_H_
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 439d2c638731b40bec0696a73b62b99e3bfddd41..9c18d3cefe466f68edde70536bca4d493f9f9b18 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -24,6 +24,7 @@
 #endif
 
 #include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
@@ -322,7 +323,11 @@ private:
     inline typename std::enable_if<I == sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& /*t*/, std::map<std::string, future_std::any>& /*attrs*/) const {}
 
     template<std::size_t I = 0, typename... Tp>
-    inline typename std::enable_if<I < sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& t, std::map<std::string, future_std::any>& attrs) const {
+    inline typename std::enable_if<I < sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& t, std::map<std::string, future_std::any>& attrs) const {
+        // Ensure that the type will be known to DynamicAttributes
+        using ElementType = typename std::tuple_element<I,std::tuple<Tp...>>::type;
+        DynamicAttributes::makeTypeConditionallyAvailable<ElementType>();
+
         attrs.insert(std::make_pair(EnumStrings<ATTRS_ENUM>::data[I], future_std::any(std::get<I>(t))));
         appendAttr<I + 1, Tp...>(t, attrs);
     }
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
index b5601f84c60b81d8c560b61b06c00673f51f4eee..794abf7632766fb55d8a9a96c0ffa0c046d9de62 100644
--- a/include/aidge/utils/TensorUtils.hpp
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -11,12 +11,18 @@
 
 #ifndef AIDGE_CORE_UTILS_TENSOR_UTILS_H_
 #define AIDGE_CORE_UTILS_TENSOR_UTILS_H_
+
 #include <cmath>  // std::abs
+
+#include "aidge/data/DataType.hpp"
 #include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Log.hpp"
 
 namespace Aidge {
+
 /**
- * @brief Compare two :cpp:class:`Aidge::Tensor` value wise. The comparison function is:
+ * @brief Compare two Aidge::Tensor value wise. The comparison function is:
  *
  * |t1-t2| <= absolute + relative * |t2|
  *
@@ -25,32 +31,62 @@ namespace Aidge {
  * If the datatype is not the same between each tensor return False
  * If the templated type does not correspond to the datatype of each tensor, raise an assertion error
  *
- * @tparam T should correspond to the type of the tensor, define the type of the absolute and relative error
- * @param t1  first :cpp:class:`Aidge::Tensor` to test
- * @param t2  second :cpp:class:`Aidge::Tensor` to test
+ * @tparam T1 should correspond to the type of the first tensor, defines part of the type for absolute and relative error
+ * @tparam T2 should correspond to the type of the second tensor, defaults to T1
+ * @param t1 first Aidge::Tensor to test
+ * @param t2 second Aidge::Tensor to test
  * @param relative relative difference allowed (should be between 0 and 1)
- * @param absolute absolute error allowed (shoulmd be positive)
- * @return true if both tensor are approximately equal and have the datatype, shape. Else return false
+ * @param absolute absolute error allowed (should be positive)
+ * @return true if both tensors are approximately equal and have the same datatype and shape. Else return false
  */
 template <typename T1, typename T2 = T1>
-bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float absolute = 1e-8f){
-    assert(t1.dataType() == NativeType<T1>::type);
-    assert(t2.dataType() == NativeType<T2>::type);
-    assert(relative >= 0);
-    assert(absolute >= 0 && absolute<=1);
+bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float absolute = 1e-8f) {
+    // Check template type matches tensor datatype
+    if (t1.dataType() != NativeType_v<T1>) {
+        Log::error("First tensor datatype ({}) does not match template type", t1.dataType());
+        return false;
+    }
 
-    if (t1.size() != t2.size()){
+    if (t2.dataType() != NativeType_v<T2>) {
+        Log::error("Second tensor datatype ({}) does not match template type", t2.dataType());
         return false;
     }
-    for(size_t i = 0; i < t1.size(); ++i){
-        if (static_cast<float>(std::abs(t1.get<T1>(i) - t2.get<T2>(i))) > (absolute + (relative * static_cast<float>(std::abs(t2.get<T2>(i)))))){
-            fmt::print("t1:\n{}\nt2\n{}\nat index {} {} != {}", t1, t2, i, t1.get<T1>(i), t2.get<T1>(i));
+
+    // Validate parameters
+    if (relative < 0.0f) {
+        Log::error("Relative error must be non-negative (got {})", relative);
+        return false;
+    }
+
+    if (absolute < 0.0f || absolute > 1.0f) {
+        Log::error("Absolute error must be between 0 and 1 (got {})", absolute);
+        return false;
+    }
+
+    // Check tensor sizes match
+    if (t1.size() != t2.size()) {
+        Log::error("Tensor sizes do not match: {} vs {}", t1.size(), t2.size());
+        return false;
+    }
+
+    // Compare values
+    for (size_t i = 0; i < t1.size(); ++i) {
+        const auto val1 = t1.get<T1>(i);
+        const auto val2 = t2.get<T2>(i);
+        const float diff = static_cast<float>(std::abs(val1 - val2));
+        const float threshold = absolute + (relative * static_cast<float>(std::abs(val2)));
+
+        if (diff > threshold) {
+            Log::notice("Tensor values differ at index {}: {} vs {} (diff: {}, threshold: {})\n"
+                "Tensor 1:\n{}\nTensor 2:\n{}",
+                i, val1, val2, diff, threshold, t1, t2);
             return false;
         }
     }
+
     return true;
 }
 
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_UTILS_TENSOR_UTILS_H_s */
+#endif // AIDGE_CORE_UTILS_TENSOR_UTILS_H_
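A short usage sketch of the reworked approxEq, which now logs and returns false instead of asserting when the template type, sizes, or tolerances are off. The Array1D construction and the availability of a backend able to hold float data are assumptions here, not guarantees of this header:

    #include "aidge/data/Tensor.hpp"
    #include "aidge/utils/TensorUtils.hpp"

    int main() {
        // Hypothetical tensors; Array1D is assumed to build a small 1-D float tensor.
        Aidge::Tensor t1 = Aidge::Array1D<float, 3>{{1.0f, 2.0f, 3.0f}};
        Aidge::Tensor t2 = Aidge::Array1D<float, 3>{{1.0f, 2.0f, 3.0000001f}};

        // The template parameter must match the tensors' datatype (float here),
        // otherwise the function logs an error and returns false.
        const bool equal = Aidge::approxEq<float>(t1, t2, /*relative=*/1e-5f, /*absolute=*/1e-8f);
        return equal ? 0 : 1;
    }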
diff --git a/include/aidge/utils/logger/EnumString.hpp b/include/aidge/utils/logger/EnumString.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6091d8bf50c1de37fef351c20c710b2f43e2f545
--- /dev/null
+++ b/include/aidge/utils/logger/EnumString.hpp
@@ -0,0 +1,23 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_LOGGER_ENUMSTRING_H_
+#define AIDGE_CORE_UTILS_LOGGER_ENUMSTRING_H_
+
+namespace {
+// This is the type that will hold all the strings. Each enumerate type will
+// declare its own specialization.
+template <typename T> struct EnumStrings {
+    static const char* const data[];
+};
+}
+
+#endif /* AIDGE_CORE_UTILS_LOGGER_ENUMSTRING_H_ */
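EnumStrings only declares the storage; every enum that wants a printable name supplies its own specialization (as Log.hpp does above). A minimal sketch with a hypothetical enum, for illustration only:

    #include <iostream>

    #include "aidge/utils/logger/EnumString.hpp"

    // Hypothetical enum, not part of aidge_core.
    enum class Color { Red, Green, Blue };

    namespace {
    // One string per enumerator, in declaration order.
    template <>
    const char* const EnumStrings<Color>::data[] = {"Red", "Green", "Blue"};
    }

    int main() {
        std::cout << EnumStrings<Color>::data[static_cast<int>(Color::Green)] << '\n';  // "Green"
        return 0;
    }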
diff --git a/include/aidge/utils/sys_info/CoreVersionInfo.hpp b/include/aidge/utils/sys_info/CoreVersionInfo.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..648998ede84a886315be9f26dfde68e7abddd345
--- /dev/null
+++ b/include/aidge/utils/sys_info/CoreVersionInfo.hpp
@@ -0,0 +1,37 @@
+#ifndef AIDGE_UTILS_SYS_INFO_CORE_VERSION_INFO_H
+#define AIDGE_UTILS_SYS_INFO_CORE_VERSION_INFO_H
+
+#include "aidge/utils/Log.hpp"
+#include "aidge/core_version.h"
+
+namespace Aidge {
+
+constexpr inline const char * getCoreProjectVersion(){
+    return PROJECT_VERSION;
+}
+
+constexpr inline const char * getCoreGitHash(){
+    return PROJECT_GIT_HASH;
+}
+
+inline void showCoreVersion() {
+    Log::info("Aidge core: {} ({}), {} {}", getCoreProjectVersion(), getCoreGitHash(), __DATE__, __TIME__);
+    // Compiler version
+    #if defined(__clang__)
+    /* Clang/LLVM. ---------------------------------------------- */
+        Log::info("Clang/LLVM compiler version: {}.{}.{}\n", __clang_major__ , __clang_minor__, __clang_patchlevel__);
+    #elif defined(__ICC) || defined(__INTEL_COMPILER)
+    /* Intel ICC/ICPC. ------------------------------------------ */
+        Log::info("Intel ICC/ICPC compiler version: {}\n", __INTEL_COMPILER);
+    #elif defined(__GNUC__) || defined(__GNUG__)
+    /* GNU GCC/G++. --------------------------------------------- */
+        Log::info("GNU GCC/G++ compiler version: {}.{}.{}", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__);
+    #elif defined(_MSC_VER)
+    /* Microsoft Visual Studio. --------------------------------- */
+        Log::info("Microsoft Visual Studio compiler version: {}\n", _MSC_VER);
+    #else
+        Log::info("Unknown compiler\n");
+    #endif
+}
+}  // namespace Aidge
+#endif  // AIDGE_UTILS_SYS_INFO_CORE_VERSION_INFO_H
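A brief sketch of how these helpers could be called, for instance from a small diagnostic binary; aidge/core_version.h is assumed to be generated at configure time from version.h.in below:

    #include "aidge/utils/sys_info/CoreVersionInfo.hpp"

    int main() {
        // Prints "Aidge core: <version> (<git hash>), <build date> <build time>"
        // followed by the detected compiler and its version.
        Aidge::showCoreVersion();
        return 0;
    }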
diff --git a/include/aidge/utilsParsing/ParsingToken.hpp b/include/aidge/utilsParsing/ParsingToken.hpp
index 8a1a740bf9bd4596675ef2dbd3e30af7765ffaa8..3781fcbf15a9035917af95c978faafe252813169 100644
--- a/include/aidge/utilsParsing/ParsingToken.hpp
+++ b/include/aidge/utilsParsing/ParsingToken.hpp
@@ -1,72 +1,79 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
 
 #ifndef AIDGE_CORE_PARSING_TOKEN_H_
 #define AIDGE_CORE_PARSING_TOKEN_H_
 
 #include <string>
 #include <type_traits>
-#include <sstream> // Include the necessary header
 
-namespace Aidge{
+#include <fmt/format.h>
 
-    template <typename EnumType>
-    class ParsingToken: public std::enable_shared_from_this<ParsingToken<EnumType>>
-    {
-        static_assert(std::is_enum<EnumType>::value, "ParsingToken EnumType must be an enum type");
-        public:
-        /**
-         * @brief Token container
-         * @param type one of the token type
-         * @param lexeme String representing additional information of the token
-         */
-        ParsingToken(const EnumType type , const std::string lexeme ):mLexeme(lexeme),mType(type){}
+namespace Aidge {
 
-        /**
-         * @brief get the lexeme
-         * @return std::string
-         */
-        const std::string getLexeme(void){
-            return mLexeme;
-        }
+template <typename EnumType>
+class ParsingToken: public std::enable_shared_from_this<ParsingToken<EnumType>>
+{
+    static_assert(std::is_enum<EnumType>::value, "ParsingToken EnumType must be an enum type");
+public:
+    /**
+     * @brief Token container
+     * @param type one of the token type
+     * @param lexeme String representing additional information of the token
+     */
+    ParsingToken(const EnumType type, const std::string& lexeme)
+        : mLexeme(lexeme), mType(type) {}
 
-        /**
-         * @brief get the token type
-         *
-         * @return ParsingToken
-         */
-        const EnumType getType(void){
-            return mType;
-        }
+    /**
+     * @brief get the lexeme
+     * @return std::string
+     */
+    const std::string getLexeme(void) const noexcept {
+        return mLexeme;
+    }
 
-        /**
-         * @brief copy the token
-         * @return deep copy of the token
-         */
-        std::shared_ptr<ParsingToken> copy(){
-            auto newToken = std::make_shared<ParsingToken<EnumType>>(mType,mLexeme);
-            return newToken;
-        }
+    /**
+     * @brief get the token type
+     *
+     * @return ParsingToken
+     */
+    const EnumType getType(void) const noexcept {
+        return mType;
+    }
 
-        //TODO
-        std::ostringstream rep(void){
-            std::ostringstream out;
-            out << " Token ("  << mLexeme <<")" << "\n";
-            return out;
-        }
+    /**
+     * @brief copy the token
+     * @return deep copy of the token
+     */
+    std::shared_ptr<ParsingToken> copy() const noexcept {
+        return std::make_shared<ParsingToken<EnumType>>(mType, mLexeme);
+    }
 
-        private:
+    //TODO
+    std::string rep(void) const { return fmt::format(" Token ({})\n", mLexeme); }
 
-        /**
-         * @brief additional information of the token
-         */
-        const std::string mLexeme;
+private:
+    /**
+     * @brief additional information of the token
+     */
+    const std::string mLexeme;
 
-        /**
-         * @brief type of the token
-         * @see ConditionalTokenTypes
-         */
-        const EnumType mType;
+    /**
+     * @brief type of the token
+     * @see ConditionalTokenTypes
+     */
+    const EnumType mType;
 
-    };
-}
+};
+
+}  // namespace Aidge
 
 #endif //AIDGE_CORE_PARSING_TOKEN_H_
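A small sketch of the refactored ParsingToken with a hypothetical token enum; behaviour is unchanged, only const/noexcept qualifiers and the fmt-based rep() were added:

    #include <iostream>
    #include <memory>

    #include "aidge/utilsParsing/ParsingToken.hpp"

    // Hypothetical token kinds, for illustration only.
    enum class DemoTokenTypes { Identifier, Number, EndOfLine };

    int main() {
        auto token = std::make_shared<Aidge::ParsingToken<DemoTokenTypes>>(
            DemoTokenTypes::Identifier, "conv1");

        std::cout << token->rep();               // " Token (conv1)\n"
        auto clone = token->copy();              // independent copy of the token
        std::cout << clone->getLexeme() << '\n'; // "conv1"
        return 0;
    }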
diff --git a/include/aidge/version.h.in b/include/aidge/version.h.in
new file mode 100644
index 0000000000000000000000000000000000000000..4b876f63002972c1f8f1340b70cdecdace911012
--- /dev/null
+++ b/include/aidge/version.h.in
@@ -0,0 +1,11 @@
+#ifndef VERSION_H
+#define VERSION_H
+
+namespace Aidge {
+static constexpr const int PROJECT_VERSION_MAJOR = @PROJECT_VERSION_MAJOR@;
+static constexpr const int PROJECT_VERSION_MINOR = @PROJECT_VERSION_MINOR@;
+static constexpr const int PROJECT_VERSION_PATCH = @PROJECT_VERSION_PATCH@;
+static constexpr const char * PROJECT_VERSION = "@PROJECT_VERSION_MAJOR@.@PROJECT_VERSION_MINOR@.@PROJECT_VERSION_PATCH@";
+static constexpr const char * PROJECT_GIT_HASH = "@GIT_COMMIT_HASH@";
+}
+#endif // VERSION_H
diff --git a/pyproject.toml b/pyproject.toml
index b838aca5ee100d182ba88b79f23f3a2ebff9acf3..122be13efd746e66c6f8aad7a3c564a4d010bb67 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,18 +1,27 @@
 [project]
-name = "aidge_core"
+name="aidge_core"
+
 description="Core algorithms for operators and graph of the AIDGE framework"
 dependencies = [
     "numpy>=1.21.6",
-    "Jinja2>=3.1.2"
+    "Jinja2>=3.1.2",
+    "matplotlib"
 ]
-requires-python = ">= 3.7"
+requires-python = ">= 3.8"
 readme = "README.md"
 license = { file = "LICENSE" }
-classifiers = [ 
+classifiers = [
     "Development Status :: 2 - Pre-Alpha",
     "Programming Language :: Python :: 3"
     ]
-dynamic = ["version"] # defined in tool.setuptools_scm
+dynamic = ["version"] # defined by pbr
+
+[project.urls]
+Homepage = "https://www.deepgreen.ai/en/platform"
+Documentation = "https://eclipse-aidge.readthedocs.io/en/latest/"
+Repository = "https://gitlab.eclipse.org/eclipse/aidge/aidge_core"
+Issues = "https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/issues/"
+Changelog = "https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/releases"
 
 [project.optional-dependencies]
 test = [
@@ -22,8 +31,8 @@ test = [
 [build-system]
 requires = [
     "setuptools>=64",
-    "setuptools_scm[toml]==7.1.0",
-    "cmake>=3.18.4.post1"
+    "cmake>=3.18.4.post1",
+    "pbr"
 ]
 build-backend = "setuptools.build_meta"
 
@@ -40,11 +49,7 @@ exclude = [ # exclude packages matching these glob patterns (empty by default)
     ".unit_tests.static",
     ".aidge_export_aidge.__pycache__",
     ".aidge_export_aidge.utils.__pycache__",
-] 
-
-# SETUPTOOLS_SCM
-[tool.setuptools_scm]
-write_to = "aidge_core/_version.py"
+]
 
 #####################################################
 # CIBUILDWHEEL
@@ -118,7 +123,7 @@ persistent = true
 
 # Minimum Python version to use for version dependent checks. Will default to the
 # version used to run pylint.
-py-version = "3.7"
+py-version = "3.8"
 
 # When enabled, pylint would attempt to guess common misconfiguration and emit
 # user-friendly hints instead of false-positive error messages.
diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp
index 49e45ed7e447c00cf7300e8228ee7d1b04800083..cd94997cf7a584633be8b811b1e694bdf9886fc1 100644
--- a/python_binding/backend/pybind_OperatorImpl.cpp
+++ b/python_binding/backend/pybind_OperatorImpl.cpp
@@ -81,6 +81,7 @@ void init_OperatorImpl(py::module& m){
     .def(py::init<const DynamicAttributes&>(), py::arg("attr") = DynamicAttributes())
     .def(py::init<const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("io"), py::arg("attr") = DynamicAttributes())
     .def(py::init<const ImplSpec::IOSpec&, const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("i"), py::arg("o"), py::arg("attr") = DynamicAttributes())
+    .def(py::init<const std::vector<ImplSpec::IOSpec>&, const std::vector<ImplSpec::IOSpec>&, const DynamicAttributes&>(), py::arg("i"), py::arg("o"), py::arg("attr") = DynamicAttributes())
     .def("__eq__", static_cast<bool(*)(const ImplSpec&, const ImplSpec&)>(&operator==))
     .def("__repr__", [](ImplSpec self){
         return fmt::format("{}\n", self);
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index cdf8cc23250366c62a4102118a95e68cec28ec3d..52e773b6962c5de8eb7499be03c4f2deb0be24c9 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -10,63 +10,14 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
 
 #include "aidge/data/Data.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
-template <class T>
-void bindEnum(py::module& m, const std::string& name) {
-    // Define enumeration names for python as lowercase type name
-    // This defined enum names compatible with basic numpy type
-    // name such as: float32, flot64, [u]int32, [u]int64, ...
-    auto python_enum_name = [](const T& type) {
-        auto str_lower = [](std::string& str) {
-            std::transform(str.begin(), str.end(), str.begin(),
-                           [](unsigned char c){
-                               return std::tolower(c);
-                           });
-        };
-        auto type_name = std::string(Aidge::format_as(type));
-        str_lower(type_name);
-        return type_name;
-    };
-    // Auto generate enumeration names from lowercase type strings
-    std::vector<std::string> enum_names;
-    for (auto type_str : EnumStrings<T>::data) {
-        auto type = static_cast<T>(enum_names.size());
-        auto enum_name = python_enum_name(type);
-        enum_names.push_back(enum_name);
-    }
-
-    // Define python side enumeration aidge_core.type
-    auto e_type = py::enum_<T>(m, name.c_str());
-
-    // Add enum value for each enum name
-    for (std::size_t idx = 0; idx < enum_names.size(); idx++) {
-        e_type.value(enum_names[idx].c_str(), static_cast<T>(idx));
-    }
-
-    // Define str() to return the bare enum name value, it allows
-    // to compare directly for instance str(tensor.type())
-    // with str(nparray.type)
-    e_type.def("__str__", [enum_names](const T& type) {
-        return enum_names[static_cast<int>(type)];
-    }, py::prepend());;
-}
-
 void init_Data(py::module& m){
-    bindEnum<DataType>(m, "dtype");
-    bindEnum<DataFormat>(m, "dformat");
-
     py::class_<Data, std::shared_ptr<Data>>(m,"Data");
-
-
-    m.def("format_as", (const char* (*)(DataType)) &format_as, py::arg("dt"));
-    m.def("format_as", (const char* (*)(DataFormat)) &format_as, py::arg("df"));
-    m.def("get_data_format_transpose", &getDataFormatTranspose, py::arg("src"), py::arg("dst"));  
-
-}
 }
+
+} // namespace Aidge
diff --git a/python_binding/data/pybind_DataFormat.cpp b/python_binding/data/pybind_DataFormat.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a63df321c3298284df7de8fd2c3eb0fc0cecae24
--- /dev/null
+++ b/python_binding/data/pybind_DataFormat.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <algorithm>  // std::transform
+#include <cctype>     // std::tolower
+#include <string>     // std::string
+#include <vector>
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include "aidge/data/DataFormat.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <class T>
+void bindEnum(py::module& m, const std::string& name) {
+    // Define enumeration names for python as lowercase type names.
+    // This defines enum names compatible with basic numpy type
+    // names such as: float32, float64, [u]int32, [u]int64, ...
+    auto python_enum_name = [](const T& type) {
+        auto str_lower = [](std::string& str) {
+            std::transform(str.begin(), str.end(), str.begin(),
+                           [](unsigned char c){
+                               return std::tolower(c);
+                           });
+        };
+        auto type_name = std::string(Aidge::format_as(type));
+        str_lower(type_name);
+        return type_name;
+    };
+
+    // Auto generate enumeration names from lowercase type strings
+    std::vector<std::string> enum_names;
+    for (auto type_str : EnumStrings<T>::data) {
+        auto type = static_cast<T>(enum_names.size());
+        auto enum_name = python_enum_name(type);
+        enum_names.push_back(enum_name);
+    }
+
+    // Define python side enumeration aidge_core.type
+    auto e_type = py::enum_<T>(m, name.c_str());
+
+    // Add enum value for each enum name
+    for (std::size_t idx = 0; idx < enum_names.size(); idx++) {
+        e_type.value(enum_names[idx].c_str(), static_cast<T>(idx));
+    }
+
+    // Define str() to return the bare enum name value, it allows
+    // to compare directly for instance str(tensor.type())
+    // with str(nparray.type)
+    e_type.def("__str__", [enum_names](const T& type) {
+        return enum_names[static_cast<int>(type)];
+    }, py::prepend());
+}
+
+void init_DataFormat(py::module& m) {
+    bindEnum<DataFormat>(m, "dformat");
+    m.def("format_as", (const char* (*)(DataFormat)) &format_as, py::arg("df"));
+    m.def("get_data_format_transpose", &getDataFormatTranspose, py::arg("src"), py::arg("dst"));
+}
+
+} // namespace Aidge
diff --git a/python_binding/data/pybind_DataType.cpp b/python_binding/data/pybind_DataType.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6aab399760c52b3886cddb0fb26c2b649252e7bb
--- /dev/null
+++ b/python_binding/data/pybind_DataType.cpp
@@ -0,0 +1,71 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <algorithm>  // std::transform
+#include <cctype>     // std::tolower
+#include <string>     // std::string
+#include <vector>
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include "aidge/data/DataType.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <class T>
+void bindEnum(py::module& m, const std::string& name) {
+    // Define enumeration names for python as lowercase type names.
+    // This defines enum names compatible with basic numpy type
+    // names such as: float32, float64, [u]int32, [u]int64, ...
+    auto python_enum_name = [](const T& type) {
+        auto str_lower = [](std::string& str) {
+            std::transform(str.begin(), str.end(), str.begin(),
+                           [](unsigned char c){
+                               return std::tolower(c);
+                           });
+        };
+        auto type_name = std::string(Aidge::format_as(type));
+        str_lower(type_name);
+        return type_name;
+    };
+
+    // Auto generate enumeration names from lowercase type strings
+    std::vector<std::string> enum_names;
+    for (auto type_str : EnumStrings<T>::data) {
+        auto type = static_cast<T>(enum_names.size());
+        auto enum_name = python_enum_name(type);
+        enum_names.push_back(enum_name);
+    }
+
+    // Define python side enumeration aidge_core.type
+    auto e_type = py::enum_<T>(m, name.c_str());
+
+    // Add enum value for each enum name
+    for (std::size_t idx = 0; idx < enum_names.size(); idx++) {
+        e_type.value(enum_names[idx].c_str(), static_cast<T>(idx));
+    }
+
+    // Define str() to return the bare enum name value, it allows
+    // to compare directly for instance str(tensor.type())
+    // with str(nparray.type)
+    e_type.def("__str__", [enum_names](const T& type) {
+        return enum_names[static_cast<int>(type)];
+    }, py::prepend());
+}
+
+void init_DataType(py::module& m) {
+    bindEnum<DataType>(m, "dtype");
+    m.def("format_as", (const char* (*)(DataType)) &format_as, py::arg("dt"));
+}
+
+} // namespace Aidge
diff --git a/python_binding/data/pybind_Elts.cpp b/python_binding/data/pybind_Elts.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..59a8211e26c59cbcc4ec3e8456933403ba6c25e7
--- /dev/null
+++ b/python_binding/data/pybind_Elts.cpp
@@ -0,0 +1,85 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <algorithm>  // std::transform
+#include <cctype>     // std::tolower
+#include <string>     // std::string
+#include <vector>
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <pybind11/operators.h>
+
+#include "aidge/data/Elts.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <class T>
+void bindEnum(py::module& m, const std::string& name) {
+    // Define enumeration names for python as lowercase type names.
+    // This defines enum names compatible with basic numpy type
+    // names such as: float32, float64, [u]int32, [u]int64, ...
+    auto python_enum_name = [](const T& type) {
+        auto str_lower = [](std::string& str) {
+            std::transform(str.begin(), str.end(), str.begin(),
+                           [](unsigned char c){
+                               return std::tolower(c);
+                           });
+        };
+        auto type_name = std::string(Aidge::format_as(type));
+        str_lower(type_name);
+        return type_name;
+    };
+
+    // Auto generate enumeration names from lowercase type strings
+    std::vector<std::string> enum_names;
+    for (auto type_str : EnumStrings<T>::data) {
+        auto type = static_cast<T>(enum_names.size());
+        auto enum_name = python_enum_name(type);
+        enum_names.push_back(enum_name);
+    }
+
+    // Define python side enumeration aidge_core.type
+    auto e_type = py::enum_<T>(m, name.c_str());
+
+    // Add enum value for each enum name
+    for (std::size_t idx = 0; idx < enum_names.size(); idx++) {
+        e_type.value(enum_names[idx].c_str(), static_cast<T>(idx));
+    }
+
+    // Define str() to return the bare enum name value, it allows
+    // to compare directly for instance str(tensor.type())
+    // with str(nparray.type)
+    e_type.def("__str__", [enum_names](const T& type) {
+        return enum_names[static_cast<int>(type)];
+    }, py::prepend());
+}
+
+void init_Elts(py::module& m) {
+    bindEnum<Elts_t::EltType>(m, "EltType");
+    m.def("format_as", (const char* (*)(Elts_t::EltType)) &format_as, py::arg("elt"));
+
+    py::class_<Elts_t, std::shared_ptr<Elts_t>>(
+        m, "Elts_t", py::dynamic_attr())
+        .def_static("none_elts", &Elts_t::NoneElts)
+        .def_static("data_elts", &Elts_t::DataElts, py::arg("data"), py::arg("token") = 1)
+        .def_static("token_elts", &Elts_t::TokenElts, py::arg("token"))
+        .def_readwrite("data", &Elts_t::data)
+        .def_readwrite("token", &Elts_t::token)
+        .def_readwrite("type", &Elts_t::type)
+        .def(py::self + py::self)
+        .def(py::self += py::self)
+        .def(py::self < py::self)
+        .def(py::self > py::self);
+}
+
+} // namespace Aidge
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 0d4ed716ca6c65c2e8a0153a729ebecef771ea9e..2171d48975db8f4029abe7982bf6dfc17640dd52 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -40,16 +40,16 @@ using NumpyDType = py::detail::npy_api::constants;
 // Map Numpy dtype ids to aidge datatypes.
 // If a numpy dtype is not present, np array of this type is rejected.
 static const std::map<NumpyDType, DataType> NumpyTypeNameAsNativeType = {
-    { NumpyDType::NPY_INT8_, NativeType<std::int8_t>::type },
-    { NumpyDType::NPY_INT16_, NativeType<std::int16_t>::type },
-    { NumpyDType::NPY_INT32_, NativeType<std::int32_t>::type },
-    { NumpyDType::NPY_INT64_, NativeType<std::int64_t>::type },
-    { NumpyDType::NPY_UINT8_, NativeType<std::uint8_t>::type },
-    { NumpyDType::NPY_UINT16_, NativeType<std::uint16_t>::type },
-    { NumpyDType::NPY_UINT32_, NativeType<std::uint32_t>::type },
-    { NumpyDType::NPY_UINT64_, NativeType<std::uint64_t>::type },
-    { NumpyDType::NPY_FLOAT_, NativeType<float>::type },
-    { NumpyDType::NPY_DOUBLE_, NativeType<double>::type },
+    { NumpyDType::NPY_INT8_, NativeType_v<std::int8_t> },
+    { NumpyDType::NPY_INT16_, NativeType_v<std::int16_t> },
+    { NumpyDType::NPY_INT32_, NativeType_v<std::int32_t> },
+    { NumpyDType::NPY_INT64_, NativeType_v<std::int64_t> },
+    { NumpyDType::NPY_UINT8_, NativeType_v<std::uint8_t> },
+    { NumpyDType::NPY_UINT16_, NativeType_v<std::uint16_t> },
+    { NumpyDType::NPY_UINT32_, NativeType_v<std::uint32_t> },
+    { NumpyDType::NPY_UINT64_, NativeType_v<std::uint64_t> },
+    { NumpyDType::NPY_FLOAT_, NativeType_v<float> },
+    { NumpyDType::NPY_DOUBLE_, NativeType_v<double> },
 };
 
 // The Numpy API indexes that we need to convert bare numpy scalars
@@ -159,20 +159,20 @@ static bool getScalarNativeVal(const py::object obj, NativeValue* val_ptr, DataT
             using caster_i64 = py::detail::type_caster<std::int64_t>;
             using caster_f32 = py::detail::type_caster<float>;
             if (caster_i32().load(obj, false)) {
-                native_dtype = NativeType<std::int32_t>::type;
+                native_dtype = NativeType_v<std::int32_t>;
                 native_val.i32 = py::cast<std::int32_t>(obj);
             } else if (caster_i64().load(obj, false)) {
-                native_dtype = NativeType<std::int64_t>::type;
+                native_dtype = NativeType_v<std::int64_t>;
                 native_val.i64 = py::cast<std::int64_t>(obj);
             } else {
-                native_dtype = NativeType<float>::type;
+                native_dtype = NativeType_v<float>;
                 native_val.f32 = py::cast<float>(obj);
             }
             found = true;
         } else if (py::isinstance<py::float_>(obj)) {
             // Note that for native python float, we cast to float32 which may loss
             // precision as python floats are of type float64.
-            native_dtype = NativeType<float>::type;
+            native_dtype = NativeType_v<float>;
             native_val.f32 = py::cast<float>(obj);
             found = true;
         }
@@ -196,19 +196,19 @@ static void getConservativeNativeVal(const py::object obj, NativeValue *val_ptr,
             using caster_i64 = py::detail::type_caster<std::int64_t>;
             using caster_u64 = py::detail::type_caster<std::uint64_t>;
             if (caster_i64().load(obj, false)) {
-                native_dtype = NativeType<std::int64_t>::type;
+                native_dtype = NativeType_v<std::int64_t>;
                 native_val.i64 = py::cast<std::int64_t>(obj);
             } else if (caster_u64().load(obj, false)) {
-                native_dtype = NativeType<std::uint64_t>::type;
+                native_dtype = NativeType_v<std::uint64_t>;
                 native_val.u64 = py::cast<std::uint64_t>(obj);
             } else {
-                native_dtype = NativeType<double>::type;
+                native_dtype = NativeType_v<double>;
                 native_val.f64 = py::cast<double>(obj);
             }
             found = true;
         } else if (py::isinstance<py::float_>(obj)) {
             // Note that for conservative cast we use double which is our larger float
-            native_dtype = NativeType<double>::type;
+            native_dtype = NativeType_v<double>;
             native_val.f64 = py::cast<double>(obj);
             found = true;
         }
@@ -226,6 +226,8 @@ static T castToNativeType(const py::object val_obj) {
     DataType dtype;
     getConservativeNativeVal(val_obj, &val, &dtype);
     switch (dtype) {
+    case DataType::Int4:
+        return (T)val.i8;
     case DataType::Int8:
         return (T)val.i8;
     case DataType::Int16:
@@ -287,7 +289,7 @@ void addArrayCtor(pyTensorClass& mTensor) {
         /* Request a buffer descriptor from Python */
         py::buffer_info info = b.request();
         Tensor* newTensor = new Tensor();
-        newTensor->setDataType(NativeType<T>::type);
+        newTensor->setDataType(NativeType_v<T>);
         const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
         newTensor->resize(dims);
 
@@ -315,14 +317,18 @@ void init_Tensor(py::module& m){
     .def(py::self - py::self)
     .def(py::self * py::self)
     .def(py::self / py::self)
+    .def("zeros", &Tensor::zeros)
+    .def("mean", &Tensor::mean)
+    .def("abs", &Tensor::abs)
     .def("clone", &Tensor::clone)
-	.def("sqrt", &Tensor::sqrt)
+    .def("sqrt", &Tensor::sqrt)
     .def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true)
     .def("set_data_format", &Tensor::setDataFormat, py::arg("data_format"), py::arg("copyTrans") = true)
     .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
     .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
     .def("grad", &Tensor::grad)
     .def("set_grad", &Tensor::setGrad)
+    .def("backend", &Tensor::backend)
     .def("dtype", &Tensor::dataType)
     .def("dformat", &Tensor::dataFormat)
     .def("size", &Tensor::size)
@@ -339,7 +345,7 @@ void init_Tensor(py::module& m){
         return b.toString();
     })
     .def("__repr__", [](Tensor& b) {
-        return fmt::format("Tensor(dims = {}, dtype = {})", b.dims(), std::string(EnumStrings<DataType>::data[static_cast<int>(b.dataType())]));
+        return fmt::format("Tensor({}, dims = {}, dtype = {})", b.toString(-1, 7), b.dims(), b.dataType());
     })
     .def("__len__", [](Tensor& b) -> size_t{
         return b.size();
@@ -353,6 +359,22 @@ void init_Tensor(py::module& m){
                 return py::cast(b.get<float>(idx));
             case DataType::Int8:
                 return py::cast(b.get<std::int8_t>(idx));
+            case DataType::Int4:
+                return py::cast(b.get<std::int8_t>(idx));
+            case DataType::Dual_Int4:
+                return py::cast(b.get<std::int8_t>(idx));
+            case DataType::Int3:
+                return py::cast(b.get<std::int8_t>(idx));
+            case DataType::Dual_Int3:
+                return py::cast(b.get<std::int8_t>(idx));
+            case DataType::Int2:
+                return py::cast(b.get<std::int8_t>(idx));
+            case DataType::Quad_Int2:
+                return py::cast(b.get<std::int8_t>(idx));
+            case DataType::Binary:
+                return py::cast(b.get<std::int8_t>(idx));
+            case DataType::Octo_Binary:
+                return py::cast(b.get<std::int8_t>(idx));
             case DataType::Int16:
                 return py::cast(b.get<std::int16_t>(idx));
             case DataType::Int32:
@@ -361,6 +383,18 @@ void init_Tensor(py::module& m){
                 return py::cast(b.get<std::int64_t>(idx));
             case DataType::UInt8:
                 return py::cast(b.get<std::uint8_t>(idx));
+            case DataType::UInt4:
+                return py::cast(b.get<std::uint8_t>(idx));
+            case DataType::Dual_UInt4:
+                return py::cast(b.get<std::uint8_t>(idx));
+            case DataType::UInt3:
+                return py::cast(b.get<std::uint8_t>(idx));
+            case DataType::Dual_UInt3:
+                return py::cast(b.get<std::uint8_t>(idx));
+            case DataType::UInt2:
+                return py::cast(b.get<std::uint8_t>(idx));
+            case DataType::Quad_UInt2:
+                return py::cast(b.get<std::uint8_t>(idx));
             case DataType::UInt16:
                 return py::cast(b.get<std::uint16_t>(idx));
             case DataType::UInt32:
@@ -380,6 +414,22 @@ void init_Tensor(py::module& m){
                 return py::cast(b.get<float>(coordIdx));
             case DataType::Int8:
                 return py::cast(b.get<std::int8_t>(coordIdx));
+            case DataType::Int4:
+                return py::cast(b.get<std::int8_t>(coordIdx));
+            case DataType::Dual_Int4:
+                return py::cast(b.get<std::int8_t>(coordIdx));
+            case DataType::Int3:
+                return py::cast(b.get<std::int8_t>(coordIdx));
+            case DataType::Dual_Int3:
+                return py::cast(b.get<std::int8_t>(coordIdx));
+            case DataType::Int2:
+                return py::cast(b.get<std::int8_t>(coordIdx));
+            case DataType::Quad_Int2:
+                return py::cast(b.get<std::int8_t>(coordIdx));
+            case DataType::Binary:
+                return py::cast(b.get<std::int8_t>(coordIdx));
+            case DataType::Octo_Binary:
+                return py::cast(b.get<std::int8_t>(coordIdx));
             case DataType::Int16:
                 return py::cast(b.get<std::int16_t>(coordIdx));
             case DataType::Int32:
@@ -388,6 +438,18 @@ void init_Tensor(py::module& m){
                 return py::cast(b.get<std::int64_t>(coordIdx));
             case DataType::UInt8:
                 return py::cast(b.get<std::uint8_t>(coordIdx));
+            case DataType::UInt4:
+                return py::cast(b.get<std::uint8_t>(coordIdx));
+            case DataType::Dual_UInt4:
+                return py::cast(b.get<std::uint8_t>(coordIdx));
+            case DataType::UInt3:
+                return py::cast(b.get<std::uint8_t>(coordIdx));
+            case DataType::Dual_UInt3:
+                return py::cast(b.get<std::uint8_t>(coordIdx));
+            case DataType::UInt2:
+                return py::cast(b.get<std::uint8_t>(coordIdx));
+            case DataType::Quad_UInt2:
+                return py::cast(b.get<std::uint8_t>(coordIdx));
             case DataType::UInt16:
                 return py::cast(b.get<std::uint16_t>(coordIdx));
             case DataType::UInt32:
@@ -410,6 +472,30 @@ void init_Tensor(py::module& m){
             case DataType::Int8:
                 b.set(idx, castToNativeType<std::int8_t>(val));
                 break;
+            case DataType::Int4:
+                b.set(idx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Dual_Int4:
+                b.set(idx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Int3:
+                b.set(idx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Dual_Int3:
+                b.set(idx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Int2:
+                b.set(idx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Quad_Int2:
+                b.set(idx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Binary:
+                b.set(idx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Octo_Binary:
+                b.set(idx, castToNativeType<std::int8_t>(val));
+                break;
             case DataType::Int16:
                 b.set(idx, castToNativeType<std::int16_t>(val));
                 break;
@@ -422,6 +508,24 @@ void init_Tensor(py::module& m){
             case DataType::UInt8:
                 b.set(idx, castToNativeType<std::uint8_t>(val));
                 break;
+            case DataType::UInt4:
+                b.set(idx, castToNativeType<std::uint8_t>(val));
+                break;
+            case DataType::Dual_UInt4:
+                b.set(idx, castToNativeType<std::uint8_t>(val));
+                break;
+            case DataType::UInt3:
+                b.set(idx, castToNativeType<std::uint8_t>(val));
+                break;
+            case DataType::Dual_UInt3:
+                b.set(idx, castToNativeType<std::uint8_t>(val));
+                break;
+            case DataType::UInt2:
+                b.set(idx, castToNativeType<std::uint8_t>(val));
+                break;
+            case DataType::Quad_UInt2:
+                b.set(idx, castToNativeType<std::uint8_t>(val));
+                break;
             case DataType::UInt16:
                 b.set(idx, castToNativeType<std::uint16_t>(val));
                 break;
@@ -448,6 +552,30 @@ void init_Tensor(py::module& m){
             case DataType::Int8:
                 b.set(coordIdx, castToNativeType<std::int8_t>(val));
                 break;
+            case DataType::Int4:
+                b.set(coordIdx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Dual_Int4:
+                b.set(coordIdx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Int3:
+                b.set(coordIdx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Dual_Int3:
+                b.set(coordIdx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Int2:
+                b.set(coordIdx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Quad_Int2:
+                b.set(coordIdx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Binary:
+                b.set(coordIdx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Octo_Binary:
+                b.set(coordIdx, castToNativeType<std::int8_t>(val));
+                break;
             case DataType::Int16:
                 b.set(coordIdx, castToNativeType<std::int16_t>(val));
                 break;
@@ -460,6 +588,24 @@ void init_Tensor(py::module& m){
             case DataType::UInt8:
                 b.set(coordIdx, castToNativeType<std::uint8_t>(val));
                 break;
+            case DataType::UInt4:
+                b.set(coordIdx, castToNativeType<std::uint8_t>(val));
+                break;
+            case DataType::Dual_UInt4:
+                b.set(coordIdx, castToNativeType<std::uint8_t>(val));
+                break;
+            case DataType::UInt3:
+                b.set(coordIdx, castToNativeType<std::uint8_t>(val));
+                break;
+            case DataType::Dual_UInt3:
+                b.set(coordIdx, castToNativeType<std::uint8_t>(val));
+                break;
+            case DataType::UInt2:
+                b.set(coordIdx, castToNativeType<std::uint8_t>(val));
+                break;
+            case DataType::Quad_UInt2:
+                b.set(coordIdx, castToNativeType<std::uint8_t>(val));
+                break;
             case DataType::UInt16:
                 b.set(coordIdx, castToNativeType<std::uint16_t>(val));
                 break;
@@ -497,6 +643,48 @@ void init_Tensor(py::module& m){
             case DataType::Float32:
                 dataFormatDescriptor = py::format_descriptor<float>::format();
                 break;;
+            case DataType::Int4:
+                dataFormatDescriptor = py::format_descriptor<std::int8_t>::format();
+                break;
+            case DataType::UInt4:
+                dataFormatDescriptor = py::format_descriptor<std::uint8_t>::format();
+                break;
+            case DataType::Int3:
+                dataFormatDescriptor = py::format_descriptor<std::int8_t>::format();
+                break;
+            case DataType::UInt3:
+                dataFormatDescriptor = py::format_descriptor<std::uint8_t>::format();
+                break;
+            case DataType::Int2:
+                dataFormatDescriptor = py::format_descriptor<std::int8_t>::format();
+                break;
+            case DataType::UInt2:
+                dataFormatDescriptor = py::format_descriptor<std::uint8_t>::format();
+                break;
+            case DataType::Dual_Int4:
+                dataFormatDescriptor = py::format_descriptor<std::int8_t>::format();
+                break;
+            case DataType::Dual_UInt4:
+                dataFormatDescriptor = py::format_descriptor<std::uint8_t>::format();
+                break;
+            case DataType::Dual_Int3:
+                dataFormatDescriptor = py::format_descriptor<std::int8_t>::format();
+                break;
+            case DataType::Dual_UInt3:
+                dataFormatDescriptor = py::format_descriptor<std::uint8_t>::format();
+                break;
+            case DataType::Quad_Int2:
+                dataFormatDescriptor = py::format_descriptor<std::int8_t>::format();
+                break;
+            case DataType::Quad_UInt2:
+                dataFormatDescriptor = py::format_descriptor<std::uint8_t>::format();
+                break;
+            case DataType::Binary:
+                dataFormatDescriptor = py::format_descriptor<std::int8_t>::format();
+                break;
+            case DataType::Octo_Binary:
+                dataFormatDescriptor = py::format_descriptor<std::int8_t>::format();
+                break;
             case DataType::Int8:
                 dataFormatDescriptor = py::format_descriptor<std::int8_t>::format();
                 break;
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 60d80e783d2e7d2e50d5f832b3508bf065edb707..d1eec290741ac6477f3d75ffb4457ea4d21421cd 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -127,7 +127,66 @@ void init_GraphView(py::module& m) {
           .def("clone", &GraphView::clone)
           .def("get_nodes", &GraphView::getNodes)
           .def("get_node", &GraphView::getNode, py::arg("node_name"))
-          .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>(), py::arg("allow_data_dependency") = false)
+          .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>(), py::arg("allow_data_dependency") = false,
+          R"mydelimiter(
+            Compute and propagate Tensor dimensions through the GraphView.
+
+            This function computes dimensions of input/output Tensors for each of the
+            Node's associated Operator in the GraphView by propagating dimensions from
+            inputs through the entire network.
+            It handles:
+
+            * Dimension propagation in dependency order
+            * Cyclic dependencies (specifically for Memorize_Op)
+            * Input dimension validation and setting
+            * Optional vs mandatory inputs
+
+            Note
+            ----
+            Dimensions will be propagated through every Node regardless of
+            whether dims were previously forwarded or not.
+
+            The algorithm works in several phases:
+
+            1. Input Dimension Setup:
+                * Validates/sets provided input dimensions
+                * Checks compatibility with existing tensors
+
+            2. Connection Verification:
+                * Ensures all node connections are valid
+                * Verifies mandatory inputs are present
+
+            3. Dimension Propagation:
+                * Propagates dimensions through the graph in topological order
+                * Detects and handles circular dependencies induced by Memorize_Op
+
+            Parameters
+            ----------
+            dims : List[List[int]], optional
+                Vector of dimension vectors for graph inputs. Empty by default.
+            allow_data_dependency : bool, optional
+                Whether to allow data-dependent dimension computation, by default False
+
+            Returns
+            -------
+            bool
+                True if dimension propagation succeeded, False otherwise.
+
+            Examples
+            --------
+            >>> graph = GraphView()
+            >>> # ... build graph ...
+            >>>
+            >>> # Forward with default empty dimensions
+            >>> success = graph.forward_dims()
+            >>>
+            >>> # Forward with specific input dimensions
+            >>> input_dims = [
+            ...     [1, 3, 224, 224],  # First input
+            ...     [1, 64, 112, 112]  # Second input
+            ... ]
+            >>> success = graph.forward_dims(input_dims)
+          )mydelimiter")
           .def("compile", &GraphView::compile, py::arg("backend"), py::arg("datatype"), py::arg("device") = 0, py::arg("dims")=std::vector<std::vector<DimSize_t>>())
           .def("__call__", &GraphView::operator(), py::arg("connectors"))
           .def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
@@ -151,6 +210,7 @@ void init_GraphView(py::module& m) {
           .def("get_ranked_nodes", &GraphView::getRankedNodes)
           .def("get_ranked_nodes_name", &GraphView::getRankedNodesName, py::arg("format"), py::arg("mark_non_unicity") = true)
           .def("set_dataformat", &GraphView::setDataFormat, py::arg("dataformat"))
+          .def("update_inputs_outputs", &GraphView::updateInputsOutputs)
             ;
 
      m.def("get_connected_graph_view", &getConnectedGraphView);
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 1f67b48a08d2fbe5db39436f599ecfc0f236268c..efa18d839b95326c5e4f1c18529e9064c7a598d0 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -66,7 +66,7 @@ void init_Node(py::module& m) {
     .def("__repr__", &Node::repr)
 
     .def("add_child",
-         (void (Node::*)(std::shared_ptr<Node>, const IOIndex_t, IOIndex_t)) &
+         (void (Node::*)(const std::shared_ptr<Node>&, const IOIndex_t, IOIndex_t)) &
                  Node::addChild,
          py::arg("other_node"), py::arg("out_id") = 0, py::arg("other_in_id") = gk_IODefaultIndex,
     R"mydelimiter(
diff --git a/python_binding/graphRegex/pybind_GraphRegex.cpp b/python_binding/graphRegex/pybind_GraphRegex.cpp
deleted file mode 100644
index 921f204d017f90823b9c4a1a024efa271a4691e5..0000000000000000000000000000000000000000
--- a/python_binding/graphRegex/pybind_GraphRegex.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <pybind11/pybind11.h>
-#include <pybind11/functional.h>
-#include "aidge/graphRegex/GraphRegex.hpp"
-
-namespace py = pybind11;
-namespace Aidge {
-void init_GraphRegex(py::module& m){
-
-
-    py::class_<GraphRegex, std::shared_ptr<GraphRegex>>(m, "GraphRegex", "GraphRegex class describes a regex to test a graph.")
-    .def(py::init<>())
-
-    .def("add_query", &GraphRegex::addQuery, py::arg("query"), py::arg("f") = nullptr, R"mydelimiter(
-    :rtype: str
-    )mydelimiter")
-
-    .def("set_key_from_graph", &GraphRegex::setKeyFromGraph, R"mydelimiter(
-    :param ref: The graph use to define type of Node.
-    :type ref: :py:class:`aidge_core.GraphView`
-    )mydelimiter")
-
-//      void setNodeKey(const std::string key, const std::string conditionalExpressions );
-//  void setNodeKey(const std::string key,std::function<bool(NodePtr)> f);
-
-    .def("match", &GraphRegex::match, R"mydelimiter(
-    :param graphToMatch: The graph to perform the matching algorithm on.
-    :type graphToMatch: :py:class:`aidge_core.GraphView`
-    )mydelimiter")
-
-
-
-    .def("set_node_key",
-            (void (GraphRegex::*)(const std::string, const std::string )) &
-                    GraphRegex::setNodeKey,
-            py::arg("key"), py::arg("conditionalExpressions"),
-    R"mydelimiter(
-    Add a node test
-    :param key: the key of the node test to use in the query.
-    :param conditionalExpressions: the test to do .
-
-    )mydelimiter")
-
-
-    .def("set_node_key",
-            (void (GraphRegex::*)(const std::string, std::function<bool(NodePtr)>)) &
-                    GraphRegex::setNodeKey,
-            py::arg("key"), py::arg("f"),
-    R"mydelimiter(
-    Add a node test
-    :param key: the key of the lambda test to use in the conditional expressions.
-    :param f: bool lambda (nodePtr) .
-
-    )mydelimiter")
-
-
-
-    ;
-}
-}
diff --git a/python_binding/graphRegex/pybind_MatchSolution.cpp b/python_binding/graphRegex/pybind_MatchSolution.cpp
deleted file mode 100644
index 81d39f86ed6da5b7b63a2d43b3a4fcbb2f8e9043..0000000000000000000000000000000000000000
--- a/python_binding/graphRegex/pybind_MatchSolution.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#include "aidge/graphRegex/matchFsm/MatchResult.hpp"
-
-namespace py = pybind11;
-namespace Aidge {
-void init_MatchSolution(py::module& m){
-
-
-    py::class_<MatchSolution, std::shared_ptr<MatchSolution>>(m, "MatchSolution", "MatchSolution class contains the result of one match and the associated key, the query and the start node.")
-    .def("at", &MatchSolution::at, py::arg("key"),
-    R"mydelimiter(
-    :rtype: str
-    )mydelimiter")
-
-    .def("get_all",  &MatchSolution::getAll,
-    R"mydelimiter(
-    )mydelimiter")
-
-    .def("get_query",  &MatchSolution::getQuery,
-    R"mydelimiter(
-    )mydelimiter")
-
-    .def("get_start_node",  &MatchSolution::getStartNode,
-    R"mydelimiter(
-    )mydelimiter")
-    ;
-}
-} // namespace Aidge
diff --git a/python_binding/operator/pybind_Abs.cpp b/python_binding/operator/pybind_Abs.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e8ae1c26e5c454cb130c55abd3a77b3ca7d5b7ff
--- /dev/null
+++ b/python_binding/operator/pybind_Abs.cpp
@@ -0,0 +1,31 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Abs.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Abs(py::module& m) {
+    py::class_<Abs_Op, std::shared_ptr<Abs_Op>, OperatorTensor>(m, "AbsOp", py::multiple_inheritance())
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Abs_Op::getInputsName)
+    .def_static("get_outputs_name", &Abs_Op::getOutputsName)
+    .def_readonly_static("Type", &Abs_Op::Type);
+    declare_registrable<Abs_Op>(m, "AbsOp");
+
+    m.def("Abs", &Abs, py::arg("name") = "");
+}
+}  // namespace Aidge
\ No newline at end of file
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index f8adfd5f4becb7677b3a59791f8549bb114fbbc4..cd85c73d3fd1a4181f7042bf0495f09046817fc4 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -21,7 +21,19 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_Add(py::module &m) {
-  py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
+  py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize an Add operator.
+    This operator performs element-wise addition between two input tensors.
+    The operation is defined as:
+        Output = Input1 + Input2
+    The output tensor shape is determined by taking the maximum size along each dimension of the input tensors after broadcasting.
+    Examples:
+        Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+        Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+    :param name : Name of the node (optional).
+    :type name : str
+    )mydelimiter")
     .def(py::init<>())
     .def_static("get_inputs_name", &Add_Op::getInputsName)
     .def_static("get_outputs_name", &Add_Op::getOutputsName)
@@ -29,7 +41,21 @@ void declare_Add(py::module &m) {
 
   declare_registrable<Add_Op>(m, "AddOp");
 
-  m.def("Add", &Add, py::arg("name") = "");
+  m.def("Add", &Add, py::arg("name") = "",
+    R"mydelimiter(
+    Initialize a node containing an Add operator that performs element-wise addition between two tensors.
+    The operation is defined as:
+        Output = Input1 + Input2
+    The output tensor shape is determined by taking the maximum size along each dimension of the input tensors after broadcasting.
+    Examples:
+        Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+        Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+
+    :param name : Name of the node (optional).
+    :type name : str
+    :return: A node containing the Add operator.
+    :rtype: :py:class:`AddOp`
+    )mydelimiter");
 }
 
 void init_Add(py::module &m) {
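For reference, the same factory is available from C++; a hedged sketch creating the node that the binding above exposes (header paths assumed from the aidge_core layout):

    #include <memory>

    #include "aidge/graph/Node.hpp"
    #include "aidge/operator/Add.hpp"

    int main() {
        // Creates a Node wrapping an Add_Op; its two inputs broadcast as described
        // in the docstring, e.g. (3, 4, 2) + (2) -> (3, 4, 2).
        std::shared_ptr<Aidge::Node> add = Aidge::Add("add0");
        return add ? 0 : 1;
    }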
diff --git a/python_binding/operator/pybind_And.cpp b/python_binding/operator/pybind_And.cpp
index 08dddfc8168bb77086a3dd72aca45b110a4cbce9..13c0f9085c0f2cb0c5e278e23a7005a5aa3470f2 100644
--- a/python_binding/operator/pybind_And.cpp
+++ b/python_binding/operator/pybind_And.cpp
@@ -20,15 +20,35 @@ namespace Aidge {
 
 void init_And(py::module& m) {
     py::class_<And_Op, std::shared_ptr<And_Op>, OperatorTensor>(m, "AndOp", py::multiple_inheritance(),
-          R"mydelimiter( Initialize an And operator.)mydelimiter")
+        R"mydelimiter(
+        Initialize an And operator.
+        This operator performs element-wise logical AND between two input tensors. The operation is defined as:
+            Output = Input1 AND Input2
+        The inputs must be boolean tensors (with values 0 or 1).
+
+        :param name : Name of the node (optional).
+        :type name : str
+        )mydelimiter")
     .def(py::init<>())
     .def_static("get_inputs_name", &And_Op::getInputsName)
     .def_static("get_outputs_name", &And_Op::getOutputsName);
+
     declare_registrable<And_Op>(m, "AndOp");
+
     m.def("And", &And, py::arg("name") = "",
-	   R"mydelimiter(
-        Initialize a node containing an And operator.
-			:param name : name of the node.
-		)mydelimiter");
+        R"mydelimiter(
+        Initialize a node containing an And operator that performs element-wise logical AND between two tensors.
+        The operation is defined as:
+            Output = Input1 AND Input2
+
+        The inputs must be boolean tensors (with values 0 or 1).
+
+        :param name : Name of the node (optional).
+        :type name : str
+        :return: A node containing the And operator.
+        :rtype: :py:class:`AndOp`
+        )mydelimiter");
 }
+
 }  // namespace Aidge
+
diff --git a/python_binding/operator/pybind_Atan.cpp b/python_binding/operator/pybind_Atan.cpp
index e9e277fcc336b8d4436aaf8dc1a834e666e400ae..6f2e00333e674998beebc0d3655c1a279eae2036 100644
--- a/python_binding/operator/pybind_Atan.cpp
+++ b/python_binding/operator/pybind_Atan.cpp
@@ -19,13 +19,27 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Atan(py::module& m) {
-    py::class_<Atan_Op, std::shared_ptr<Atan_Op>, OperatorTensor>(m, "AtanOp", py::multiple_inheritance())
+    py::class_<Atan_Op, std::shared_ptr<Atan_Op>, OperatorTensor>(m, "AtanOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize an Atan operator.
+
+        This operator computes the element-wise arctangent (inverse tangent) of the input tensor:
+            Output = atan(Input)
+        )mydelimiter")
     .def(py::init<>())
     .def_static("get_inputs_name", &Atan_Op::getInputsName)
     .def_static("get_outputs_name", &Atan_Op::getOutputsName);
 
     declare_registrable<Atan_Op>(m, "AtanOp");
 
-    m.def("Atan", &Atan, py::arg("name") = "");
+    m.def("Atan", &Atan, py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing an Atan operator.
+
+        :param name : Name of the node.
+        :type name : :py:class:`str`
+        )mydelimiter");
 }
+
 }  // namespace Aidge
+
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 2d137c4de0c62ff1f8391b03cb07bc9230b7c9eb..24549e3f4f331ee1170a07e61a6190a607274fe3 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -26,24 +26,32 @@
 namespace py = pybind11;
 namespace Aidge {
 
+// Template function for declaring AvgPooling operator for different dimensions
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
 
   const std::string pyClassName("AvgPooling" + std::to_string(DIM) + "DOp");
   const std::string pyStaticAttrClassName("StaticAttributes" + pyClassName);
-//   py::class_<StaticAttributes<AvgPoolingAttr,
-//                                              std::array<DimSize_t, DIM>,
-//                                              std::array<DimSize_t, DIM>>,
-//     std::shared_ptr<StaticAttributes<AvgPoolingAttr,
-//                                              std::array<DimSize_t, DIM>,
-//                                              std::array<DimSize_t, DIM>>>, Attributes>(m, pyStaticAttrClassName.c_str());
-
+
   py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor>(
         m, pyClassName.c_str(),
-        py::multiple_inheritance())
+        py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize an AvgPooling operator for a tensor.
+
+        This operator performs average pooling on the input tensor using the specified kernel dimensions 
+        and stride dimensions.
+
+        :param kernel_dims: The size of the kernel (filter) applied during pooling. 
+                             Specifies the dimensions of the kernel (e.g., [3, 3] for 2D pooling).
+        :type kernel_dims: List[int]
+        :param stride_dims: The stride of the pooling operation. Specifies how much the kernel moves in each step.
+                             By default, the stride is set to 1 for all dimensions.
+        :type stride_dims: List[int], optional
+        )mydelimiter")
     .def(py::init<const std::array<DimSize_t, DIM> &,
                   const std::array<DimSize_t, DIM> &>(),
             py::arg("kernel_dims"),
-            py::arg("stride_dims") = create_array<DimSize_t,DIM>(1))
+            py::arg("stride_dims") = create_array<DimSize_t, DIM>(1))
     .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
     .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
     .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type);
@@ -52,21 +60,30 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
 
   m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
-                                                                  const std::vector<DimSize_t> &stride_dims) {
+                                                                  const std::vector<DimSize_t>& stride_dims) {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
 
         return AvgPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()));
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
-       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
-}
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM, 1),
+       R"mydelimiter(
+        Initialize a node containing an AvgPooling operator.
+
+        This function performs average pooling on the tensor with the given kernel and stride dimensions.
 
+        :param kernel_dims: Size of the kernel applied during pooling.
+        :type kernel_dims: List[int]
+        :param name: Name of the operator node (optional).
+        :type name: str
+        :param stride_dims: Stride dimensions for the pooling operation.
+        :type stride_dims: List[int], optional
+        )mydelimiter");
+}
 
+// Initialize the AvgPooling operator for different dimensions
 void init_AvgPooling(py::module &m) {
-//   py::enum_<AvgPoolingAttr>(m, "_AvgPoolingAttr")
-    // .value("kernel_dims", AvgPoolingAttr::KernelDims)
-    // .value("stride_dims", AvgPoolingAttr::StrideDims);
   declare_AvgPoolingOp<1>(m);
   declare_AvgPoolingOp<2>(m);
   declare_AvgPoolingOp<3>(m);
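As a hedged illustration of the AvgPooling2D factory documented above (the aidge_core import name and the parameter values are assumptions, not part of this diff):

    import aidge_core

    # 3x3 average pooling with a stride of 2 in both spatial dimensions.
    pool_node = aidge_core.AvgPooling2D(kernel_dims=[3, 3],
                                        name="pool0",
                                        stride_dims=[2, 2])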
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index c380f594038fe7f77d1c01a4e9c8285ccf5ad85a..3339db0f2dbf7a82f2a0833bb468941d4dbef6c5 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -25,7 +25,17 @@ template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
     const std::string pyClassName("BatchNorm" + std::to_string(DIM) + "DOp");
     py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor>(
-    m, pyClassName.c_str(), py::multiple_inheritance())
+    m, pyClassName.c_str(), py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize a BatchNorm operator.
+
+    :param epsilon : A small value added to the denominator for numerical stability.
+    :type epsilon : :py:class:`float`
+    :param momentum : The momentum factor for the moving averages.
+    :type momentum : :py:class:`float`
+    :param training_mode : Whether the operator is in training mode (for batch statistics) or inference mode (for using the moving averages).
+    :type training_mode : :py:class:`bool`
+    )mydelimiter")
         .def(py::init<float, float, bool>(),
             py::arg("epsilon"),
             py::arg("momentum"),
@@ -36,7 +46,21 @@ void declare_BatchNormOp(py::module& m) {
 
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
-    m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nb_features"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("training_mode") = false, py::arg("name") = "");
+    m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nb_features"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("training_mode") = false, py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing a BatchNorm operator.
+
+        :param nb_features : The number of features in the input tensor.
+        :type nb_features : :py:class:`int`
+        :param epsilon : A small value added to the denominator for numerical stability.
+        :type epsilon : :py:class:`float`
+        :param momentum : The momentum factor for the moving averages.
+        :type momentum : :py:class:`float`
+        :param training_mode : Whether the operator is in training mode or inference mode.
+        :type training_mode : :py:class:`bool`
+        :param name : Name of the node.
+        :type name : :py:class:`str`
+        )mydelimiter");
 }
 
 void init_BatchNorm(py::module &m) {
@@ -44,3 +68,4 @@ void init_BatchNorm(py::module &m) {
 }
 
 }  // namespace Aidge
+
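A small usage sketch of the BatchNorm2D factory bound above (aidge_core import name assumed; parameter values are illustrative and simply spell out the documented defaults):

    import aidge_core

    # Batch normalization over 64 features, in inference mode.
    bn_node = aidge_core.BatchNorm2D(nb_features=64,
                                     epsilon=1.0e-5,
                                     momentum=0.1,
                                     training_mode=False,
                                     name="bn0")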
diff --git a/python_binding/operator/pybind_Clip.cpp b/python_binding/operator/pybind_Clip.cpp
index 27c47811a3c192f1f657f339fe4caf047a944224..7c4563a98244e108d274ecb22562c497243fc1bc 100644
--- a/python_binding/operator/pybind_Clip.cpp
+++ b/python_binding/operator/pybind_Clip.cpp
@@ -21,28 +21,39 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Clip(py::module& m) {
-    py::class_<Clip_Op, std::shared_ptr<Clip_Op>, OperatorTensor>(m, "ClipOp", py::multiple_inheritance())
+    py::class_<Clip_Op, std::shared_ptr<Clip_Op>, OperatorTensor>(m, "ClipOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Clip operator.
+
+        :param min : Minimum clipping value. Default is the lowest possible float value.
+        :type min : :py:class:`float`
+        :param max : Maximum clipping value. Default is the highest possible float value.
+        :type max : :py:class:`float`
+        )mydelimiter")
     .def(py::init<float, float>(), py::arg("min") = std::numeric_limits<float>::lowest(), py::arg("max") = std::numeric_limits<float>::max())
     .def_static("get_inputs_name", &Clip_Op::getInputsName)
     .def_static("get_outputs_name", &Clip_Op::getOutputsName)
-    .def("min",&Clip_Op::min,py::return_value_policy::reference_internal)
-    .def("max",&Clip_Op::max,py::return_value_policy::reference_internal);
-    
-    
+    .def("min", &Clip_Op::min, py::return_value_policy::reference_internal)
+    .def("max", &Clip_Op::max, py::return_value_policy::reference_internal);
+
     declare_registrable<Clip_Op>(m, "ClipOp");
-   
-    m.def("Clip", &Clip,py::arg("name") = "",
-    py::arg("min")= std::numeric_limits<float>::lowest(),
-    py::arg("max")= std::numeric_limits<float>::max(),
-    R"mydelimiter(ClipOp is a tensor operator that performs a clipping operation on tensor elements.
-        This class allows limiting tensor values to a specified range, defined by the min 
-        and max parameters. Values outside this range are replaced by the corresponding 
-        limit values. When 'min' is greater than 'max', the clip operator sets all the 'input' values to the value of 'max'
-        :param min: minimum clipping value.
-        :type min: float
-        :param max: maximum clipping value.
-        :type max: float
-        :param name: name of the node.
-    )mydelimiter");
+
+    m.def("Clip", &Clip, py::arg("name") = "",
+        py::arg("min") = std::numeric_limits<float>::lowest(),
+        py::arg("max") = std::numeric_limits<float>::max(),
+        R"mydelimiter(
+        ClipOp is a tensor operator that performs a clipping operation on tensor elements.
+        This class allows limiting tensor values to a specified range, defined by the `min`
+        and `max` parameters. Values outside this range are replaced by the corresponding
+        limit values. When `min` is greater than `max`, the clip operator sets all the input values to the value of `max`.
+
+        :param min: Minimum clipping value.
+        :type min: :py:class:`float`
+        :param max: Maximum clipping value.
+        :type max: :py:class:`float`
+        :param name: Name of the node.
+        :type name: :py:class:`str`
+        )mydelimiter");
 }
+
 }  // namespace Aidge
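An illustrative call to the Clip factory documented above (aidge_core import name assumed; bounds are arbitrary example values):

    import aidge_core

    # Clamp tensor values into [0, 6]; values outside the range are replaced
    # by the corresponding bound, as described in the docstring.
    clip_node = aidge_core.Clip(name="clip0", min=0.0, max=6.0)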
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 854f3783e9961bb5fd29746b88352438a43dd6e4..9e1b3de9e7b1f6bd8c84779196c1918294cedb18 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -20,17 +20,35 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Concat(py::module& m) {
-    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
+    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Concat operator.
+
+        :param nb_inputs : The number of input tensors to concatenate.
+        :type nb_inputs : :py:class:`int`
+        :param axis : The axis along which to concatenate the tensors.
+        :type axis : :py:class:`int`
+        )mydelimiter")
         .def(py::init<const IOIndex_t, const int>(),
-                py::arg("nb_inputs"),
-                py::arg("axis"))
+             py::arg("nb_inputs"),
+             py::arg("axis"))
         .def_static("get_inputs_name", &Concat_Op::getInputsName)
         .def_static("get_outputs_name", &Concat_Op::getOutputsName)
         .def_readonly_static("Type", &Concat_Op::Type);
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
 
-    m.def("Concat", &Concat, py::arg("nb_inputs"), py::arg("axis"), py::arg("name") = "");
+    m.def("Concat", &Concat, py::arg("nb_inputs"), py::arg("axis"), py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing a Concat operator.
+
+        :param nb_inputs : The number of input tensors to concatenate.
+        :type nb_inputs : :py:class:`int`
+        :param axis : The axis along which to concatenate the tensors.
+        :type axis : :py:class:`int`
+        :param name : Name of the node.
+        :type name : :py:class:`str`
+        )mydelimiter");
 }
 
 }  // namespace Aidge
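A minimal sketch of the Concat factory bound above (aidge_core import name assumed):

    import aidge_core

    # Concatenate two input tensors along axis 1.
    concat_node = aidge_core.Concat(nb_inputs=2, axis=1, name="concat0")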
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
index 189337a384d55c91f6aceeb97c530ed92ef7b4d0..07079d98301f0f778185d0fb70f6d38b18aec5e8 100644
--- a/python_binding/operator/pybind_ConstantOfShape.cpp
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -23,22 +23,31 @@ namespace Aidge {
 
 void init_ConstantOfShape(py::module &m) {
   py::class_<ConstantOfShape_Op, std::shared_ptr<ConstantOfShape_Op>, OperatorTensor>(
-      m, "ConstantOfShapeOp", py::multiple_inheritance())
-      // Here we bind the methods of the Unsqueeze_Op that will want to access
+      m, "ConstantOfShapeOp", py::multiple_inheritance(),
+      R"mydelimiter(
+      Initialize a ConstantOfShape operator.
+
+      :param value : Tensor with a given datatype that contains the value 
+                     that will fill the output tensor.
+      :type value : :py:class:`Tensor`
+      )mydelimiter")
       .def("get_inputs_name", &ConstantOfShape_Op::getInputsName)
       .def("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
       .def("value", &ConstantOfShape_Op::value);
-  // Here we bind the constructor of the ConstantOfShape Node. We add an argument for
-  // each attribute of the operator (in here we only have 'axes') and the last
-  // argument is the node's name.
+
   m.def("ConstantOfShape", &ConstantOfShape, py::arg("value") = Tensor(0.f),
         py::arg("name") = "",
         R"mydelimiter(
-    Initialize a node containing an constantOfShape operator.
-	:param value : tensor with a given datatype that contains the value that will fill the output tensor
-	:type value  : :py:class: Tensor
-    :param name  : name of the node.
-)mydelimiter");
+        Initialize a node containing a ConstantOfShape operator.
+
+        :param value : Tensor with a given datatype that contains the value 
+                       that will fill the output tensor.
+        :type value : :py:class:`Tensor`
+        :param name  : Name of the node.
+        :type name : :py:class:`str`
+        )mydelimiter");
 }
+
 } // namespace Aidge
 
+
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 3bd54da74f90a38b0255e021234786cd21e6c8a3..6ab073be6a494e63bf045c40faed22fa17fafe8e 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -1,14 +1,3 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
 #include <string>
@@ -25,11 +14,22 @@
 namespace py = pybind11;
 namespace Aidge {
 
-template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
+template <DimIdx_t DIM> 
+void declare_ConvOp(py::module &m) {
   const std::string pyClassName("Conv" + std::to_string(DIM) + "DOp");
   py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
-    py::multiple_inheritance())
+    py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize a Convolution operator.
+
+    :param kernel_dims : The dimensions of the convolution kernel (filter size).
+    :type kernel_dims : List[int]
+    :param stride_dims : The stride size for the convolution.
+    :type stride_dims : List[int]
+    :param dilation_dims : The dilation size for the convolution.
+    :type dilation_dims : List[int]
+    )mydelimiter")
         .def(py::init([](const std::vector<DimSize_t>& kernel_dims,
                          const std::vector<DimSize_t> &stride_dims,
                          const std::vector<DimSize_t> &dilation_dims) {
@@ -62,13 +62,34 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
         return Conv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), noBias);
-    }, py::arg("in_channels"),
-       py::arg("out_channels"),
-       py::arg("kernel_dims"),
-       py::arg("name") = "",
-       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("no_bias") = false);
+    }, 
+    py::arg("in_channels"),
+    py::arg("out_channels"),
+    py::arg("kernel_dims"),
+    py::arg("name") = "",
+    py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+    py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+    py::arg("no_bias") = false,
+    R"mydelimiter(
+    Initialize a node containing a convolution operator.
+
+    :param in_channels : The number of input channels (depth of the input tensor).
+    :type in_channels : int
+    :param out_channels : The number of output channels (depth of the output tensor).
+    :type out_channels : int
+    :param kernel_dims : The dimensions of the convolution kernel (filter size).
+    :type kernel_dims : List[int]
+    :param name : The name of the operator (optional).
+    :type name : str
+    :param stride_dims : The stride size for the convolution (default is [1]).
+    :type stride_dims : List[int]
+    :param dilation_dims : The dilation size for the convolution (default is [1]).
+    :type dilation_dims : List[int]
+    :param no_bias : Whether to disable bias (default is False).
+    :type no_bias : bool
+    :return : A new Convolution operator node.
+    :rtype : :py:class:`ConvOp`
+    )mydelimiter");
 }
 
 
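A hedged usage sketch of the Conv2D factory documented above (the aidge_core import name and the channel/kernel values are illustrative assumptions):

    import aidge_core

    # 3x3 convolution mapping 3 input channels to 16 output channels,
    # keeping the default unit stride and dilation and the bias enabled.
    conv_node = aidge_core.Conv2D(in_channels=3,
                                  out_channels=16,
                                  kernel_dims=[3, 3],
                                  name="conv0")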
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index b69fee02a50b95e4abfda895580f3192e7876766..5e24431d7e7e230f04cb76237108a4efe97f117f 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -26,15 +26,32 @@
 namespace py = pybind11;
 namespace Aidge {
 
-template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
+// Template function to declare the ConvDepthWise operator for a specific dimensionality.
+template <DimIdx_t DIM>
+void declare_ConvDepthWiseOp(py::module &m) {
   const std::string pyClassName("ConvDepthWise" + std::to_string(DIM) + "DOp");
   py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
-    py::multiple_inheritance())
-  .def(py::init<const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &>(),
-        py::arg("kernel_dims"),
+    py::multiple_inheritance(), 
+    R"mydelimiter(
+    Initialize a Depthwise Convolution operator.
+
+    :param kernel_dims : The dimensions of the convolution kernel (filter size).
+    :type kernel_dims : List[int]
+    :param stride_dims : The stride size for the convolution.
+    :type stride_dims : List[int]
+    :param dilation_dims : The dilation size for the convolution.
+    :type dilation_dims : List[int]
+    )mydelimiter")
+  .def(py::init([](const std::array<DimSize_t, DIM> &kernel_dims,
+                   const std::array<DimSize_t, DIM> &stride_dims,
+                   const std::array<DimSize_t, DIM> &dilation_dims) {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+        return new ConvDepthWise_Op<DIM>(kernel_dims, stride_dims, dilation_dims);
+    }), py::arg("kernel_dims"),
         py::arg("stride_dims"),
         py::arg("dilation_dims"))
   .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
@@ -54,23 +71,39 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
         return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
-    }, py::arg("nb_channenls"),
-       py::arg("kernel_dims"),
-       py::arg("name") = "",
-       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("no_bias")= false);
+    }, 
+    py::arg("nb_channels"),
+    py::arg("kernel_dims"),
+    py::arg("name") = "",
+    py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+    py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+    py::arg("no_bias")= false,
+    R"mydelimiter(
+    Initialize a node containing a depthwise convolution operator.
 
+    :param nb_channels : The number of channels in the input tensor (i.e., depth of the tensor).
+    :type nb_channels : int
+    :param kernel_dims : The dimensions of the convolution kernel (filter size).
+    :type kernel_dims : List[int]
+    :param name : The name of the operator node (optional).
+    :type name : str
+    :param stride_dims : The stride size for the convolution (default is [1]).
+    :type stride_dims : List[int]
+    :param dilation_dims : The dilation size for the convolution (default is [1]).
+    :type dilation_dims : List[int]
+    :param no_bias : Whether to disable bias in the operation (default is False).
+    :type no_bias : bool
+    :return : A new Depthwise Convolution operator node.
+    :rtype : :py:class:`ConvDepthWiseOp`
+    )mydelimiter");
 }
 
-
+// Function to initialize the Depthwise Convolution operators for different dimensionalities.
 void init_ConvDepthWise(py::module &m) {
   declare_ConvDepthWiseOp<1>(m);
   declare_ConvDepthWiseOp<2>(m);
-//   declare_ConvDepthWiseOp<3>(m);
-
-  // FIXME:
-  // m.def("ConvDepthWise1D", static_cast<NodeAPI(*)(const char*, int, int, int const
-  // (&)[1])>(&ConvDepthWise));
+  // Uncomment the following line to add support for 3D convolution
+  // declare_ConvDepthWiseOp<3>(m);
 }
+
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index d2ad60725533be0b9db269ce5e022ac8560e1d91..f3cbdc5237306944242b21c7fa34319b097c2374 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -19,13 +19,41 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Div(py::module& m) {
-    py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance())
+    py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize a Div operator.
+    This operator performs element-wise division between two input tensors.
+    The operation is defined as:
+        Output = Input1 / Input2
+    The output tensor shape is determined by taking the maximum size along each dimension of the input tensors after broadcasting.
+    Examples:
+        Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+        Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+    :param name : Name of the node (optional).
+    :type name : str
+    )mydelimiter")
         .def(py::init<>())
         .def_static("get_inputs_name", &Div_Op::getInputsName)
         .def_static("get_outputs_name", &Div_Op::getOutputsName)
         .def_readonly_static("Type", &Div_Op::Type);
+
     declare_registrable<Div_Op>(m, "DivOp");
-    m.def("Div", &Div, py::arg("name") = "");
+
+    m.def("Div", &Div, py::arg("name") = "",
+    R"mydelimiter(
+    Initialize a node containing a Div operator that performs element-wise division between two tensors.
+    The operation is defined as:
+        Output = Input1 / Input2
+    The output tensor shape is determined by taking the maximum size along each dimension of the input tensors after broadcasting.
+    Examples:
+        Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+        Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+
+    :param name : Name of the node (optional).
+    :type name : str
+    :return: A node containing the Div operator.
+    :rtype: :py:class:`DivOp`
+    )mydelimiter");
 }
 
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index 6ca25f9569a53505385f37a02f3ab478a11f82a6..2a16131d4b99388dfd28a4d40ecb14bfb4d03f41 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -10,6 +10,8 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Erf.hpp"
@@ -19,7 +21,12 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Erf(py::module& m) {
-    py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance())
+    py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize an Erf operator, which computes the error function (erf) element-wise.
+        The error function (erf) is defined as:
+            erf(x) = (2 / sqrt(pi)) * integral from 0 to x of exp(-t^2) dt
+        )mydelimiter")
         .def(py::init<>())
         .def_static("get_inputs_name", &Erf_Op::getInputsName)
         .def_static("get_outputs_name", &Erf_Op::getOutputsName)
@@ -27,6 +34,16 @@ void init_Erf(py::module& m) {
 
     declare_registrable<Erf_Op>(m, "ErfOp");
 
-    m.def("Erf", &Erf, py::arg("name") = "");
+    m.def("Erf", &Erf, py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing an Erf operator that computes the error function (erf) element-wise.
+        The error function (erf) is computed element-wise as follows:
+            erf(x) = (2 / sqrt(pi)) * integral from 0 to x of exp(-t^2) dt
+        :param name : name of the node (optional).
+        :type name : str
+        :return : A node containing the Erf operator.
+        :rtype : :py:class:`ErfOp`
+        )mydelimiter");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Expand.cpp b/python_binding/operator/pybind_Expand.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ce8a32329ab51ea3086c689b0156b62244f752c2
--- /dev/null
+++ b/python_binding/operator/pybind_Expand.cpp
@@ -0,0 +1,73 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Expand.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+const std::string pyClassName("ExpandOp");
+void init_Expand(py::module &m) {
+    py::class_<Expand_Op, std::shared_ptr<Expand_Op>, OperatorTensor>(
+        m,
+        pyClassName.c_str(),
+        py::multiple_inheritance(),
+        R"mydelimiter(
+      Operator that broadcasts an input tensor to a larger provided
+      shape.
+
+      This operator takes an input tensor and expands (broadcasts) it
+      to a target shape while following ONNX broadcasting semantics.
+      Broadcasting allows a tensor to be repeated along dimensions to match
+      the desired output dimensions.
+
+      input#0 : Tensor to be broadcasted
+      input#1 : Expand shape
+
+      Example:
+          input#0 = [[[[2, 1, 3]]]] => dims = [1, 1, 3, 1]
+          input#1 = [2, 1, 1]       => converted to [1, 2, 1, 1]
+          output  = [[[[2, 1, 3], [2, 1, 3]]]] => dims = [1, 2, 3, 1]
+
+      See https://onnx.ai/onnx/repo-docs/Broadcasting.html for detailed ONNX
+      broadcasting rules.
+)mydelimiter")
+        .def(py::init<>())
+        .def_static("get_inputs_name", &Expand_Op::getInputsName)
+        .def_static("get_outputs_name", &Expand_Op::getOutputsName)
+        .def_readonly_static("Type", &Expand_Op::Type);
+
+    declare_registrable<Expand_Op>(m, pyClassName);
+
+    m.def("expand",
+          &Expand,
+          py::arg("name") = "",
+          R"mydelimiter(
+    Initialize a node containing an Expand operator.
+    This operator broadcasts the values given via input#0 to the shape given via input#1's values.
+    If one of the inputs has fewer dimensions than the other, its shape is padded with 1's on the left.
+
+    Example :
+            input#0 = [[[[2, 1, 3]]]] => dims = [1, 1, 3, 1]
+            input#1 = [2, 1, 1]       => converted to [1, 2, 1, 1]
+            output = [[[[2, 1, 3], [2, 1, 3]]]] => dims = [1, 2, 3, 1]
+
+    See https://onnx.ai/onnx/repo-docs/Broadcasting.html for detailed ONNX broadcasting rules
+
+    :param name : Name of the node.
+    :type name : str
+)mydelimiter");
+}
+
+} // namespace Aidge
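A minimal sketch of the expand factory added above (aidge_core import name assumed; note the factory itself only takes a name):

    import aidge_core

    # The tensor to broadcast (input#0) and the target shape (input#1) are
    # supplied as inputs once the node is wired into a graph.
    expand_node = aidge_core.expand(name="expand0")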
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 2e9c41a16292d1e643415182d660b80105369d33..c29b6e1d3723f03f6a9c9b1f03156b42160c6cf3 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -21,27 +21,38 @@
 namespace py = pybind11;
 namespace Aidge {
 
-
-
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor>(m, "FCOp", py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize a Fully Connected (FC) operator.
+
+    This operator computes a fully connected (dense) layer, multiplying the
+    input by a weight matrix and adding an optional bias.
+    )mydelimiter")
     .def(py::init<>())
     .def_static("get_inputs_name", &FC_Op::getInputsName)
     .def_static("get_outputs_name", &FC_Op::getOutputsName)
     .def_readonly_static("Type", &FC_Op::Type)
     .def("out_channels", &FC_Op::outChannels)
-    // .def_property_readonly("a", &FC_Op::get_a)
-    // .def_property_readonly("a", [](const FC_Op& self) {
-    //     const AttrDict a = AttrDict(self.get_a());
-    //     return a;
-    // })
     .def("__repr__", [](FC_Op& b) {
         return fmt::format("Operator(type='{}')", b.Type);
     });
 
   declare_registrable<FC_Op>(m, "FCOp");
 
-  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("no_bias") = false, py::arg("name") = "");
+  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("no_bias") = false, py::arg("name") = "",
+    R"mydelimiter(
+    Initialize a node containing a Fully Connected (FC) operator.
+
+    :param in_channels : The number of input channels (features).
+    :type in_channels : :py:class:`int`
+    :param out_channels : The number of output channels (features).
+    :type out_channels : :py:class:`int`
+    :param no_bias : Whether to include bias in the operation. Defaults to `False`.
+    :type no_bias : :py:class:`bool`
+    :param name : Name of the node.
+    :type name : :py:class:`str`
+    )mydelimiter");
 }
 
 void init_FC(py::module &m) {
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 0aac0bbad69abb5faaaea3afd0183573db64b31f..fed44a1e283649ab12eb7e57b599984caed764d5 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -21,7 +21,21 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Gather(py::module& m) {
-    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
+    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor>(m, "GatherOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Gather operator, which extracts elements from a tensor at specified indices along a given axis.
+        
+        This operation selects values along the specified axis based on the provided indices, which can be 
+        a 1D or multidimensional tensor. The resulting tensor will have the same shape as the input tensor 
+        except along the given axis, where the size will be determined by the indices.
+        
+        :param axis : Axis along which to gather the elements.
+        :type axis : int
+        :param indices : Indices to gather along the axis.
+        :type indices : :py:class:`List[int]`
+        :param gathered_shape : Shape of the gathered result.
+        :type gathered_shape : :py:class:`List[int]`
+        )mydelimiter")
         .def(py::init<std::int8_t,
                       const std::vector<int64_t>,
                       const std::vector<DimSize_t>>(),
@@ -34,7 +48,29 @@ void init_Gather(py::module& m) {
 
     declare_registrable<Gather_Op>(m, "GatherOp");
 
-    m.def("Gather", &Gather, py::arg("axis") = 0, py::arg("indices") = std::vector<std::int64_t>(), py::arg("gathered_shape") = std::vector<std::size_t>(), py::arg("name") = "");
+    m.def("Gather", &Gather, 
+          py::arg("axis") = 0,
+          py::arg("indices") = std::vector<std::int64_t>(),
+          py::arg("gathered_shape") = std::vector<std::size_t>(),
+          py::arg("name") = "",
+          R"mydelimiter(
+          Initialize a node containing a Gather operator that extracts elements from a tensor along a specified axis.
+          
+          This operation selects values along the specified axis using the provided indices. The resulting tensor
+          will have the same shape as the input tensor except along the given axis, where the size will be determined
+          by the indices.
+
+          :param axis : Axis along which to gather the elements (default is 0).
+          :type axis : int
+          :param indices : Indices to gather along the axis.
+          :type indices : :py:class:`List[int]`
+          :param gathered_shape : Shape of the gathered result.
+          :type gathered_shape : :py:class:`List[int]`
+          :param name : Name of the node (optional).
+          :type name : str
+          :return : A node containing the Gather operator.
+          :rtype : :py:class:`GatherOp`
+          )mydelimiter");
 }
 
 }  // namespace Aidge
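An illustrative call to the Gather factory documented above (aidge_core import name and the example values are assumptions):

    import aidge_core

    # Gather the elements at indices 0 and 2 along axis 1;
    # gathered_shape keeps its empty default here.
    gather_node = aidge_core.Gather(axis=1,
                                    indices=[0, 2],
                                    gathered_shape=[],
                                    name="gather0")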
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index f125291fafb89ec7ae81678a37e2bde2222a1054..f5ab29c679b7cbb06e5bd86876b63117fd8ce56d 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -39,6 +39,30 @@ void init_GenericOperator(py::module& m) {
         .def("set_forward_dims", &GenericOperator_Op::setForwardDims, py::arg("computation_function"));
 
     // &GenericOperator
+    m.def("GenericOperator",
+        []( const std::string& type,
+            const std::vector<Aidge::InputCategory>& inputCategory,
+            IOIndex_t nbOut,
+            const std::string& name,
+            const py::kwargs kwargs){
+            std::shared_ptr<Node> genericNode = GenericOperator(
+                type,
+                inputCategory,
+                nbOut,
+                name
+            );
+            if (kwargs){
+                std::shared_ptr<GenericOperator_Op> gop = std::static_pointer_cast<GenericOperator_Op>(genericNode->getOperator());
+                std::shared_ptr<DynamicAttributes> attr = std::dynamic_pointer_cast<DynamicAttributes>(gop->attributes());
+                for (auto item : kwargs) {
+                    std::string key = py::cast<std::string>(item.first);
+                    py::object value = py::reinterpret_borrow<py::object>(item.second);
+                    attr->setAttrPy(key, std::move(value));
+                }
+            }
+            return genericNode;
+        }, py::arg("type"), py::arg("input_category"), py::arg("nb_out"), py::arg("name") = "");
+
     m.def("GenericOperator",
         []( const std::string& type,
             IOIndex_t nbData,
@@ -65,6 +89,8 @@ void init_GenericOperator(py::module& m) {
             return genericNode;
         }, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), py::arg("name") = "");
 
+    m.def("GenericOperator", py::overload_cast<const std::string&, std::shared_ptr<OperatorTensor>, const std::string&>(&GenericOperator), py::arg("type"), py::arg("op"), py::arg("name") = "");
+
     declare_registrable<GenericOperator_Op>(m, "GenericOperatorOp");
 }
 }  // namespace Aidge
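A hedged sketch of the new kwargs-aware GenericOperator overload added above (the aidge_core import name and the InputCategory.Data enum spelling are assumptions not shown in this diff):

    import aidge_core

    # One Data input, one output; extra keyword arguments ("alpha" here) are
    # stored as dynamic attributes on the generic operator.
    node = aidge_core.GenericOperator("MyCustomOp",
                                      [aidge_core.InputCategory.Data],
                                      1,
                                      "custom0",
                                      alpha=0.5)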
diff --git a/python_binding/operator/pybind_GlobalAveragePooling.cpp b/python_binding/operator/pybind_GlobalAveragePooling.cpp
index f37ac11f5c62d0334e34aff59561b2014d1977bd..691456027bb1536c7b27d6ce9a3546dbf59cffb9 100644
--- a/python_binding/operator/pybind_GlobalAveragePooling.cpp
+++ b/python_binding/operator/pybind_GlobalAveragePooling.cpp
@@ -19,10 +19,21 @@ namespace py = pybind11;
 namespace Aidge {
 
 const std::string pyClassName("GlobalAveragePoolingOp");
+
 void init_GlobalAveragePooling(py::module &m) {
   py::class_<GlobalAveragePooling_Op, std::shared_ptr<GlobalAveragePooling_Op>,
              OperatorTensor>(m, pyClassName.c_str(),
-                             py::multiple_inheritance())
+                             py::multiple_inheritance(),
+                             R"mydelimiter(
+                             Initialize a Global Average Pooling operator.
+                             
+                             This operation performs global average pooling on an input tensor, where the input
+                             tensor is reduced across its spatial dimensions (height and width) to a single value
+                             per channel.
+                             
+                             :param name : Name of the node (optional).
+                             :type name : str
+                             )mydelimiter")
       .def(py::init<>())
       .def_static("get_inputs_name", &GlobalAveragePooling_Op::getInputsName)
       .def_static("get_outputs_name", &GlobalAveragePooling_Op::getOutputsName)
@@ -30,7 +41,19 @@ void init_GlobalAveragePooling(py::module &m) {
 
   declare_registrable<GlobalAveragePooling_Op>(m, pyClassName);
 
-  m.def("globalaveragepooling", &GlobalAveragePooling, py::arg("name") = "");
+  m.def("globalaveragepooling", &GlobalAveragePooling, py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing a Global Average Pooling operator.
+        
+        This operation performs global average pooling on the input tensor. The result is a tensor where
+        each channel of the input tensor is reduced to a single value by averaging all the elements in 
+        that channel.
+
+        :param name : Name of the node (optional).
+        :type name : str
+        :return : A node containing the Global Average Pooling operator.
+        :rtype : :py:class:`GlobalAveragePoolingOp`
+        )mydelimiter");
 }
 
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
index 69454c2abfa56a16dbc3f3638d68e98ce93d2538..3464941dda96a7ce0897f43b927c0ac3a79015c1 100644
--- a/python_binding/operator/pybind_GridSample.cpp
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -48,7 +48,14 @@ void declare_GridSampleOp(py::module &m) {
   const std::string pyClassName("GridSampleOp");
   py::class_<GridSample_Op, std::shared_ptr<GridSample_Op>, OperatorTensor>(
     m, pyClassName.c_str(),
-    py::multiple_inheritance())
+    py::multiple_inheritance(),
+    R"mydelimiter(
+    A class representing the GridSample operator, which performs grid sampling.
+
+    :param mode: Interpolation mode to use for sampling.
+    :param padding_mode: Padding mode for out-of-bound coordinates.
+    :param align_corners: Whether to align the corners of the grid.
+    )mydelimiter")
         .def(py::init([](const std::string& mode,
                          const std::string& padding_mode,
                          bool align_corners) {
@@ -71,7 +78,15 @@ void declare_GridSampleOp(py::module &m) {
     }, py::arg("mode"),
        py::arg("padding_mode"),
        py::arg("align_corners"),
-       py::arg("name") = "");
+       py::arg("name") = "",
+       R"mydelimiter(
+    Creates a GridSample operation.
+
+    :param mode: Interpolation mode to use for sampling.
+    :param padding_mode: Padding mode for out-of-bound coordinates.
+    :param align_corners: Whether to align the corners of the grid.
+    :param name: Name of the node.
+    )mydelimiter");
 }
 
 
diff --git a/python_binding/operator/pybind_Identity.cpp b/python_binding/operator/pybind_Identity.cpp
index 7599197226b2f8734c989755c6e7d3581a52974d..22ddf940213d3a18ec42a6cdbd346a47384a5a26 100644
--- a/python_binding/operator/pybind_Identity.cpp
+++ b/python_binding/operator/pybind_Identity.cpp
@@ -19,13 +19,21 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Identity(py::module& m) {
-    py::class_<Identity_Op, std::shared_ptr<Identity_Op>, OperatorTensor>(m, "IdentityOp", py::multiple_inheritance())
+    py::class_<Identity_Op, std::shared_ptr<Identity_Op>, OperatorTensor>(m, "IdentityOp", py::multiple_inheritance(),
+    R"mydelimiter(
+    A class representing the Identity operator, which returns the input as-is.
+    )mydelimiter")
         .def(py::init<>())
         .def_static("get_inputs_name", &Identity_Op::getInputsName)
         .def_static("get_outputs_name", &Identity_Op::getOutputsName)
         .def_readonly_static("Type", &Identity_Op::Type);
 
-    m.def("Identity", &Identity, py::arg("name") = "");
+    m.def("Identity", &Identity, py::arg("name") = "",
+    R"mydelimiter(
+    Creates an Identity operation, which returns the input as-is.
+
+    :param name: Name of the node.
+    )mydelimiter");
 }
 
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_LRN.cpp b/python_binding/operator/pybind_LRN.cpp
index 178af540b3efb280eaba67572e07cb673e563b22..bb04ed1c5d8ddf5ebade09e6bae07de454805119 100644
--- a/python_binding/operator/pybind_LRN.cpp
+++ b/python_binding/operator/pybind_LRN.cpp
@@ -20,12 +20,26 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_LRN(py::module& m) {
-    py::class_<LRN_Op, std::shared_ptr<LRN_Op>, OperatorTensor>(m, "LRNOp", py::multiple_inheritance())
+    py::class_<LRN_Op, std::shared_ptr<LRN_Op>, OperatorTensor>(m, "LRNOp", py::multiple_inheritance(),
+    R"mydelimiter(
+    A class representing the Local Response Normalization (LRN) operator.
+
+    This operator performs Local Response Normalization, which normalizes each element in the input tensor
+    based on its neighbors within a local region defined by the given size parameter.
+    )mydelimiter")
         .def(py::init<std::int32_t>(), py::arg("size"))
         .def_static("get_inputs_name", &LRN_Op::getInputsName)
         .def_static("get_outputs_name", &LRN_Op::getOutputsName)
         .def_readonly_static("Type", &LRN_Op::Type);
-    declare_registrable<LRN_Op>(m, "LRNOp");
-    m.def("LRN", &LRN, py::arg("size"), py::arg("name") = "");
+
+    declare_registrable<LRN_Op>(m, "LRNOp");
+    m.def("LRN", &LRN, py::arg("size"), py::arg("name") = "",
+    R"mydelimiter(
+    Create a node containing the Local Response Normalization operator.
+
+    :param size: The size of the local region for normalization.
+    :param name: The name of the node (optional).
+    )mydelimiter");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index e031d3dfb3348c5aec5bd497b40ff261528725ad..564fd90be04dd4639f7e00943533f190212d5808 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -19,13 +19,29 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance(),
+    R"mydelimiter(
+    A class representing the LeakyReLU operator.
+
+    The LeakyReLU activation function performs element-wise leaky ReLU:
+    f(x) = x if x > 0, else negative_slope * x.
+    The negative_slope parameter controls the angle of the negative part of the function.
+    )mydelimiter")
         .def(py::init<float>(), py::arg("negative_slope"))
         .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
         .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
         .def_readonly_static("Type", &LeakyReLU_Op::Type);
+
     declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
-    m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
+
+    m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "",
+    R"mydelimiter(
+    Create a LeakyReLU node with a specified negative slope.
+
+    :param negative_slope: The slope for the negative part of the function. Defaults to 0.0.
+    :param name: The name of the node.
+    )mydelimiter");
 }
 
 }  // namespace Aidge
+
diff --git a/python_binding/operator/pybind_Ln.cpp b/python_binding/operator/pybind_Ln.cpp
index 50aa755821c257c174c4603404144dab4da26296..61fc3583d478dddd9a9eb05101ce2e07e07e9759 100755
--- a/python_binding/operator/pybind_Ln.cpp
+++ b/python_binding/operator/pybind_Ln.cpp
@@ -19,12 +19,23 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Ln(py::module& m) {
-    py::class_<Ln_Op, std::shared_ptr<Ln_Op>, OperatorTensor>(m, "LnOp", py::multiple_inheritance())
+    py::class_<Ln_Op, std::shared_ptr<Ln_Op>, OperatorTensor>(m, "LnOp", py::multiple_inheritance(),
+    R"mydelimiter(
+    A class representing the natural logarithm operator (Ln).
+
+    The operator computes the element-wise natural logarithm of the input tensor.
+    )mydelimiter")
     .def(py::init<>())
     .def_static("get_inputs_name", &Ln_Op::getInputsName)
     .def_static("get_outputs_name", &Ln_Op::getOutputsName)
     .def_readonly_static("Type", &Ln_Op::Type);
 
-    m.def("Ln", &Ln, py::arg("name") = "");
+    m.def("Ln", &Ln, py::arg("name") = "",
+    R"mydelimiter(
+    Create a node with the natural logarithm operator.
+
+    :param name: The name of the node.
+    )mydelimiter");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index f4f175afcb35eb1c10dcd1a1d9d2f2b1691dcfc0..ea59955f35f5753aeab5b404165e0c5646b1c24a 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -21,12 +21,43 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_MatMul(py::module &m) {
-  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
+  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(
+    m, "MatMulOp", py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize a MatMul operator.
+    This operator performs matrix multiplication between two input tensors.
+    The operation is defined as:
+        Output = Input1 @ Input2
+    This operator implements generalized matrix multiplication, supporting batched 
+    matrix multiplication and broadcasting rules consistent with Numpy.
+
+    :param name: Optional name of the operator.
+    :type name: str
+    )mydelimiter")
     .def(py::init<>())
     .def_static("get_inputs_name", &MatMul_Op::getInputsName)
     .def_static("get_outputs_name", &MatMul_Op::getOutputsName)
     .def_readonly_static("Type", &MatMul_Op::Type);
+
   declare_registrable<MatMul_Op>(m, "MatMulOp");
-  m.def("MatMul", &MatMul, py::arg("name") = "");
+
+  m.def("MatMul", &MatMul, py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing a MatMul operator that performs matrix multiplication between two tensors.
+        The operation is defined as:
+          Output = Input1 @ Input2
+
+        This operator implements generalized matrix multiplication, supporting batched 
+        matrix multiplication and broadcasting rules consistent with Numpy.
+        Example:
+          Input A: (M, K), Input B: (K, N) -> Output: (M, N)
+          Input A: (batch_size, M, K), Input B: (K, N) -> Output: (batch_size, M, N)
+        :param name: Optional name of the node.
+        :type name: str
+        :return: A node containing the MatMul operator.
+        :rtype: :py:class:`MatMulOp`
+
+        )mydelimiter");
 }
+
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 00f6d26bd3ae49b26d72236b71372c1f557ddbdd..8834625a8b5790e146861274cf82d0608b637148 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -29,7 +29,17 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
   const std::string pyClassName("MaxPooling" + std::to_string(DIM) + "DOp");
   py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
-    py::multiple_inheritance())
+    py::multiple_inheritance(),
+    R"mydelimiter(
+        Initialize a MaxPooling operator for a specified dimension.
+
+        :param kernel_dims: The size of the kernel to apply to each dimension.
+        :type kernel_dims: List[int]
+        :param stride_dims: The stride (step size) to move the kernel over the input.
+        :type stride_dims: List[int]
+        :param ceil_mode: Whether to use ceil or floor when calculating the output dimensions.
+        :type ceil_mode: bool
+    )mydelimiter")
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
                 bool>(),
@@ -39,7 +49,9 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
   .def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
   .def_readonly_static("Type", &MaxPooling_Op<DIM>::Type);
+
   declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
+
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
@@ -50,16 +62,33 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         return MaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), ceil_mode);
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
-       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("ceil_mode") = false);
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM, 1),
+       py::arg("ceil_mode") = false,
+    R"mydelimiter(
+        Initialize a node containing a MaxPooling operator.
 
-}
+        This operator performs max pooling, which reduces the input tensor size by selecting 
+        the maximum value in each kernel-sized window, with optional strides and ceiling for dimension 
+        calculation.
 
+        :param kernel_dims: The size of the kernel to apply to each dimension.
+        :type kernel_dims: List[int]
+        :param stride_dims: The stride (step size) to move the kernel over the input.
+        :type stride_dims: List[int]
+        :param ceil_mode: Whether to use ceil or floor when calculating the output dimensions.
+        :type ceil_mode: bool
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the MaxPooling operator.
+        :rtype: :py:class:`MaxPoolingOp`
+    )mydelimiter");
+}
 
 void init_MaxPooling(py::module &m) {
   declare_MaxPoolingOp<1>(m);
   declare_MaxPoolingOp<2>(m);
   declare_MaxPoolingOp<3>(m);
-
 }
+
 } // namespace Aidge
+
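A short usage sketch of the MaxPooling2D factory documented above (aidge_core import name assumed; values are illustrative):

    import aidge_core

    # 2x2 max pooling with stride 2, flooring the output dimensions.
    maxpool_node = aidge_core.MaxPooling2D(kernel_dims=[2, 2],
                                           name="maxpool0",
                                           stride_dims=[2, 2],
                                           ceil_mode=False)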
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index 5f173068af0f1140830d458979ec924c38ade078..b2811fbaab2b6cd33dc2b105f0044cd8a5edbbc7 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -179,6 +179,14 @@ void declare_LSTMOp(py::module &m) {
        py::arg("seq_length"));
 }
 
+void declare_LeakyOp(py::module &m) {
+    m.def("Leaky", &Leaky, 
+          py::arg("nb_timesteps"),
+          py::arg("beta"),
+          py::arg("threshold") = 1.0,
+          py::arg("name") = "");
+}
+
 void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedConvOp<1>(m);
   declare_PaddedConvOp<2>(m);
@@ -193,6 +201,7 @@ void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedMaxPoolingOp<2>(m);
 //   declare_PaddedMaxPoolingOp<3>(m);
   declare_LSTMOp(m);
+  declare_LeakyOp(m);
 
   py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperatorOp", py::multiple_inheritance())
   .def(py::init<const char *, const std::shared_ptr<GraphView>&, const std::vector<InputCategory>&>(),
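A minimal sketch of the Leaky meta-operator factory exposed above (aidge_core import name assumed; parameter values are illustrative):

    import aidge_core

    # Leaky meta-operator over 10 timesteps, with beta=0.9 and the default threshold of 1.0.
    leaky_node = aidge_core.Leaky(nb_timesteps=10, beta=0.9, threshold=1.0, name="leaky0")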
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 23949b5fe3b22edf5b7105abd0de29b727740e35..a21e74f459177017d9f9439f7e23d12885bd0ebe 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -19,12 +19,30 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Mul(py::module& m) {
-    py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance())
+    py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance(),
+    R"mydelimiter(
+        Initialize a Mul operator, which performs element-wise multiplication between two tensors.
+
+        :param name: Name of the node (optional).
+        :type name: str
+    )mydelimiter")
     .def(py::init<>())
     .def_static("get_inputs_name", &Mul_Op::getInputsName)
     .def_static("get_outputs_name", &Mul_Op::getOutputsName)
     .def_readonly_static("Type", &Mul_Op::Type);
     declare_registrable<Mul_Op>(m, "MulOp");
-    m.def("Mul", &Mul, py::arg("name") = "");
+
+    m.def("Mul", &Mul, py::arg("name") = "",
+    R"mydelimiter(
+        Initialize a node containing a Mul operator.
+
+        This operator performs element-wise multiplication between two input tensors.
+
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the Mul operator.
+        :rtype: :py:class:`MulOp`
+    )mydelimiter");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index 7fa9e5825983eb0c82d2b1f84b77557e656a7d78..18708adee1f9644af7d6824cfb7896e786da2108 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -45,15 +45,24 @@ void init_Operator(py::module& m){
     .def("get_raw_input", &Operator::getRawInput, py::arg("inputIdx"))
     .def("nb_inputs", &Operator::nbInputs)
     .def("nb_outputs", &Operator::nbOutputs)
-    .def("input_category", &Operator::inputCategory, py::arg("idx"),
+    .def("input_category", static_cast<std::vector<InputCategory>(Operator::*)() const>(&Operator::inputCategory),
+    R"mydelimiter(
+    Category of the inputs (Data or Param, optional or not).
+    Data inputs exclude inputs expecting parameters (weights or bias).
+
+    :rtype: List[InputCategory]
+    )mydelimiter")
+    .def("input_category", static_cast<InputCategory(Operator::*)(IOIndex_t) const>(&Operator::inputCategory), py::arg("idx"),
     R"mydelimiter(
     Category of a specific input (Data or Param, optional or not).
     Data inputs exclude inputs expecting parameters (weights or bias).
 
     :rtype: InputCategory
     )mydelimiter")
+    .def("is_optional_input", &Operator::isOptionalInput, py::arg("inputIdx"))
     .def("associate_input", &Operator::associateInput, py::arg("inputIdx"), py::arg("data"))
     .def("set_datatype", &Operator::setDataType, py::arg("dataType"))
+    .def("set_dataformat", &Operator::setDataFormat, py::arg("dataFormat"))
     .def("set_backend", py::overload_cast<const std::string&, DeviceIdx_t>(&Operator::setBackend), py::arg("name"), py::arg("device") = 0)
     .def("set_backend", py::overload_cast<const std::vector<std::pair<std::string, DeviceIdx_t>>&>(&Operator::setBackend), py::arg("backends"))
     .def("forward", &Operator::forward)
diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp
index 8c515e321207605c20acc9e5b02271906c9707d1..2602e115d43d805451aa9f0836c8151b2cd4b109 100644
--- a/python_binding/operator/pybind_OperatorTensor.cpp
+++ b/python_binding/operator/pybind_OperatorTensor.cpp
@@ -26,7 +26,9 @@ namespace Aidge {
 void init_OperatorTensor(py::module& m){
     py::class_<OperatorTensor, std::shared_ptr<OperatorTensor>, Operator>(m, "OperatorTensor")
     .def("get_output", &OperatorTensor::getOutput, py::arg("outputIdx"))
+    .def("get_outputs", &OperatorTensor::getOutputs)
     .def("get_input", &OperatorTensor::getInput, py::arg("inputIdx"))
+    .def("get_inputs", &OperatorTensor::getInputs)
 
     .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&) const) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data"))
     .def("set_input", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setInput, py::arg("outputIdx"), py::arg("data"))
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 7dc4a4bee1a009b3ca033ea29861768c1a6fc19d..fe899a75a5c0c8f74f4905388306835ffeed31ba 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -8,7 +8,6 @@
  * SPDX-License-Identifier: EPL-2.0
  *
  ********************************************************************************/
-
 #include <array>
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
@@ -18,17 +17,31 @@
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Pad.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
+// Template function for declaring Pad operator for different dimensions
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
   const std::string pyClassName("Pad" + std::to_string(DIM) + "DOp");
+
   py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
-    py::multiple_inheritance())
+    py::multiple_inheritance(),
+    R"mydelimiter(
+        Initialize a Pad operator for a tensor.
+
+        This operator applies padding to the input tensor along specified dimensions.
+
+        :param beginEndTuples: Padding configurations for each dimension in the form [begin, end].
+        :type beginEndTuples: List[int]
+        :param borderType: Type of padding to be applied (default is Constant).
+        :type borderType: PadBorderType
+        :param borderValue: Value used for padding if borderType is Constant (default is 0.0).
+        :type borderValue: float
+    )mydelimiter")
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
                 PadBorderType,
                 double>(),
@@ -37,9 +50,10 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("borderValue") = 0.0)
     .def_static("get_inputs_name", &Pad_Op<DIM>::getInputsName)
     .def_static("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
-    .def_readonly_static("Type", &Pad_Op<DIM>::Type)
-    ;
+    .def_readonly_static("Type", &Pad_Op<DIM>::Type);
+
   declare_registrable<Pad_Op<DIM>>(m, pyClassName);
+
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
                                                         const std::string& name,
                                                         PadBorderType borderType = PadBorderType::Constant,
@@ -50,10 +64,25 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
        py::arg("begin_end_tuples"),
        py::arg("name") = "",
        py::arg("border_type") = PadBorderType::Constant,
-       py::arg("border_value") = 0.0);
-}
+       py::arg("border_value") = 0.0,
+       R"mydelimiter(
+        Initialize a node containing a Pad operator.
+
+        This function applies padding to the tensor along the specified dimensions 
+        using the given padding type and value.
 
+        :param begin_end_tuples: Padding configuration in the format [begin, end] for each dimension.
+        :type begin_end_tuples: List[int]
+        :param name: Name of the operator node (optional).
+        :type name: str
+        :param border_type: Type of padding (Constant, Edge, Reflect, Wrap) (default is Constant).
+        :type border_type: PadBorderType
+        :param border_value: The value used for padding if border_type is Constant (default is 0.0).
+        :type border_value: float
+    )mydelimiter");
+}
 
+// Initialize the Pad operator for different dimensions
 void init_Pad(py::module &m) {
   py::enum_<PadBorderType>(m, "pad_border_type")
     .value("Constant", PadBorderType::Constant)
@@ -61,8 +90,10 @@ void init_Pad(py::module &m) {
     .value("Reflect",  PadBorderType::Reflect)
     .value("Wrap",     PadBorderType::Wrap)
     .export_values();
+
   declare_PadOp<1>(m);
   declare_PadOp<2>(m);
-  //declare_PadOp<3>(m);
+  //declare_PadOp<3>(m); // Uncomment if needed for 3D pad
 }
+
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index ec29e3faa7c3efbc2b2dbe23372f57c30568b769..c1144b89de1c4f856a575edd141992130e7a46bb 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -19,13 +19,39 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Pow(py::module& m) {
-    py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance())
+    py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize a Pow operator.
+    This operator performs element-wise power between two input tensors.
+    The operation is defined as:
+        Output = Input1 ^ Input2
+    The output tensor shape is determined by taking the maximum size along each dimension of the input tensors after broadcasting.
+    Examples:
+        Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+        Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+    :param name: Name of the node (optional).
+    :type name: str
+    )mydelimiter")
     .def(py::init<>())
     .def_static("get_inputs_name", &Pow_Op::getInputsName)
     .def_static("get_outputs_name", &Pow_Op::getOutputsName)
     .def_readonly_static("Type", &Pow_Op::Type);
     declare_registrable<Pow_Op>(m, "PowOp");
 
-    m.def("Pow", &Pow, py::arg("name") = "");
+    m.def("Pow", &Pow, py::arg("name") = "",
+    R"mydelimiter(
+    Initialize a node containing a Pow operator that performs element-wise power between two tensors.
+    The operation is defined as:
+        Output = Input1 ^ Input2
+    The output tensor shape is determined by taking the maximum size along each dimension of the input tensors after broadcasting.
+    Examples:
+        Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+        Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+
+    :param name: Name of the node (optional).
+    :type name: str
+    :return: A node containing the Pow operator.
+    :rtype: :py:class:`PowOp`
+    )mydelimiter");
 }
 }  // namespace Aidge
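A short sketch of the broadcasting behaviour described in the docstrings above, assuming an `aidge_core` module built from this code (the node name is an arbitrary example):

    import aidge_core

    pow_node = aidge_core.Pow(name="pow0")
    # With inputs of shapes (3, 4, 2) and (2,), the broadcast output shape is (3, 4, 2).
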
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index 79720845cf21103d3a9257880e8d2068673e36f0..8222a6a03a86dfa11862814a7038b92e60ba88d2 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -19,13 +19,37 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_ReLU(py::module& m) {
-    py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance())
-    .def(py::init<>())
-    .def_static("get_inputs_name", &ReLU_Op::getInputsName)
-    .def_static("get_outputs_name", &ReLU_Op::getOutputsName)
-    .def_readonly_static("Type", &ReLU_Op::Type);
+    py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a ReLU (Rectified Linear Unit) operator.
+        The ReLU function is applied element-wise to the input tensor.
+        It is defined as:
+            ReLU(x) = max(0, x)
+        This operator sets all negative values in the tensor to zero, while leaving positive values unchanged.
+        :param name: Name of the node (optional).
+        :type name: str
+        )mydelimiter")
+        .def(py::init<>())
+        .def_static("get_inputs_name", &ReLU_Op::getInputsName)
+        .def_static("get_outputs_name", &ReLU_Op::getOutputsName)
+        .def_readonly_static("Type", &ReLU_Op::Type);
+
     declare_registrable<ReLU_Op>(m, "ReLUOp");
 
-    m.def("ReLU", &ReLU, py::arg("name") = "");
+    m.def("ReLU", &ReLU, py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing a ReLU operator that applies the ReLU function element-wise.
+
+        The ReLU function is applied element-wise and is defined as:
+            ReLU(x) = max(0, x)
+
+        The operation sets all negative values to zero and leaves positive values unchanged.
+
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the ReLU operator.
+        :rtype: :py:class:`ReLUOp`
+        )mydelimiter");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index c0b0e8c30ef127d5cdcaf24ded75b83f06c86588..e3244f5dd600e809bb428cd71bbad08348ec44ca 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -10,21 +10,52 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
 
-#include "aidge/data/Tensor.hpp"
-#include "aidge/operator/Reshape.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Reshape.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Reshape(py::module& m) {
-    py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
-        .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
-        .def_static("get_inputs_name", &Reshape_Op::getInputsName)
-        .def_static("get_outputs_name", &Reshape_Op::getOutputsName)
-        .def_readonly_static("Type", &Reshape_Op::Type);
+    py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Reshape operator that reshapes the input tensor to a specified shape.
+
+        :param shape: The target shape of the output tensor, given as a list of integers
+                      (one value per output dimension). It must describe the same total
+                      number of elements as the input tensor.
+        :type shape: List[int]
+        :param allowzero: If True, zero-size dimensions are allowed. If False, an error is raised 
+                           if the reshaped tensor has a zero-size dimension.
+        :type allowzero: bool
+        )mydelimiter")
+    .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
+    .def_static("get_inputs_name", &Reshape_Op::getInputsName)
+    .def_static("get_outputs_name", &Reshape_Op::getOutputsName)
+    .def_readonly_static("Type", &Reshape_Op::Type);
+
     declare_registrable<Reshape_Op>(m, "ReshapeOp");
-    m.def("Reshape", &Reshape, py::arg("shape") = std::vector<std::int64_t>(), py::arg("allowzero") = false, py::arg("name") = "");
+
+    m.def("Reshape", &Reshape, py::arg("shape") = std::vector<std::int64_t>(), py::arg("allowzero") = false, py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing a Reshape operator.
+
+        This operator reshapes the input tensor to the specified shape, provided as a list of
+        integers with one value per output dimension, describing the same total number of elements
+        as the input tensor. The operator also has a flag for allowing zero-size dimensions.
+
+        :param shape: The target shape to reshape the tensor to.
+        :type shape: List[int]
+        :param allowzero: Whether to allow zero-size dimensions.
+        :type allowzero: bool
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the Reshape operator.
+        :rtype: :py:class:`ReshapeOp`
+        )mydelimiter");
 }
+
 }  // namespace Aidge
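A minimal usage sketch for the factory documented above, assuming an `aidge_core` module built from this code (the shape and node name are arbitrary examples):

    import aidge_core

    # Reshape a 28x28 input into a single row of 784 elements
    flatten = aidge_core.Reshape(shape=[1, 784], allowzero=False, name="flatten0")
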
diff --git a/python_binding/operator/pybind_Round.cpp b/python_binding/operator/pybind_Round.cpp
index e9ed0e473eaa820537590633a89ca47382d36672..c055ab7fde3eb021a693a4de40a71bafbfd87d5a 100644
--- a/python_binding/operator/pybind_Round.cpp
+++ b/python_binding/operator/pybind_Round.cpp
@@ -18,19 +18,34 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Round(py::module& m) {
-    py::class_<Round_Op, std::shared_ptr<Round_Op>, OperatorTensor>(m, "RoundOp", py::multiple_inheritance())
+    py::class_<Round_Op, std::shared_ptr<Round_Op>, OperatorTensor>(m, "RoundOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Round operator, which rounds the values of a tensor element-wise.
+
+        This operator rounds each value in the input tensor to the nearest integer. 
+        For values exactly halfway between two integers, it rounds to the nearest even integer.
+
+        :param name: The name of the node (optional).
+        :type name: str
+        )mydelimiter")
     .def(py::init<>())
     .def_static("get_inputs_name", &Round_Op::getInputsName)
     .def_static("get_outputs_name", &Round_Op::getOutputsName)
     .def_readonly_static("Type", &Round_Op::Type);
+
     declare_registrable<Round_Op>(m, "RoundOp");
-    m.def("Round", &Round, py::arg("name") = "", R"mydelimiter(
-    RoundOp is a tensor operator that rounds the values of a tensor element-wise.
-        This class rounds each value to the nearest integer. In the case of halves, 
-        the rule is to round them to the nearest even integer.
-        :param X: input tensor.
-        :type X: tensor of type float, double, float16, or bfloat16.
-        :param Y: output tensor with the same shape and type as the input tensor.
-    )mydelimiter");
+
+    m.def("Round", &Round, py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing a Round operator that rounds tensor values element-wise.
+
+        This operator processes the input tensor and rounds each value to the nearest integer. 
+        If a value is exactly halfway between two integers, it rounds to the nearest even integer.
+
+        :param name: The name of the node (optional).
+        :type name: str
+        :return: A node containing the Round operator.
+        :rtype: :py:class:`RoundOp`
+        )mydelimiter");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
index 22e8011a9cd37f80a0678f2629809d4412ba6fd2..c555bca8971fca4cc570741be5a0a7f5be266fa2 100644
--- a/python_binding/operator/pybind_Scaling.cpp
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -19,15 +19,54 @@ namespace py = pybind11;
 
 namespace Aidge {
 
-void init_Scaling(py::module& m)
-{
-    py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance())
-        .def(py::init<float, size_t, bool>(), py::arg("scaling_factor"), py::arg("nb_bits"), py::arg("is_output_unsigned"))
+void init_Scaling(py::module& m) {
+    py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, OperatorTensor>(
+        m, "ScalingOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Scaling operator for element-wise tensor scaling.
+
+        This operator scales tensor elements by a specified scaling factor, 
+        optionally constraining the output to a specified bit-width and signedness.
+
+        :param scaling_factor: The scaling factor to apply to tensor elements.
+        :type scaling_factor: float
+        :param nb_bits: The number of bits for quantization of the output. Must be a positive integer.
+        :type nb_bits: int
+        :param is_output_unsigned: Specifies whether the output should be unsigned (True) or signed (False).
+        :type is_output_unsigned: bool
+        )mydelimiter")
+        .def(py::init<float, size_t, bool>(),
+             py::arg("scaling_factor"),
+             py::arg("nb_bits"),
+             py::arg("is_output_unsigned"))
         .def_static("get_inputs_name", &Scaling_Op::getInputsName)
         .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
         .def_readonly_static("Type", &Scaling_Op::Type);
+
     declare_registrable<Scaling_Op>(m, "ScalingOp");
-    m.def("Scaling", &Scaling, py::arg("scaling_factor") = 1.0f, py::arg("nb_bits") = 8, py::arg("is_output_unsigned") = true, py::arg("name") = "");
+
+    m.def("Scaling", &Scaling,
+          py::arg("scaling_factor") = 1.0f,
+          py::arg("nb_bits") = 8,
+          py::arg("is_output_unsigned") = true,
+          py::arg("name") = "",
+          R"mydelimiter(
+          Initialize a node containing a Scaling operator to scale tensor elements.
+
+          This operator applies a scaling factor to each element of the input tensor. The result 
+          can optionally be quantized to a specific bit-width and constrained to unsigned or signed output.
+
+          :param scaling_factor: The factor by which to scale the tensor elements. Default is 1.0.
+          :type scaling_factor: float
+          :param nb_bits: The number of bits for quantized output. Default is 8.
+          :type nb_bits: int
+          :param is_output_unsigned: Indicates whether the output tensor values should be unsigned. Default is True.
+          :type is_output_unsigned: bool
+          :param name: The name of the node (optional).
+          :type name: str
+          :return: A node containing the Scaling operator.
+          :rtype: :py:class:`ScalingOp`
+          )mydelimiter");
 }
 
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
index b3511f31eeab7d5df679d16c3bfb89f51d75cdbe..cc7669a24b5c9febe41fea863a0966bfcdffc94e 100644
--- a/python_binding/operator/pybind_Shape.cpp
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -20,18 +20,41 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Shape(py::module& m) {
-    py::class_<Shape_Op, std::shared_ptr<Shape_Op>, OperatorTensor>(m, "ShapeOp", py::multiple_inheritance())
-        .def(py::init<const std::int64_t,
-                      const std::int64_t>(),
-                py::arg("start"),
-                py::arg("end"))
+    py::class_<Shape_Op, std::shared_ptr<Shape_Op>, OperatorTensor>(m, "ShapeOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Shape operator, which extracts a slice of the tensor's shape.
+
+        :param start: The starting index (inclusive) for the shape slice. The accepted range is [-r; r-1], 
+                      where r is the rank of the input tensor.
+        :type start: int
+        :param end: The ending index (exclusive) for the shape slice. The accepted range is [-r; r-1], 
+                    where r is the rank of the input tensor. If not specified, the slice will go until the end of the shape.
+        :type end: int
+        )mydelimiter")
+        .def(py::init<const std::int64_t, const std::int64_t>(), py::arg("start"), py::arg("end"))
         .def_static("get_inputs_name", &Shape_Op::getInputsName)
         .def_static("get_outputs_name", &Shape_Op::getOutputsName)
         .def_readonly_static("Type", &Shape_Op::Type);
 
     declare_registrable<Shape_Op>(m, "ShapeOp");
 
-    m.def("Shape", &Shape, py::arg("start") = 0, py::arg("end") = -1, py::arg("name") = "");
+    m.def("Shape", &Shape, py::arg("start") = 0, py::arg("end") = -1, py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing a Shape operator that extracts a slice of the tensor's shape.
+
+        This operator extracts a slice of the tensor's shape from the specified start to end indices. 
+        The start and end indices are inclusive and exclusive respectively, and can be positive or negative. 
+        The accepted range for both is [-r, r-1], where r is the rank of the input tensor.
+
+        :param start: The starting index (inclusive) for the shape slice. The accepted range is [-r; r-1].
+        :type start: int
+        :param end: The ending index (exclusive) for the shape slice. The accepted range is [-r; r-1].
+        :type end: int
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the Shape operator.
+        :rtype: :py:class:`ShapeOp`
+        )mydelimiter");
 }
 
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp
index 09e0e2fa2c1c46bafcd253267d5c33187de6bd69..b061d806f24de3e778e745a7a880cbdcd3d9dbb2 100644
--- a/python_binding/operator/pybind_Sigmoid.cpp
+++ b/python_binding/operator/pybind_Sigmoid.cpp
@@ -19,14 +19,38 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Sigmoid(py::module& m) {
-    py::class_<Sigmoid_Op, std::shared_ptr<Sigmoid_Op>, OperatorTensor>(m, "SigmoidOp", py::multiple_inheritance())
-    .def(py::init<>())
-    .def_static("get_inputs_name", &Sigmoid_Op::getInputsName)
-    .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName)
-    .def_readonly_static("Type", &Sigmoid_Op::Type);
+    py::class_<Sigmoid_Op, std::shared_ptr<Sigmoid_Op>, OperatorTensor>(m, "SigmoidOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Sigmoid operator.
+        The Sigmoid function is applied element-wise to the input tensor.
+        It is defined as:
+            Sigmoid(x) = 1 / (1 + exp(-x))
+        This operator applies the sigmoid activation function to each element of the tensor, squashing each value into the range (0, 1).
+        :param name: Name of the node (optional).
+        :type name: str
+        )mydelimiter")
+        .def(py::init<>())
+        .def_static("get_inputs_name", &Sigmoid_Op::getInputsName)
+        .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName)
+        .def_readonly_static("Type", &Sigmoid_Op::Type);
+
 
     declare_registrable<Sigmoid_Op>(m, "SigmoidOp");
 
-    m.def("Sigmoid", &Sigmoid, py::arg("name") = "");
+    m.def("Sigmoid", &Sigmoid, py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing a Sigmoid operator that applies the Sigmoid function element-wise.
+
+        The Sigmoid function is applied element-wise and is defined as:
+            Sigmoid(x) = 1 / (1 + exp(-x))
+
+        This operation squashes each value of the tensor into the range (0, 1), making it commonly used for activation functions in neural networks.
+
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the Sigmoid operator.
+        :rtype: :py:class:`SigmoidOp`
+        )mydelimiter");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index c8cae2592b966fff7ebfde1e5905ed31d5b22455..f01751b86a981f19f89e34c90780faeb6bd7e9b0 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -20,26 +20,60 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Slice(py::module& m) {
-    py::class_<Slice_Op, std::shared_ptr<Slice_Op>, OperatorTensor>(m, "SliceOp", py::multiple_inheritance())
+    py::class_<Slice_Op, std::shared_ptr<Slice_Op>, OperatorTensor>(m, "SliceOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Slice operator, which slices a tensor along specified axes.
+
+        :param starts: The start indices for the slice along each axis. The accepted range for each value is [-r, r-1],
+                       where r is the rank of the input tensor.
+        :type starts: List[int]
+        :param ends: The end indices for the slice along each axis. The accepted range for each value is [-r, r-1], 
+                     where r is the rank of the input tensor. The slicing is exclusive at the end index.
+        :type ends: List[int]
+        :param axes: The axes along which to slice the tensor. If not specified, slices all axes.
+        :type axes: List[int]
+        :param steps: The step size for each axis in the slice. Defaults to 1.
+        :type steps: List[int]
+        )mydelimiter")
     .def(py::init<const std::vector<std::int64_t>&,
                   const std::vector<std::int64_t>&,
                   const std::vector<std::int8_t>&,
                   const std::vector<std::int64_t>&>(),
                   py::arg("starts"),
                   py::arg("ends"),
-                  py::arg("axes"),
-                  py::arg("steps"))
+                  py::arg("axes") = std::vector<std::int8_t>(),
+                  py::arg("steps") = std::vector<std::int64_t>())
     .def_static("get_inputs_name", &Slice_Op::getInputsName)
     .def_static("get_outputs_name", &Slice_Op::getOutputsName)
     .def_readonly_static("Type", &Slice_Op::Type);
+
     declare_registrable<Slice_Op>(m, "SliceOp");
 
-    m.def("Slice",
+    m.def("Slice", 
           &Slice,
           py::arg("starts") = std::vector<std::int64_t>(),
           py::arg("ends") = std::vector<std::int64_t>(),
           py::arg("axes") = std::vector<std::int8_t>(),
           py::arg("steps") = std::vector<std::int64_t>(),
-          py::arg("name") = "");
+          py::arg("name") = "",
+          R"mydelimiter(
+          Initialize a node containing a Slice operator that slices a tensor along specified axes.
+
+          The slicing is done by specifying the `starts`, `ends`, `axes`, and `steps` for each axis. The accepted range for each of the `starts` and `ends` is [-r, r-1], where r is the rank of the input tensor. The `axes` specify which axes to apply the slice on. The `steps` specify the step size along each axis. If `steps` is not provided, it defaults to 1.
+
+          :param starts: The start indices for the slice along each axis. The accepted range is [-r, r-1].
+          :type starts: List[int]
+          :param ends: The end indices for the slice along each axis. The accepted range is [-r, r-1], exclusive at the end index.
+          :type ends: List[int]
+          :param axes: The axes along which to slice the tensor. If not specified, slices all axes.
+          :type axes: List[int]
+          :param steps: The step size for each axis in the slice. Defaults to 1.
+          :type steps: List[int]
+          :param name: Name of the node (optional).
+          :type name: str
+          :return: A node containing the Slice operator.
+          :rtype: :py:class:`SliceOp`
+          )mydelimiter");
 }
+
 }  // namespace Aidge
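A minimal usage sketch for the factory documented above, assuming an `aidge_core` module built from this code (index values and the node name are arbitrary examples):

    import aidge_core

    # Keep indices 0 and 1 along axis 0; the end index is exclusive
    crop = aidge_core.Slice(starts=[0], ends=[2], axes=[0], steps=[1], name="crop0")
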
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 3b98ab9dfa1590093c567a363f67d32d613651a2..093f448e46be09ee1d77740efa9ed0cf70654737 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -20,12 +20,29 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Softmax(py::module& m) {
-    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
+    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance(),
+        R"mydelimiter(
+            Initialize a Softmax operator.
+            :param axis: Axis along which to compute the softmax. The accepted range is [-r, r-1], 
+                        where r is the rank (number of dimensions) of the input tensor.
+            :type axis: int
+        )mydelimiter")
         .def(py::init<std::int32_t>(), py::arg("axis"))
         .def_static("get_inputs_name", &Softmax_Op::getInputsName)
         .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
         .def_readonly_static("Type", &Softmax_Op::Type);
     declare_registrable<Softmax_Op>(m, "SoftmaxOp");
-    m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
+    m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "",
+        R"mydelimiter(
+            Initialize a node containing a Softmax operator that computes the softmax along the specified axis.
+
+            :param axis: Axis along which to compute the softmax. The accepted range is [-r, r-1], 
+                        where r is the rank (number of dimensions) of the input tensor.
+            :type axis: int
+            :param name: Name of the node (optional).
+            :type name: str
+            :return: A node containing the Softmax operator.
+            :rtype: :py:class:`SoftmaxOp`
+        )mydelimiter");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
index 9b3feda9f791e65a9c32f2bda3da4da450838b40..f02a699e44cb2afca131e13fbce415fabcd45b80 100644
--- a/python_binding/operator/pybind_Split.cpp
+++ b/python_binding/operator/pybind_Split.cpp
@@ -10,29 +10,59 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
-#include <string>
 #include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Split.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Split(py::module& m) {
-    py::class_<Split_Op, std::shared_ptr<Split_Op>, OperatorTensor>(m, "SplitOp", py::multiple_inheritance())
-        .def(py::init<DimSize_t, std::int8_t, std::vector<DimSize_t>&>(),
-                py::arg("nb_outputs"),
-                py::arg("axis"),
-                py::arg("split"))
-        .def_static("get_inputs_name", &Split_Op::getInputsName)
-        .def_static("get_outputs_name", &Split_Op::getOutputsName)
-        .def_readonly_static("Type", &Split_Op::Type);
+    py::class_<Split_Op, std::shared_ptr<Split_Op>, OperatorTensor>(m, "SplitOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Split operator, which splits a tensor along a specified axis.
+
+        :param nb_outputs: The number of outputs to split the input tensor into. Should be a positive integer.
+        :type nb_outputs: int
+        :param axis: The axis along which to split the input tensor. Must be in the range [-r, r-1] where r is the number of dimensions in the input tensor.
+        :type axis: int
+        :param split: A list of integers specifying how to split the input tensor along the specified axis.
+                      The sum of the values in the list should be equal to the size of the input tensor along the split axis.
+        :type split: List[int]
+        )mydelimiter")
+    .def(py::init<DimSize_t, std::int8_t, std::vector<DimSize_t>&>(),
+            py::arg("nb_outputs"),
+            py::arg("axis"),
+            py::arg("split"))
+    .def_static("get_inputs_name", &Split_Op::getInputsName)
+    .def_static("get_outputs_name", &Split_Op::getOutputsName)
+    .def_readonly_static("Type", &Split_Op::Type);
 
     declare_registrable<Split_Op>(m, "SplitOp");
 
-    m.def("Split", &Split, py::arg("nb_outputs"), py::arg("axis") = 0, py::arg("split") = std::vector<DimSize_t>(), py::arg("name") = "");
+    m.def("Split", 
+          &Split, 
+          py::arg("nb_outputs"), 
+          py::arg("axis") = 0, 
+          py::arg("split") = std::vector<DimSize_t>(), 
+          py::arg("name") = "",
+          R"mydelimiter(
+          Initialize a node containing a Split operator that splits a tensor along a specified axis.
+
+          The operator splits the input tensor along the specified axis. The number of splits is defined by `nb_outputs`, and the `split` argument specifies how to divide the input tensor.
+          The `axis` argument defines the axis along which the split occurs.
+
+          :param nb_outputs: The number of splits (outputs) from the input tensor. Must be a positive integer.
+          :type nb_outputs: int
+          :param axis: The axis along which to perform the split. Must be in the range [-r, r-1], where r is the number of dimensions in the input tensor.
+          :type axis: int
+          :param split: A list of integers indicating the size of each split along the specified axis. The sum of all values in the list must be equal to the size of the input tensor along the split axis.
+          :type split: List[int]
+          :param name: The name of the node (optional).
+          :type name: str
+          :return: A node containing the Split operator.
+          :rtype: :py:class:`SplitOp`
+          )mydelimiter");
 }
 
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index ba0c5aab02349df4c50f960bbeb7df2082aa9233..d383ae0a40585a1928fedb33874baf7f21f1dedd 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -18,12 +18,30 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Sqrt(py::module& m) {
-    py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance())
+    py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Square Root operator.
+        This operator computes the square root of each element in the input tensor. The input values must be non-negative.
+        )mydelimiter")
     .def(py::init<>())
     .def_static("get_inputs_name", &Sqrt_Op::getInputsName)
     .def_static("get_outputs_name", &Sqrt_Op::getOutputsName)
     .def_readonly_static("Type", &Sqrt_Op::Type);
+
     declare_registrable<Sqrt_Op>(m, "SqrtOp");
-    m.def("Sqrt", &Sqrt, py::arg("name") = "");
+
+    m.def("Sqrt", 
+          &Sqrt, 
+          py::arg("name") = "",
+          R"mydelimiter(
+          Initialize a node containing a Square Root operator that computes the element-wise square root of the input tensor.
+          The input tensor values must be non-negative for the square root to be computed.
+
+          :param name: The name of the node (optional).
+          :type name: str
+          :return: A node containing the Sqrt operator.
+          :rtype: :py:class:`SqrtOp`
+          )mydelimiter");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index 52a622f0fdf6480a375d17c9729017fca32b3092..2621e184571d55a0f9a9558189b69308394c830f 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -19,12 +19,40 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Sub(py::module& m) {
-    py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance())
+    py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Subtraction operator.
+        This operator performs element-wise subtraction between two input tensors. The operation is defined as:
+            Output = Input1 - Input2
+        The output tensor shape is determined by taking the maximum size along each dimension of the input tensors after broadcasting.
+        Examples:
+            Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+            Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+
+        :param name: Name of the node (optional).
+        :type name: str
+        )mydelimiter")
     .def(py::init<>())
     .def_static("get_inputs_name", &Sub_Op::getInputsName)
     .def_static("get_outputs_name", &Sub_Op::getOutputsName)
     .def_readonly_static("Type", &Sub_Op::Type);
     declare_registrable<Sub_Op>(m, "SubOp");
-    m.def("Sub", &Sub, py::arg("name") = "");
+    m.def("Sub", &Sub, py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing a Subtraction operator that performs element-wise subtraction between two tensors.
+        The operation is defined as:
+            Output = Input1 - Input2
+        The output tensor shape is determined by taking the maximum size along each dimension of the input tensors after broadcasting.
+        Examples:
+            Input A: (3, 4, 2), Input B: (2), Output: (3, 4, 2)
+            Input A: (1, 5, 3), Input B: (2, 1, 3), Output: (2, 5, 3)
+
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the Sub operator.
+        :rtype: :py:class:`SubOp`
+        )mydelimiter");
 }
+
 }  // namespace Aidge
+
diff --git a/python_binding/operator/pybind_Tanh.cpp b/python_binding/operator/pybind_Tanh.cpp
index ded15ee78951d389d614d932e4a9c22bf310b814..6c0d026e65bbcd0e9e2f39fdabd25ca6024f148f 100644
--- a/python_binding/operator/pybind_Tanh.cpp
+++ b/python_binding/operator/pybind_Tanh.cpp
@@ -19,12 +19,32 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Tanh(py::module& m) {
-    py::class_<Tanh_Op, std::shared_ptr<Tanh_Op>, OperatorTensor>(m, "TanhOp", py::multiple_inheritance())
+    py::class_<Tanh_Op, std::shared_ptr<Tanh_Op>, OperatorTensor>(m, "TanhOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Tanh operator.
+        The Tanh (hyperbolic tangent) function is applied element-wise to the input tensor.
+        It is defined as:
+            tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
+        :param name: Name of the node (optional).
+        :type name: str
+        )mydelimiter")
     .def(py::init<>())
     .def_static("get_inputs_name", &Tanh_Op::getInputsName)
     .def_static("get_outputs_name", &Tanh_Op::getOutputsName)
     .def_readonly_static("Type", &Tanh_Op::Type);
 
-    m.def("Tanh", &Tanh, py::arg("name") = "");
+    m.def("Tanh", &Tanh, py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing a Tanh operator that applies the tanh function element-wise.
+
+        The tanh function is applied element-wise, and the operation is defined as:
+            tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
+
+        :param name: Name of the node (optional).
+        :type name: str
+        :return: A node containing the Tanh operator.
+        :rtype: :py:class:`TanhOp`
+        )mydelimiter");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_WeightInterleaving.cpp b/python_binding/operator/pybind_WeightInterleaving.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..25b423bd66503b39f031695121cf673c45c34bbe
--- /dev/null
+++ b/python_binding/operator/pybind_WeightInterleaving.cpp
@@ -0,0 +1,39 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include "aidge/operator/WeightInterleaving.hpp"
+
+namespace py = pybind11;
+
+namespace Aidge {
+
+void declare_WeightInterleaving(py::module &m) {
+  py::class_<WeightInterleaving_Op, std::shared_ptr<WeightInterleaving_Op>, OperatorTensor>(m, "WeightInterleavingOp", py::multiple_inheritance())
+    .def(py::init<>())
+    .def_static("get_inputs_name", &WeightInterleaving_Op::getInputsName)
+    .def_static("get_outputs_name", &WeightInterleaving_Op::getOutputsName)
+    .def_readonly_static("Type", &WeightInterleaving_Op::Type)
+
+    .def("__repr__", [](WeightInterleaving_Op& b) {
+        return fmt::format("Operator(type='{}')", b.Type);
+    });
+
+  declare_registrable<WeightInterleaving_Op>(m, "WeightInterleavingOp");
+
+  m.def("WeightInterleaving", &WeightInterleaving, py::arg("name") = "");
+}
+
+void init_WeightInterleaving(py::module &m) {
+  declare_WeightInterleaving(m);
+}
+
+}  // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 006eeb289f25570ddf337f048b05816102624028..cc6f0bf2502027fea467b9db39561769fcebbd2b 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -16,8 +16,12 @@
 namespace py = pybind11;
 
 namespace Aidge {
+void init_CoreSysInfo(py::module&);
 void init_Random(py::module&);
 void init_Data(py::module&);
+void init_DataFormat(py::module&);
+void init_DataType(py::module&);
+void init_Elts(py::module&);
 void init_Database(py::module&);
 void init_DataProvider(py::module&);
 void init_Interpolation(py::module&);
@@ -30,6 +34,7 @@ void init_Operator(py::module&);
 void init_OperatorTensor(py::module&);
 void init_StaticAnalysis(py::module&);
 
+void init_Abs(py::module&);
 void init_Add(py::module&);
 void init_And(py::module&);
 void init_ArgMax(py::module&);
@@ -46,6 +51,7 @@ void init_ConvDepthWise(py::module&);
 void init_DepthToSpace(py::module&);
 void init_Div(py::module&);
 void init_Erf(py::module&);
+void init_Expand(py::module&);
 void init_FC(py::module&);
 void init_Gather(py::module&);
 void init_GenericOperator(py::module&);
@@ -83,6 +89,7 @@ void init_Sub(py::module&);
 void init_Tanh(py::module&);
 void init_Transpose(py::module&);
 void init_Unsqueeze(py::module&);
+void init_WeightInterleaving(py::module&);
 
 void init_Node(py::module&);
 void init_GraphView(py::module&);
@@ -90,9 +97,6 @@ void init_OpArgs(py::module&);
 void init_Connector(py::module&);
 void init_SinglePassGraphMatching(py::module&);
 
-void init_GraphRegex(py::module&);
-void init_MatchSolution(py::module&);
-
 void init_Recipes(py::module&);
 void init_GraphViewHelper(py::module&);
 
@@ -103,9 +107,13 @@ void init_TensorUtils(py::module&);
 void init_Filler(py::module&);
 
 void init_Aidge(py::module& m) {
+    init_CoreSysInfo(m);
     init_Random(m);
 
     init_Data(m);
+    init_DataFormat(m);
+    init_DataType(m);
+    init_Elts(m);
     init_Database(m);
     init_DataProvider(m);
     init_Interpolation(m);
@@ -125,6 +133,7 @@ void init_Aidge(py::module& m) {
     init_OperatorTensor(m);
     init_StaticAnalysis(m);
 
+    init_Abs(m);
     init_Add(m);
     init_And(m);
     init_ArgMax(m);
@@ -141,6 +150,7 @@ void init_Aidge(py::module& m) {
     init_DepthToSpace(m);
     init_Div(m);
     init_Erf(m);
+    init_Expand(m);
     init_FC(m);
     init_Gather(m);
     init_GenericOperator(m);
@@ -177,12 +187,10 @@ void init_Aidge(py::module& m) {
     init_Tanh(m);
     init_Transpose(m);
     init_Unsqueeze(m);
+    init_WeightInterleaving(m);
 
     init_Producer(m);
 
-    init_GraphRegex(m);
-    init_MatchSolution(m);
-
     init_Recipes(m);
     init_GraphViewHelper(m);
     init_Scheduler(m);
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index 77f20b9d655c6d9f6e95b23c4884bd1bc4f9ffd6..21478a5b14d609801f232b20cda25e7e1c0d9475 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -144,6 +144,21 @@ void init_Recipes(py::module &m)
     :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
+
+  m.def("to_generic_op", toGenericOp, py::arg("node"), R"mydelimiter(
+    Transform to a Generic Operator.
+
+    :param node: Node whose Operator will be turned into a Generic Operator.
+    :type node: :py:class:`aidge_core.Node`
+    )mydelimiter");
+
+  m.def("apply_weightinterleaving", applyWeightInterleaving, py::arg("node"), R"mydelimiter(
+    Replace the weight Producer linked to the given node with a weight Producer using interleaving and the NHWC format.
+    This recipe is specific to the ARM Cortex-M export for low-bit integer support.
+
+    :param node: Node whose linked weights will receive interleaving.
+    :type node: :py:class:`aidge_core.Node`
+    )mydelimiter");
 }
 
 } // namespace Aidge
diff --git a/python_binding/scheduler/pybind_MemoryManager.cpp b/python_binding/scheduler/pybind_MemoryManager.cpp
index 0f18db405bec0aee9637f2e5f2ecc7b71e502cc5..3fce92349f28e3c6a897356dee60359c1797d9ca 100644
--- a/python_binding/scheduler/pybind_MemoryManager.cpp
+++ b/python_binding/scheduler/pybind_MemoryManager.cpp
@@ -36,10 +36,10 @@ void init_MemoryManager(py::module& m)
         .def_readwrite("released", &MemoryManager::MemorySpace::released);
 
     py::class_<MemoryManager::MemoryPlane, std::shared_ptr<MemoryManager::MemoryPlane>>(m, "MemoryPlane")
-        .def(py::init<std::shared_ptr<MemoryManager::MemorySpace>, 
+        .def(py::init<std::shared_ptr<MemoryManager::MemorySpace>,
                       MemoryManager::Clock_T, unsigned int, unsigned int,
                       unsigned int, unsigned int, unsigned int>(),
-                      py::arg("mem_space"), py::arg("clock"), py::arg("offset"), 
+                      py::arg("mem_space"), py::arg("clock"), py::arg("offset"),
                       py::arg("size"), py::arg("stride"), py::arg("length"), py::arg("count"))
         .def_readwrite("mem_space", &MemoryManager::MemoryPlane::memSpace)
         .def_readwrite("allocated", &MemoryManager::MemoryPlane::allocated)
@@ -101,7 +101,6 @@ void init_MemoryManager(py::module& m)
         .def("get_nb_planes", (unsigned int (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>) const) &MemoryManager::getNbPlanes, py::arg("mem_space"))
         .def("get_current_tick", &MemoryManager::getCurrentTick)
         .def("tick", &MemoryManager::tick)
-        .def("log", &MemoryManager::log, py::arg("file_name"))
         ;
 }
 
diff --git a/python_binding/scheduler/pybind_ProdConso.cpp b/python_binding/scheduler/pybind_ProdConso.cpp
index abd6d5379178916b5842095d50a1de2155345b6f..547e2258dd353178e5a78e9701cc032e15cafd8b 100644
--- a/python_binding/scheduler/pybind_ProdConso.cpp
+++ b/python_binding/scheduler/pybind_ProdConso.cpp
@@ -104,6 +104,7 @@ void init_ProdConso(py::module& m){
     .def(py::init<const Operator&, bool>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>())
     .def_static("default_model", &ProdConso::defaultModel)
     .def_static("in_place_model", &ProdConso::inPlaceModel)
+    .def("get_operator", &ProdConso::getOperator)
     .def("get_nb_required_data", &ProdConso::getNbRequiredData)
     .def("get_nb_required_protected", &ProdConso::getNbRequiredProtected)
     .def("get_required_memory", &ProdConso::getRequiredMemory)
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index 472af2a9465b121593613492f5120ddc9d7fe254..1a3a4b6b2f552b05ae33fea0ecff43c4f1ec689f 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -21,16 +21,24 @@
 namespace py = pybind11;
 namespace Aidge {
 void init_Scheduler(py::module& m){
+    py::enum_<Scheduler::EarlyLateSort>(m, "EarlyLateSort")
+        .value("Default", Scheduler::EarlyLateSort::Default)
+        .value("AsSoonAsPossible", Scheduler::EarlyLateSort::AsSoonAsPossible)
+        .value("AsLateAsPossible", Scheduler::EarlyLateSort::AsLateAsPossible)
+        .export_values();
+
     py::class_<Scheduler, std::shared_ptr<Scheduler>>(m, "Scheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("graph_view", &Scheduler::graphView)
     .def("save_scheduling_diagram", &Scheduler::saveSchedulingDiagram, py::arg("file_name"))
     .def("save_static_scheduling_diagram", &Scheduler::saveStaticSchedulingDiagram, py::arg("file_name"))
+    .def("save_factorized_static_scheduling_diagram", &Scheduler::saveFactorizedStaticSchedulingDiagram, py::arg("file_name"), py::arg("min_repeat") = 2)
     .def("resetScheduling", &Scheduler::resetScheduling)
     .def("generate_scheduling", &Scheduler::generateScheduling)
-    .def("get_static_scheduling", &Scheduler::getStaticScheduling, py::arg("step") = 0)
+    .def("get_static_scheduling", &Scheduler::getStaticScheduling, py::arg("step") = 0, py::arg("sorting") = Scheduler::EarlyLateSort::Default)
     .def("graph_view", &Scheduler::graphView)
     .def("generate_memory", &Scheduler::generateMemory, py::arg("inc_producers") = false, py::arg("wrap_around_buffer") = false)
+    .def("generate_memory_auto_concat", &Scheduler::generateMemoryAutoConcat, py::arg("inc_producers") = false, py::arg("wrap_around_buffer") = false)
     ;
 
     py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>, Scheduler>(m, "SequentialScheduler")
diff --git a/python_binding/utils/pybind_Log.cpp b/python_binding/utils/pybind_Log.cpp
index aa42c6605217f63b6871d1a3475b9612097577cd..d91a96a781374662d6242abba6b94b3905b9ea10 100644
--- a/python_binding/utils/pybind_Log.cpp
+++ b/python_binding/utils/pybind_Log.cpp
@@ -1,8 +1,21 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
 #include <pybind11/pybind11.h>
+
 #include "aidge/utils/Log.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
+
 void init_Log(py::module& m){
     py::enum_<Log::Level>(m, "Level")
         .value("Debug", Log::Debug)
@@ -78,17 +91,31 @@ void init_Log(py::module& m){
     .def_static("set_console_level", &Log::setConsoleLevel, py::arg("level"),
           R"mydelimiter(
           Set the minimum log level displayed in the console.
-          Available `Level`s in ascending order : 
+          Available `Level`s in ascending order:
             - Level.Debug
             - Level.Info
             - Level.Notice
             - Level.Warn
             - Level.Error
-            - Level.Fatal          
+            - Level.Fatal
 
           :param level: Log level.
           :type level: Level
           )mydelimiter")
+      .def_static("get_console_level", &Log::getConsoleLevel,
+          R"mydelimiter(
+          Get the current log level. Log level is set to ``Notice`` by default.
+          Log level is shared across modules.
+          Available `Level`s in ascending order:
+            - Level.Debug
+            - Level.Info
+            - Level.Notice
+            - Level.Warn
+            - Level.Error
+            - Level.Fatal
+          :return: Log level.
+          :rtype: Level
+          )mydelimiter")
     .def_static("set_console_color", &Log::setConsoleColor, py::arg("enabled"),
           R"mydelimiter(
           Enables or disable color output on comsole.
@@ -100,13 +127,13 @@ void init_Log(py::module& m){
     .def_static("set_file_level", &Log::setFileLevel, py::arg("level"),
           R"mydelimiter(
           Set the minimum log level saved in the log file.
-          Available `Level`s in ascending order : 
+          Available `Level`s in ascending order :
             - Level.Debug
             - Level.Info
             - Level.Notice
             - Level.Warn
             - Level.Error
-            - Level.Fatal          
+            - Level.Fatal
 
           :param level: Log level.
           :type level: Level
@@ -119,7 +146,14 @@ void init_Log(py::module& m){
 
           :param fileName: Log file name.
           :type fileName: str
+          )mydelimiter")
+    .def_static("set_precision", &Log::setPrecision, py::arg("precision"),
+          R"mydelimiter(
+          Set the precision format for floating point numbers.
+
+          :param precision: Number of digits displayed to the right of the decimal point.
+          :type precision: int
           )mydelimiter");
 }
 
-}
+} // namespace Aidge
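The new `get_console_level` and `set_precision` bindings complement the existing log controls; a minimal sketch, assuming an `aidge_core` module built from this code:

    import aidge_core

    aidge_core.Log.set_console_level(aidge_core.Level.Debug)
    current_level = aidge_core.Log.get_console_level()
    aidge_core.Log.set_precision(4)   # show 4 digits after the decimal point
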
diff --git a/python_binding/utils/sys_info/pybind_CoreVersionInfo.cpp b/python_binding/utils/sys_info/pybind_CoreVersionInfo.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b498e86ed31adad57de4dbd57fa8f358b188f499
--- /dev/null
+++ b/python_binding/utils/sys_info/pybind_CoreVersionInfo.cpp
@@ -0,0 +1,11 @@
+#include <pybind11/pybind11.h>
+#include "aidge/utils/sys_info/CoreVersionInfo.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+void init_CoreSysInfo(py::module& m){
+    m.def("show_version", &showCoreVersion);
+    m.def("get_project_version", &getCoreProjectVersion);
+    m.def("get_git_hash", &getCoreGitHash);
+}
+}
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..bd90e50b282a1e1c08e0b2acaaddae940b0b4ac5
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,4 @@
+# pbr file
+[metadata]
+name = file: project_name.txt
+version = file: version.txt
diff --git a/setup.py b/setup.py
index 4f2e21711f193eb7d5c37ace7b5ad83ac63d3635..69c5b19d22c5baad843068843bd87c64862dbd23 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,11 @@ from setuptools import setup, Extension
 from setuptools.command.build_ext import build_ext
 
 
-PROJECT_NAME = "aidge_core"
+def get_project_name() -> str:
+    return (pathlib.Path().absolute() / "project_name.txt").read_text().strip()
+
+
+PROJECT_NAME = get_project_name()
 
 SETUP_DIR = pathlib.Path(__file__).parent
 
@@ -33,6 +37,7 @@ class CMakeBuild(build_ext):
         # This lists the number of processors available on the machine
         # The compilation will use half of them
         max_jobs = str(ceil(multiprocessing.cpu_count() / 2))
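+        # The default job count (half the available cores) can be overridden via AIDGE_NB_PROC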
+        max_jobs = os.environ.get("AIDGE_NB_PROC", max_jobs)
 
         cwd = pathlib.Path().absolute()
 
@@ -47,14 +52,19 @@ class CMakeBuild(build_ext):
         package_prefix = build_lib if not self.editable_mode else SETUP_DIR
         pybind_install_prefix = (package_prefix / PROJECT_NAME).absolute()
 
-        os.chdir(str(build_temp))
-
-        compile_type = os.environ.get("AIDGE_PYTHON_BUILD_TYPE", "Release")
         install_path = (
             os.path.join(sys.prefix, "lib", "libAidge")
             if "AIDGE_INSTALL" not in os.environ
             else os.environ["AIDGE_INSTALL"]
         )
+
+        # Read environment variables for CMake options
+        c_compiler = os.environ.get("AIDGE_C_COMPILER", "gcc")
+        cxx_compiler = os.environ.get("AIDGE_CXX_COMPILER", "g++")
+        build_type = os.environ.get("AIDGE_BUILD_TYPE", "Release")
+        asan = os.environ.get("AIDGE_ASAN", "OFF")
+        cmake_arch = os.environ.get("AIDGE_CMAKE_ARCH", "")
+
         build_gen = os.environ.get("AIDGE_BUILD_GEN", "")
         build_gen_opts = (
             ["-G", build_gen]
@@ -63,26 +73,35 @@ class CMakeBuild(build_ext):
         )
         test_onoff = os.environ.get("AIDGE_BUILD_TEST", "OFF")
 
-        self.spawn(
-            [
-                "cmake",
-                *build_gen_opts,
-                str(cwd),
-                f"-DTEST={test_onoff}",
-                f"-DCMAKE_INSTALL_PREFIX:PATH={install_path}",
-                f"-DCMAKE_BUILD_TYPE={compile_type}",
-                "-DPYBIND=ON",
-                f"-DPYBIND_INSTALL_PREFIX:PATH={pybind_install_prefix}",
-                "-DCMAKE_EXPORT_COMPILE_COMMANDS=ON",
-                "-DCOVERAGE=OFF",
-            ]
-        )
+        os.chdir(str(build_temp))
+
+        cmake_cmd = [
+            "cmake",
+            *build_gen_opts,
+            str(cwd),
+            f"-DTEST={test_onoff}",
+            f"-DCMAKE_INSTALL_PREFIX:PATH={install_path}",
+            f"-DCMAKE_BUILD_TYPE={build_type}",
+            f"-DCMAKE_C_COMPILER={c_compiler}",
+            f"-DCMAKE_CXX_COMPILER={cxx_compiler}",
+            f"-DENABLE_ASAN={asan}",
+            "-DPYBIND=ON",
+            f"-DPYBIND_INSTALL_PREFIX:PATH={pybind_install_prefix}",
+            "-DCMAKE_EXPORT_COMPILE_COMMANDS=1",
+            "-DCOVERAGE=OFF",
+        ]
+
+        # Append architecture-specific arguments if provided
+        if cmake_arch:
+            cmake_cmd.append(cmake_arch)
+
+        self.spawn(cmake_cmd)
 
         if not self.dry_run:
             self.spawn(
-                ["cmake", "--build", ".", "--config", compile_type, "-j", max_jobs]
+                ["cmake", "--build", ".", "--config", build_type, "-j", max_jobs]
             )
-            self.spawn(["cmake", "--install", ".", "--config", compile_type])
+            self.spawn(["cmake", "--install", ".", "--config", build_type])
         os.chdir(str(cwd))
 
 
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index c74b538a4e566b3b88e77dd4d097344d52838505..71f4f04b2d73e1501a2d428dd91dc8f85dd17649 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -235,14 +235,20 @@ bool Aidge::OperatorImpl::checkIOSpec(const ImplSpec::IOSpec& required, const Im
 }
 
 std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const {
+    // Original graph is:
+    //                               --> {required IO specs} [node] {required IO specs} -->
+    // Obtained meta-op is:
+    // --> {required IO specs} [adapt inputs] --> {IO specs} [node] {IO specs} --> [adapt outputs] {required IO specs}
+
     auto op = std::static_pointer_cast<OperatorTensor>(mOp.clone());
     auto node = std::make_shared<Node>(op);
+    auto adaptedGraph = std::make_shared<GraphView>();
+    adaptedGraph->add(node);
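+    // The adapted graph initially contains only the cloned node; Cast/Transpose adapter
+    // nodes are inserted around it below whenever the required IO specs differ.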
 
     // Adapt inputs
     for (size_t i = 0; i < requiredSpecs.inputs.size(); ++i) {
         const auto IOSpec = (i < spec.inputs.size()) ? spec.inputs[i] : spec.inputs.back();
         const ImplSpec::IOSpec& requiredIOSpec = requiredSpecs.inputs[i];
-        std::shared_ptr<Node> parent = node;
 
         // Input type
         if (requiredIOSpec.type != DataType::Any
@@ -250,7 +256,9 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
             && requiredIOSpec.type != IOSpec.type)
         {
             const auto cast = Cast(IOSpec.type);
-            cast->addChild(parent, 0, i);
+            cast->getOperator()->setBackend(op->backend());
+            cast->addChild(node, 0, i);
+            adaptedGraph->add(cast);
 
             op->getInput(i)->setDataType(IOSpec.type);
         }
@@ -263,8 +271,10 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
             const auto transpose = getDataFormatTranspose(requiredIOSpec.format, IOSpec.format);
             auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
             transposeOp->getOperator()->setDataFormat(IOSpec.format);
-            transposeOp->getOperator()->setDataType(IOSpec.type);
-            transposeOp->addChild(parent, 0, i);
+            transposeOp->getOperator()->setDataType(requiredIOSpec.type);
+            transposeOp->getOperator()->setBackend(op->backend());
+            transposeOp->addChild(node, 0, i);
+            adaptedGraph->add(transposeOp);
 
             op->getInput(i)->setDataFormat(IOSpec.format);
         }
@@ -293,7 +303,6 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
     for (size_t i = 0; i < requiredSpecs.outputs.size(); ++i) {
         const auto IOSpec = (i < spec.outputs.size()) ? spec.outputs[i] : spec.outputs.back();
         const ImplSpec::IOSpec& requiredIOSpec = requiredSpecs.outputs[i];
-        std::shared_ptr<Node> parent = node;
 
         // Output type
         if (requiredIOSpec.type != DataType::Any
@@ -301,7 +310,9 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
             && requiredIOSpec.type != IOSpec.type)
         {
             const auto cast = Cast(requiredIOSpec.type);
-            parent->addChild(cast, i, 0);
+            cast->getOperator()->setBackend(op->backend());
+            node->addChild(cast, i, 0);
+            adaptedGraph->add(cast);
 
             op->getOutput(i)->setDataType(IOSpec.type);
         }
@@ -315,7 +326,9 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
             auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
             transposeOp->getOperator()->setDataFormat(requiredIOSpec.format);
             transposeOp->getOperator()->setDataType(requiredIOSpec.type);
-            parent->addChild(transposeOp, i, 0);
+            transposeOp->getOperator()->setBackend(op->backend());
+            node->addChild(transposeOp, i, 0);
+            adaptedGraph->add(transposeOp);
 
             op->getOutput(i)->setDataFormat(IOSpec.format);
         }
@@ -340,7 +353,12 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
         }
     }
 
-    return MetaOperator(std::string("Adapted_" + op->type()).c_str(), getConnectedGraphView(node));
+    if (adaptedGraph->getNodes().size() > 1) {
+        return MetaOperator(std::string("Adapted_" + op->type()).c_str(), adaptedGraph);
+    }
+    else {
+        return node;
+    }
 }
 
 std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getBestAdaptation(const ImplSpec& requiredSpecs) const {
@@ -354,8 +372,13 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getBestAdaptation(const ImplSp
         auto adaptation = getAdaptation(availableSpec, requiredSpecs);
 
         if (adaptation) {
-            auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(adaptation->getOperator())->getMicroGraph();
-            adaptations.insert(std::make_pair(adaptation, microGraph->getNodes().size()));
+            if (adaptation->getOperator()->isAtomic()) {
+                adaptations.insert(std::make_pair(adaptation, 1));
+            }
+            else {
+                auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(adaptation->getOperator())->getMicroGraph();
+                adaptations.insert(std::make_pair(adaptation, microGraph->getNodes().size()));
+            }
         }
     }
 
diff --git a/src/backend/cpu/data/TensorImpl.cpp b/src/backend/cpu/data/TensorImpl.cpp
index 506287a0c520915e6426f1f0b64d9c562c754d33..236e5bb8e1e867d5a0dad85571d754bc9e2a2a22 100644
--- a/src/backend/cpu/data/TensorImpl.cpp
+++ b/src/backend/cpu/data/TensorImpl.cpp
@@ -95,6 +95,62 @@ void Aidge::TensorImpl_cpu<T>::copyCast(const void *src, const Aidge::DataType s
             std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
                     dstT);
             break;
+        case DataType::Int4:
+            std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
+                    dstT);
+            break;
+        case DataType::UInt4:
+            std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
+                    dstT);
+            break;
+        case DataType::Dual_Int4:
+            std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
+                    dstT);
+            break;
+        case DataType::Dual_UInt4:
+            std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
+                    dstT);
+            break;
+        case DataType::Int3:
+            std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
+                    dstT);
+            break;
+        case DataType::UInt3:
+            std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
+                    dstT);
+            break;
+        case DataType::Dual_Int3:
+            std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
+                    dstT);
+            break;
+        case DataType::Dual_UInt3:
+            std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
+                    dstT);
+            break;
+        case DataType::Int2:
+            std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
+                    dstT);
+            break;
+        case DataType::UInt2:
+            std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
+                    dstT);
+            break;
+        case DataType::Quad_Int2:
+            std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
+                    dstT);
+            break;
+        case DataType::Quad_UInt2:
+            std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
+                    dstT);
+            break;
+        case DataType::Binary:
+            std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
+                    dstT);
+            break;
+        case DataType::Octo_Binary:
+            std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
+                    dstT);
+            break;
         default:
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
             break;
diff --git a/src/data/Data.cpp b/src/data/Data.cpp
deleted file mode 100644
index 865b4ff2dcff68753fb5a4cc9cafccdd129e3c8a..0000000000000000000000000000000000000000
--- a/src/data/Data.cpp
+++ /dev/null
@@ -1,86 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include "aidge/data/Data.hpp"
-
-bool Aidge::isDataTypeFloatingPoint(const DataType& type) {
-    switch (type) {
-    case DataType::Float64:
-    case DataType::Float32:
-    case DataType::Float16:
-    case DataType::BFloat16:  return true;
-    default:                  return false;
-    }
-    return false;
-}
-
-size_t Aidge::getDataTypeBitWidth(const DataType& type) {
-    switch (type) {
-    case DataType::Float64:   return 64;
-    case DataType::Float32:   return 32;
-    case DataType::Float16:   return 16;
-    case DataType::BFloat16:  return 16;
-    case DataType::Binary:    return 1;
-    case DataType::Ternary:   return 2;
-    case DataType::Int2:      return 2;
-    case DataType::Int3:      return 3;
-    case DataType::Int4:      return 4;
-    case DataType::Int5:      return 5;
-    case DataType::Int6:      return 6;
-    case DataType::Int7:      return 7;
-    case DataType::Int8:      return 8;
-    case DataType::Int16:     return 16;
-    case DataType::Int32:     return 32;
-    case DataType::Int64:     return 64;
-    case DataType::UInt2:     return 2;
-    case DataType::UInt3:     return 3;
-    case DataType::UInt4:     return 4;
-    case DataType::UInt5:     return 5;
-    case DataType::UInt6:     return 6;
-    case DataType::UInt7:     return 7;
-    case DataType::UInt8:     return 8;
-    case DataType::UInt16:    return 16;
-    case DataType::UInt32:    return 32;
-    case DataType::UInt64:    return 64;
-    default:                  return 0;
-    }
-    return 0;
-}
-
-Aidge::DataFormatTranspose Aidge::getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
-    // Permutation array from default format to src format
-    const auto srcDefToFormat = DataFormatTransposeDict[static_cast<int>(src)];
-    // Permutation array from default format to dst format
-    const auto dstDefToFormat = DataFormatTransposeDict[static_cast<int>(dst)];
-    // Compute permutation array from src format to default format:
-    DataFormatTranspose srcFormatToDef{};
-    for (size_t i = 0; i < srcDefToFormat.size(); ++i) {
-        if (srcDefToFormat[i] > 0) {
-            srcFormatToDef[srcDefToFormat[i] - 1] = i;
-        }
-        else {
-            srcFormatToDef[i] = i;
-        }
-    }
-
-    // Compute permutation array from src format to dst format:
-    DataFormatTranspose srcToDst{};
-    for (size_t i = 0; i < dstDefToFormat.size(); ++i) {
-        if (dstDefToFormat[srcFormatToDef[i]] > 0) {
-            srcToDst[i] = dstDefToFormat[srcFormatToDef[i]] - 1;
-        }
-        else {
-            srcToDst[i] = i;
-        }
-    }
-
-    return srcToDst;
-}
diff --git a/src/data/DataFormat.cpp b/src/data/DataFormat.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..466da86c469d89e5f1f4fc0895223513783b801c
--- /dev/null
+++ b/src/data/DataFormat.cpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/data/DataFormat.hpp"
+
+Aidge::DataFormatTranspose Aidge::getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
+    // Permutation array from default format to src format
+    const auto srcDefToFormat = DataFormatTransposeDict[static_cast<int>(src)];
+    // Permutation array from default format to dst format
+    const auto dstDefToFormat = DataFormatTransposeDict[static_cast<int>(dst)];
+    // Compute permutation array from src format to default format:
+    DataFormatTranspose srcFormatToDef{};
+    for (size_t i = 0; i < srcDefToFormat.size(); ++i) {
+        if (srcDefToFormat[i] > 0) {
+            srcFormatToDef[srcDefToFormat[i] - 1] = i;
+        }
+        else {
+            srcFormatToDef[i] = i;
+        }
+    }
+
+    // Compute permutation array from src format to dst format:
+    DataFormatTranspose srcToDst{};
+    for (size_t i = 0; i < dstDefToFormat.size(); ++i) {
+        if (dstDefToFormat[srcFormatToDef[i]] > 0) {
+            srcToDst[i] = dstDefToFormat[srcFormatToDef[i]] - 1;
+        }
+        else {
+            srcToDst[i] = srcFormatToDef[i];
+        }
+    }
+
+    return srcToDst;
+}
diff --git a/src/data/DataType.cpp b/src/data/DataType.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d012d2f45a26541a4a21714ffd2c0236e878559d
--- /dev/null
+++ b/src/data/DataType.cpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/data/DataType.hpp"
+
+#include <cstddef>  // std::size_t
+
+std::size_t Aidge::getDataTypeBitWidth(const Aidge::DataType& type) {
+    switch (type) {
+        case DataType::Float64:   return 64;
+        case DataType::Float32:   return 32;
+        case DataType::Float16:   return 16;
+        case DataType::BFloat16:  return 16;
+        case DataType::Binary:    return 1;
+        case DataType::Ternary:   return 2;
+        case DataType::Int2:      return 2;
+        case DataType::Int3:      return 3;
+        case DataType::Int4:      return 4;
+        case DataType::Int5:      return 5;
+        case DataType::Int6:      return 6;
+        case DataType::Int7:      return 7;
+        case DataType::Int8:      return 8;
+        case DataType::Int16:     return 16;
+        case DataType::Int32:     return 32;
+        case DataType::Int64:     return 64;
+        case DataType::UInt2:     return 2;
+        case DataType::UInt3:     return 3;
+        case DataType::UInt4:     return 4;
+        case DataType::UInt5:     return 5;
+        case DataType::UInt6:     return 6;
+        case DataType::UInt7:     return 7;
+        case DataType::UInt8:     return 8;
+        case DataType::UInt16:    return 16;
+        case DataType::UInt32:    return 32;
+        case DataType::UInt64:    return 64;
+        default:                  return 0;
+    }
+}
\ No newline at end of file
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index c834167abe15fb8a7ce96053a87a958b7515fe17..b128833c9099385c72f25057400a65a6b9034c32 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -28,6 +28,12 @@
 
 namespace Aidge {
 
+Tensor::Tensor(const Tensor& other) = default;
+Tensor::Tensor(Tensor&& other) = default;
+
+Tensor& Tensor::operator=(const Tensor& other) = default;
+Tensor& Tensor::operator=(Tensor&& other) = default;
+
 Tensor::~Tensor() noexcept = default;
 
 
@@ -306,115 +312,164 @@ void Tensor::resize(const std::vector<DimSize_t>& dims,
     }
 }
 
-std::string Tensor::toString() const {
-
+std::string Tensor::toString(int precision, std::size_t offset) const {
     if (!hasImpl() || undefined()) {
-        // Return no value on no implementation or undefined size
-        return std::string("{}");
-    }
-
-    // TODO: move lambda elsewhere?
-    auto ptrToString = [](DataType dt, void* ptr, std::size_t idx) {
-        switch (dt) {
-            case DataType::Float64:
-                return std::to_string(static_cast<double*>(ptr)[idx]);
-            case DataType::Float32:
-                return std::to_string(static_cast<float*>(ptr)[idx]);
-            case DataType::Float16:
-                return std::to_string(static_cast<half_float::half*>(ptr)[idx]);
-            case DataType::Int8:
-                return std::to_string(static_cast<int8_t*>(ptr)[idx]);
-            case DataType::Int16:
-                return std::to_string(static_cast<int16_t*>(ptr)[idx]);
-            case DataType::Int32:
-                return std::to_string(static_cast<int32_t*>(ptr)[idx]);
-            case DataType::Int64:
-                return std::to_string(static_cast<int64_t*>(ptr)[idx]);
-            case DataType::UInt8:
-                return std::to_string(static_cast<uint8_t*>(ptr)[idx]);
-            case DataType::UInt16:
-                return std::to_string(static_cast<uint16_t*>(ptr)[idx]);
-            case DataType::UInt32:
-                return std::to_string(static_cast<uint32_t*>(ptr)[idx]);
-            case DataType::UInt64:
-                return std::to_string(static_cast<uint64_t*>(ptr)[idx]);
-            default:
-                AIDGE_ASSERT(true, "unsupported type to convert to string");
-        }
-        return std::string("?");  // To make Clang happy
-    };
+        return "{}";
+    }
+
+    // Use default precision if no override provided
+    precision = (precision >= 0) ? precision : Log::getPrecision();
+
+    // Create a type-specific formatter function upfront
+    std::function<std::string(void*, std::size_t)> formatter;
+
+    switch (mDataType) {
+        case DataType::Float64:
+            formatter = [precision](void* ptr, std::size_t idx) {
+                return fmt::format("{:.{}f}", static_cast<cpptype_t<DataType::Float64>*>(ptr)[idx], precision);
+            };
+            break;
+        case DataType::Float32:
+            formatter = [precision](void* ptr, std::size_t idx) {
+                return fmt::format("{:.{}f}", static_cast<cpptype_t<DataType::Float32>*>(ptr)[idx], precision);
+            };
+            break;
+        case DataType::Float16:
+            formatter = [precision](void* ptr, std::size_t idx) {
+                return fmt::format("{:.{}f}", static_cast<cpptype_t<DataType::Float16>*>(ptr)[idx], precision);
+            };
+            break;
+        case DataType::Binary:
+        case DataType::Octo_Binary:
+        case DataType::Dual_Int4:
+        case DataType::Dual_Int3:
+        case DataType::Dual_UInt3:
+        case DataType::Quad_Int2:
+        case DataType::Quad_UInt2:
+        case DataType::Int4:
+        case DataType::UInt4:
+        case DataType::Int3:
+        case DataType::UInt3:
+        case DataType::Int2:
+        case DataType::UInt2:
+        case DataType::Int8:
+            formatter = [](void* ptr, std::size_t idx) {
+                return fmt::format("{}", static_cast<cpptype_t<DataType::Int32>>(static_cast<cpptype_t<DataType::Int8>*>(ptr)[idx]));
+            };
+            break;
+        case DataType::Dual_UInt4:
+        case DataType::UInt8:
+            formatter = [](void* ptr, std::size_t idx) {
+                return fmt::format("{}", static_cast<cpptype_t<DataType::UInt32>>(static_cast<cpptype_t<DataType::UInt8>*>(ptr)[idx]));
+            };
+            break;
+        case DataType::Int16:
+            formatter = [](void* ptr, std::size_t idx) {
+                return fmt::format("{}", static_cast<cpptype_t<DataType::Int16>*>(ptr)[idx]);
+            };
+            break;
+        case DataType::Int32:
+            formatter = [](void* ptr, std::size_t idx) {
+                return fmt::format("{}", static_cast<cpptype_t<DataType::Int32>*>(ptr)[idx]);
+            };
+            break;
+        case DataType::Int64:
+            formatter = [](void* ptr, std::size_t idx) {
+                return fmt::format("{}", static_cast<cpptype_t<DataType::Int64>*>(ptr)[idx]);
+            };
+            break;
+        case DataType::UInt16:
+            formatter = [](void* ptr, std::size_t idx) {
+                return fmt::format("{}", static_cast<cpptype_t<DataType::UInt16>*>(ptr)[idx]);
+            };
+            break;
+        case DataType::UInt32:
+            formatter = [](void* ptr, std::size_t idx) {
+                return fmt::format("{}", static_cast<cpptype_t<DataType::UInt32>*>(ptr)[idx]);
+            };
+            break;
+        case DataType::UInt64:
+            formatter = [](void* ptr, std::size_t idx) {
+                return fmt::format("{}", static_cast<cpptype_t<DataType::UInt64>*>(ptr)[idx]);
+            };
+            break;
+        default:
+            AIDGE_ASSERT(true, "unsupported type to convert to string");
+            return "{}";
+    }
 
     if (dims().empty()) {
-        // The Tensor is defined with rank 0, hence scalar
-        return ptrToString(mDataType, mImpl->hostPtr(), 0);
-    }
-
-    std::string res;
-    std::size_t dim = 0;
-    std::size_t counter = 0;
-    if (nbDims() >= 2) {
-        std::vector<std::size_t> dimVals(nbDims(), 0);
-        res += "{\n";
-        while (counter < mSize) {
-            std::string spaceString = std::string((dim + 1) << 1, ' ');
-            if (dim < nbDims() - 2) {
-                if (dimVals[dim] == 0) {
-                    res += spaceString + "{\n";
-                    ++dim;
-                } else if (dimVals[dim] <
-                           static_cast<std::size_t>(dims()[dim])) {
-                    res += spaceString + "},\n" + spaceString + "{\n";
-                    ++dim;
-                } else {
-                    res += spaceString + "}\n";
-                    dimVals[dim--] = 0;
-                    dimVals[dim]++;
-                }
-            } else {
-                for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]);
-                     ++dimVals[dim]) {
-                    res += spaceString + "{";
-                    for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
-                        res +=
-                            " " +
-                            ptrToString(mDataType, mImpl->hostPtr(mImplOffset),
-                                        counter++) +
-                            ",";
-                    }
-                    res += " " +
-                           ptrToString(mDataType, mImpl->hostPtr(mImplOffset),
-                                       counter++) +
-                           "}";
-                    if (dimVals[dim] <
-                        static_cast<std::size_t>(dims()[dim] - 1)) {
-                        res += ",";
-                    }
-                    res += "\n";
-                }
-                if (dim == 0) {
-                    break;
-                }
-                dimVals[dim--] = 0;
-                dimVals[dim]++;
+        return formatter(mImpl->hostPtr(), 0);
+    }
+
+    void* dataPtr = mImpl->hostPtr(mImplOffset);
+
+    // Calculate maximum width across all elements
+    std::size_t maxWidth = 0;
+    for (std::size_t i = 0; i < mSize; ++i) {
+        std::string value = formatter(dataPtr, i);
+        maxWidth = std::max(maxWidth, value.length());
+    }
+
+    // Initialize variables similar to Python version
+    std::vector<std::size_t> indexCoord(nbDims(), 0);
+    const std::size_t initialDepth = nbDims() > 1 ? nbDims() - 2 : 0;
+    std::size_t depth = initialDepth;
+    std::size_t nbBrackets = nbDims() - 1;
+    std::size_t index = 0;
+
+    // Calculate number of lines (product of all dimensions except last)
+    std::size_t nbLines = 1;
+    for (std::size_t d = 0; d < nbDims() - 1; ++d) {
+        nbLines *= dims()[d];
+    }
+
+    std::string result = "{";  // Using { instead of [ for C++ style
+
+    for (std::size_t l = 0; l < nbLines; ++l) {
+        // Add spacing and opening braces
+        if (l != 0) {
+            result += std::string(1 + offset, ' ');
+        }
+        result += std::string(nbDims() - 1 - nbBrackets, ' ') +
+                 std::string(nbBrackets, '{');
+
+        // Print numbers of a line
+        for (DimSize_t i = 0; i < dims().back(); ++i) {
+            std::string value = formatter(dataPtr, index);
+            result += std::string(1 + maxWidth - value.length(), ' ') + value;
+            if (i + 1 < dims().back()) {
+                result += ',';
             }
+            ++index;
         }
-        if (nbDims() != 2) {  // If nbDims == 2, parenthesis is already closed
-            for (int i = static_cast<int>(dim); i >= 0; --i) {
-                res += std::string((i + 1) << 1, ' ') + "}\n";
+
+        // Check for end
+        if (index == mSize) {
+            result += std::string(nbDims(), '}');
+            return result;
+        } else {
+            // Update coordinates and depth
+            while (indexCoord[depth] + 1 >= static_cast<std::size_t>(dims()[depth])) {
+                indexCoord[depth] = 0;
+                --depth;
             }
+            ++indexCoord[depth];
+            nbBrackets = initialDepth - depth + 1;
+            depth = initialDepth;
         }
-    } else {
-        res += "{";
-        for (DimSize_t j = 0; j < dims()[0]; ++j) {
-            res += " " +
-                   ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) +
-                   ((j < dims()[0] - 1) ? "," : " ");
+
+        // Add closing braces and newlines
+        result += std::string(nbBrackets, '}') + ",\n";
+        if (nbBrackets > 1) {
+            result += '\n';
         }
     }
-    res += "}";
-    return res;
+
+    return result;
 }
 
+
 Tensor Tensor::extract(
     const std::vector<std::size_t>& fixedCoord) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
@@ -538,6 +593,8 @@ void Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& tran
         }
     }
 
+    AIDGE_ASSERT(src.getImpl(), "Tensor::copyTranspose(): an implementation is required for src Tensor!");
+    AIDGE_ASSERT(mImpl, "Tensor::copyTranspose(): an implementation is required, use setBackend() first!");
     std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, newDims);
 
     std::vector<size_t> indices(newDims.size(), 0);
diff --git a/src/filler/ConstantFiller.cpp b/src/filler/ConstantFiller.cpp
index b2118866f92290103d50290085c7675215a4d997..82ac42293bcb6956ba6bc4d2896f5ffc33286fba 100644
--- a/src/filler/ConstantFiller.cpp
+++ b/src/filler/ConstantFiller.cpp
@@ -23,7 +23,7 @@ template<typename T>
 void Aidge::constantFiller(std::shared_ptr<Aidge::Tensor> tensor, T constantValue) {
     AIDGE_ASSERT(tensor->getImpl(),
                  "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    AIDGE_ASSERT(NativeType_v<T> == tensor->dataType(), "Wrong data type");
 
     std::shared_ptr<Aidge::Tensor> cpyTensor;
     // Create cpy only if tensor not on CPU
diff --git a/src/filler/HeFiller.cpp b/src/filler/HeFiller.cpp
index ff20b76183c03e7ac90b5c225b3da7a8c6ffb2df..866e6581f3245d4324f5796a571c35c9c46a703d 100644
--- a/src/filler/HeFiller.cpp
+++ b/src/filler/HeFiller.cpp
@@ -20,7 +20,7 @@ void Aidge::heFiller(std::shared_ptr<Aidge::Tensor> tensor,
                      Aidge::VarianceNorm varianceNorm, T meanNorm, T scaling) {
     AIDGE_ASSERT(tensor->getImpl(),
                  "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    AIDGE_ASSERT(NativeType_v<T> == tensor->dataType(), "Wrong data type");
 
     unsigned int fanIn, fanOut = 0;
     Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);
diff --git a/src/filler/NormalFiller.cpp b/src/filler/NormalFiller.cpp
index f30b32431cf466b10c1b10df8e0e5ccec9f483b6..334c0c12d3e821bf371e32c818acc5e8a3c85ea0 100644
--- a/src/filler/NormalFiller.cpp
+++ b/src/filler/NormalFiller.cpp
@@ -20,7 +20,7 @@ void Aidge::normalFiller(std::shared_ptr<Aidge::Tensor> tensor, double mean,
                          double stdDev) {
     AIDGE_ASSERT(tensor->getImpl(),
                  "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    AIDGE_ASSERT(NativeType_v<T> == tensor->dataType(), "Wrong data type");
 
     std::normal_distribution<T> normalDist(mean, stdDev);
 
diff --git a/src/filler/UniformFiller.cpp b/src/filler/UniformFiller.cpp
index 1951fcc623612bd688048fcc5fb71526032b2a4a..22dc479b63045d35db8f7319b06a464f191f920e 100644
--- a/src/filler/UniformFiller.cpp
+++ b/src/filler/UniformFiller.cpp
@@ -20,7 +20,7 @@ template <typename T>
 void Aidge::uniformFiller(std::shared_ptr<Aidge::Tensor> tensor, T min, T max) {
     AIDGE_ASSERT(tensor->getImpl(),
                  "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type {} and {}",NativeType<T>::type, tensor->dataType());
+    AIDGE_ASSERT(NativeType_v<T> == tensor->dataType(), "Wrong data type {} and {}",NativeType_v<T>, tensor->dataType());
 
 
      using DistType = typename std::conditional<
diff --git a/src/filler/XavierFiller.cpp b/src/filler/XavierFiller.cpp
index 734874d449c83087ca0e93df7eeb620e178ee7ba..6856ab218f7d78e9b0ed3331a1424734bf0e573a 100644
--- a/src/filler/XavierFiller.cpp
+++ b/src/filler/XavierFiller.cpp
@@ -20,7 +20,7 @@ void Aidge::xavierUniformFiller(std::shared_ptr<Aidge::Tensor> tensor,
                                 T scaling, Aidge::VarianceNorm varianceNorm) {
     AIDGE_ASSERT(tensor->getImpl(),
                  "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    AIDGE_ASSERT(NativeType_v<T> == tensor->dataType(), "Wrong data type");
 
     unsigned int fanIn, fanOut = 0;
     Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);
@@ -54,7 +54,7 @@ void Aidge::xavierNormalFiller(std::shared_ptr<Aidge::Tensor> tensor, T scaling,
                                Aidge::VarianceNorm varianceNorm) {
     AIDGE_ASSERT(tensor->getImpl(),
                  "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    AIDGE_ASSERT(NativeType_v<T> == tensor->dataType(), "Wrong data type");
 
     unsigned int fanIn, fanOut = 0;
     Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 465359757eadd2799aa7f272e2d85b032a60cfdd..e1a520865e38b50bbae268b72d420f947ba6885f 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -105,6 +105,10 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
                 ? "<em>" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + "</em>"
                 : "\"" + node_ptr->name() + "<br/><sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
 
+        if (verbose) {
+          givenName += "<br/><span style='color:white; background-color: purple; float: right'>" + node_ptr->getOperator()->backend() + "</span>";
+        }
+
         std::string nodeCls = "";
         if (node_ptr->type() == "Producer") {
           nodeCls = ":::producerCls";
@@ -182,13 +186,14 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
     for (const auto& input : mInputNodes) {
       if (input.first != nullptr) {
         const auto& op_ = std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator());
+        const std::string inputLinkArrow = op_->isOptionalInput(input.second) ? "-.->" : "--->";
         if (op_->getInput(input.second) && (!op_->getInput(input.second)->undefined())) {
             std::string dims = " " + fmt::format("{}", op_->getInput(input.second)->dims());
             std::string dtype = " " + fmt::format("{}", op_->getInput(input.second)->dataType());
-            fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|\"{}{}<br/>&darr;<br/>{}\"|{}_{}\n", inputIdx, inputIdx,
+            fmt::print(fp.get(), "input{}((in#{})):::inputCls{}|\"{}{}<br/>&darr;<br/>{}\"|{}_{}\n", inputIdx, inputIdx, inputLinkArrow,
                     dims, dtype, input.second, input.first->type(), namePtrTable.at(input.first));
         } else {
-            fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|\"&darr;<br/>{}\"|{}_{}\n", inputIdx, inputIdx,
+            fmt::print(fp.get(), "input{}((in#{})):::inputCls{}|\"&darr;<br/>{}\"|{}_{}\n", inputIdx, inputIdx, inputLinkArrow,
                     input.second, input.first->type(), namePtrTable.at(input.first));
         }
       }
@@ -238,7 +243,7 @@ void Aidge::GraphView::setNodesName() const {
     for (const auto& nodePtr: getNodes()) {
         const std::string& t = nodePtr->getOperator()->type();
         typeIds.emplace(t, 0);
-        const std::string nodeName = name() + std::string("_") + t + std::string("#") + std::to_string(typeIds[t]++);
+        const std::string nodeName = name() + std::string("_") + t + std::string("_") + std::to_string(typeIds[t]++);
         nodePtr->setName(nodeName);
     }
 }
@@ -331,7 +336,6 @@ void Aidge::GraphView::setOrderedInputs(const std::vector<std::pair<NodePtr, IOI
 
 void Aidge::GraphView::setOrderedOutputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& outputs) {
   // Note: one can specify any node as graph output!
-  size_t nbOutputs = 0;
   std::vector<std::pair<NodePtr, IOIndex_t>> ignoredOutputs(mOutputNodes);
   for (auto output : outputs) {
     // Allow to specify dummy outputs (nullptr), but this will only be reflected
@@ -342,7 +346,6 @@ void Aidge::GraphView::setOrderedOutputs(const std::vector<std::pair<NodePtr, IO
       if (it != ignoredOutputs.end()) {
         ignoredOutputs.erase(it);
       }
-      ++nbOutputs;
     }
   }
 
@@ -434,23 +437,32 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
 }
 
 bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>>& dims, bool allowDataDependency) {
+    Log::debug("Starting dimension forward propagation for GraphView");
     // remove current Data connections and use dummy inputs to propagate dimensions
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children information
     if (!dims.empty()){
-      Log::debug("forwardDims(): setting graph input dims ({} dims provided).", dims.size());
+      auto msg = fmt::format("Manually setting GraphView input dims with provided parameters:");
+      for (std::size_t i = 0; i < dims.size(); ++i)
+        msg = fmt::format("{}\n\t* input#{} {}", msg, i, dims[i]);
+      Log::info("{}", msg);
 
+      Log::debug("Validating input dimensions against existing graph inputs");
       std::size_t i = 0;
       for (auto& input : mInputNodes) {
         const auto& currentTensorPtr =
             std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator())->getInput(input.second);
         if (i < dims.size() && !dims[i].empty()) {
           if (currentTensorPtr) { // tensor detected
-              AIDGE_ASSERT(currentTensorPtr->dims() == dims[i],
-                "forwardDims(): mismatch between existing and provided size for graph input#{} (existing size: {}, provided size: {})",
-                i, currentTensorPtr->dims(), dims[i])
+            if (currentTensorPtr->dims() != dims[i]) {
+              Log::error("Dimension mismatch for input#{} - Expected: {}, Provided: {}",
+                         i, currentTensorPtr->dims(), dims[i]);
+              return false;
+            }
+            Log::debug("Input#{} dimensions match existing tensor", i);
           } else {
+              Log::debug("Creating new tensor for input#{} with dims {}", i, dims[i]);
               auto tensor = std::make_shared<Tensor>(dims[i]);
               input.first->getOperator()->setInput(input.second, tensor);
           }
@@ -460,12 +472,12 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
             || input.first->inputCategory(input.second) == InputCategory::OptionalParam);
 
           if (currentTensorPtr) {
-            Log::debug("forwardDims(): existing dims are {} for graph input#{} for input#{} of node {} (of type {})",
-              i, input.second, input.first->name(), input.first->type(), currentTensorPtr->dims());
+            Log::debug("Using existing dimensions {} for graph input#{} (matching input#{} of node [\033[1m\033[3m{}\033[0m] - [\033[1m\033[3m{}\033[0m])",
+                    currentTensorPtr->dims(), i, input.second, input.first->name(), input.first->type());
           }
           else if (!optional) {
-            Log::warn("forwardDims(): did not specify dims for mandatory graph input#{} for input#{} of node {} (of type {})",
-              i, input.second, input.first->name(), input.first->type());
+            Log::warn("Missing dimensions for mandatory graph input#{} (matching input#{} of node [\033[1m\033[3m{}\033[0m] - [\033[1m\033[3m{}\033[0m])",
+                    i, input.second, input.first->name(), input.first->type());
           }
         }
         ++i;
@@ -473,29 +485,35 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
     }
 
     // Ensure every node in the graph is correctly connected
+    Log::debug("Verifying graph connections and tensor validity");
     for (std::shared_ptr<Node> nodePtr : getNodes()) {
         for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
             std::pair<std::shared_ptr<Node>, IOIndex_t> inputI = nodePtr->input(i);
             if (inputI.first) {
-                // Check that associated Data are properly connected...
-                AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i) == inputI.first->getOperator()->getRawOutput(inputI.second),
-                  "Input#{} for node {} ({}) is not properly connected to output#{} of node {} ({}): Data or Tensor mismatch!",
-                    i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
-            } else if (nodePtr->inputCategory(i) != InputCategory::OptionalData && nodePtr->inputCategory(i) != InputCategory::OptionalParam) {
-                // Input is missing
-                AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i),
-                  "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
-                AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined(),
-                  "Undefined input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
+                if (nodePtr->getOperator()->getRawInput(i) != inputI.first->getOperator()->getRawOutput(inputI.second)) {
+                    Log::error("Connection mismatch: Input#{} of node [\033[1m\033[3m{}\033[0m (\033[1m\033[3m{}\033[0m)] -> Output#{} of node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]",
+                        i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
+                    return false;
+                }
+            } else if (nodePtr->inputCategory(i) != InputCategory::OptionalData &&
+                    nodePtr->inputCategory(i) != InputCategory::OptionalParam) {
+                if (!nodePtr->getOperator()->getRawInput(i)) {
+                    Log::error("Missing mandatory input#{} for node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]",
+                        i, nodePtr->name(), nodePtr->type());
+                    return false;
+                }
+                if (std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined()) {
+                    Log::error("Undefined mandatory input#{} for node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]",
+                        i, nodePtr->name(), nodePtr->type());
+                    return false;
+                }
             }
-
         }
     }
 
-    // List of nodes that are already dims forwarded
-    std::set<std::shared_ptr<Node>> dimsForwarded;
-    // Establish initial list of dims forwardable nodes:
-    // input nodes and childs from Producers
+    Log::debug("Initializing dimension propagation");
+    // Establish initial list of dims-forwardable nodes: graph input nodes + children of Producer nodes
+    std::set<std::shared_ptr<Node>> dimsForwarded; ///< List of nodes that are already dims forwarded
     std::set<std::shared_ptr<Node>> listNodes = inputNodes();
     for (const auto& nodePtr : getNodes()) {
         if (nodePtr->type() == Producer_Op::Type) {
@@ -940,6 +958,22 @@ void Aidge::GraphView::addChild(
   add(toOtherView);
 }
 
+void Aidge::GraphView::updateNodeName(const std::shared_ptr<Node>& node, const std::string& newName) {
+    if (!newName.empty()) {
+        auto itNew = mNodeRegistry.insert(std::make_pair(newName, node));
+        if (!itNew.second) {
+            Log::notice("Replacing existing node name in graph node name registry: {}", newName);
+            (itNew.first)->second = node;
+        }
+    }
+
+    if (!node->name().empty()) {
+        const auto it = mNodeRegistry.find(node->name());
+        AIDGE_ASSERT(it != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted!", node->name(), name());
+        mNodeRegistry.erase(it);
+    }
+}
+
 std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents() const {
   // TODO: choose if we return a set or a vector
   std::set<std::shared_ptr<Node>> parents;
diff --git a/src/graph/Matching.cpp b/src/graph/Matching.cpp
index 4a62019a7aa044ebcf2089d91f3ba097d85218e7..282ed20207f3c91637b8778c77ed1c97d11b0ee9 100644
--- a/src/graph/Matching.cpp
+++ b/src/graph/Matching.cpp
@@ -1,11 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
 #include "aidge/graph/Matching.hpp"
 
+#include <algorithm>   // std::find_if
+#include <cctype>      // std::isspace
+#include <cstddef>     // std::size_t
+#include <memory>
+#include <set>
+#include <string>      // std::stoi
+#include <utility>     // std::pair
+#include <vector>
+
 #include <fmt/color.h>
 
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Log.hpp"
+
+static void removeLeadingWhitespace(std::string& str) {
+    str.erase(str.begin(),
+        std::find_if(str.begin(),
+                    str.end(),
+                    [](char c) { return !std::isspace(c); }));
+}
+
+////////////////////////////////////////////////////////////
+
 Aidge::SinglePassGraphMatching::Context::Context() = default;
 Aidge::SinglePassGraphMatching::Context::Context(const Context& other) = default;
 Aidge::SinglePassGraphMatching::Context& Aidge::SinglePassGraphMatching::Context::operator=(const Context& other) = default;
-Aidge::SinglePassGraphMatching::Context::~Context() = default;
+Aidge::SinglePassGraphMatching::Context::~Context() noexcept = default;
 
 ////////////////////////////////////////////////////////////
 
@@ -35,7 +68,7 @@ std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphM
     std::set<MatchingResult> matches;
 
     while (matchSequence(ctx, matches) || matchNodeOrBlock(ctx, matches)) {
-        removeWhiteSpace(ctx.query);
+        removeLeadingWhitespace(ctx.query);
         if (!ctx.query.empty() && ctx.query[0] == ';') {
             ctx.query.erase(0, 1);
         }
@@ -44,7 +77,7 @@ std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphM
         }
     }
 
-    removeWhiteSpace(ctx.query);
+    removeLeadingWhitespace(ctx.query);
     if (!ctx.query.empty()) {
         Log::warn("Syntax error, unable to parse remaining query: {}", ctx.query);
     }
@@ -52,18 +85,18 @@ std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphM
     if (disjoint) {
         matches = filterLonguestDisjoint(matches);
     }
-
+    Log::info("Graph matching complete.\nFound {} matches for the query", matches.size());
     return matches;
 }
 
-Aidge::SinglePassGraphMatching::MatchingResult Aidge::SinglePassGraphMatching::matchFrom(NodePtr startNode, const std::string& query) {
+Aidge::SinglePassGraphMatching::MatchingResult Aidge::SinglePassGraphMatching::matchFrom(std::shared_ptr<Node> startNode, const std::string& query) {
     Context ctx;
     ctx.query = query;
     ctx.startNode = startNode;
     std::set<MatchingResult> matches;
 
     while (matchSequence(ctx, matches) || matchNodeOrBlock(ctx, matches)) {
-        removeWhiteSpace(ctx.query);
+        removeLeadingWhitespace(ctx.query);
         if (!ctx.query.empty() && ctx.query[0] == ';') {
             ctx.query.erase(0, 1);
         }
@@ -72,7 +105,7 @@ Aidge::SinglePassGraphMatching::MatchingResult Aidge::SinglePassGraphMatching::m
         }
     }
 
-    removeWhiteSpace(ctx.query);
+    removeLeadingWhitespace(ctx.query);
     if (!ctx.query.empty()) {
         Log::warn("Syntax error, unable to parse remaining query: {}", ctx.query);
     }
@@ -123,8 +156,8 @@ bool Aidge::SinglePassGraphMatching::matchNodeOrBlock(Context& ctx, std::set<Mat
 
     // QUANTIFIER?
     bool matchMore = false;
-    size_t matchQuantity = 0;
-    removeWhiteSpace(newCtx.query);
+    std::size_t matchQuantity = 0;
+    removeLeadingWhitespace(newCtx.query);
     if (!newCtx.query.empty() && (newCtx.query[0] == '?' || newCtx.query[0] == '*')) {
         AIDGE_ASSERT(!(ctx.firstSequence && ctx.firstNode),
             "Ill-formed query; the root node cannot be optional in query at: {}", ctx.query);
@@ -155,7 +188,7 @@ bool Aidge::SinglePassGraphMatching::matchNodeOrBlock(Context& ctx, std::set<Mat
     else if (!newCtx.query.empty() && newCtx.query[0] == '{') {
         newCtx.query.erase(0, 1);
 
-        removeWhiteSpace(newCtx.query);
+        removeLeadingWhitespace(newCtx.query);
         const auto endQuantity = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return !isdigit(c); });
         if (endQuantity != newCtx.query.begin()) {
@@ -172,7 +205,7 @@ bool Aidge::SinglePassGraphMatching::matchNodeOrBlock(Context& ctx, std::set<Mat
             return false;
         }
 
-        removeWhiteSpace(newCtx.query);
+        removeLeadingWhitespace(newCtx.query);
         if (!newCtx.query.empty() && newCtx.query[0] == '}') {
             newCtx.query.erase(0, 1);
         }
@@ -231,7 +264,7 @@ bool Aidge::SinglePassGraphMatching::matchBlock(Context& ctx, std::set<MatchingR
     ++newCtx.depth;
 
     // '('
-    removeWhiteSpace(newCtx.query);
+    removeLeadingWhitespace(newCtx.query);
     if (!newCtx.query.empty() && newCtx.query[0] == '(') {
         newCtx.query.erase(0, 1);
     }
@@ -252,7 +285,7 @@ bool Aidge::SinglePassGraphMatching::matchBlock(Context& ctx, std::set<MatchingR
     }
 
     // ')'
-    removeWhiteSpace(newCtx.query);
+    removeLeadingWhitespace(newCtx.query);
     if (!newCtx.query.empty() && newCtx.query[0] == ')') {
         newCtx.query.erase(0, 1);
     }
@@ -337,7 +370,7 @@ bool Aidge::SinglePassGraphMatching::matchParallel(Context& ctx, std::set<Matchi
     while (true) {
         // ('&' NODE_OR_BLOCK)+
         //   '&'
-        removeWhiteSpace(newCtx.query);
+        removeLeadingWhitespace(newCtx.query);
         if (!newCtx.query.empty() && newCtx.query[0] == '&') {
             newCtx.query.erase(0, 1);
             found = true;
@@ -402,7 +435,7 @@ bool Aidge::SinglePassGraphMatching::matchAlternative(Context& ctx, std::set<Mat
     while (true) {
         // ('|' NODE_OR_BLOCK)+
         //    '|'
-        removeWhiteSpace(newCtx.query);
+        removeLeadingWhitespace(newCtx.query);
         if (!newCtx.query.empty() && newCtx.query[0] == '|') {
             newCtx.query.erase(0, 1);
             found = true;
@@ -446,7 +479,7 @@ bool Aidge::SinglePassGraphMatching::matchEdge(Context& ctx, std::set<MatchingRe
     Log::debug("{}edge", std::string(2*newCtx.depth, ' '));
 
     // ('-' | '~') or '<'
-    removeWhiteSpace(newCtx.query);
+    removeLeadingWhitespace(newCtx.query);
     if (!newCtx.query.empty() && (newCtx.query[0] == '-' || newCtx.query[0] == '~')) {
         newCtx.singleOutput = (newCtx.query[0] == '-');
         newCtx.query.erase(0, 1); // drop '-'
@@ -550,7 +583,7 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     auto newMatches = matches;
 
     // (TYPE | '.' | '$')
-    removeWhiteSpace(newCtx.query);
+    removeLeadingWhitespace(newCtx.query);
     if (newCtx.query.empty()) {
         Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
         return false;
@@ -833,3 +866,8 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     matches = newMatches;
     return true;
 }
+
+bool Aidge::operator<(const Aidge::SinglePassGraphMatching::MatchingResult& lhs, const Aidge::SinglePassGraphMatching::MatchingResult& rhs) {
+    // Matching rootNode are guaranteed to be different!
+    return lhs.graph->rootNode() < rhs.graph->rootNode();
+}
\ No newline at end of file
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index da6d833f3aa933cd5e707814c279142de5bc4a23..692806dc7c85e5512e9318008d7277881a006232 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -35,6 +35,7 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttribute
     mForward.push_back([this](){ this->mOperator->forward(); return true; });
     // mForward.push_back(std::bind(&Operator::forward, mOperator.get()));
     mBackward.push_back([this](){ this->mOperator->backward(); return true; });
+    op->setInheritedAttrs(attrs);
 }
 
 // Aidge::Node::Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs)
@@ -200,10 +201,15 @@ Aidge::Node::outputs() const {
 std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> Aidge::Node::output(
         Aidge::IOIndex_t outId) const {
     std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> listOutputs =
-            std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(mIdInChildren[outId].size());
+            std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>();
     for (std::size_t i = 0; i < mIdInChildren[outId].size(); ++i) {
-        listOutputs[i] = std::pair<std::shared_ptr<Node>, IOIndex_t>(mChildren[outId][i].lock(),
-                                                                     mIdInChildren[outId][i]);
+        if (std::shared_ptr<Node> child = mChildren[outId][i].lock()) {
+            listOutputs.push_back(std::pair<std::shared_ptr<Node>, IOIndex_t>(child,
+                                                                        mIdInChildren[outId][i]));
+        }
+        else {
+            Log::warn("Node::output(): dangling connection at index #{} of output #{} for node {} (of type {})", i, outId, name(), type());
+        }
     }
     return listOutputs;
 }
@@ -225,6 +231,16 @@ Aidge::IOIndex_t Aidge::Node::nbValidOutputs() const {
     return counter;
 }
 
+std::set<std::shared_ptr<Aidge::GraphView>> Aidge::Node::views() const noexcept {
+    std::set<std::shared_ptr<GraphView>> res;
+    for (const auto &v : mViews) {
+        if (auto p = v.lock()) {
+            res.insert(p);
+        }
+    }
+    return res;
+}
+
 void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId) {
     AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(),
         "Input index ({}) is out of bound ({}) for node {} (of type {})",
@@ -244,7 +260,7 @@ void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId)
 // TOPOLOGY
 ///////////////////////////////////////////////////////
 
-void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t outId,
+void Aidge::Node::addChildOp(const std::shared_ptr<Node>& otherNode, const IOIndex_t outId,
                              const IOIndex_t otherInId) {
     AIDGE_ASSERT(otherInId < otherNode->nbInputs(),
         "Input index (#{}) of the node {} (of type {}) is out of bound (it has {} inputs), when trying to add it as a child of node {} (of type {})",
@@ -252,8 +268,13 @@ void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t ou
     AIDGE_ASSERT(outId < nbOutputs(),
         "Output index (#{}) of the node {} (of type {}) is out of bound (it has {} outputs), when trying to add the child node {} (of type {})",
         outId, name(), type(), nbOutputs(), otherNode->name(), otherNode->type());
+    if (otherNode.use_count() == 1) {
+        Log::debug("Node::addChild(): the node {} (of type {}) only holds a weak reference to the added child node {} (of type {})."
+            "If the child node goes out of scope, it will be destructed, leading to a dangling connection."
+            "To avoid this message, consider adding the child node to a GraphView first.", name(), type(), otherNode->name(), otherNode->type());
+    }
     if (otherNode->input(otherInId).second != gk_IODefaultIndex) {
-        Log::notice("Notice: the {}-th Parent of the child node {} (of type {}) already existed", otherInId, otherNode->name(), otherNode->type());
+        Log::notice("the {}-th Parent of the child node {} (of type {}) already existed", otherInId, otherNode->name(), otherNode->type());
     }
     // manage tensors and potential previous parent
     otherNode->setInputId(otherInId, outId);
@@ -273,7 +294,7 @@ void Aidge::Node::addChildView(std::shared_ptr<GraphView> otherGraph, const IOIn
     addChildOp(otherInId.first, outId, otherInId.second);
 }
 
-void Aidge::Node::addChild(std::shared_ptr<Node> otherNode, const IOIndex_t outId,
+void Aidge::Node::addChild(const std::shared_ptr<Node>& otherNode, const IOIndex_t outId,
                            IOIndex_t otherInId) {
     if (otherNode) {
         otherInId =
@@ -331,10 +352,17 @@ bool Aidge::Node::removeParent(const IOIndex_t inId) {
 
 std::set<std::shared_ptr<Aidge::Node>> Aidge::Node::getChildren() const {
     std::set<std::shared_ptr<Node>> children;
+    size_t outId = 0;
     for (const auto& childrenOfOneOutput : mChildren) {
         for (const auto& oneChild : childrenOfOneOutput) {
-            children.insert(oneChild.lock());
+            if (std::shared_ptr<Node> child = oneChild.lock()) {
+                children.insert(child);
+            }
+            else {
+                Log::warn("Node::getChildren(): dangling connection at output #{} for node {} (of type {})", outId, name(), type());
+            }
         }
+        ++outId;
     }
     return children;
 }
@@ -352,7 +380,12 @@ std::vector<std::shared_ptr<Aidge::Node>> Aidge::Node::getChildren(const IOIndex
     assert((outId < nbOutputs()) && "Output index out of bound.");
     std::vector<std::shared_ptr<Node>> children;
     for (std::size_t i = 0; i < mChildren[outId].size(); ++i) {
-        children.push_back(mChildren[outId][i].lock());
+        if (std::shared_ptr<Node> child = mChildren[outId][i].lock()) {
+            children.push_back(child);
+        }
+        else {
+            Log::warn("Node::getChildren(): dangling connection at index #{} of output #{} for node {} (of type {})", i, outId, name(), type());
+        }
     }
     return children;
 }
@@ -407,18 +440,18 @@ void Aidge::Node::resetConnections(bool includeLearnableParam) {
 ///////////////////////////////////////////////////////
 
 Aidge::NodePtr Aidge::Node::cloneSharedOperators() const {
-    return std::make_shared<Node>(mOperator, mAttrs);
+    return std::make_shared<Node>(mOperator, std::make_shared<DynamicAttributes>(*mAttrs));
 }
 
 Aidge::NodePtr Aidge::Node::cloneSharedProducers() const {
     std::shared_ptr<Operator> op =
             (mOperator->type() == Producer_Op::Type) ? mOperator : mOperator->clone();
 
-    return std::make_shared<Node>(op, mAttrs);
+    return std::make_shared<Node>(op, std::make_shared<DynamicAttributes>(*mAttrs));
 }
 
 Aidge::NodePtr Aidge::Node::clone() const {
-    return std::make_shared<Node>(mOperator->clone(), mAttrs);
+    return std::make_shared<Node>(mOperator->clone(), std::make_shared<DynamicAttributes>(*mAttrs));
 }
 
 std::set<Aidge::NodePtr> Aidge::Node::getNodeDelta(int delta, std::set<Aidge::NodePtr> nodeSee) {
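
Note (not part of the patch): the Node.cpp changes above repeatedly apply one weak_ptr pattern: lock each stored std::weak_ptr, keep the result only if the referenced object is still alive, and report the dangling entry otherwise. A minimal standalone sketch of that pattern with generic types (names are illustrative, not Aidge API):

    #include <cstdio>
    #include <memory>
    #include <vector>

    // Collect the still-alive targets of a list of weak references,
    // warning about entries whose target has already been destroyed.
    std::vector<std::shared_ptr<int>> collectAlive(const std::vector<std::weak_ptr<int>>& refs) {
        std::vector<std::shared_ptr<int>> alive;
        for (std::size_t i = 0; i < refs.size(); ++i) {
            if (auto p = refs[i].lock()) {
                alive.push_back(p);          // target still exists
            } else {
                std::printf("dangling reference at index %zu\n", i);
            }
        }
        return alive;
    }

    int main() {
        auto kept = std::make_shared<int>(1);
        std::vector<std::weak_ptr<int>> refs;
        refs.push_back(kept);
        {
            auto scoped = std::make_shared<int>(2);
            refs.push_back(scoped);          // expires at end of this scope
        }
        return collectAlive(refs).size() == 1 ? 0 : 1;
    }
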
diff --git a/src/graph/StaticAnalysis.cpp b/src/graph/StaticAnalysis.cpp
index 033e51022842983caacba9385248c9f02c1e5568..418ae893631839f6b13c16df422f832fee4615b7 100644
--- a/src/graph/StaticAnalysis.cpp
+++ b/src/graph/StaticAnalysis.cpp
@@ -11,39 +11,61 @@
 
 #include "aidge/graph/StaticAnalysis.hpp"
 
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <numeric>  // std::accumulate
+#include <set>
+
+#include <fmt/core.h>  // fmt::println
+#include <fmt/format.h>
+#include <fmt/ranges.h>
+
+#include "aidge/data/DataType.hpp"  // Aidge::isFloatingPoint
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
 Aidge::OperatorStats::OperatorStats(const Operator& op)
   : mOp(op)
 {
     //ctor
 }
 
-size_t Aidge::OperatorStats::getNbArithmIntOps() const {
+Aidge::OperatorStats::~OperatorStats() = default;
+
+std::size_t Aidge::OperatorStats::getNbArithmIntOps() const {
     const auto opTensor = dynamic_cast<const OperatorTensor*>(&mOp);
     if (opTensor) {
-        if (!isDataTypeFloatingPoint(opTensor->getOutput(0)->dataType())) {
+        if (!isFloatingPoint(opTensor->getOutput(0)->dataType())) {
             return getNbArithmOps();
         }
     }
     return 0;
 }
 
+////////////////////////////////////////////////////////////////////////////////
+
 Aidge::StaticAnalysis::StaticAnalysis(std::shared_ptr<GraphView> graph)
   : mGraph(graph)
 {
     //ctor
 }
 
+Aidge::StaticAnalysis::~StaticAnalysis() = default;
+
 void Aidge::StaticAnalysis::summary(bool incProducers) const {
     fmt::println("--------------------------------------------------------------------------------");
     fmt::println("                        Layer (type)               Output Shape         Param #");
     fmt::println("================================================================================");
 
-    size_t nbParams = 0;
-    size_t paramsSize = 0;  // Size in bits
-    size_t fwdBwdSize = 0;  // Size in bits
+    std::size_t nbParams = 0;
+    std::size_t paramsSize = 0;  // Size in bits
+    std::size_t fwdBwdSize = 0;  // Size in bits
 
     const auto namePtrTable = mGraph->getRankedNodesName("{0} ({1}#{3})");
-    for (const auto node : mGraph->getOrderedNodes()) {
+    for (const auto &node : mGraph->getOrderedNodes()) {
         if (node->type() == Producer_Op::Type && !incProducers) {
             continue;
         }
@@ -53,8 +75,8 @@ void Aidge::StaticAnalysis::summary(bool incProducers) const {
         if (opTensor) {
             const auto outputDims = opTensor->getOutput(0)->dims();
             outputDimsStr = fmt::format("{: >27}", fmt::format("{}", outputDims));
-  
-            for (size_t out = 0; out < node->nbOutputs(); ++out) {
+
+            for (std::size_t out = 0; out < node->nbOutputs(); ++out) {
                 const auto output = opTensor->getOutput(out);
                 if (output && node->type() != Producer_Op::Type) {
                     fwdBwdSize += output->size()
@@ -69,8 +91,8 @@ void Aidge::StaticAnalysis::summary(bool incProducers) const {
           namePtrTable.at(node), outputDimsStr, getNbParams(node));
     }
 
-    size_t inputSize = 0;  // Size in bits
-    for (const auto input : mGraph->getOrderedInputs()) {
+    std::size_t inputSize = 0;  // Size in bits
+    for (const auto& input : mGraph->getOrderedInputs()) {
         if (input.first) {
             auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator());
             if (opTensor && opTensor->getInput(input.second)) {
@@ -90,13 +112,13 @@ void Aidge::StaticAnalysis::summary(bool incProducers) const {
     fmt::println("--------------------------------------------------------------------------------");
 }
 
-size_t Aidge::StaticAnalysis::getNbParams(std::shared_ptr<Node> node) const {
+std::size_t Aidge::StaticAnalysis::getNbParams(std::shared_ptr<Node> node) const {
     const auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator());
 
-    size_t nbParams = 0;
+    std::size_t nbParams = 0;
 
     // Look for Producers directly attached to the node's inputs.
-    size_t i = 0;
+    std::size_t i = 0;
     for (auto parent : node->inputs()) {
         if (parent.first && mGraph->inView(parent.first)) {
             if (parent.first->type() == Producer_Op::Type && opTensor->getInput(i)) {
@@ -109,7 +131,7 @@ size_t Aidge::StaticAnalysis::getNbParams(std::shared_ptr<Node> node) const {
     // Look for internal Producers, in case of meta-op.
     if (!node->getOperator()->isAtomic()) {
         const auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(node->getOperator())->getMicroGraph();
-        for (const auto internalNode : microGraph->getNodes()) {
+        for (const auto &internalNode : microGraph->getNodes()) {
             if (internalNode->type() == Producer_Op::Type) {
                 const auto internalOpTensor = std::dynamic_pointer_cast<OperatorTensor>(internalNode->getOperator());
                 nbParams += internalOpTensor->getOutput(0)->size();
@@ -120,14 +142,14 @@ size_t Aidge::StaticAnalysis::getNbParams(std::shared_ptr<Node> node) const {
     return nbParams;
 }
 
-size_t Aidge::StaticAnalysis::getParamsSize(std::shared_ptr<Node> node) const {
+std::size_t Aidge::StaticAnalysis::getParamsSize(std::shared_ptr<Node> node) const {
     const auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator());
 
-    size_t paramsSize = 0;
+    std::size_t paramsSize = 0;
 
     // Look for Producers directly attached to the node's inputs.
-    size_t i = 0;
-    for (auto parent : node->inputs()) {
+    std::size_t i = 0;
+    for (const auto& parent : node->inputs()) {
         if (parent.first && mGraph->inView(parent.first)) {
             if (parent.first->type() == Producer_Op::Type && opTensor->getInput(i)) {
                 paramsSize += opTensor->getInput(i)->size()
@@ -140,7 +162,7 @@ size_t Aidge::StaticAnalysis::getParamsSize(std::shared_ptr<Node> node) const {
     // Look for internal Producers, in case of meta-op.
     if (!node->getOperator()->isAtomic()) {
         const auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(node->getOperator())->getMicroGraph();
-        for (const auto internalNode : microGraph->getNodes()) {
+        for (const auto &internalNode : microGraph->getNodes()) {
             if (internalNode->type() == Producer_Op::Type) {
                 const auto internalOpTensor = std::dynamic_pointer_cast<OperatorTensor>(internalNode->getOperator());
                 paramsSize += internalOpTensor->getOutput(0)->size()
@@ -160,12 +182,32 @@ std::shared_ptr<Aidge::OperatorStats> Aidge::StaticAnalysis::getOpStats(std::sha
             : std::make_shared<MetaOpStats>(*(node->getOperator()));
 }
 
-size_t Aidge::StaticAnalysis::accumulate(size_t (OperatorStats::*func)() const) const {
+std::size_t Aidge::StaticAnalysis::getNbArithmOps() const { return accumulate(&OperatorStats::getNbArithmOps); }
+std::size_t Aidge::StaticAnalysis::getNbLogicOps() const { return accumulate(&OperatorStats::getNbLogicOps); }
+std::size_t Aidge::StaticAnalysis::getNbCompOps() const { return accumulate(&OperatorStats::getNbCompOps); }
+std::size_t Aidge::StaticAnalysis::getNbNLOps() const { return accumulate(&OperatorStats::getNbNLOps); }
+std::size_t Aidge::StaticAnalysis::getNbOps() const { return accumulate(&OperatorStats::getNbOps); }
+std::size_t Aidge::StaticAnalysis::getNbArithmIntOps() const { return accumulate(&OperatorStats::getNbArithmIntOps); }
+std::size_t Aidge::StaticAnalysis::getNbArithmFpOps() const { return accumulate(&OperatorStats::getNbArithmFpOps); }
+std::size_t Aidge::StaticAnalysis::getNbMACOps() const { return accumulate(&OperatorStats::getNbMACOps); }
+
+std::size_t Aidge::StaticAnalysis::accumulate(std::size_t (OperatorStats::*func)() const) const {
     return std::accumulate(
         mGraph->getNodes().cbegin(),
         mGraph->getNodes().cend(),
         std::size_t(0),
-        [this, func](const size_t& lhs, const std::shared_ptr<Node>& rhs) {
+        [this, func](const std::size_t& lhs, const std::shared_ptr<Node>& rhs) {
             return lhs + (this->getOpStats(rhs).get()->*func)();
         });
 }
+
+////////////////////////////////////////////////////////////////////////////////
+
+Aidge::MetaOpStats::~MetaOpStats() = default;
+
+std::size_t Aidge::MetaOpStats::getNbArithmOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmOps(); }
+std::size_t Aidge::MetaOpStats::getNbLogicOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbLogicOps(); }
+std::size_t Aidge::MetaOpStats::getNbCompOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbCompOps(); }
+std::size_t Aidge::MetaOpStats::getNbNLOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbNLOps(); }
+std::size_t Aidge::MetaOpStats::getNbArithmIntOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmIntOps(); }
+std::size_t Aidge::MetaOpStats::getNbMACOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbMACOps(); }
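
Note (not part of the patch): StaticAnalysis::accumulate above sums one statistic over every node by taking a pointer to an OperatorStats const member function and invoking it through the ->* operator inside a std::accumulate lambda. A minimal standalone sketch of that mechanism with a hypothetical Stats class (not the Aidge API):

    #include <cstddef>
    #include <memory>
    #include <numeric>
    #include <vector>

    struct Stats {                      // stand-in for OperatorStats
        std::size_t ops;
        std::size_t getNbOps() const { return ops; }
    };

    // Sum the value returned by any const member function of Stats,
    // mirroring the pointer-to-member accumulate used above.
    std::size_t accumulate(const std::vector<std::shared_ptr<Stats>>& all,
                           std::size_t (Stats::*func)() const) {
        return std::accumulate(all.cbegin(), all.cend(), std::size_t(0),
            [func](std::size_t lhs, const std::shared_ptr<Stats>& rhs) {
                return lhs + (rhs.get()->*func)();  // call through member pointer
            });
    }

    int main() {
        std::vector<std::shared_ptr<Stats>> all = {
            std::make_shared<Stats>(Stats{3}),
            std::make_shared<Stats>(Stats{4})
        };
        return accumulate(all, &Stats::getNbOps) == 7 ? 0 : 1;
    }
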
diff --git a/src/graphRegex/GraphFsmInterpreter.cpp b/src/graphRegex/GraphFsmInterpreter.cpp
deleted file mode 100644
index a2f07129c468c737da022de8a6d1b093cdd08e39..0000000000000000000000000000000000000000
--- a/src/graphRegex/GraphFsmInterpreter.cpp
+++ /dev/null
@@ -1,191 +0,0 @@
-#include "aidge/graphRegex/GraphFsmInterpreter.hpp"
-
-using namespace Aidge; 
-
-
-GraphFsmInterpreter::GraphFsmInterpreter(const std::string graphMatchExpr,std::vector<std::shared_ptr<ConditionalInterpreter>>&nodesCondition):mParser(graphMatchExpr){
-    mActGroupe = 0;
-
-    for (const auto &obj : nodesCondition) {
-        if(mNodesCondition.find(obj->getKey()) ==mNodesCondition.end()){
-             mNodesCondition[obj->getKey()] = obj;
-        }else{
-            throw std::logic_error("GraphFsmInterpreter Bad Key" );
-        }
-    }
-}
-std::shared_ptr<FsmGraph>  GraphFsmInterpreter::interpret(void){
-    mActGroupe = 0;
-    std::shared_ptr<AstNode<gRegexTokenTypes>> tree = mParser.parse();
-    std::shared_ptr<FsmGraph> out = visit(tree);
-    return out;
-}
-
-std::shared_ptr<FsmGraph> GraphFsmInterpreter::visit(std::shared_ptr<AstNode<gRegexTokenTypes>> AstTree){
-
-    std::vector<std::shared_ptr<AstNode<gRegexTokenTypes>>> nextAstNodes = AstTree->getChilds();
-
-    if(AstTree->getType() == gRegexTokenTypes::SEP){
-        return sepF(visit(nextAstNodes[0]),visit(nextAstNodes[1]));
-    }else if(AstTree->getType() == gRegexTokenTypes::NEXT){
-        return nextF(visit(nextAstNodes[0]),visit(nextAstNodes[1]));
-    }else if(AstTree->getType() == gRegexTokenTypes::QOM){
-        return qomF(visit(nextAstNodes[0]));
-    }else if(AstTree->getType() == gRegexTokenTypes::QZM){
-        return qzmF(visit(nextAstNodes[0]));
-    }else if(AstTree->getType() == gRegexTokenTypes::KEY || AstTree->getType() == gRegexTokenTypes::CKEY){
-        return keyF(AstTree);
-    }else if(AstTree->getType() == gRegexTokenTypes::LPAREN){
-        mActGroupe += 1;
-        std::shared_ptr<FsmGraph> out = visit(nextAstNodes[0]);
-        mActGroupe -= 1;
-        return out;
-    }else{
-        throw std::logic_error("visit Bad token type" );
-    }
-}
-
-
-
-
-std::shared_ptr<FsmGraph> GraphFsmInterpreter::keyF(std::shared_ptr<AstNode<gRegexTokenTypes>> AstNode){
-    
-
-    std::shared_ptr<FsmNode>  start = std::make_shared<FsmNode>(false,true);
-    std::shared_ptr<FsmNode>  valid = std::make_shared<FsmNode>(true,false);
-    std::shared_ptr<FsmGraph> graph = std::make_shared<FsmGraph>(mParser.getQuery());
-    std::shared_ptr<FsmEdge> edge;
-    
-
-    if(AstNode->getType() == gRegexTokenTypes::CKEY){
-        edge = FsmEdgeFactory::make(start,valid,FsmEdgeTypes::COMMON,mNodesCondition,AstNode->getValue());
-    }else if (AstNode->getType() == gRegexTokenTypes::KEY)
-    {
-        edge = FsmEdgeFactory::make(start,valid,FsmEdgeTypes::UNIQUE,mNodesCondition,AstNode->getValue());
-    }else{
-
-        throw std::logic_error("keyF Bad in AST" );
-    }
-    
-    graph->addEdge(edge);
-    graph->setGroupe(mActGroupe);
-    return graph;
-}
-
-std::shared_ptr<FsmGraph> GraphFsmInterpreter::sepF(std::shared_ptr<FsmGraph> leftFsm,std::shared_ptr<FsmGraph> rigthFsm){
-
-    size_t idxLeft = leftFsm->getNbSubFsm();
-    rigthFsm->incOriginAllNodeBy(idxLeft);
-    leftFsm->unionG(rigthFsm);
-    //the rigthFsm is no longer usfull
-    return leftFsm;
-}
-
-std::shared_ptr<FsmGraph> GraphFsmInterpreter::nextF(std::shared_ptr<FsmGraph> leftFsm,std::shared_ptr<FsmGraph> rigthFsm){
-    /*
-        combine the 2 Graph
-        all valid node of A are  merge with Start B, Start B is un Start
-        update the relative reference  
-
-           A          B
-        SA -> VA + SB -> VB
-           A    B
-        SA -> q -> VB
-    */
-    leftFsm->mergeOneStartOneValid(rigthFsm);
-    //the rigthFsm is no longer usfull
-    return leftFsm;
-}
-
-std::shared_ptr<FsmGraph> GraphFsmInterpreter::qomF(std::shared_ptr<FsmGraph> fsm){
-    /*
-        +
-        valid node is connect to the child of Start with the same edge condition
-            A
-        S -> V
-
-            A
-        S -> V
-          (E|R)
-        V -> S
-    */
-
-    std::vector<std::shared_ptr<FsmNode>> allStart =  fsm->getStartNodes();
-    std::set<std::shared_ptr<FsmNode>> allValid = fsm->getValidNodes();
-    std::shared_ptr<FsmEdge> edge;
-
-    if(allStart.size() != 1){
-         throw std::logic_error("qomF Bad in AST" );
-    }
-
-    for(auto start : allStart ){
-        for(auto edgeStart :start->getEdges() ){
-            if (auto sharedEdge = edgeStart.lock()) {
-
-                const std::map<size_t, int> commonRef = sharedEdge->getRelative();
-                bool haveCommon = !commonRef.empty();
-
-                for(auto valid : allValid){
-                    if(haveCommon){
-                        /*
-                        the // quantify case 
-                        get the go back and make a lexeme id(number)
-                        we need to go back to the ref delta min #TODO
-                        */
-                        bool hasMinRef = false;
-                        std::pair<size_t, int> minRef;
-                        for (const auto& entry : commonRef) {
-                            if (!hasMinRef || std::abs(minRef.second) > std::abs(entry.second)) {
-                                hasMinRef = true;
-                                minRef = entry;
-                            }
-                        }
-                        std::stringstream lexem;
-                        lexem << "(" << minRef.first << ", " << minRef.second << ")";
-                        edge = FsmEdgeFactory::make(valid,start,FsmEdgeTypes::REF,mNodesCondition, lexem.str());
-                    }else{
-                        /*
-                        the sequential quantify case 
-                        no reference to common 
-                        */
-                        edge = FsmEdgeFactory::make(valid,start,FsmEdgeTypes::EMPTY,mNodesCondition,"");
-
-                    }
-                    fsm->addEdge(edge);
-                }
-            }else{
-                throw std::runtime_error("edgeStart weak pointer is expired" );
-            }
-        }
-     
-    }
-    return fsm;
-
-}
-
-std::shared_ptr<FsmGraph> GraphFsmInterpreter::qzmF(std::shared_ptr<FsmGraph> fsm){
-        /*
-        qomf and a bypass empty start to valid 
-        */
-    fsm = qomF(fsm);
-
-    std::vector<std::shared_ptr<FsmNode>> allStart =  fsm->getStartNodes();
-    std::set<std::shared_ptr<FsmNode>> allValid = fsm->getValidNodes();
-    std::shared_ptr<FsmEdge> edge;
-
-    if(allStart.size() != 1){
-         throw std::logic_error("qzmF Bad in AST" );
-    }
-
-    for(auto start : allStart ){
-       
-        for(auto valid : allValid){
-            edge = FsmEdgeFactory::make(start,valid,FsmEdgeTypes::EMPTY,mNodesCondition,"");
-            fsm->addEdge(edge);
-        }
-    }
-        
-    return fsm;
-
-
-}
\ No newline at end of file
diff --git a/src/graphRegex/GraphLexer.cpp b/src/graphRegex/GraphLexer.cpp
deleted file mode 100644
index 05a23d02cdbfe072337ea2cc6ed92410e914257b..0000000000000000000000000000000000000000
--- a/src/graphRegex/GraphLexer.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-
-#include "aidge/graphRegex/GraphLexer.hpp"
-
-using namespace Aidge; 
-
-
-GraphLexer::GraphLexer( const std::string gRegexExpressions ):
-mRegularExpressions(gRegexExpressions){
-    mPosition = 0;
-}
-
-std::shared_ptr<ParsingToken<gRegexTokenTypes>> GraphLexer::getNextToken(void){
-    std::string currentChars = "";
-    while (mPosition < mRegularExpressions.length())
-    {
-        //erase all space 
-        if (mRegularExpressions[mPosition] != ' ')
-        {
-            currentChars += mRegularExpressions[mPosition];
-        }
-        else
-        {
-            mPosition++;
-            continue;
-        }
-
-        /////
-        // const lent token
-        /////
-
-        if (std::regex_match(currentChars,std::regex("\\->")))// the next TOKEN 
-        {
-            mPosition++;
-            return std::make_shared<ParsingToken<gRegexTokenTypes>>(gRegexTokenTypes::NEXT,"");
-        }
-        else if (std::regex_match(currentChars,std::regex("\\*")))// the * TOKEN 
-        {
-            mPosition++;
-            return std::make_shared<ParsingToken<gRegexTokenTypes>>(gRegexTokenTypes::QZM,"");
-        }
-        else if (std::regex_match(currentChars,std::regex("\\+")))// the + TOKEN 
-        {
-            mPosition++;
-            return std::make_shared<ParsingToken<gRegexTokenTypes>>(gRegexTokenTypes::QOM,"");
-        }
-        else if (std::regex_match(currentChars,std::regex("\\(")))// the LPAREN TOKEN 
-        {
-            mPosition++;
-            return std::make_shared<ParsingToken<gRegexTokenTypes>>(gRegexTokenTypes::LPAREN,"");
-        }
-        else if (std::regex_match(currentChars,std::regex("\\)")))// the RPAREN TOKEN 
-        {
-            mPosition++;
-            return std::make_shared<ParsingToken<gRegexTokenTypes>>(gRegexTokenTypes::RPAREN,"");
-        }
-
-        //
-        else if (std::regex_match(currentChars,std::regex(";")))// the SEP TOKEN 
-        {
-            //test if the last sep
-            //std::string subStr = mRegularExpressions.substr(mPosition);
-            mPosition++;
-            return std::make_shared<ParsingToken<gRegexTokenTypes>>(gRegexTokenTypes::SEP,"");
-        }
-
-        /////
-        //unconst lent token
-        /////
-
-        else if (std::regex_match(currentChars,std::regex("[A-Za-z_0-9]")))// the KEY or CKEY
-        {   
-            
-            //read all the key 
-            bool isCKey = false;
-            std::regex keyRegex("[A-Za-z_0-9]+");
-            std::regex cKeyRegex("[A-Za-z_0-9]+\\#[0-9]*");
-
-            while ( mPosition < mRegularExpressions.length()) {
-
-                if(!std::regex_match(currentChars,keyRegex) && !std::regex_match(currentChars,cKeyRegex))
-                {
-                    currentChars.pop_back(); //the last char is the problems
-                    break;
-                }
-                else if (std::regex_match(currentChars,cKeyRegex)){
-                    isCKey = true;
-                }
-                mPosition++;
-                if (mPosition < mRegularExpressions.length()) currentChars += mRegularExpressions[mPosition];
-                
-            }
-            //we end the match 2 possibility 
-            //we are at the end of the mConditionalExpressions and we need to ensure the match
-            //we are not we can continu
-            if (mPosition == mRegularExpressions.length()-1)
-            {
-                if (!std::regex_match(currentChars,keyRegex) && !std::regex_match(currentChars,cKeyRegex))
-                {
-                    throw badTokenError(currentChars,mPosition);
-                }
-            }
-
-
-            if (isCKey){
-                return std::make_shared<ParsingToken<gRegexTokenTypes>>(gRegexTokenTypes::CKEY,currentChars);
-            } else{
-                return std::make_shared<ParsingToken<gRegexTokenTypes>>(gRegexTokenTypes::KEY,currentChars);
-            }
-        }
-
-        mPosition++;
-    }
-
-
-    //no more to find no one match the currentChars 
-    if (currentChars.empty()) {
-        return  std::make_shared<ParsingToken<gRegexTokenTypes>>(gRegexTokenTypes::STOP,"");  // Null shared pointer ;
-    }else{
-        throw badTokenError(currentChars,mPosition);
-    }
-
-}
-
-void GraphLexer::rstPosition(void){
-    if (isEnd()){
-        mPosition = 0;
-    }else{
-        throw badTokenError("end rst",mPosition);
-    }
-}
-
-bool GraphLexer::isEnd(void){
-    return mPosition >= mRegularExpressions.length();
-}
-
-
-const std::string GraphLexer::getQuery(){
-    return mRegularExpressions;
-}
-
-std::runtime_error GraphLexer::badTokenError(const std::string& currentChars,std::size_t position){
-    std::ostringstream errorMessage;
-    errorMessage << "\nBad syntax " << currentChars << " :\n" << mRegularExpressions << "\n";
-    for (std::size_t i = 0; i < position; i++) {
-        errorMessage << ' ';
-    }
-    errorMessage << "^\n";
-
-    return std::runtime_error(errorMessage.str());
-}
-
-  const std::string GraphLexer::rep(){
-    std::string out = mRegularExpressions;
-    out += "\n";
-    for (std::size_t i = 0; i < mPosition; i++) {
-        out += ' ';
-    }
-    out +=  "^\n";
-    return out ;
-    }
\ No newline at end of file
diff --git a/src/graphRegex/GraphParser.cpp b/src/graphRegex/GraphParser.cpp
deleted file mode 100644
index 9ad96a34bfbe36bdae65cae072eb4f1edcd3faaf..0000000000000000000000000000000000000000
--- a/src/graphRegex/GraphParser.cpp
+++ /dev/null
@@ -1,189 +0,0 @@
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "aidge/graphRegex/GraphParser.hpp"
-
-Aidge::GraphParser::GraphParser(const std::string gRegexExpressions):
-mLexer(gRegexExpressions)
-{
-    mCurrentToken = mLexer.getNextToken();
-}
-
-Aidge::GraphParser::~GraphParser() noexcept = default;
-
-
-const std::string Aidge::GraphParser::getQuery(){
-    return mLexer.getQuery();
-}
-
-std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::parse(void){
-
-    std::shared_ptr<AstNode<gRegexTokenTypes>> astTree = constructAstAllExpr();
-    rstParser();
-    return astTree;
-}
-
-
-void Aidge::GraphParser::rstParser(void){
-    mLexer.rstPosition();
-    mCurrentToken = mLexer.getNextToken();
-}
-
-
-void Aidge::GraphParser::ackToken(gRegexTokenTypes  tokenType){
-
-    if(mCurrentToken->getType() == tokenType ){
-        try {
-            mCurrentToken = mLexer.getNextToken();
-        } catch (const std::runtime_error& e) {
-            std::ostringstream errorMessage;
-            errorMessage << "Graph Lexer error in Parser :\n"<< e.what() << std::endl;
-            throw std::runtime_error(errorMessage.str());
-        }
-    }else{
-        std::ostringstream errorMessage;
-        errorMessage << "Bad syntax GraphParser " << static_cast<int>(mCurrentToken->getType())  <<"!="<< static_cast<int>(tokenType) << "\n";
-        errorMessage << mLexer.rep();
-        throw std::runtime_error(errorMessage.str());
-    }
-}
-
-/*
-exp : KEY(QOM | QZM)?  | CKEY | domain
-*/
-std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::constructAstExp(void)
-{
-
-    try{
-        std::shared_ptr<ParsingToken<gRegexTokenTypes>> token = mCurrentToken->copy();
-        std::shared_ptr<AstNode<gRegexTokenTypes>> node = std::make_shared<AstNode<gRegexTokenTypes>>(token);
-
-        if (mCurrentToken->getType() == gRegexTokenTypes::KEY  ){
-            ackToken(gRegexTokenTypes::KEY );
-            if (mCurrentToken->getType() == gRegexTokenTypes::QOM  ){
-                token = mCurrentToken->copy();
-                ackToken(gRegexTokenTypes::QOM );
-                std::shared_ptr<AstNode<gRegexTokenTypes>> newNode = std::make_shared<AstNode<gRegexTokenTypes>>(token,
-                std::vector<std::shared_ptr<AstNode<gRegexTokenTypes>>>{node});
-                return newNode;
-            }else if (mCurrentToken->getType() == gRegexTokenTypes::QZM  ){
-                token = mCurrentToken->copy();
-                ackToken(gRegexTokenTypes::QZM );
-                std::shared_ptr<AstNode<gRegexTokenTypes>> newNode = std::make_shared<AstNode<gRegexTokenTypes>>(token,
-                std::vector<std::shared_ptr<AstNode<gRegexTokenTypes>>>{node});
-                return newNode;
-            }
-            return node;
-        }else if (mCurrentToken->getType() == gRegexTokenTypes::CKEY){
-            ackToken(gRegexTokenTypes::CKEY );
-            return node;
-        }else{
-            return constructAstDomain();
-        }
-
-    } catch (const std::runtime_error& e) {
-        std::ostringstream errorMessage;
-        errorMessage << "GraphParser constructAstExp :\n"<< e.what() << std::endl;
-        throw std::runtime_error(errorMessage.str());
-    }
-}
-
-/*
-seq :exp (NEXT seq)*
-*/
-std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::constructAstSeq(void)
-{
-
-   try{
-
-        std::shared_ptr<AstNode<gRegexTokenTypes>> left = constructAstExp();
-        if(mCurrentToken->getType() == gRegexTokenTypes::NEXT )
-        {
-            std::shared_ptr<ParsingToken<gRegexTokenTypes>> token = mCurrentToken->copy();
-            ackToken(gRegexTokenTypes::NEXT);
-            std::shared_ptr<AstNode<gRegexTokenTypes>> newNode = std::make_shared<AstNode<gRegexTokenTypes>>(token,
-            std::vector<std::shared_ptr<AstNode<gRegexTokenTypes>>>{left,constructAstSeq()});
-            left = newNode;
-        }
-        return left;
-
-    } catch (const std::runtime_error& e) {
-        std::ostringstream errorMessage;
-        errorMessage << "GraphParser constructAstSeq :\n"<< e.what() << std::endl;
-        throw std::runtime_error(errorMessage.str());
-    }
-
-}
-
-
-/*
-LPAREN seq RPAREN (QOM | QZM)
-*/
-std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::constructAstDomain(void)
-{
-
-   try{
-        std::shared_ptr<ParsingToken<gRegexTokenTypes>> token ;
-        std::shared_ptr<AstNode<gRegexTokenTypes>> node ;
-
-        token = mCurrentToken->copy();
-        ackToken(gRegexTokenTypes::LPAREN);
-        node = std::make_shared<AstNode<gRegexTokenTypes>>(token,
-        std::vector<std::shared_ptr<AstNode<gRegexTokenTypes>>>{constructAstSeq()});
-        ackToken(gRegexTokenTypes::RPAREN);
-        //(QOM | QZM)
-
-        token = mCurrentToken->copy();
-        if (mCurrentToken->getType() == gRegexTokenTypes::QOM){
-            ackToken(gRegexTokenTypes::QOM);
-            node = std::make_shared<AstNode<gRegexTokenTypes>>(token,
-            std::vector<std::shared_ptr<AstNode<gRegexTokenTypes>>>{node});
-        }else if (mCurrentToken->getType() == gRegexTokenTypes::QZM){
-            ackToken(gRegexTokenTypes::QZM);
-            node = std::make_shared<AstNode<gRegexTokenTypes>>(token,
-            std::vector<std::shared_ptr<AstNode<gRegexTokenTypes>>>{node});
-        }else{
-            std::ostringstream errorMessage;
-            errorMessage << "Bad syntax constructAstDomain must have quantifier \n";
-            throw std::runtime_error(errorMessage.str());
-        }
-
-        return node;
-
-    } catch (const std::runtime_error& e) {
-        std::ostringstream errorMessage;
-        errorMessage << "GraphParser constructAstDomain :\n"<< e.what() << std::endl;
-        throw std::runtime_error(errorMessage.str());
-    }
-}
-
-/*
-        allExpr: seq (SEP allExpr)* | STOP
-*/
-std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::constructAstAllExpr(void)
-{
-
-    try{
-        std::shared_ptr<AstNode<gRegexTokenTypes>> left = constructAstSeq();
-        if(mCurrentToken->getType() == gRegexTokenTypes::SEP )
-        {
-            std::shared_ptr<ParsingToken<gRegexTokenTypes>> token = mCurrentToken->copy();
-            ackToken(gRegexTokenTypes::SEP);
-
-            if(mCurrentToken->getType() == gRegexTokenTypes::STOP )
-            {
-                 return left;
-            }
-            std::shared_ptr<AstNode<gRegexTokenTypes>> newNode = std::make_shared<AstNode<gRegexTokenTypes>>(token,
-            std::vector<std::shared_ptr<AstNode<gRegexTokenTypes>>>{left,constructAstAllExpr()});
-            left = newNode;
-        }
-        return left;
-
-    } catch (const std::runtime_error& e) {
-        std::ostringstream errorMessage;
-        errorMessage << "GraphParser constructAstDomain :\n"<< e.what() << std::endl;
-        throw std::runtime_error(errorMessage.str());
-    }
-}
diff --git a/src/graphRegex/GraphRegex.cpp b/src/graphRegex/GraphRegex.cpp
deleted file mode 100644
index ca15ff8dec5ff5ebd4ea69141c6e286849162bb5..0000000000000000000000000000000000000000
--- a/src/graphRegex/GraphRegex.cpp
+++ /dev/null
@@ -1,167 +0,0 @@
-#include "aidge/graphRegex/GraphRegex.hpp"
-using namespace Aidge;
-
-
-void GraphRegex::setKeyFromGraph(std::shared_ptr<GraphView> ref){
-
-    for (const NodePtr& node : ref->getNodes()) {
-        std::string type =  node->type();
-        bool isIn = false;
-        for(const auto &test:mAllTest){
-            if(test->getKey() == type){
-                isIn = true;
-                break;
-            }
-        }
-        if(!isIn){
-             mAllTest.push_back(std::make_shared<ConditionalInterpreter>(type,"getType($) =='" + type + "'"));
-        }
-        // auto it = mAllTest.find(type);
-        // if (it == mAllTest.end()) {
-        //    mAllTest[type] = std::make_shared<ConditionalInterpreter>(type,"getType($) =='" + type + "'");
-        // }
-        // //if the key exist it's ok, but not make 2 ConditionalInterpreter
-    }
-}
-
-
-
-// void GraphRegex::addQuery(const std::string query){
-//     //TODO one query only but the same string is a same query but
-//     //2 different string it's maybe the same query , we need to check the AST
-//     mQueryRecipe[query] = nullptr;
-// }
-
-void GraphRegex::addQuery(const std::string query,RecipesFunctionType f ){
-
-    mQueryRecipe[query] = f;
-
-}
-
-
-// Function to generate all combinations of n elements from a set
-void GraphRegex::_generateCombinationsStart(const std::set<NodePtr>& elements, std::size_t n, std::size_t index, std::vector<NodePtr>& current, std::set<std::vector<NodePtr>>& combinations) {
-    if (n == 0) {
-        combinations.insert(current);
-        return;
-    }
-    for (auto it = elements.begin(); it != elements.end(); ++it) {
-        current.push_back(*it);
-        _generateCombinationsStart(elements, n - 1, index + 1, current, combinations);
-        current.pop_back();
-    }
-}
-
-// factorial(n) tree searched optimized with a stopping condition
-void GraphRegex::_findLargestCompatibleSet(
-    const std::vector<std::shared_ptr<MatchSolution>>& solutions,
-    std::set<std::shared_ptr<MatchSolution>>& currentSet,
-    std::set<std::shared_ptr<MatchSolution>>& largestSet,
-    size_t currentIndex
-) {
-    if (currentIndex >= solutions.size()) {
-        if (currentSet.size() > largestSet.size()) {
-            largestSet = currentSet;
-        }
-        return;
-    }
-
-    for (size_t i = currentIndex; i < solutions.size(); ++i) {
-        if (std::all_of(currentSet.begin(), currentSet.end(),
-            [&](const std::shared_ptr<MatchSolution>& solution) {
-                return solution->areCompatible(solutions[i]);
-            }
-        )) {
-            currentSet.insert(solutions[i]);
-            _findLargestCompatibleSet(solutions, currentSet, largestSet, i + 1);
-            currentSet.erase(solutions[i]);
-            // cut the size of the graph of possibilities
-            if ((currentSet.size() + solutions.size() - currentIndex) <= largestSet.size()) {
-                return;
-            }
-        }
-    }
-}
-
-std::set<std::shared_ptr<MatchSolution>> GraphRegex::_findLargestCompatibleSet(
-    const std::vector<std::shared_ptr<MatchSolution>>& solutions
-) {
-    std::set<std::shared_ptr<MatchSolution>> largestSet;
-    std::set<std::shared_ptr<MatchSolution>> currentSet;
-    _findLargestCompatibleSet(solutions, currentSet, largestSet, 0);
-    return largestSet;
-}
-
-
-
-std::set<std::shared_ptr<MatchSolution>> GraphRegex::match(std::shared_ptr<GraphView> ref){
-
-    std::vector<std::shared_ptr<MatchSolution>> solutions = {};
-
-    //for (const std::string& query : mQuery) {
-    for (auto it = mQueryRecipe.begin(); it != mQueryRecipe.end(); ++it) {
-        const std::string query  = it->first;
-
-        std::shared_ptr<GraphFsmInterpreter>  fsmGenerator = std::make_shared<GraphFsmInterpreter>(query,mAllTest);
-        std::shared_ptr<FsmGraph> fsm = fsmGenerator->interpret();
-
-        // generate all the start possibility
-        std::size_t nb_startSt =  fsm->getNbStart();
-        std::set<std::vector<NodePtr>> combinations;
-        std::vector<NodePtr> current;
-        _generateCombinationsStart(ref->getNodes(), nb_startSt, 0, current, combinations);
-
-
-        // all start
-        for (const auto& combination : combinations) {
-            std::vector<std::shared_ptr<MatchSolution>> solution = fsm->test(combination);
-            solutions.insert(solutions.end(), solution.begin(), solution.end());
-        }
-
-
-    }
-    return _findLargestCompatibleSet(solutions);
-}
-
-void GraphRegex::appliedRecipes(std::shared_ptr<GraphView> ref){
-    std::set<std::shared_ptr<MatchSolution>> matchRef  = match(ref);
-    for (const auto& solution : matchRef) {
-        if(mQueryRecipe[solution->getQuery()] != nullptr){
-            mQueryRecipe[solution->getQuery()](solution);
-        }
-    }
-}
-
-void GraphRegex::setNodeKey(const std::string key, const std::string conditionalExpressions ){
-    mAllTest.push_back(std::make_shared<ConditionalInterpreter>(key,conditionalExpressions));
-    _majConditionalInterpreterLambda();
-}
-
-
-void GraphRegex::setNodeKey(const std::string key,std::function<bool(NodePtr)> f){
-    //we can applied to all key but it's not efficient
-    if(mAllLambda.find(key) != mAllLambda.end()){
-        throw std::runtime_error(key + " is define");
-    }
-    mAllLambda[key] = f;
-    
-    _majConditionalInterpreterLambda();
-    //we add the lambda as key by default 
-    setNodeKey(key, key + "($)==true");
-}
-
-void GraphRegex::_majConditionalInterpreterLambda(){
-
-    for (const auto& test : mAllTest) {
-        for (const auto& pair : mAllLambda) {
-            const std::string& key = pair.first;
-            const std::function<bool(NodePtr)>& lambda = pair.second;
-
-            if(!test->isLambdaRegister(key)){
-                test->insertLambda(key,lambda);
-            }
-
-        }
-    }
-}
-
diff --git a/src/graphRegex/GraphStrInterpreter.cpp b/src/graphRegex/GraphStrInterpreter.cpp
deleted file mode 100644
index 8ad24b5b9b0fee5fba34dd7397132bec2410fd23..0000000000000000000000000000000000000000
--- a/src/graphRegex/GraphStrInterpreter.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-#include "aidge/graphRegex/GraphStrInterpreter.hpp"
-
-using namespace Aidge; 
-
-GraphStrInterpreter::GraphStrInterpreter(const std::string graphMatchExpr):mParser(graphMatchExpr){
-    mToTest = graphMatchExpr;
-    mToTest.erase(std::remove_if(mToTest.begin(), mToTest.end(), ::isspace), mToTest.end());
-}
-
-
-std::string GraphStrInterpreter::visit(std::shared_ptr<AstNode<gRegexTokenTypes>> AstTree){
-
-    std::vector<std::shared_ptr<AstNode<gRegexTokenTypes>>> nextAstNodes = AstTree->getChilds();
-
-    if(AstTree->getType() == gRegexTokenTypes::SEP){
-        return visit(nextAstNodes[0])+";"+visit(nextAstNodes[1]);
-    }else if(AstTree->getType() == gRegexTokenTypes::NEXT){
-        return visit(nextAstNodes[0])+"->"+visit(nextAstNodes[1]);
-    }else if(AstTree->getType() == gRegexTokenTypes::QOM){
-        return visit(nextAstNodes[0])+"+";
-    }else if(AstTree->getType() == gRegexTokenTypes::QZM){
-        return visit(nextAstNodes[0])+"*";
-    }else if(AstTree->getType() == gRegexTokenTypes::KEY || AstTree->getType() == gRegexTokenTypes::CKEY){
-        return AstTree->getValue();
-    }else if(AstTree->getType() == gRegexTokenTypes::LPAREN){
-        return "("+visit(nextAstNodes[0])+")";
-    }else{
-        throw std::logic_error("visit Bad token type" );
-    }
-
-
-}
-
-
-std::string GraphStrInterpreter::interpret(void){
-    std::shared_ptr<AstNode<gRegexTokenTypes>> tree = mParser.parse();
-    return visit(tree);
-}
\ No newline at end of file
diff --git a/src/graphRegex/matchFsm/FsmEdge.cpp b/src/graphRegex/matchFsm/FsmEdge.cpp
deleted file mode 100644
index 170d5e69366d25ba19ea2f514cab6cd59b545ec0..0000000000000000000000000000000000000000
--- a/src/graphRegex/matchFsm/FsmEdge.cpp
+++ /dev/null
@@ -1,302 +0,0 @@
-#include "aidge/graphRegex/matchFsm/FsmEdge.hpp"
-#include "aidge/graphRegex/matchFsm/FsmNode.hpp"
-#include "aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp"
-
-using namespace Aidge; 
-
-std::map<std::string,int> FsmEdgeCommon::mCommonIdxMap;
-
-bool FsmEdge::isCommon(void){
-    return false;
-}
-
-size_t FsmEdge::getCommonIdx(void){
-    return std::numeric_limits<std::size_t>::max();
-}
-const std::map<size_t,int>& FsmEdge::getRelative(void){
-    return mRelativePos;
-}
-void FsmEdge::updateRelative( const std::map<size_t,int>& relativePos ){
-    for (const auto& kvp : relativePos) {
-            mRelativePos.insert(kvp);
-    }
-}
-std::shared_ptr<FsmNode> FsmEdge::getSourceNode(void){
-    return mNodeSource;
-}
-void FsmEdge::reSetSourceNode(const std::shared_ptr<FsmNode>& newSource){
-    mNodeSource->rmEdge(shared_from_this());
-    mNodeSource = newSource;
-    mNodeSource->addEdge(shared_from_this());
-    propagateRelativePos();
-
-}
-std::shared_ptr<FsmNode> FsmEdge::getDestNode(void){
-    return mNodeDest;
-}
-void FsmEdge::reSetDestNode(const std::shared_ptr<FsmNode>& newDest){
-        mNodeDest->rmParent(mNodeSource);
-        mNodeDest = newDest;
-        mNodeDest->addParent(mNodeSource);
-        propagateRelativePos();
-}
-void FsmEdge::propagateRelativePos(void){
-
-    std::set<std::size_t> myRelativeID;
-    for (const auto& kvp : mRelativePos) {
-        myRelativeID.insert(kvp.first);
-    }
-
-    for (const auto& nextWeakEdge : mNodeDest->getEdges()){
-
-        if (auto nextEdge = nextWeakEdge.lock()) {
-            
-            if(this == nextEdge.get()){
-                continue;
-            }
-            
-
-            std::set<std::size_t> nextRelativeID;
-            for (const auto& kvp : nextEdge->getRelative()) {
-                nextRelativeID.insert(kvp.first);
-            }
-
-            // Find elements in myRelativeID but not in nextRelativeID
-            std::set<std::size_t> idxsToPush;
-            std::set_difference(myRelativeID.begin(), myRelativeID.end(),
-                                nextRelativeID.begin(), nextRelativeID.end(),
-                                std::inserter(idxsToPush, idxsToPush.begin()));
-
-            // Find elements in nextRelativeID but not in myRelativeID
-            std::set<std::size_t> idxsToGet;
-            std::set_difference(nextRelativeID.begin(), nextRelativeID.end(),
-                                myRelativeID.begin(), myRelativeID.end(),
-                                std::inserter(idxsToGet, idxsToGet.begin()));
-
-            //  test for integrity we look if 2 edge refer to the same
-            //  ref and are link the ref dif is one
-            //  not working for common node
-            //  we can go deeper by find the all pass to a ref and see if the delta is good
-
-            // Find elements present in both myRelativeID and nextRelativeID
-            std::set<std::size_t> idxsTotest;
-            for (auto idx : nextRelativeID){
-                if (myRelativeID.find(idx) != myRelativeID.end()){
-                    if (std::abs(getRelative().at(idx) - nextEdge->getRelative().at(idx)) != 1) {
-                        throw std::runtime_error("Bad relative");
-                    }
-                }
-            }
-
-
-            
-            // this edge have more relative info than the next
-            std::map<size_t,int> tmpRelative;
-            // we push this info to the next 
-            for(auto idxToPush :idxsToPush ){
-                tmpRelative.insert( std::make_pair(idxToPush, getRelative().at(idxToPush) +1));
-            }
-            if(tmpRelative.size() != 0){
-                nextEdge->updateRelative(tmpRelative);
-                nextEdge->propagateRelativePos();
-            }
-            tmpRelative.clear();
-
-
-            // the next node have more info than me i need to get it
-            for(auto idxToGet :idxsToGet ){
-                tmpRelative.insert( std::make_pair(idxToGet, nextEdge->getRelative().at(idxToGet) -1));
-            }
-            if(tmpRelative.size() != 0){
-                updateRelative(tmpRelative);
-                
-                for(auto weakParent : getSourceNode()->getParentNodes()){
-                    if (auto parent = weakParent.lock()) {
-                        for(auto weakPEdge : parent->getEdges()){
-                            if (auto pEdge = weakPEdge.lock()) {
-                                pEdge->propagateRelativePos();
-                            }else{
-                                throw std::runtime_error("propagateRelativePos parent edge weak pointer is expired" );
-                            }
-                        }
-                    }else{
-                        throw std::runtime_error("propagateRelativePos parent weak pointer is expired" );
-                    }
-                }
-            }
-            tmpRelative.clear();
-        }else{
-            throw std::runtime_error("propagateRelativePos edge weak pointer is expired" );
-        }
-    }
-}
-
-void FsmEdge::updateWeak(void){
-    mNodeSource->addEdge(shared_from_this());
-    mNodeDest->addParent(mNodeSource);
-}
-
-FsmEdge::FsmEdge(std::shared_ptr<FsmNode>& source,std::shared_ptr<FsmNode>& dest, const std::shared_ptr<ConditionalInterpreter>  toTest)
-:mToTest(toTest)
-{
-    mNodeSource = source;
-    mNodeDest   = dest;
-    // when i make the edge I init the nodes
-    // mNodeSource->addEdge(shared_from_this());
-    // mNodeDest->addParent(mNodeSource);
-}
-
-
-/////surchage
-
-FsmEdgeUnique::FsmEdgeUnique(std::shared_ptr<FsmNode>& source,std::shared_ptr<FsmNode>& dest, const std::shared_ptr<ConditionalInterpreter>  toTest)
-:FsmEdge(source,dest,toTest)
-{
-}
-const EdgeTestResult FsmEdgeUnique::test(const std::shared_ptr<FsmRunTimeContext> stmContext){
-    auto opNode = stmContext->getActNode();
-
-    if(opNode == nullptr){
-        return {false,std::set<NodePtr>()};//none
-    }
-    
-    if(mToTest->test(opNode) && opNode->getChildren().size() <= 1){
-        stmContext->setValid(opNode,mToTest);
-        return {true,opNode->getChildren()} ;
-    }else{
-        stmContext->addRejectedNode(opNode);
-        return {false,std::set<NodePtr>()};
-    }
-}
-/////////////////////
-FsmEdgeCommon::FsmEdgeCommon(std::shared_ptr<FsmNode>& source,std::shared_ptr<FsmNode>& dest, const std::shared_ptr<ConditionalInterpreter>  toTest, const std::string commonKey)
-:FsmEdge(source,dest,toTest)
-{
-    //make a uid for common node 
-    if(mCommonIdxMap.find(commonKey) == mCommonIdxMap.end()){
-        mCommonIdxMap.insert(std::make_pair(commonKey, mCommonIdxMap.size()));
-    }
-    mCommonIdx = mCommonIdxMap[commonKey];
-    propagateRelativePos();
-}
-
-
-const EdgeTestResult FsmEdgeCommon::test(const std::shared_ptr<FsmRunTimeContext> stmContext){
-    
-    auto opNode = stmContext->getActNode();
-
-    if(opNode == nullptr){
-        return {false,std::set<NodePtr>()};//none
-    }
-    if(mToTest->test(opNode)){
-        stmContext->setCommon(opNode,mCommonIdx);
-        stmContext->setValid(opNode,mToTest);
-        return {true,opNode->getChildren()} ;
-    }else{
-        stmContext->addRejectedNode(opNode);
-        return {false,std::set<NodePtr>()};
-    }
-}
-bool FsmEdgeCommon::isCommon(void){
-    return true;
- }
-//////////////////// TODO FsmEdgeEmpty must be size_t
-FsmEdgeRef::FsmEdgeRef(std::shared_ptr<FsmNode>& source,std::shared_ptr<FsmNode>& dest, const size_t refCommonIdx,const int deltaCommonIdx)
-:FsmEdge(source,dest,nullptr),mRefCommonIdx(refCommonIdx),mdeltaCommonIdx(deltaCommonIdx)
-{
-
-}
-const EdgeTestResult FsmEdgeRef::test(const std::shared_ptr<FsmRunTimeContext> stmContext){
-    
-    NodePtr refNode = stmContext->getCommonNodeFromIdx(mRefCommonIdx);
-    if (refNode){
-        std::set<std::shared_ptr<Node>> see;
-        return {true,refNode->getNodeDelta(mdeltaCommonIdx,see)};
-    }
-    return  {false,std::set<NodePtr>()};
-}
-////////////////////
-FsmEdgeEmpty::FsmEdgeEmpty(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNode> dest)
-:FsmEdge(source,dest,nullptr)
-{}
-const EdgeTestResult FsmEdgeEmpty::test(const std::shared_ptr<FsmRunTimeContext> stmContext){
-    auto opNode = stmContext->getActNode();
-    if(opNode == nullptr){
-        return {false,std::set<NodePtr>()};
-    }
-    return {true,std::set<NodePtr>({opNode})};//none
-}
-//////////////
-
-FsmEdgeNone::FsmEdgeNone(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNode> dest)
-:FsmEdge(source,dest,nullptr)
-{}
- const EdgeTestResult FsmEdgeNone::test(const std::shared_ptr<FsmRunTimeContext> /*stmContext*/){
-    return {false,std::set<NodePtr>()};
- }
-
-/// factory
-std::shared_ptr<FsmEdge> FsmEdgeFactory::make(
-std::shared_ptr<FsmNode> source, 
-std::shared_ptr<FsmNode> dest, FsmEdgeTypes type, 
-std::map<std::string, std::shared_ptr<ConditionalInterpreter>> allTest,
-const std::string lexeme)
-{
-        if (type == FsmEdgeTypes::EMPTY) {
-            if (lexeme.empty()) {
-                return std::make_shared<FsmEdgeEmpty>(source, dest);
-            } else {
-                throw std::invalid_argument("error lexem EMPTY");
-            }
-        } else if (type == FsmEdgeTypes::REF) {
-            std::smatch m;
-            std::regex refRegex("\\s*\\(\\s*(\\d+)\\s*,\\s*(-?\\d+)\\s*\\)\\s*");
-            if (std::regex_match(lexeme, m, refRegex)) {
-                int refCommonIdx = std::stoi(m[1]);
-                int deltaCommonIdx = std::stoi(m[2]);
-                return std::make_shared<FsmEdgeRef>(source, dest, refCommonIdx, deltaCommonIdx);
-            } else {
-                throw std::invalid_argument("error lexem REF " + lexeme);
-            }
-        } else if (type == FsmEdgeTypes::COMMON) {
-            std::smatch m;
-            std::regex commonRegex("\\s*(\\w+)#(\\d*)");
-            if (std::regex_match(lexeme, m, commonRegex)) {
-                std::string edgeType = m[1];
-                std::string commonId =  m[2];
-                size_t commonIdx = commonId.empty() ? 0 : std::stoi(commonId) + 1;
-                std::string commonKey = edgeType + std::to_string(commonIdx);
-            
-                if(allTest.find(edgeType) == allTest.end()){
-                    //if the key is not linked to a condition 
-                    //by default, it is initialized by a edge that is always false
-                    return std::make_shared<FsmEdgeNone>(source, dest);
-                    //throw std::invalid_argument("Bad Node Test " + edgeType );
-                }
-
-                return  std::make_shared<FsmEdgeCommon> (source, dest, allTest.at(edgeType), commonKey);
-            } else {
-                throw std::invalid_argument("error lexem COMMON " + lexeme);
-            }
-        } else if (type == FsmEdgeTypes::UNIQUE) {
-            std::regex uniqueRegex("\\s*(\\w+)");
-            std::smatch m;
-            if (std::regex_match(lexeme, m, uniqueRegex)) {
-                std::string edgeType = m[1];
-
-                if(allTest.find(edgeType) == allTest.end()){
-
-                    //if the key is not linked to a condition 
-                    //by default, it is initialized by a edge that is always false
-                    return std::make_shared<FsmEdgeNone>(source, dest);
-                    //throw std::invalid_argument("Bad Node Test " + edgeType );
-                }
-
-                return  std::make_shared<FsmEdgeUnique>(source, dest, allTest.at(edgeType));
-            } else {
-                throw std::invalid_argument("error lexem UNIQUE \"" + std::string(lexeme) +" eee\"");
-            }
-        } else {
-            throw std::invalid_argument("Bad edge Type");
-        }
-    }
\ No newline at end of file
diff --git a/src/graphRegex/matchFsm/FsmGraph.cpp b/src/graphRegex/matchFsm/FsmGraph.cpp
deleted file mode 100644
index 2ba11a4d26b933bdbfad5c91d127bfa2682473d4..0000000000000000000000000000000000000000
--- a/src/graphRegex/matchFsm/FsmGraph.cpp
+++ /dev/null
@@ -1,206 +0,0 @@
-#include "aidge/graphRegex/matchFsm/FsmGraph.hpp"
-
-using namespace Aidge;
-
-
-
-FsmGraph::FsmGraph(const std::string query):mQuery(query){
-
-}
-
-//TODO
-    std::vector<std::shared_ptr<MatchSolution>> FsmGraph::test(const std::vector<NodePtr>& startNodes){
-        
-    std::vector<std::shared_ptr<Aidge::FsmNode>> startNodesFsm = getStartNodes();
-    if(startNodes.size() != startNodesFsm.size()){
-         throw std::runtime_error("bad number of Start nodes");
-    }
-
-    std::vector<std::shared_ptr<FsmRunTimeContext>> walks;
-    for(std::size_t i = 0; i < startNodes.size(); i++){
-        walks.push_back(std::make_shared<FsmRunTimeContext>(startNodesFsm[i],startNodes[i]));
-    }
-    std::vector<std::shared_ptr<FsmRunTimeContext>> nextWalks;
-
-    std::vector<std::shared_ptr<FsmRunTimeContext>> allValidContext;
-    std::vector<std::shared_ptr<FsmRunTimeContext>> allContextSee;
-
-
-
-
-    while (!walks.empty())
-    {
-        for(auto fsmContext : walks){
-            allContextSee.push_back(fsmContext);
-            //if we are in a valid st we save it
-            //it's one solution of the possible solution of the matching
-            if(fsmContext->isOnValidState()){
-                //not save 2 time the same end point
-                if(!std::any_of(allValidContext.begin(), allValidContext.end(),
-                    [&](std::shared_ptr<Aidge::FsmRunTimeContext> oldValid) {
-                        return fsmContext->areEqual(oldValid);
-                })){
-                    allValidContext.push_back(fsmContext);
-                }
-
-            }
-
-            //don't test 2 time a fsmContext
-            std::vector<std::shared_ptr<FsmRunTimeContext>> tmpNextWalks = fsmContext->getActState()->test(fsmContext);
-            for(auto PotentialFsmContext : tmpNextWalks){
-
-                if(!std::any_of(allContextSee.begin(), allContextSee.end(),
-                    [&](std::shared_ptr<Aidge::FsmRunTimeContext> oldSee) {
-                        return PotentialFsmContext->areEqual(oldSee);
-                })){
-                    nextWalks.push_back(PotentialFsmContext);
-                }
-            }
-
-        }
-        walks.swap(nextWalks);
-        nextWalks.clear();
-    }
-    
-    MatchResult allMatch(allValidContext,getNbSubFsm(),mQuery,startNodes);
-    return allMatch.getSolutions();
-
-}
-
-
-///////////////
-// FSM construction
-///////////////
-const std::set<std::shared_ptr<FsmEdge>>& FsmGraph::getEdge(void){
-    return mEdges;
-}
-
-void FsmGraph::addEdge(std::shared_ptr<FsmEdge>& edge){
-    edge->updateWeak();
-    mEdges.insert(edge);
-    mAllOrigin.insert(edge->getDestNode()->getOrigin());
-    mAllOrigin.insert(edge->getSourceNode()->getOrigin());
-}
-
-const std::vector<std::shared_ptr<FsmNode>> FsmGraph::getStartNodes(void){
-    std::set<std::shared_ptr<FsmNode>> nodes = getNodes();
-    std::vector<std::shared_ptr<FsmNode>> startNodes;
-    for(auto node :nodes){
-        if(node->isStart()){
-            startNodes.push_back(node);
-        }
-    }
-    return startNodes;
-}
-
-const std::set<std::shared_ptr<FsmNode>> FsmGraph::getValidNodes(void){
-    std::set<std::shared_ptr<FsmNode>> nodes = getNodes();
-    std::set<std::shared_ptr<FsmNode>> ValidNodes;
-    for(auto node :nodes){
-        if(node->isValid()){
-            ValidNodes.insert(node);
-        }
-    }
-    //may short
-    return ValidNodes;
-}
-
-const std::set<std::shared_ptr<FsmNode>> FsmGraph::getNodes(void){
-    std::set<std::shared_ptr<FsmNode>> nodes;
-    for(auto edge : mEdges){
-        nodes.insert(edge->getDestNode());
-        nodes.insert(edge->getSourceNode());
-    }
-    return nodes;
-}
-
-void FsmGraph::setGroupe(std::size_t groupeIdx){
-    std::set<std::shared_ptr<FsmNode>> nodes = getNodes();
-    for(auto node :nodes){
-        node->setGroupe(groupeIdx);
-    }
-}
-
-void FsmGraph::unionG(const std::shared_ptr<FsmGraph> fsmGraph){
-
-    for(auto edge : fsmGraph->getEdge()){
-        addEdge(edge);
-    }
-}
-
-void FsmGraph::mergeOneStartOneValid(const std::shared_ptr<FsmGraph> fsmGraph){
-    std::set<std::shared_ptr<FsmNode>> validNodes = getValidNodes();
-    std::vector<std::shared_ptr<FsmNode>> startNodes = fsmGraph->getStartNodes();
-
-    if (startNodes.size() != 1 || validNodes.size() != 1){
-
-        std::ostringstream errorMessage;
-        errorMessage <<"mergeOneStartOneValid  start size: " << startNodes.size() << " valid size : " << validNodes.size()
-        <<" can only merge FSM 1 start 1 valid";
-        throw std::runtime_error(errorMessage.str());
-    }
-
-    unionG(fsmGraph);
-    //for loop useless but for future merge it's could be used
-    for(auto valid : validNodes){
-        valid->invalid();
-        for(auto start : startNodes){
-            start->unStart();
-            _mergeNode(start,valid);
-        }
-    }
-}
-
-std::size_t FsmGraph::getNbSubFsm(void){
-    return mAllOrigin.size();
-}
-
-std::size_t FsmGraph::getNbStart(void){
-    return getStartNodes().size();
-}
-
-void FsmGraph::incOriginAllNodeBy(std::size_t incr){
-    std::set<std::shared_ptr<FsmNode>> nodes = getNodes();
-    for(auto node :nodes){
-        node->incOrigin(incr);
-    }
-    std::set<std::size_t> updatedOrigin;
-    for(auto origin : mAllOrigin){
-        updatedOrigin.insert(origin + incr);
-    }
-    mAllOrigin.swap(updatedOrigin);
-}
-
-void FsmGraph::_mergeNode(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNode> dest){
-    std::set<std::shared_ptr<FsmNode>> nodes = getNodes();
-
-    if(nodes.find(source) == nodes.end() || nodes.find(dest) == nodes.end()){
-        throw std::runtime_error("FsmGraph can not merge node not in the graph");
-    }
-    nodes.clear();
-
-    //probagate source attribute
-    if(source->isValid()){
-        dest->valid();
-    }
-    if(source->isStart()){
-        dest->start();
-    }
-
-    //merge source to dest by replace source by dest in all EDGE
-    for(auto edge : mEdges){
-        if(edge->getDestNode() == source ){
-            edge->reSetDestNode(dest);
-        }else if(edge->getSourceNode() == source ){
-            edge->reSetSourceNode(dest);
-        }
-
-    }
-    //check is source is not in graph
-    nodes = getNodes();
-    if(nodes.find(source) != nodes.end() ){
-        throw std::runtime_error("FsmGraph merge node not effective");
-    }
-    nodes.clear();
-
-}
diff --git a/src/graphRegex/matchFsm/FsmNode.cpp b/src/graphRegex/matchFsm/FsmNode.cpp
deleted file mode 100644
index 6666d1a72a298f20bdae0eb1c51805e5ae133ba4..0000000000000000000000000000000000000000
--- a/src/graphRegex/matchFsm/FsmNode.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-#include "aidge/graphRegex/matchFsm/FsmNode.hpp"
-#include "aidge/graphRegex/matchFsm/FsmEdge.hpp"
-#include "aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp"
-
-using namespace Aidge; 
-
-
-
-FsmNode::FsmNode(bool isAValid,bool isAStart ){
-    mIsAStart =isAStart;
-    mIsAValid =isAValid;
-
-}
-const std::vector<std::shared_ptr<FsmRunTimeContext>> FsmNode::test( std::shared_ptr<FsmRunTimeContext> fsmContext){
-
-
-    std::vector<std::shared_ptr<FsmRunTimeContext>> out;
-
-    for(auto edge : mEdges){
-        if (auto sharedEdge = edge.lock()) {
-
-            std::shared_ptr<FsmNode> nextState =  sharedEdge->getDestNode();
-
-            //make copy of the fsmContext
-            std::shared_ptr<FsmRunTimeContext> newFsmContext = std::make_shared<FsmRunTimeContext>(fsmContext);
-
-            EdgeTestResult edgeRes = sharedEdge->test(newFsmContext);
-
-            if(edgeRes.success){
-                if(edgeRes.node.size() != 0){
-                    for(auto nextNode :edgeRes.node ){
-                        if(!newFsmContext->isAlreadyValid(nextNode)|| newFsmContext->isCommonDefined(nextNode) ){
-                            out.push_back( std::make_shared<FsmRunTimeContext>(newFsmContext,nextState,nextNode));
-                           
-                        }else{
-                            out.push_back( std::make_shared<FsmRunTimeContext>(newFsmContext,nextState,nullptr));
-                        }
-
-                    }
-                }else{
-                    out.push_back( std::make_shared<FsmRunTimeContext>(newFsmContext,nextState,nullptr));
-                }
-            }
-            newFsmContext.reset();
-
-        }else{
-            throw std::runtime_error("test FsmNode weak pointer is expired" );
-        }
-
-    }
-    return out;
-}
-
-
-
-std::size_t FsmNode::getOrigin(void){
-    return mOriginFsm;
-}
-void FsmNode::incOrigin(std::size_t inc){
-    mOriginFsm += inc;
-}
-void FsmNode::rmEdge(std::shared_ptr<FsmEdge> edge){
-    mEdges.erase(edge);
-}
-
-void FsmNode::addEdge(std::shared_ptr<FsmEdge> edge){
-    std::weak_ptr<FsmEdge> edgeW(edge);
-    if (!edgeW.expired()) {
-        mEdges.insert(edgeW);
-    }else{
-        throw std::runtime_error("addEdge FsmNode weak pointer is expired" );
-    }
-}
-
-// const std::set<std::shared_ptr<FsmNode>> FsmNode::getChildNodes(void){
-//     std::set<std::shared_ptr<FsmNode>> children;
-//     for(auto edge : mEdges){
-//          if (auto sharedEdge = edge.lock()) {
-//                 children.insert(sharedEdge->getDestNode());
-//          }else{
-//             throw std::runtime_error("getChildNodes FsmNode weak pointer is expired" );
-//          }
-//     }
-//     return children;
-// }
-
-
-const std::set<std::weak_ptr<FsmNode>,lex_compare<FsmNode>>& FsmNode::getParentNodes(void){
-    return mParents;
-}
-const std::set<std::weak_ptr<FsmEdge>,lex_compare<FsmEdge>>& FsmNode::getEdges(void){
-    return mEdges;
-}
-
-void FsmNode::setGroupe(std::size_t groupeIdx){
-    mGroupeFsm = groupeIdx;
-    
-}
-
-bool FsmNode::isValid(void){
-    return mIsAValid;
-}
-bool FsmNode::isStart(void){
-    return mIsAStart;
-}
-void FsmNode::invalid(void){
-    mIsAValid =false;
-}
-void FsmNode::valid(void){
-    mIsAValid =true;
-}
-void FsmNode::unStart(void){
-    mIsAStart =false;
-}
-void FsmNode::start(void){
-    mIsAStart =true;
-}
-
-
-
-void FsmNode::addParent(std::shared_ptr<FsmNode> node){
-
-    std::weak_ptr<FsmNode> nodeW(node);
-    if (!nodeW.expired()) {
-        mParents.insert(nodeW);
-    }else{
-        throw std::runtime_error("addParent FsmNode weak pointer is expired" );
-    }
-}
-void FsmNode::rmParent(std::shared_ptr<FsmNode> node){
-    mParents.erase(node);
-}
\ No newline at end of file
diff --git a/src/graphRegex/matchFsm/FsmRunTimeContext.cpp b/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
deleted file mode 100644
index 89e7faf205ef515049de415a5f057db8a13105e9..0000000000000000000000000000000000000000
--- a/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
+++ /dev/null
@@ -1,217 +0,0 @@
-#include "aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp"
-#include "aidge/graphRegex/matchFsm/FsmNode.hpp"
-
-using namespace Aidge;
-
-std::vector<std::set<NodePtr>> FsmRunTimeContext::mRejectedNodes;
-
-FsmRunTimeContext::FsmRunTimeContext(std::shared_ptr<FsmNode> actState ,NodePtr actOpNode ,std::size_t idxRejeced  ){
-    mActOpNode = actOpNode;
-    mActState  = actState;
-
-    //not define case
-    if(idxRejeced ==  std::numeric_limits<std::size_t>::max()){
-        mLocalIdxRejeced =  mRejectedNodes.size();
-        mRejectedNodes.push_back(std::set<NodePtr>());
-    }else{
-        if(idxRejeced > mRejectedNodes.size()-1 ){
-            throw std::runtime_error("FsmRunTimeContext idxRejeced");
-        }
-        mLocalIdxRejeced =idxRejeced;
-    }
-}
-
-
-
-FsmRunTimeContext::FsmRunTimeContext(std::shared_ptr<FsmRunTimeContext> fsmRunTime){
-    mActOpNode        = fsmRunTime->mActOpNode;
-    mActState         = fsmRunTime->mActState;
-    mCommonNodes      = fsmRunTime->mCommonNodes;
-    mValidNodes       = fsmRunTime->mValidNodes;
-    mLocalIdxRejeced  = fsmRunTime->mLocalIdxRejeced;
-}
-FsmRunTimeContext::FsmRunTimeContext(std::shared_ptr<FsmRunTimeContext> fsmRunTime,std::shared_ptr<FsmNode> actState ,NodePtr actOpNode ){
-    mActOpNode        = actOpNode;
-    mActState         = actState;
-    mCommonNodes      = fsmRunTime->mCommonNodes;
-    mValidNodes       = fsmRunTime->mValidNodes;
-    mLocalIdxRejeced  = fsmRunTime->mLocalIdxRejeced;
-}
-
-void FsmRunTimeContext::addRejectedNode(NodePtr node){
-    mRejectedNodes[mLocalIdxRejeced].insert(node);
-}
-
-bool FsmRunTimeContext::isOnValidState(void){
-    return mActState->isValid();
-}
-
-bool FsmRunTimeContext::isCommonDefined(NodePtr node){
-    //return mCommonNodes.find(node) != mCommonNodes.end();
-
-    std::set<NodePtr> nodes = getCommonNodes();
-    for(const auto& nodeC : nodes){
-        if(nodeC.get() == node.get()){
-            return true;
-        }
-    }
-    return false;
-}
-
-bool FsmRunTimeContext::isAlreadyValid(NodePtr node){
-
-    std::set<NodePtr> nodes = getValidNodes();
-    for(const auto& nodeV : nodes){
-        if(nodeV.get() == node.get()){
-            return true;
-        }
-    }
-    return false;
-
-    //return getValidNodes().find(node) != getValidNodes().end();
-}
-
-bool FsmRunTimeContext::areCompatible(std::shared_ptr<FsmRunTimeContext> fsmContext){
-    /*
-    see if 2 context can be merge
-    it need to have different  mValidNodes except for common
-    and the same idx for the common
-    */
-
-   //common node
-
-   for (const auto& ref : getCommon()) {
-        for (const auto& test : fsmContext->getCommon()) {
-            //same index
-            if(ref.second == test.second){
-                if(ref.first != test.first){
-                    return false;
-                }
-            }
-        }
-   }
-
-   //valid nodes
-    std::set<NodePtr> commonElements;
-    std::set<NodePtr> A = getValidNodesNoCommon();
-    std::set<NodePtr> B = fsmContext->getValidNodesNoCommon();
-    std::set_intersection(
-        A.begin(),A.end(),
-        B.begin(),  B.end(),
-        std::inserter(commonElements, commonElements.end())
-       );
-
-    return (commonElements.empty()) ? true : false;
-}
-
-bool FsmRunTimeContext::areEqual(std::shared_ptr<FsmRunTimeContext> fsmContext){
-    if(getActNode() != fsmContext->getActNode()){
-        return false;
-    }
-    if (getActState() != fsmContext->getActState()){
-        return false;
-    }
-    if (getValidNodes() != fsmContext->getValidNodes()){
-        return false;
-    }
-    if (getCommon() != fsmContext->getCommon()){
-        return false;
-    }
-
-
-    return true;
-}
-
-void FsmRunTimeContext::setCommon(NodePtr node,std::size_t commonIdx){
-    if(isCommonDefined(node)){
-        if (mCommonNodes.at(node) != commonIdx){
-            throw std::runtime_error("conflict idx in the Common node");
-        }
-    }else{
-        mCommonNodes[node] = commonIdx;
-    }
-}
-
-void FsmRunTimeContext::setValid(NodePtr node,std::shared_ptr<ConditionalInterpreter> tag){
-    //we already find a node of this type
-    if(mValidNodes.find(tag) != mValidNodes.end()){
-        if(isAlreadyValid(node) && !isCommonDefined(node) ){
-            throw std::runtime_error("setValid you valid tow time");
-        }
-        mValidNodes[tag].insert(node);
-    }else{
-        mValidNodes[tag] = {node};
-    }
-
-}
-
-std::size_t FsmRunTimeContext::getSubStmId(void){
-    return mActState->getOrigin();
-}
-
-NodePtr FsmRunTimeContext::getCommonNodeFromIdx(std::size_t commonIdx){
-    for (const auto& pair : mCommonNodes) {
-        if (pair.second == commonIdx) {
-            return pair.first; // Return the key when the value is found
-        }
-    }
-    throw std::runtime_error("getCommonNodeFromIdx Value not found in the map");
-}
-
-std::size_t FsmRunTimeContext::getCommonNodeIdx(NodePtr node){
-    if(isCommonDefined(node)){
-        return mCommonNodes.at(node);
-    }
-    throw std::runtime_error("getCommonNodeIdx node not found");
-}
-
-std::set<NodePtr> FsmRunTimeContext::getCommonNodes(void){
-    std::set<NodePtr> nodes;
-    // Iterate over the map and insert values into the set
-    for (const auto& pair : mCommonNodes) {
-        nodes.insert(pair.first);
-    }
-    return nodes;
-}
-
-std::map<NodePtr,std::size_t> FsmRunTimeContext::getCommon(void){
-    return mCommonNodes;
-}
-
-std::set<NodePtr> FsmRunTimeContext::getValidNodes(void){
-
-    auto sharedSet = std::make_shared<std::set<NodePtr>>();
-    // Create a set to store the values from the map
-    std::set<NodePtr> nodes;
-    // Iterate over the map and insert values into the set
-    for (const auto& pair : mValidNodes) {
-        nodes.insert(pair.second.begin(),pair.second.end());
-    }
-    return nodes;
-}
-
-std::set<NodePtr> FsmRunTimeContext::getValidNodesNoCommon(void){
-    std::set<NodePtr> differenceSet;
-    std::set<NodePtr> valid = getValidNodes();
-    std::set<NodePtr> common = getCommonNodes();
-    std::set_difference(valid.begin(), valid.end(), common.begin(), common.end(),std::inserter(differenceSet, differenceSet.end()));
-    return differenceSet;
-}
-
-std::map<std::shared_ptr<ConditionalInterpreter>,std::set<NodePtr>>& FsmRunTimeContext::getValid(void){
-    return mValidNodes;
-}
-
-NodePtr FsmRunTimeContext::getActNode(void){
-    return mActOpNode;
-}
-
-std::shared_ptr<FsmNode> FsmRunTimeContext::getActState(){
-    return mActState;
-}
-
-
-void FsmRunTimeContext::rst(void){
-    mRejectedNodes.clear();
-}
-
diff --git a/src/graphRegex/matchFsm/MatchResult.cpp b/src/graphRegex/matchFsm/MatchResult.cpp
deleted file mode 100644
index 99df00e198a9a30be21e0ad18b4933a40b9b7a06..0000000000000000000000000000000000000000
--- a/src/graphRegex/matchFsm/MatchResult.cpp
+++ /dev/null
@@ -1,135 +0,0 @@
-#include <algorithm> // set_intersection, std::sort
-#include <memory>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "aidge/graphRegex/matchFsm/MatchResult.hpp"
-
-Aidge::MatchSolution::MatchSolution(std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence,const std::string query,const std::vector<NodePtr> startNode):mQueryFrom(query),mStartNode(startNode){
-        //reformat the solution
-        for (const auto& context : precedence) {
-            for (const auto& pair : context->getValid()) {
-
-                if(mSolution.find(pair.first->getKey()) == mSolution.end()){
-                    mSolution[pair.first->getKey()] = pair.second;
-                }else{
-                        mSolution[pair.first->getKey()].insert(pair.second.begin(), pair.second.end());
-                }
-            }
-        }
-}
-
-const std::set<Aidge::NodePtr> Aidge::MatchSolution::getAll(){
-
-        // Create a unique set to store all the elements
-        std::set<NodePtr> uniqueSet;
-
-        // Iterate through the map and insert elements from each set into the unique set
-        for (const auto& pair : mSolution) {
-            const std::set<NodePtr>& nodeSet = pair.second;
-
-            // Insert elements from the current set into the unique set
-            uniqueSet.insert(nodeSet.begin(), nodeSet.end());
-        }
-
-        return uniqueSet;
-}
-
-bool Aidge::MatchSolution::areCompatible(std::shared_ptr<Aidge::MatchSolution> solution){
-    std::set<NodePtr> set1 = solution->getAll();
-    std::set<NodePtr> set2 = getAll();
-    std::set<NodePtr> intersection ;
-    std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(), std::inserter(intersection, intersection.begin()));
-    return intersection.empty();
-}
-
-
-////////////////////////////////
-//
-////////////////////////////////
-Aidge::MatchResult::MatchResult(std::vector<std::shared_ptr<Aidge::FsmRunTimeContext>> allValid,
-                                std::size_t nbSubStm,
-                                const std::string& query,
-                                const std::vector<Aidge::NodePtr>& startNodes)
-        : mIdToRunTime(nbSubStm),
-          mNbSubStm(nbSubStm)
-{
-    mAllValid = allValid;
-
-    //mIdToRunTimm
-    for (const auto& contextPtr : allValid) {
-        mIdToRunTime[contextPtr->getSubStmId()].push_back(contextPtr);
-    }
-
-    std::vector<std::shared_ptr<FsmRunTimeContext>> precedence;
-    //make all solution possible
-    _generateCombination(0,precedence,query,startNodes);
-    //sort by solution number of elements
-    std::sort(mSolve.begin(), mSolve.end(), [](std::shared_ptr<MatchSolution>& set1, std::shared_ptr<MatchSolution>& set2) {
-        return set1->getAll().size() < set2->getAll().size();
-    });
-}
-
-void Aidge::MatchResult::_generateCombination( std::size_t idxSubStm,
-                                        std::vector<std::shared_ptr<Aidge::FsmRunTimeContext>>& precedence,
-                                        const std::string& query,
-                                        const std::vector<Aidge::NodePtr>& startNodes)
-{
-    //it's end , we are below the number of stm
-    if (idxSubStm == mNbSubStm)
-    {
-        //precedence contain a list of FSM compatible, we just need to
-        //check if all the nodes have been validated by at least one context
-
-        //1) make the set of all node for the compute graph that are valid in all the  FsmRunTimeContext
-        std::set<NodePtr> validNode;
-        std::set<NodePtr> rejectNode;
-        for (const auto& contextPtr : precedence) {
-            std::set<NodePtr> tmpV =  contextPtr->getValidNodes();
-            validNode.insert(tmpV.begin(), tmpV.end());
-            std::set<NodePtr> tmpR =  contextPtr->getRejectedNodes();
-            rejectNode.insert(tmpR.begin(),tmpR.end());
-        }
-        // 2) all  RejectedNodes need to be valid by an others stm
-        // if it's not the case the match is not valid
-        if(std::includes(validNode.begin(), validNode.end(), rejectNode.begin(), rejectNode.end())){
-            //we can save the solution
-            mSolve.push_back(std::make_shared<MatchSolution>(precedence,query,startNodes));
-        }
-        precedence.pop_back();
-        return;
-    }
-
-
-    for (const auto& contextPtrOneFsm : mIdToRunTime[idxSubStm])
-    {
-        if(idxSubStm == 0){
-            precedence.push_back(contextPtrOneFsm);
-            _generateCombination(idxSubStm+1,precedence,query,startNodes);
-
-        }else{
-            //test if the new context is compatible with all the context in the precedence
-            //
-            bool compatibleSolutionFsm = true;
-            for (const auto& contextPtrOfOtherFsm : precedence) {
-                if(!(contextPtrOneFsm->areCompatible(contextPtrOfOtherFsm))){
-                    compatibleSolutionFsm = false;
-                    break;
-                }
-            }
-
-            if(compatibleSolutionFsm){
-                precedence.push_back(contextPtrOneFsm);
-                _generateCombination(idxSubStm+1,precedence,query,startNodes);
-            }
-
-        }
-    }
-
-    if(idxSubStm != 0){
-        precedence.pop_back();
-    }
-    return;
-
-}
\ No newline at end of file
diff --git a/src/nodeTester/ConditionalInterpreter.cpp b/src/nodeTester/ConditionalInterpreter.cpp
deleted file mode 100644
index 5d10762d93d2bc0e92bf9d15bb24255bb7e51768..0000000000000000000000000000000000000000
--- a/src/nodeTester/ConditionalInterpreter.cpp
+++ /dev/null
@@ -1,364 +0,0 @@
-
-#include "aidge/nodeTester/ConditionalInterpreter.hpp"
-
-using namespace Aidge;
-
-
-///////////////////////////////
-//ConditionalRegisterFunction
-///////////////////////////////
-
-     std::shared_ptr<ConditionalData> ConditionalRegisterFunction::run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & data){
-
-        auto lambdaIt = mWlambda.find(key);
-        if (lambdaIt != mWlambda.end()) {
-            return lambdaIt->second(data);
-        }else {
-            throw std::runtime_error("can not run Lambda due to invalid key: " + key);
-        }
-    }
-
-
-//////////////////////
-//ConditionalInterpreter
-///////////////////////
-    ConditionalInterpreter::ConditionalInterpreter(const std::string key,const std::string ConditionalExpressions)
-    :mLambdaRegister(),mKey(key)
-    {
-
-        ConditionalParser conditionalParser = ConditionalParser(ConditionalExpressions);
-        mTree = conditionalParser.parse();
-        
-        ///lambda by default
-        mLambdaRegister.insert("getType",+[](NodePtr NodeOp){return NodeOp->type();});
-
-    }
-    
-    bool ConditionalInterpreter::isLambdaRegister(const std::string &key){
-        return mLambdaRegister.isLambdaRegister(key);
-    }
-    
-    const std::string& ConditionalInterpreter::getKey(){
-        return mKey;
-    }
-
-
-    bool ConditionalInterpreter::test( const NodePtr nodeOp)
-    {
-        mResolution.clear();
-        try{
-            std::vector< std::shared_ptr<ConditionalData>> r =  visit({mTree},nodeOp);
-   
-            if (mResolution.size() != 1){
-                throw std::runtime_error("Multi output interpretation output");
-            }else{
-                if (!mResolution[0]->isTypeEqualTo<bool>()){
-                    throw std::runtime_error("TEST OUT MUST BE A BOOL ");
-                }else{
-                    return mResolution[0]->getValue<bool>();
-                }
-            }
-
-        }catch(const std::exception& e){
-            std::ostringstream errorMessage;
-            errorMessage << "Error in test " << "\n\t" << e.what()  << "\n";
-            throw std::runtime_error(errorMessage.str());
-        }
-    }
-
-    void ConditionalInterpreter::insertLambda(const std::string key,std::function<bool(Aidge::NodePtr)> f){
-        mLambdaRegister.insert<std::function<bool(Aidge::NodePtr)> >(key, f);
-    }
-
-    /////
-    std::vector< std::shared_ptr<ConditionalData>> ConditionalInterpreter::visit(const ASTNodeCh& nodes, const NodePtr nodeOp ){
-            std::vector< std::shared_ptr<ConditionalData>> dataVector;
-
-            for ( std::shared_ptr<AstNode<ConditionalTokenTypes>> node : nodes) {
-                try{
-                    switch (node->getType()){
-                        ///////////////////////////////////
-                        //OPERATOR
-                        ///////////////////////////////////
-                        case ConditionalTokenTypes::NOT:
-                            {
-                            visit(node->getChilds(),nodeOp);
-                            fNot();
-                            }
-                            break;
-                        case ConditionalTokenTypes::AND:
-                            {
-                            visit(node->getChilds(),nodeOp);
-                            fAnd();
-                            }
-                            break;
-                        case ConditionalTokenTypes::OR:
-                            {
-                            visit(node->getChilds(),nodeOp);
-                            fOr();
-                            }
-                            break;
-                        case ConditionalTokenTypes::EQ:
-                            {
-                            visit(node->getChilds(),nodeOp);
-                            fEq();
-                            //dataVector.insert(dataVector.end(), tmp.begin(), tmp.end());
-                            }
-                            break;
-                        case ConditionalTokenTypes::NEQ:
-                            {
-                            visit(node->getChilds(),nodeOp);
-                            fNeq();
-                            }
-                            break;
-
-                        ///////////////////////////////////
-                        //VALUE
-                        ///////////////////////////////////
-
-                        case ConditionalTokenTypes::KEY:
-
-                            break;
-                        case ConditionalTokenTypes::INTEGER:
-                            {
-                                fStrToInteger(node);
-                            }
-                            break;
-                        case ConditionalTokenTypes::FLOAT:
-                            {
-                                fStrToFloat(node);
-
-                            }
-                            break;
-                        case ConditionalTokenTypes::STRING:
-                            {
-                                fStrToStr(node);
-                            }
-                            break;
-
-                        case ConditionalTokenTypes::NODE: //TODO
-                            {
-
-                                std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
-                                data->setValue<NodePtr>(nodeOp);
-                                mResolution.push_back(data);
-
-                            }
-                            break;
-
-                        case ConditionalTokenTypes::LAMBDA:
-                            {
-                                visit(node->getChilds(),nodeOp);
-                                fLambda(node);
-
-                            }
-                            break;
-
-                        case ConditionalTokenTypes::BOOL: //TODO
-                            {
-                             std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
-
-                            if(node->getValue() == "true"){
-                                data->setValue<bool>(true);
-                            }else{
-                                data->setValue<bool>(false);
-                            }
-
-                            mResolution.push_back(data);
-
-                            }
-                            break;
-
-                        case ConditionalTokenTypes::ARGSEP:
-                        case ConditionalTokenTypes::LPAREN:
-                        case ConditionalTokenTypes::RPAREN:
-                        case ConditionalTokenTypes::STOP:
-                        default:
-                            throw std::runtime_error("NODE TYPE NOT SUPPORTED IN ConditionalInterpreter");
-                    }
-                }catch(const std::exception& e){
-                    std::ostringstream errorMessage;
-                    errorMessage << "Error in visiting AST for node "<< nodeOp->name() << "\n\t" << e.what()  << "\n";
-                    throw std::runtime_error(errorMessage.str()); 
-                }
-            }
-
-            return dataVector;
-    }
-
-
-    //////////////////////
-    //value converter
-    /////////////////////
-
-
-    void ConditionalInterpreter::fStrToInteger(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node)
-    {
-         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
-
-        data->setValue<int>(std::stoi(node->getValue()));
-        mResolution.push_back(data);
-    }
-
-    void ConditionalInterpreter::fStrToFloat(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node)
-    {
-
-         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
-        data->setValue<float>(std::stof(node->getValue()));
-        mResolution.push_back(data);
-    }
-
-    void ConditionalInterpreter::fStrToStr(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node)
-    {
-         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
-        data->setValue<std::string>(node->getValue());
-        mResolution.push_back(data);
-    }
-
-    void ConditionalInterpreter::fLambda(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node)
-    {
-        //if the lambda have input
-         std::shared_ptr<ConditionalData> data;
-        try {
-            data = mLambdaRegister.run(node->getValue(),mResolution);
-        } catch (const std::exception& e) {
-            std::ostringstream errorMessage;
-            errorMessage << "Error in conditional interpretation when run the "<<  node->getValue() <<" Lambda\n\t" << e.what()  << "\n";
-            throw std::runtime_error(errorMessage.str());
-        }
-
-        //clearRes();
-        mResolution.push_back(data);
-    }
-
-    void ConditionalInterpreter::fEq(void)
-    {
-        if (mResolution.size() < 2){
-            throw std::runtime_error("EQ need 2 arg and get :" + std::to_string(mResolution.size()));
-        }
-        auto a = mResolution.back(); 
-        mResolution.pop_back();
-        auto b = mResolution.back(); 
- 	    mResolution.pop_back();
-     
-
-        if (a->getType() != b->getType()){
-            throw std::runtime_error("EQ Unsupported between type :" + a->getType() +" "+ b->getType());
-        }
-
-
-
-         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
-
-        if (a->isTypeEqualTo<int>()) {
-           data->setValue<bool>( a->getValue<int>() == b->getValue<int>());
-        }else if (a->isTypeEqualTo<float>()){
-           data->setValue<bool>( a->getValue<float>() == b->getValue<float>());
-        }else if (a->isTypeEqualTo<std::string>()){
-           data->setValue<bool>( a->getValue<std::string>() == b->getValue<std::string>());
-        }else if (a->isTypeEqualTo<bool>()){
-           data->setValue<bool>( a->getValue<bool>() == b->getValue<bool>());
-        }else{
-           throw std::runtime_error("EQ Unknown type encountered :" + a->getType() );
-        }
-
-        
-        mResolution.push_back(data);
-    }
-
-    void ConditionalInterpreter::fNeq(void)
-    {
-        if (mResolution.size() < 2){
-             throw std::runtime_error("NEQ need 2 arg and get :" + std::to_string(mResolution.size()));
-        }
-        auto a = mResolution.back(); 
- 	    mResolution.pop_back();
-        auto b = mResolution.back(); 
- 	    mResolution.pop_back();
-
-        if (a->getType() != b->getType()){
-            throw std::runtime_error("NEQ Unsupported between type :" + a->getType() +" "+ b->getType());
-        }
-
-         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
-
-        if (a->isTypeEqualTo<int>()) {
-           data->setValue<bool>( a->getValue<int>() != b->getValue<int>());
-        }else if (a->isTypeEqualTo<float>()){
-           data->setValue<bool>( a->getValue<float>() != b->getValue<float>());
-        }else if (a->isTypeEqualTo<std::string>()){
-           data->setValue<bool>( a->getValue<std::string>() != b->getValue<std::string>());
-        }else
-        {
-           throw std::runtime_error("NEQ Unknown type encountered :" + a->getType() );
-        }
-
-        
-        mResolution.push_back(data);
-    }
-
-    void ConditionalInterpreter::fAnd(void)
-    {
-        if (mResolution.size() < 2){
-           throw std::runtime_error("AND need 2 arg and get :" + std::to_string(mResolution.size()));
-        }
-        auto a = mResolution.back(); 
- 	    mResolution.pop_back();
-        auto b = mResolution.back(); 
- 	    mResolution.pop_back();
-
-
-        if (a->getType() != typeid(bool).name() || b->getType() != typeid(bool).name()){
-            throw std::runtime_error("AND Unknown type encountered need bool get :" + a->getType() );
-        }
-
-         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
-        data->setValue<bool>( a->getValue<bool>() && b->getValue<bool>());
-
-
-        
-        mResolution.push_back(data);
-    }
-
-    void ConditionalInterpreter::fOr(void)
-    {
-        if (mResolution.size() < 2){
-             throw std::runtime_error("OR need 2 arg and get :" + std::to_string(mResolution.size()));
-        }
-        auto a = mResolution.back(); 
- 	    mResolution.pop_back();
-        auto b = mResolution.back(); 
- 	    mResolution.pop_back();
-
-
-        if (a->getType() != typeid(bool).name() || b->getType() != typeid(bool).name()){
-             throw std::runtime_error("OR Unknown type encountered need bool get :" + a->getType() );
-        }
-
-         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
-        data->setValue<bool>( a->getValue<bool>() || b->getValue<bool>());
-
-
-        
-        mResolution.push_back(data);
-    }
-
-    void ConditionalInterpreter::fNot()
-        {
-            if (mResolution.size() < 1){
-                throw std::runtime_error("NOT need 1 arg and get :" + std::to_string(mResolution.size()));
-            }
-            auto a = mResolution.back(); 
- 	        mResolution.pop_back();
-
-            if (a->getType() != typeid(bool).name()){
-                throw std::runtime_error("NOT Unknown type encountered need bool get :" + a->getType() );
-            }
-
-             std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
-            data->setValue<bool>( !a->getValue<bool>() );
-
-            
-            mResolution.push_back(data);
-
-        }
diff --git a/src/nodeTester/ConditionalLexer.cpp b/src/nodeTester/ConditionalLexer.cpp
deleted file mode 100644
index 9cc480ab29e84a775d9e275fe6ba51dc11e6ea14..0000000000000000000000000000000000000000
--- a/src/nodeTester/ConditionalLexer.cpp
+++ /dev/null
@@ -1,242 +0,0 @@
-#include "aidge/nodeTester/ConditionalLexer.hpp"
-
-using namespace Aidge; 
-
-//////////////////
-//ConditionalLexer
-//////////////////
-
-
-ConditionalLexer::ConditionalLexer( const std::string ConditionalExpressions):
-mConditionalExpressions(ConditionalExpressions)
-{
-    mPosition = 0;
-}
-
-std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextToken(void){
-    std::string currentChars = "";
-
-    while (mPosition < mConditionalExpressions.length())
-    {
-        //erase all space 
-        if (mConditionalExpressions[mPosition] != ' ')
-        {
-            currentChars += mConditionalExpressions[mPosition];
-        }
-        else
-        {
-            mPosition++;
-            continue;
-        }
-        //perform tokenisation, find a regex and make a new token
-        
-        if (std::regex_match(currentChars,std::regex("\\&\\&")))// the AND TOKEN 
-        {
-            mPosition++;
-            return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::AND,"");
-        }
-        else if (std::regex_match(currentChars,std::regex("\\|\\|")))// the OR TOKEN 
-        {
-            mPosition++;
-            return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::OR,"");
-        }
-        else if (std::regex_match(currentChars,std::regex("\\!")))// the Not and not equ
-        {
-            mPosition++;
-            if ( mPosition < mConditionalExpressions.length()){
-                currentChars += mConditionalExpressions[mPosition];
-                if(std::regex_match(currentChars,std::regex("!="))){
-                    mPosition++;
-                    return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::NEQ,"");
-                }else{
-                     return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::NOT,"");
-                }
-            }
-            //a not at the end not ok but it's the parseur work
-            return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::NOT,"");
-        }
-        else if (std::regex_match(currentChars,std::regex("==")))// the EQ TOKEN 
-        {
-            mPosition++;
-            return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::EQ,"");
-        }
-        else if (std::regex_match(currentChars,std::regex("\\(")))// the LPAREN TOKEN 
-        {
-            mPosition++;
-            return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::LPAREN,"");
-        }
-        else if (std::regex_match(currentChars,std::regex("\\)")))// the RPAREN TOKEN 
-        {
-            mPosition++;
-            return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::RPAREN,"");
-        }
-        else if (std::regex_match(currentChars,std::regex(",")))// the RPAREN TOKEN 
-        {
-            mPosition++;
-            return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::ARGSEP,"");
-        }
-        else if (std::regex_match(currentChars,std::regex("\\$")))// the ACTNode TOKEN 
-        {
-            mPosition++;
-            return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::NODE,"");
-        }
-
-
-        /////
-        //non const lent token
-        /////
-
-        //LAMBDA, KEY , bool //the function TAG 
-        else if (std::regex_match(currentChars,std::regex("[A-Za-z_]")))// the KEY TOKEN (a char next )
-        {   
-            //read all the key 
-            bool isLambda = false;
-            std::regex keyRegex("[A-Za-z_0-9]+");
-            std::regex LambdaRegex("[A-Za-z_0-9]+\\(");
-
-            while ( mPosition < mConditionalExpressions.length()) {
-                if(!std::regex_match(currentChars,keyRegex) && !std::regex_match(currentChars,LambdaRegex))
-                {
-                    currentChars.pop_back(); //the last char is the problems
-                    break;
-                }
-                else if (std::regex_match(currentChars,LambdaRegex)){
-                    isLambda = true;
-                }
-                mPosition++;
-                if (mPosition < mConditionalExpressions.length()) currentChars += mConditionalExpressions[mPosition];
-                //currentChars += mConditionalExpressions[mPosition];
-            }
-            //we end the match 2 possibility 
-            //we are at the end of the mConditionalExpressions and we need to ensure the match
-            //we are not we can continu
-            if (mPosition == mConditionalExpressions.length()-1)
-            {
-                if (!std::regex_match(currentChars,keyRegex) && !std::regex_match(currentChars,LambdaRegex))
-                {
-                    throw badTokenError(currentChars,mPosition);
-                }
-                //mPosition++; // we stop all by going pos > length
-            }
-
-
-            if (std::regex_match(currentChars,std::regex("(true|false|True|False)"))){
-                return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::BOOL,currentChars);
-
-            } else if (isLambda){
-                currentChars.pop_back();//pop the ( of the lambda
-                return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::LAMBDA,currentChars);
-            } else{
-                return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::KEY,currentChars);
-            }
-            
-        }
-        //numeric value 
-        else if (std::regex_match(currentChars,std::regex("[0-9]")))// the KEY TOKEN (a char next )
-        {   
-            //read all the key 
-            bool isFloat = false;
-            std::regex integerRegex("[0-9]+$");
-            std::regex floatRegex("[0-9]+\\.[0-9]*$");
-
-            while ( mPosition < mConditionalExpressions.length()) {
-
-                if(!std::regex_match(currentChars,integerRegex) && !std::regex_match(currentChars,floatRegex))
-                {
-                    currentChars.pop_back(); // the last char match is not a good one 
-                    break;
-                }
-                else if (std::regex_match(currentChars,floatRegex)){
-                    isFloat = true;
-                }
-                mPosition++;
-                if (mPosition < mConditionalExpressions.length()) currentChars += mConditionalExpressions[mPosition];
-                //currentChars += mConditionalExpressions[mPosition];
-            }
-            //we end the match 2 possibility 
-            //we are at the end of the mConditionalExpressions and we need to ensure the match
-            //we are not we can continu
-            if (mPosition == mConditionalExpressions.length()-1)
-            {
-                if (!std::regex_match(currentChars,integerRegex) && !std::regex_match(currentChars,floatRegex))
-                {
-                     throw badTokenError(currentChars,mPosition);
-                }
-            }
-            
-            if(isFloat){
-                return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::FLOAT,currentChars);
-            }else{
-                return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::INTEGER,currentChars);
-            }
-            
-        }
-        //string TODO
-        else if (std::regex_match(currentChars,std::regex("\'"))) // TODO ' or \'
-        {
-            std::regex strRegex("\'[A-Za-z_0-9\\s]*\'$");
-            while ( mPosition < mConditionalExpressions.length()) {
-                if(std::regex_match(currentChars,strRegex)){
-                    break;
-                }
-                mPosition++;
-                if (mPosition < mConditionalExpressions.length()) currentChars += mConditionalExpressions[mPosition];
-                //currentChars += mConditionalExpressions[mPosition];
-            }
-
-            //test the end condition
-            if (mPosition == mConditionalExpressions.length()-1 ){
-                if (!std::regex_match(currentChars,strRegex)){
-                     throw badTokenError(currentChars,mPosition);
-                }
-                //mPosition++; // we stop all by going pos > length
-            }
-
-            mPosition++; // go after the last " 
-            //erase the " char
-            currentChars.pop_back();
-            currentChars.erase(0,1);
-
-            return std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::STRING,currentChars);
-
-        }
-
-        //Array TODO
-
-        mPosition++;
-    }
-
-    //no more to find no one match the currentChars 
-    if (currentChars.empty()) {
-        return  std::make_shared<ParsingToken<ConditionalTokenTypes>>(ConditionalTokenTypes::STOP,"");  // Null shared pointer ;
-    }else{
-        //std::ostringstream errorMessage;
-        //errorMessage << "\nBad syntax " << currentChars << " :\n" << mConditionalExpressions;
-        throw badTokenError(currentChars,mPosition);
-    }
-    
-}
-
-void ConditionalLexer::rstPosition(void){
-    if (isEnd()){
-        mPosition = 0;
-    }else{
-        throw badTokenError("end rst",mPosition);
-    }
-    
-}
-
-bool ConditionalLexer::isEnd(void){
-    return mPosition >= mConditionalExpressions.length();
-}
-
-std::runtime_error ConditionalLexer::badTokenError(const std::string& currentChars,std::size_t position){
-    std::ostringstream errorMessage;
-    errorMessage << "\nBad syntax " << currentChars << " :\n" << mConditionalExpressions << "\n";
-     for (std::size_t i = 0; i < position; i++) {
-        errorMessage << ' ';
-    }
-    errorMessage << "^\n";
-
-    return std::runtime_error(errorMessage.str());
-}
\ No newline at end of file
diff --git a/src/nodeTester/ConditionalParser.cpp b/src/nodeTester/ConditionalParser.cpp
deleted file mode 100644
index 5cf6f8617612b09c1a61e694a56dc6ed4d0f2b39..0000000000000000000000000000000000000000
--- a/src/nodeTester/ConditionalParser.cpp
+++ /dev/null
@@ -1,192 +0,0 @@
-#include <memory>
-#include <vector>
-
-#include "aidge/nodeTester/ConditionalParser.hpp"
-
-
-//////////////////////////////
-//ConditionalParser
-//////////////////////////////
-
-Aidge::ConditionalParser::ConditionalParser(const std::string ConditionalExpressions)
-    : mLexer(ConditionalExpressions)
-{
-    mCurrentToken = mLexer.getNextToken();
-}
-
-Aidge::ConditionalParser::~ConditionalParser() noexcept = default;
-
-void Aidge::ConditionalParser::rstParser(void){
-    mLexer.rstPosition();
-    mCurrentToken = mLexer.getNextToken();
-}
-
-void Aidge::ConditionalParser::ackToken(ConditionalTokenTypes  tokenType){
-    if(mCurrentToken->getType() == tokenType ){
-
-        try {
-            mCurrentToken = mLexer.getNextToken();
-        } catch (const std::runtime_error& e) {
-            std::ostringstream errorMessage;
-            errorMessage << "Conditional Lexer error in Parser :\n"<< e.what() << std::endl;
-            throw std::runtime_error(errorMessage.str());
-        }
-    }else{
-
-        std::ostringstream errorMessage;
-        errorMessage << "Bad syntax ConditionalParser " << static_cast<int>(mCurrentToken->getType())  <<"!="<< static_cast<int>(tokenType) << "\n";
-        errorMessage << mLexer.rep();
-        throw std::runtime_error(errorMessage.str());
-    }
-}
-
-
-
-std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::constructAstVal(void){
-    /*
-    val : (KEY|INTEGER|FOAT|STRING|LAMBDA)
-    */
-    std::shared_ptr<ParsingToken<ConditionalTokenTypes>> token = mCurrentToken->copy();
-
-    if (token->getType() == ConditionalTokenTypes::KEY){
-        ackToken(ConditionalTokenTypes::KEY);
-        return std::make_shared<AstNode<ConditionalTokenTypes>>(token);
-    }
-    else if(token->getType() == ConditionalTokenTypes::INTEGER){
-        ackToken(ConditionalTokenTypes::INTEGER);
-        return std::make_shared<AstNode<ConditionalTokenTypes>>(token);
-    }
-    else if(token->getType() == ConditionalTokenTypes::FLOAT){
-        ackToken(ConditionalTokenTypes::FLOAT);
-        return std::make_shared<AstNode<ConditionalTokenTypes>>(token);
-    }
-    else if(token->getType() == ConditionalTokenTypes::BOOL){
-        ackToken(ConditionalTokenTypes::BOOL);
-        return std::make_shared<AstNode<ConditionalTokenTypes>>(token);
-    }
-    else if(token->getType() == ConditionalTokenTypes::STRING){
-        ackToken(ConditionalTokenTypes::STRING);
-        return std::make_shared<AstNode<ConditionalTokenTypes>>(token);
-
-    }else if(token->getType() == ConditionalTokenTypes::NODE){
-        ackToken(ConditionalTokenTypes::NODE);
-        return std::make_shared<AstNode<ConditionalTokenTypes>>(token);
-
-    }else if(token->getType() == ConditionalTokenTypes::LAMBDA){
-        return constructAstLambda();
-    }
-
-   throw std::runtime_error("ConditionalParser unknown val type "+ token->rep().str() + "\n" + mLexer.rep());
-
-}
-
-std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::constructAstLambda(void){
-    /*
-    AstLambda :  LAMBDA val (ARGSEP val)* RPAREN
-    */
-    std::shared_ptr<ParsingToken<ConditionalTokenTypes>> tokenLdb = mCurrentToken->copy();
-    ackToken(ConditionalTokenTypes::LAMBDA);
-    ASTNodeCh paramLambda;
-    //AT LEAST ONE VALUE AS INPUT OF A LAMBDA
-    paramLambda.push_back(constructAstVal());
-    while (mCurrentToken->getType() != ConditionalTokenTypes::RPAREN)
-    {
-        ackToken(ConditionalTokenTypes::ARGSEP);
-        paramLambda.push_back(constructAstVal());
-    }
-    ackToken(ConditionalTokenTypes::RPAREN);
-    return std::make_shared<AstNode<ConditionalTokenTypes>>(tokenLdb,paramLambda);
-}
-
-std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::constructAstCmpr(void){
-      /*
-        cmpr   : val (EQ|NEQ) val | LPAREN expr RPAREN
-        NOT ir ?
-      */
-     std::shared_ptr<ParsingToken<ConditionalTokenTypes>> token = mCurrentToken->copy();
-     //we can check the type relation ir  key (EQ|NEQ) val | val (EQ|NEQ) key , but val (EQ|NEQ) val is valid ?
-     if (token->getType() == ConditionalTokenTypes::LPAREN)
-     {
-        ackToken(ConditionalTokenTypes::LPAREN);
-        std::shared_ptr<AstNode<ConditionalTokenTypes>> node = constructAstExpr();
-        ackToken(ConditionalTokenTypes::RPAREN);
-        return node;
-     }else{
-
-        std::shared_ptr<AstNode<ConditionalTokenTypes>> node = constructAstVal();
-        token = mCurrentToken->copy();
-        if (token->getType() == ConditionalTokenTypes::EQ){
-            ackToken(ConditionalTokenTypes::EQ);
-            return std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{node,constructAstVal()});
-        }else if(token->getType() == ConditionalTokenTypes::NEQ){
-            ackToken(ConditionalTokenTypes::NEQ);
-            return std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{node,constructAstVal()});
-        }else{
-
-            throw std::runtime_error("constructAstCmpr "+ token->rep().str() + "\n" + mLexer.rep());
-        }
-
-     }
-}
-
-std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::constructAstExpr(std::size_t precLimit /*= 0*/){
-    /*
-        expr   : cmpr ((AND | OR) cmpr)*
-        the NOT is not binary OP can be use in pratt
-        precedence H to L: TODO
-        AND
-        OR
-    */
-
-   //the not
-    std::shared_ptr<AstNode<ConditionalTokenTypes>> left;
-    std::shared_ptr<ParsingToken<ConditionalTokenTypes>> token = mCurrentToken->copy();
-
-    if (mCurrentToken->getType() == ConditionalTokenTypes::NOT  ){
-        ackToken(ConditionalTokenTypes::NOT );
-        left= std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{constructAstCmpr()});
-    }else{
-        left= constructAstCmpr();
-    }
-
-    //pratt
-    while (mCurrentToken->getType() != ConditionalTokenTypes::STOP ) //security
-    {
-        token = mCurrentToken->copy();
-        //if the token is not in the map is not a operator so we consider a prec of 0
-        if (ConditionalPrec.find(token->getType()) ==ConditionalPrec.end() ){
-            return left;
-        }
-
-        //if my actual operator have a prec <= of the last operator
-        std::size_t prec = ConditionalPrec.at(token->getType());
-        if (prec <= precLimit){
-            return left;
-        }
-
-        //Act all AND and OR
-        ackToken(token->getType());
-
-        std::shared_ptr<AstNode<ConditionalTokenTypes>> right = constructAstExpr(prec);
-
-        //i'm not sure what append to newNode
-        //std::shared_ptr<AstNode<ConditionalTokenTypes>> newNode = std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{left,constructAstCmpr()});
-        std::shared_ptr<AstNode<ConditionalTokenTypes>> newNode = std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{left,right});
-        left = newNode;
-    }
-    return left;
-}
-
-
-std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::parse(void){
-    /*
-        expr   : cmpr ((AND | OR) cmpr)*
-        cmpr   : val (EQ|NEQ) val | LPAREN expr RPAREN | BOOL | LAMBDA
-        val    : (KEY|INTEGER|FOAT|STRING|LAMBDA)
-        lambda :  LAMBDA val (ARGSEP val)* RPAREN
-    */
-    std::shared_ptr<AstNode<ConditionalTokenTypes>> astTree = constructAstExpr();
-
-    rstParser();
-    return astTree;
-}
\ No newline at end of file
diff --git a/src/operator/ArgMax.cpp b/src/operator/ArgMax.cpp
index 4808b730d2261ba0c1ea6d0d09871b1f322fc8fb..531c41596ed4ef553a4b7ec7d2642b778044cc66 100644
--- a/src/operator/ArgMax.cpp
+++ b/src/operator/ArgMax.cpp
@@ -14,7 +14,6 @@
 #include <cstddef>    // std::size_t
 #include <cstdint>    // std::int32_t
 #include <memory>
-#include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
 
@@ -25,6 +24,21 @@
 
 const std::string Aidge::ArgMax_Op::Type = "ArgMax";
 
+Aidge::ArgMax_Op::ArgMax_Op(const Aidge::ArgMax_Op& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(ArgMax_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ArgMax_Op::clone() const {
+    return std::make_shared<ArgMax_Op>(*this);
+}
+
 bool Aidge::ArgMax_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // make Axis attribute positive
@@ -55,3 +69,12 @@ void Aidge::ArgMax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
 std::set<std::string> Aidge::ArgMax_Op::getAvailableBackends() const {
     return Registrar<ArgMax_Op>::getKeys();
 }
+
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ArgMax(std::int32_t axis,
+                                    bool keep_dims,
+                                    bool select_last_index,
+                                    const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ArgMax_Op>(axis, keep_dims, select_last_index), name);
+}
\ No newline at end of file
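Note: a minimal usage sketch for the ArgMax additions above (copy constructor, clone() and the ArgMax() factory), assuming the standard aidge_core Node/Operator API; variable names and the node name are placeholders and this snippet is illustrative, not part of the patch.

    #include <memory>
    #include "aidge/operator/ArgMax.hpp"

    int main() {
        // Build an ArgMax node through the new factory: axis 1, keep the reduced
        // dimension, take the first occurrence of the maximum.
        auto argmaxNode = Aidge::ArgMax(1, true, false, "argmax_example");
        // Copying the operator now goes through ArgMax_Op::clone(), which also
        // re-binds the backend implementation when one is set on the source op.
        auto copiedOp = argmaxNode->getOperator()->clone();
        return 0;
    }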
diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp
index 10b864b54594c86ed1486611fdd91fd916f2291b..62787ebcf4e0633292cf54a3161248b5dcd30fac 100644
--- a/src/operator/Clip.cpp
+++ b/src/operator/Clip.cpp
@@ -32,10 +32,10 @@ bool Aidge::Clip_Op::dimsForwarded() const {
 }
 
 
-bool Aidge::Clip_Op::forwardDims(bool allowDataDependency) 
+bool Aidge::Clip_Op::forwardDims(bool allowDataDependency)
 {
-    if (getInput(1) ) 
-    { 
+    if (getInput(1) )
+    {
         if( this->min() != std::numeric_limits<float>::lowest())
         {
             Log::notice("{} : ignoring non-empty min attribute because input#1 "
@@ -49,11 +49,11 @@ bool Aidge::Clip_Op::forwardDims(bool allowDataDependency)
         return false;
         }
         std::shared_ptr<Tensor> fallback;
-        const auto& minV = mInputs[1]->refCastFrom(fallback, NativeType<float>::type, "cpu");
+        const auto& minV = mInputs[1]->refCastFrom(fallback, NativeType_v<float>, "cpu");
         this->min() = *(static_cast<float*>(minV.getImpl()->hostPtr()));
     }
-    if (getInput(2)) 
-    { 
+    if (getInput(2))
+    {
        if( this->max() != std::numeric_limits<float>::max())
         {
             Log::notice("{} : ignoring non-empty max attribute because input#2 "
@@ -67,7 +67,7 @@ bool Aidge::Clip_Op::forwardDims(bool allowDataDependency)
         return false;
         }
         std::shared_ptr<Tensor> fallback;
-        const auto& maxV = mInputs[2]->refCastFrom(fallback, NativeType<float>::type, "cpu");
+        const auto& maxV = mInputs[2]->refCastFrom(fallback, NativeType_v<float>, "cpu");
         this->max() = *(static_cast<float*>(maxV.getImpl()->hostPtr()));
     }
     if (!inputsAssociated(false)) {
diff --git a/src/operator/Expand.cpp b/src/operator/Expand.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..969dd6e59b1d6b4399a991b498cb4ae046aa7754
--- /dev/null
+++ b/src/operator/Expand.cpp
@@ -0,0 +1,108 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Expand.hpp"
+
+#include <cstdint>
+#include <fmt/core.h>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Log.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+const std::string Expand_Op::Type = "Expand";
+
+Expand_Op::Expand_Op(const Expand_Op &op) : OperatorTensor(op) {
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Expand_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Expand_Op::clone() const {
+    return std::make_shared<Expand_Op>(*this);
+}
+
+bool Expand_Op::forwardDims(bool allowDataDependency) {
+    /////////////////
+    // Error checking
+    if (!inputsAssociated(false)) {
+        return false;
+    }
+    if (!allowDataDependency) {
+        Log::warn("{}: cannot execute forwardDims() as the output "
+                  "dimensions are computed from some input data.",
+                  type());
+        return false;
+    }
+
+    AIDGE_ASSERT(getInput(1)->nbDims() == 1,
+                 "{}: shape input (#1) defines the output shape and must have "
+                 "exactly 1 dimension.",
+                 type());
+    AIDGE_ASSERT(getInput(1)->dataType() == DataType::Int64,
+                 "{}: shape input (#1) Data type must be int64.",
+                 type());
+
+    const std::vector<std::size_t> &inDataDims = getInput(0)->dims();
+    std::vector<DimSize_t> inShape(getInput(1)->size());
+    for (DimSize_t i = 0; i < getInput(1)->size(); ++i) {
+        inShape[i] = getInput(1)->get<std::int64_t>(i);
+    }
+
+    ////////////////////////////////////////
+    // Dimension broadcasting
+    // Same broadcasting logic as in Mul_Op::forwardDims()
+    std::vector<std::size_t> outDims =
+        (inDataDims.size() >= inShape.size()) ? inDataDims : inShape;
+    const std::vector<std::size_t> &lowDims =
+        (inDataDims.size() < inShape.size()) ? inDataDims : inShape;
+
+    std::size_t outId = outDims.size() - 1;
+    std::size_t lowId = lowDims.size() - 1;
+    std::size_t i = 0;
+    while (i++ < lowDims.size()) {
+        if (outDims[outId] == 1) {
+            outDims[outId] = lowDims[lowId];
+        } else if ((lowDims[lowId] != 1) &&
+                   (lowDims[lowId] != outDims[outId])) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error,
+                                 "{}: Incompatible Tensor shape "
+                                 "{} for input#0 vs {} for input#1",
+                                 type(),
+                                 inDataDims,
+                                 inShape);
+        }
+        --outId;
+        --lowId;
+    }
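+    // e.g. input#0 with dims {3, 1} expanded with shape input [2, 1, 6]
+    // gives output dims {2, 3, 6}.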
+    mOutputs[0]->resize(outDims);
+    return true;
+}
+
+void Expand_Op::setBackend(const std::string &name,
+                           Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Expand_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Expand_Op::getAvailableBackends() const {
+    return Registrar<Expand_Op>::getKeys();
+}
+
+std::shared_ptr<Aidge::Node> Expand(const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Expand_Op>(), name);
+}
+} // namespace Aidge
diff --git a/src/operator/Flatten.cpp b/src/operator/Flatten.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c77adc37400b0f14faade331965f75ac76c8c7b9
--- /dev/null
+++ b/src/operator/Flatten.cpp
@@ -0,0 +1,90 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Flatten.hpp"
+
+#include <cstddef>     // std::size_t
+#include <cstdint>     // std::int64_t
+#include <functional>  // std::multiplies
+#include <memory>
+#include <numeric>     // std::accumulate
+#include <stdexcept>   // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::Flatten_OpImpl::forward() {
+    const Flatten_Op& op = dynamic_cast<const Flatten_Op&>(mOp);
+    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+}
+
+//////////////////////////////////////////////////
+
+const std::string Aidge::Flatten_Op::Type = "Flatten";
+
+Aidge::Flatten_Op::Flatten_Op(const std::int64_t axis)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<FlattenAttr::Axis>(axis)))
+{
+    mImpl = std::make_shared<Flatten_OpImpl>(*this);
+}
+
+Aidge::Flatten_Op::Flatten_Op(const Aidge::Flatten_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Flatten_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Flatten_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Flatten_Op::clone() const {
+    return std::make_shared<Flatten_Op>(*this);
+}
+
+bool Aidge::Flatten_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        const auto inDims(getInput(0)->dims());
+        const auto firstDim = std::accumulate(inDims.begin(), inDims.begin() + axis(), 1ULL, std::multiplies<DimSize_t>());
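+        // e.g. with input dims {2, 3, 4, 5} and axis = 2: firstDim = 2*3 = 6
+        // and the output is resized to {6, 4*5} = {6, 20}.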
+        mOutputs[0]->resize({firstDim, getInput(0)->size() / firstDim});
+        return true;
+    }
+
+    return false;
+}
+
+void Aidge::Flatten_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Flatten_Op>::exists({name})){
+        SET_IMPL_MACRO(Flatten_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Flatten_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Flatten_Op::getAvailableBackends() const {
+    return Registrar<Flatten_Op>::getKeys();
+}
+
+//////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Flatten(std::int64_t axis,
+                            const std::string &name)
+{
+    return std::make_shared<Node>(std::make_shared<Flatten_Op>(axis), name);
+}
\ No newline at end of file
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 0ebc3e3bc81b15d9414d01f12a2768be6a7ddc42..e0990437a06d5b9fb72cf1909d78f6094120bf80 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -104,7 +104,7 @@ bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
             this->gatheredShape() = getInput(1)->dims();
             this->indices().clear(); // If both are provided input would override attrs
             this->indices().reserve(getInput(1)->size());
-            const auto& indices = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            const auto& indices = mInputs[1]->refCastFrom(fallback, NativeType_v<int64_t>, "cpu");
             std::copy_n(static_cast<int64_t*>(indices.getImpl()->hostPtr()),
                         indices.size(),
                         std::back_inserter(this->indices()));
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index c5bca92406e518df593fcc6c3a40525a4ba81dfa..1e28cf289960dee280457cd6ea119fcc9477cf9f 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -22,7 +22,8 @@
 Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
                                             const std::vector<Aidge::InputCategory>& inputsCategory,
                                             Aidge::IOIndex_t nbOut)
-    : OperatorTensor(type, inputsCategory, nbOut)
+    : OperatorTensor(type, inputsCategory, nbOut),
+        mAttributes(std::make_shared<DynamicAttributes>())
 {
     mImpl = std::make_shared<OperatorImpl>(*this);
 }
@@ -73,7 +74,8 @@ bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
         }
 
         const auto& outputsDims = mForwardDims(inputsDims);
-        AIDGE_ASSERT((outputsDims.size() == nbOutputs()), "The provided ComputeDimsFunc function returns the wrong number of outputs");
+        AIDGE_ASSERT(!outputsDims.empty(), "The provided ComputeDimsFunc cannot compute the output dims (an empty vector was returned)");
+        AIDGE_ASSERT(outputsDims.size() == nbOutputs(), "The provided ComputeDimsFunc function returned the wrong number of outputs: {}, but {} are expected", outputsDims.size(), nbOutputs());
         for (std::size_t i = 0; i < nbOutputs(); ++i) {
             mOutputs[i]->resize(outputsDims[i]);
         }
@@ -117,3 +119,40 @@ std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
                                                 const std::string& name) {
     return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
 }
+
+std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
+                                            std::shared_ptr<OperatorTensor> op,
+                                            const std::string& name)
+{
+    // Create a generic op with the same inputs/outputs
+    auto genericOp = std::make_shared<GenericOperator_Op>(type, op->inputCategory(), op->nbOutputs());
+
+    // Copy attributes
+    genericOp->setAttrs(op->attributes()->getAttrs());
+
+    // Set a default forward dims if possible
+    if (op->dimsForwarded()) {
+        auto opInputDims = std::vector<std::vector<DimSize_t>>(op->nbInputs());
+        for (size_t i = 0; i < op->nbInputs(); ++i) {
+            opInputDims[i] = op->getInput(i)->dims();
+        }
+
+        auto opOutputDims = std::vector<std::vector<DimSize_t>>(op->nbOutputs());
+        for (size_t o = 0; o < op->nbOutputs(); ++o) {
+            opOutputDims[o] = op->getOutput(o)->dims();
+        }
+
+        genericOp->setForwardDims([opInputDims, opOutputDims](const std::vector<std::vector<std::size_t>>& inputsDims) {
+            // Check input dims
+            for (size_t i = 0; i < opInputDims.size(); ++i) {
+                if (inputsDims[i] != opInputDims[i]) {
+                    // No matching => unable to compute output dims!
+                    return std::vector<std::vector<std::size_t>>();
+                }
+            }
+            return opOutputDims;
+        });
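+        // Note: the captured dims act as a fixed reference. forwardDims() on the
+        // generic op only succeeds for inputs with exactly the same dims as the
+        // original operator; any other dims return an empty vector, which makes
+        // GenericOperator_Op::forwardDims() fail its non-empty assertion.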
+    }
+
+    return std::make_shared<Node>(genericOp, name);
+}
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index f0b8720bc1e22d8d6308460eabe436db8a4c9f6d..d01d576782d43a45c3810ab9e0d6d4bd42030662 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -15,6 +15,9 @@
 
 void Aidge::Identity_OpImpl::forward() {
     const Identity_Op& op = dynamic_cast<const Identity_Op&>(mOp);
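+    // Align the output tensor with the input's backend, data type and data format
+    // before copying the raw data.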
+    op.getOutput(0)->setBackend(op.getInput(0)->backend(), op.getInput(0)->device());
+    op.getOutput(0)->setDataType(op.getInput(0)->dataType());
+    op.getOutput(0)->setDataFormat(op.getInput(0)->dataFormat());
     op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
 }
 
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index cd4a4808137a786b04d8d143e50b96b9648e7d9a..c4f0bc4bf7267d24264652d5ed6b0d50935e1aa4 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -76,7 +76,7 @@ void Aidge::Memorize_OpImpl::forward() {
 const std::string Aidge::Memorize_Op::Type = "Memorize";
 
 Aidge::Memorize_Op::Memorize_Op(const std::uint32_t endStep)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 2),
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 2),
         mAttributes(std::make_shared<Attributes_>(
                     attr<MemorizeAttr::ScheduleStep>(0),
                     attr<MemorizeAttr::ForwardStep>(0),
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index cd307c9d15043d3ee5f5de48695e04e4ad2ada6b..ae3c3ed6ca85c059204c524f467f5387f656e30b 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -96,7 +96,9 @@ void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceId
         for(auto i: mGraph->inputNodes()){
             auto op_i = std::static_pointer_cast<OperatorTensor>(i->getOperator());
             for(std::size_t in_idx=0; in_idx < op_i->nbInputs(); ++in_idx){
-                op_i->getInput(in_idx)->setBackend(name, device);
+                if (op_i->getInput(in_idx)) {
+                    op_i->getInput(in_idx)->setBackend(name, device);
+                }
             }
         }
         for(auto o: mGraph->outputNodes()){
diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp
index 2ed548805010a6cc87950c4d1f7b89edbea4f75c..22c0469b34b52670a910f63604d02f3f8bf6eab7 100644
--- a/src/operator/MetaOperatorDefs/LSTM.cpp
+++ b/src/operator/MetaOperatorDefs/LSTM.cpp
@@ -11,7 +11,6 @@
 
 #include "aidge/operator/MetaOperatorDefs.hpp"
 
-#include <array>
 #include <memory>
 #include <string>
 
@@ -20,7 +19,6 @@
 #include "aidge/operator/Mul.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/Identity.hpp"
-#include "aidge/operator/Concat.hpp"
 #include "aidge/operator/Tanh.hpp"
 
 namespace Aidge {
diff --git a/src/operator/MetaOperatorDefs/Leaky.cpp b/src/operator/MetaOperatorDefs/Leaky.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c5e8ab3f10bf56b87479d7c5e7b5a71449884304
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/Leaky.cpp
@@ -0,0 +1,113 @@
+#include "aidge/filler/Filler.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Heaviside.hpp"
+#include "aidge/operator/Identity.hpp"
+#include "aidge/operator/Memorize.hpp"
+#include "aidge/operator/MetaOperatorDefs.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/Sub.hpp"
+
+namespace Aidge {
+
+constexpr auto memorizeOpDataOutputRecIndex = 1;
+/* constexpr auto memorizeOpDataOutputIndex = 0; */ // unused variable
+
+std::shared_ptr<Node> Leaky(const int nbTimeSteps,
+                            const float beta,
+                            const float threshold,
+                            const std::string &name) {
+
+    auto microGraph = std::make_shared<GraphView>();
+
+    auto inputNode = Identity((!name.empty()) ? name + "_input" : "");
+    auto addNode = Add(!name.empty() ? name + "_add" : "");
+    auto mulNode = Mul(!name.empty() ? name + "_mul" : "");
+    auto subNode = Sub(!name.empty() ? name + "_sub" : "");
+    auto hsNode = Heaviside(0, !name.empty() ? name + "_hs" : "");
+    auto subNode2 = Sub(!name.empty() ? name + "_threshold" : "");
+    auto reset = Mul(!name.empty() ? name + "_reset" : "");
+
+    auto betaTensor = std::make_shared<Tensor>(beta);
+    auto uthTensor = std::make_shared<Tensor>(static_cast<float>(threshold));
+    uniformFiller<float>(uthTensor, threshold, threshold);
+
+    auto decayRate = Producer(betaTensor, "leaky_beta", true);
+    auto uth = Producer(uthTensor, "leaky_uth", true);
+
+    auto potentialMem =
+        Memorize(nbTimeSteps, (!name.empty()) ? name + "_potential" : "");
+    auto spikeMem =
+        Memorize(nbTimeSteps, (!name.empty()) ? name + "_spike" : "");
+
+    // U[t] = Input[T] + beta * U[T-1] - S[T-1] * U_th
+    // with S[T] = | 1, if U[T] - U_th > 0
+    //             | 0 otherwise
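+    // e.g. with beta = 0.9, U[T-1] = 1.2, U_th = 1.0 (so S[T-1] = 1) and
+    // Input[T] = 0.5: U[T] = 0.5 + 0.9*1.2 - 1.0 = 0.58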
+
+    // beta * U[T-1]
+    decayRate->addChild(/*otherNode=*/mulNode, /*outId=*/0, /*otherInId=*/1);
+    potentialMem->addChild(mulNode, 1, 0);
+
+    // Input[T] + beta * U[T-1]
+    mulNode->addChild(/*otherNode=*/addNode, /*outId=*/0, /*otherInId=*/1);
+    inputNode->addChild(/*otherNode=*/addNode, /*outId=*/0, /*otherInId=*/0);
+
+    // S[T-1] * U_th
+    spikeMem->addChild(reset,
+                       /*outId=*/memorizeOpDataOutputRecIndex,
+                       /*otherInId=*/0);
+
+    // TODO(#219) Handle hard/soft reset
+    uth->addChild(reset, 0, 1);
+
+    // Input[T] + beta * U[T-1] - S[T-1] * U_th
+    addNode->addChild(subNode, 0, 0);
+    reset->addChild(subNode, 0, 1);
+
+    // U[T] = (Input[T] + beta * U[T-1]) - S[T-1] * U_th
+    subNode->addChild(potentialMem, 0, 0);
+
+    // U[T] - U_th
+    subNode->addChild(subNode2, 0, 0);
+    uth->addChild(subNode2, 0, 1);
+
+    // with S[T] = | 1, if U[T] - U_th > 0
+    subNode2->addChild(hsNode, 0, 0);
+    hsNode->addChild(spikeMem, 0, 0);
+
+    microGraph->add(inputNode);
+    microGraph->add({addNode,
+                     mulNode,
+                     potentialMem,
+                     decayRate,
+                     uth,
+                     spikeMem,
+                     hsNode,
+                     subNode,
+                     subNode2,
+                     reset},
+                    false);
+
+    microGraph->setOrderedInputs(
+        {{inputNode, 0}, {potentialMem, 1}, {spikeMem, 1}});
+
+    // NOTE: Outputs are NOT the memory nodes (as it is done in LSTM), to avoid
+    // producing data during init. This way, we can plug an operator after
+    // our node, and get correct results.
+    microGraph->setOrderedOutputs({//{potentialMem, memorizeOpDataOutputIndex},
+                                   //{spikeMem, memorizeOpDataOutputIndex}
+                                   {subNode, 0},
+                                   {hsNode, 0}});
+
+    auto metaOp = MetaOperator(/*type*/ "Leaky",
+                               /*graph*/ microGraph,
+                               /*forcedInputsCategory=*/{},
+                               /*name*/ "leaky");
+
+    return metaOp;
+}
+
+std::shared_ptr<MetaOperator_Op> LeakyOp() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Not implemented yet");
+}
+} // namespace Aidge
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 3bdb4b17127eb8a9115f8dec045db32bf041b00b..cac1ad2264af0e1b687c44eb220ee2c6080e9e73 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -36,9 +36,19 @@ Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other)
         mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)),
         mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) {
     for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
+        // The characteristics of the output tensors are copied for two reasons:
+        // - Consistency with the operator: the output tensors' backend should
+        //   match the operator backend, which is always copied.
+        // - The user would expect that the data type and format are copied as
+        //   well.
+        // Data is never copied in clone().
         mOutputs[i] = std::make_shared<Tensor>();
-        // mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i)));
-        // datatype already copied
+        mOutputs[i]->setDataType(other.getOutput(i)->dataType());
+        mOutputs[i]->setDataFormat(other.getOutput(i)->dataFormat());
+
+        if (!other.getOutput(i)->backend().empty()) {
+            mOutputs[i]->setBackend(other.getOutput(i)->backend());
+        }
     }
 }
 
@@ -92,6 +102,12 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aid
     return mOutputs[outputIdx];
 }
 
+const std::vector<std::shared_ptr<Aidge::Tensor>>& Aidge::OperatorTensor::getOutputs() const{
+    return mOutputs;
+}
+const std::vector<std::shared_ptr<Aidge::Tensor>>& Aidge::OperatorTensor::getInputs() const{
+    return mInputs;
+}
 
 std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>> Aidge::OperatorTensor::computeReceptiveField(
         const std::vector<DimSize_t>& firstEltDims,
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 0fa9a62816a36ad3afece02052224c966ee121a3..8b42cb51440cbc61bf8d4dbf69524adb15dbeb44 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -25,9 +25,18 @@
 
 void Aidge::Reshape_OpImpl::forward() {
     const Reshape_Op& op = dynamic_cast<const Reshape_Op&>(mOp);
+    AIDGE_ASSERT(op.getInput(0), "missing input#0");
+    // const auto& input = op.getInput(0)->refCastFrom(mInputFallback, *op.getOutput(0));
     op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
 }
 
+void Aidge::Reshape_OpImpl::backward() {
+    const Reshape_Op& op = dynamic_cast<const Reshape_Op&>(mOp);
+    AIDGE_ASSERT(op.getOutput(0)->grad(), "missing gradient for output#0");
+    // const auto& output_grad = op.getOutput(0)->grad()->refCastFrom(mOutputGradFallback, *op.getOutput(0)->grad());
+    op.getInput(0)->grad()->getImpl()->copy(op.getOutput(0)->grad()->getImpl()->rawPtr(), op.getOutput(0)->size());
+}
+
 //////////////////////////////////////////////////
 
 const std::string Aidge::Reshape_Op::Type = "Reshape";
@@ -82,7 +91,7 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
             std::shared_ptr<Tensor> fallback;
             this->shape().clear(); // If both are provided input would override attrs
             this->shape().reserve(getInput(1)->size());
-            const auto& shape = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            const auto& shape = mInputs[1]->refCastFrom(fallback, NativeType_v<int64_t>, "cpu");
             std::copy_n(static_cast<int64_t*>(shape.getImpl()->hostPtr()),
                         shape.size(),
                         std::back_inserter(this->shape()));
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index 252f55a6abdea13cc43cf21d3e8c7ab33ddbb86e..b2ef56572a5f972cd0f5be6a276780e5f27536de 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -125,7 +125,7 @@ bool Resize_Op::forwardDims(bool allowDataDependency) {
         std::shared_ptr<Tensor> fallback;
         const auto &sizes = resizeParam
                                 ->refCastFrom(fallback,
-                                              NativeType<DimSize_t>::type,
+                                              NativeType_v<DimSize_t>,
                                               resizeParam->backend());
 
         for (std::size_t dim = 0; dim < getInput(inSizesIdx)->size(); ++dim) {
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index ecaa12191173ac74ace8d6d224ddfe08469eb521..c38f52d765eb994553e5ef8fedf66d79be64808c 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -18,10 +18,14 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/Log.hpp"
 
 void Aidge::Shape_OpImpl::forward() {
-    // Do nothing...
     // Output is already valid after forwardDims()
+    // But it may be with the wrong device (default cpu)
+    // This can happen if forwardDims is called before setBackend
+    const Shape_Op& op = dynamic_cast<const Shape_Op&>(mOp);
+    op.getOutput(0)->setBackend(op.getInput(0)->backend(), op.getInput(0)->device());
 }
 
 ///////////////////////////////////////////////
@@ -69,6 +73,10 @@ bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
         AIDGE_ASSERT(roi> 1, "Invalid ROI for Shape");
 
         mOutputs[0]->resize({roi});
+        if (!mOutputs[0]->getImpl()){
+            Log::debug("Shape::forwardDims, no implementation set for output, defaulting to CPU.");
+            mOutputs[0]->setBackend("cpu");
+        }
         // Ensure the output of this operator is valid after forwardDims():
         mOutputs[0]->getImpl()->copyCast(std::next(getInput(0)->dims().data(),
                                                     start),
@@ -98,4 +106,4 @@ std::set<std::string> Aidge::Shape_Op::getAvailableBackends() const {
 
 std::shared_ptr<Aidge::Node> Aidge::Shape(const std::int64_t start, const std::int64_t end, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 3bdee8c13c1759261140d634940b0a4e81210084..7945200aabbae23abce7d1698b5ddbe8f7ec0882 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -24,6 +24,9 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/data/Data.hpp"
+#include "aidge/utils/Registrar.hpp"
+
 
 const std::string Aidge::Slice_Op::Type = "Slice";
 
@@ -43,17 +46,18 @@ Aidge::Slice_Op::Slice_Op(const std::vector<std::int64_t>& starts,
         attr<SliceAttr::Ends>(ends),
         attr<SliceAttr::Axes>(axes),
         attr<SliceAttr::Steps>(steps)))
-{}
+{
+    mImpl = std::make_shared<Slice_OpImpl>(*this);
+}
 
-Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op &op)
-    : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op& op)
+    : OperatorTensor(op), mAttributes(op.mAttributes)
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Slice_Op, *this, op.backend());
     }
     else {
-        mImpl = nullptr;
+        mImpl = std::make_shared<Slice_OpImpl>(*this);
     }
 }
 
@@ -61,6 +65,83 @@ std::shared_ptr<Aidge::Operator> Aidge::Slice_Op::clone() const {
     return std::make_shared<Slice_Op>(*this);
 }
 
+// Helper function to calculate the linear index for multi-dimensional data
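+// e.g. for dims = {2, 3, 4} and indices = {1, 2, 3} (row-major):
+// linearIndex = 1*(3*4) + 2*4 + 3 = 23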
+size_t getLinearIndex(const std::vector<size_t>& dims, const std::vector<size_t>& indices) {
+    size_t linearIndex = 0;
+    size_t stride = 1;
+    for (int i = dims.size() - 1; i >= 0; --i) {
+        linearIndex += indices[i] * stride;
+        stride *= dims[i];
+    }
+    return linearIndex;
+}
+
+void Aidge::Slice_OpImpl::forward() {
+    const Slice_Op& op = dynamic_cast<const Slice_Op&>(mOp);
+
+    if (!op.getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", op.Type);
+    }
+    AIDGE_ASSERT((op.axes().size() == op.ends().size()) &&
+                 (op.axes().size() == op.starts().size()),
+                 "Starts, Ends and Axes arguments should be the same size.");
+
+    const std::vector<size_t> inputDims = op.getInput(0)->dims();
+    std::vector<size_t> indices(inputDims.size(), 0); // Initialize indices for each dimension
+
+    // Create an array of ranges for each axis
+    std::vector<std::vector<int>> ranges(inputDims.size());
+
+    // Generate ranges dynamically for each dimension
+    for (size_t axisIdx = 0; axisIdx < inputDims.size(); ++axisIdx) {
+        const auto axisIt = std::find(op.axes().begin(), op.axes().end(), static_cast<std::int8_t>(axisIdx));
+        if (axisIt != op.axes().end()) {
+            // This axis is being sliced: starts/ends/steps are indexed by the
+            // position of the axis inside the axes attribute, not by the axis itself
+            const std::size_t attrIdx = static_cast<std::size_t>(std::distance(op.axes().begin(), axisIt));
+            int start = op.starts()[attrIdx];
+            int end = op.ends()[attrIdx];
+            const int step = op.steps()[attrIdx];
+
+            start = start >= 0 ? start : start + static_cast<int>(inputDims[axisIdx]);
+            start = std::max(0, std::min(start, static_cast<int>(inputDims[axisIdx])));
+            end = end >= 0 ? end : end + static_cast<int>(inputDims[axisIdx]);
+            end = std::max(0, std::min(end, static_cast<int>(inputDims[axisIdx])));
+            // Generate the range of indices for this axis
+            for (int idx = start; (step > 0) ? (idx < end) : (idx > end); idx += step) {
+                ranges[axisIdx].push_back(idx);
+            }
+        } else {
+            // This axis is not being sliced: keep its full range so that every
+            // element along it is copied
+            for (int idx = 0; idx < static_cast<int>(inputDims[axisIdx]); ++idx) {
+                ranges[axisIdx].push_back(idx);
+            }
+        }
+    }
+
+    // Use iterative stack to handle all dimensions dynamically
+    std::vector<size_t> currentIndex(inputDims.size(), 0); // Track current index in each dimension
+    std::vector<size_t> stackPointer(inputDims.size(), 0); // Pointers to ranges for each dimension
+    size_t dim = 0; // Start at the first dimension
+    size_t offset = 0; // Offset in the output tensor
+
+    while (dim < inputDims.size()) {
+        if (stackPointer[dim] < ranges[dim].size()) {
+            // Set the current index for this dimension
+            currentIndex[dim] = ranges[dim][stackPointer[dim]];
+            stackPointer[dim]++;
+
+            if (dim == inputDims.size() - 1) {
+                // We've reached the last dimension, process this index combination
+                size_t linearIndex = getLinearIndex(inputDims, currentIndex);
+                op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(linearIndex), 1, offset);
+                offset++;
+            } else {
+                // Move to the next dimension
+                dim++;
+            }
+        } else {
+            // Reset this dimension and move back to the previous one
+            stackPointer[dim] = 0;
+            dim--;
+        }
+    }
+}
 
 bool Aidge::Slice_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined())
@@ -91,7 +172,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
 
             this->starts().clear(); // If both are provided input would override attrs
             this->starts().reserve(getInput(1)->size());
-            const auto& starts = getInput(1)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            const auto& starts = getInput(1)->refCastFrom(fallback, NativeType_v<int64_t>, "cpu");
             std::copy_n(static_cast<int64_t*>(starts.getImpl()->hostPtr()),
                         starts.size(),
                         std::back_inserter(this->starts()));
@@ -112,7 +193,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
 
             this->ends().clear(); // If both are provided input would override attrs
             this->ends().reserve(getInput(2)->size());
-            const auto& ends = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            const auto& ends = getInput(2)->refCastFrom(fallback, NativeType_v<int64_t>, "cpu");
             std::copy_n(static_cast<int64_t*>(ends.getImpl()->hostPtr()),
                         ends.size(),
                         std::back_inserter(this->ends()));
@@ -133,7 +214,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
 
             this->axes().clear(); // If both are provided input would override attrs
             this->axes().reserve(getInput(3)->size());
-            const auto& axes = getInput(3)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+            const auto& axes = getInput(3)->refCastFrom(fallback, NativeType_v<int8_t>, "cpu");
             std::copy_n(static_cast<int8_t*>(axes.getImpl()->hostPtr()),
                         axes.size(),
                         std::back_inserter(this->axes()));
@@ -154,7 +235,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
 
             this->steps().clear(); // If both are provided input would override attrs
             this->steps().reserve(getInput(4)->size());
-            const auto& steps = getInput(4)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            const auto& steps = getInput(4)->refCastFrom(fallback, NativeType_v<int64_t>, "cpu");
             std::copy_n(static_cast<int64_t*>(steps.getImpl()->hostPtr()),
                         steps.size(),
                         std::back_inserter(this->steps()));
@@ -173,12 +254,17 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
             const DimIdx_t axis = this->axes()[i] >= 0 ?
                             static_cast<DimIdx_t>(this->axes()[i]) :
                             static_cast<DimIdx_t>(this->axes()[i] + static_cast<DimIdx_t>(getInput(0)->nbDims()));
-            const DimSize_t start = this->starts()[i] >= 0 ?
+            DimSize_t start = this->starts()[i] >= 0 ?
                                 static_cast<DimSize_t>(this->starts()[i]) :
                                 static_cast<DimSize_t>(this->starts()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
-            const DimSize_t end = this->ends()[i] >= 0 ?
+            // Clamp start to the range [0, axis_dim - 1]
+            start = std::max(static_cast<DimSize_t>(0), std::min(start, getInput(0)->dims()[axis]-1));
+
+            DimSize_t end = this->ends()[i] >= 0 ?
                             static_cast<DimSize_t>(this->ends()[i]) :
                             static_cast<DimSize_t>(this->ends()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
+            // Clamp end to the range [0, axis_dim]
+            end = std::max(static_cast<DimSize_t>(0), std::min(end, getInput(0)->dims()[axis]));
             const std::int64_t step = this->steps()[i];
 
             AIDGE_ASSERT(step != 0, "Slice_Op: Step ({}) must have a non-zero value on axis {}!", this->steps(), axis);
@@ -191,7 +277,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
                 }
             }
 
-            const std::size_t sliceLength = static_cast<std::size_t>(std::ceil((static_cast<float>(end) - static_cast<float>(start)) / static_cast<float>(step)));
+            const std::size_t sliceLength = static_cast<std::size_t>(std::ceil((static_cast<float>(end) - static_cast<float>(start)) / static_cast<float>((step))));
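+            // e.g. start = 1, end = 8, step = 3: sliceLength = ceil(7/3) = 3 (indices 1, 4, 7)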
             // Check if slice length is valid
             if (sliceLength > getInput(0)->dims()[axis])
             {
@@ -208,7 +294,12 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
 }
 
 void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Slice_Op, *this, name);
+    if (Registrar<Slice_Op>::exists({name})){
+        SET_IMPL_MACRO(Slice_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Slice_OpImpl>(*this);
+    }
     mOutputs[0]->setBackend(name, device);
 }
 
@@ -224,4 +315,4 @@ std::shared_ptr<Aidge::Node> Aidge::Slice(const std::vector<std::int64_t>& start
                                    const std::vector<std::int64_t>& steps,
                                    const std::string &name) {
     return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes, steps), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index 2191f14a150088dfa1d369d2ef31051e5ab16326..09aad0674bc424f50483c064cb7201bc20499faa 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -109,7 +109,7 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
             std::shared_ptr<Tensor> fallback;
             this->split().clear(); // If both are provided input would override attrs
             this->split().reserve(getInput(1)->size());
-            const auto& splits = getInput(1)->refCastFrom(fallback, NativeType<DimSize_t>::type, "cpu");
+            const auto& splits = getInput(1)->refCastFrom(fallback, NativeType_v<DimSize_t>, "cpu");
             std::copy_n(static_cast<DimSize_t*>(splits.getImpl()->hostPtr()),
                         splits.size(),
                         std::back_inserter(this->split()));
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
index b51b4f346c92f778fe0a044df187cd8d0d0f7304..a44146366d5466768d937261729325697ce24f6e 100644
--- a/src/operator/Squeeze.cpp
+++ b/src/operator/Squeeze.cpp
@@ -67,7 +67,7 @@ bool Squeeze_Op::forwardDims(bool allowDataDependency) {
     this->axes().clear(); // If both are provided input would override attrs
     this->axes().reserve(getInput(1)->size());
     const auto &axes =
-        getInput(1)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+        getInput(1)->refCastFrom(fallback, NativeType_v<int8_t>, "cpu");
     if (axes.nbDims() == 0) {
       this->axes().clear();
     } else {
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
index ab9ddc4f705cb00cebbe5b9ee68fb1433586a043..a938f470ded47d65a9b15b93ca66dfe186d61e9f 100644
--- a/src/operator/Stack.cpp
+++ b/src/operator/Stack.cpp
@@ -102,7 +102,7 @@ bool Aidge::StackOp::forwardDims(bool allowDataDependency) {
             }
 
             std::shared_ptr<Tensor> fallback;
-            const auto& maxElements = getInput(1)->refCastFrom(fallback, NativeType<std::uint32_t>::type, "cpu");
+            const auto& maxElements = getInput(1)->refCastFrom(fallback, NativeType_v<std::uint32_t>, "cpu");
             AIDGE_ASSERT(maxElements.size() > 0, "Input#1 size should be > 0");
             this->maxElements() = static_cast<std::uint32_t*>(maxElements.getImpl()->hostPtr())[0];
         }
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index d24b9c90927830db6f1c1256d133d871ba3dc37f..b550db16dfee8286242df7cfbed9b3b300ee96d5 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -66,12 +66,17 @@ bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
             std::iota(this->outputDimsOrder().rbegin(), this->outputDimsOrder().rend(), 0);
         }
 
-        AIDGE_ASSERT(outputDimsOrder().size() == getInput(0)->nbDims(),
-                     "Permutation vector must have the same rank as input tensor.");
+        AIDGE_ASSERT(outputDimsOrder().size() >= getInput(0)->nbDims(),
+            "Permutation vector ({}) must have at least the same rank as input tensor ({}).", outputDimsOrder(), getInput(0)->dims());
         std::vector<DimSize_t> outputDims;
-        for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) {
+        std::size_t i = 0;
+        for (; i < getInput(0)->nbDims(); ++i) {
             outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]);
         }
+        for (; i < outputDimsOrder().size(); ++i) {
+            AIDGE_ASSERT(i == outputDimsOrder()[i],
+                "Permutation vector ({}) must be the identity above the input tensor rank ({}).", outputDimsOrder(), getInput(0)->dims());
+        }
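+        // e.g. a 2-D input with dims {2, 3} and permutation {1, 0, 2, 3} yields
+        // output dims {3, 2}; the extra entries 2 and 3 must map to themselves.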
         mOutputs[0]->resize(outputDims);
         return true;
     }
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
index f3353b45cd6a732fa456ea0585ec5d040d53ef31..414afc10f5ea091ba9f30c327ccfbcfe6b3fd558 100644
--- a/src/operator/Unsqueeze.cpp
+++ b/src/operator/Unsqueeze.cpp
@@ -61,7 +61,7 @@ bool Unsqueeze_Op::forwardDims(bool allowDataDependency) {
     this->axes().clear(); // If both are provided input would override attrs
     this->axes().reserve(getInput(1)->size());
     const auto &axes =
-        getInput(1)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+        getInput(1)->refCastFrom(fallback, NativeType_v<int8_t>, "cpu");
     std::copy_n(static_cast<int8_t *>(axes.getImpl()->hostPtr()),
                 axes.size(), std::back_inserter(this->axes()));
   }
diff --git a/src/operator/WeightInterleaving.cpp b/src/operator/WeightInterleaving.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..66af1d51f87c24b5b8d7d9c1f0ab3701f122515d
--- /dev/null
+++ b/src/operator/WeightInterleaving.cpp
@@ -0,0 +1,121 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/WeightInterleaving.hpp"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::WeightInterleaving_Op::Type = "WeightInterleaving";
+
+/**
+ * @brief Copy-constructor.
+ * @param op WeightInterleaving_Op to copy.
+ * @details Copies the operator attributes and its output tensor(s), but not
+ * its input tensors. The new operator has no associated input.
+ */
+Aidge::WeightInterleaving_Op::WeightInterleaving_Op(const WeightInterleaving_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(WeightInterleaving_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+
+std::shared_ptr<Aidge::Operator> Aidge::WeightInterleaving_Op::clone() const {
+    return std::make_shared<WeightInterleaving_Op>(*this);
+}
+
+
+bool Aidge::WeightInterleaving_Op::forwardDims(bool /*allowDataDependency*/) {
+    
+    if (inputsAssociated()) {
+
+        // check input data format is NHWC
+        AIDGE_ASSERT((getInput(0)->dataFormat() == DataFormat::NHWC),
+                    "Wrong Input tensor Data Format : {} for WeightInterleaving operator (should be DataFormat::NHWC for STM32).", getInput(0)->dataFormat());
+        
+        // Take the last dimension of the tensor: it is the channel dimension in NHWC format
+        // The weights are compacted along the channel dimension only
+        const DimSize_t& lastDim = getInput(0)->dims().back();
+
+        // Compute the size of the last dimension after the weight interleaving compression
+        // TODO: implement a mechanism to get the number of bits of the DataType
+        const DataType& dt = getInput(0)->dataType();
+
+        std::uint8_t nbBits = 0;
+
+        switch (dt) {
+            case DataType::Int4:
+                nbBits=4;
+                break;
+            case DataType::Int3:
+                nbBits=3;
+                break;
+            case DataType::Int2:
+                nbBits=2;
+                break;
+            default:
+                AIDGE_ASSERT(false, "Unsupported data type {} for WeightInterleaving.", dt);
+        }
+
+
+        const auto lastDimCompression = compactDataSize(lastDim, nbBits);
+
+        std::vector<DimSize_t> outputDims = getInput(0)->dims();
+        outputDims.back() = lastDimCompression;
+
+        // <batch, OutChannels>
+        mOutputs[0]->resize(outputDims);
+
+        return true;
+    }
+
+    return false;
+}
+
+
+void Aidge::WeightInterleaving_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(WeightInterleaving_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::WeightInterleaving_Op::getAvailableBackends() const {
+    return Registrar<WeightInterleaving_Op>::getKeys();
+}
+
+std::shared_ptr<Aidge::Node> Aidge::WeightInterleaving(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<WeightInterleaving_Op>(), name);
+}
+
+
+std::size_t Aidge::WeightInterleaving_Op::compactDataSize(std::size_t dataSize, std::uint8_t nbBits) {
+    AIDGE_ASSERT(nbBits > 0 && nbBits < 8, "nbBits must be between 1 and 7"); // Ensure valid bit width
+
+    // Calculate the number of `nbBits` segments that can fit in an 8-bit byte.
+    const unsigned int nbSlot = 8 / nbBits;
+
+    // Calculate the number of compacted bytes needed to store all data elements.
+    // The formula (dataSize + nbSlot - 1) / nbSlot effectively rounds up the division, ensuring that any remaining elements that don't fully fill a byte are accounted for.
+    std::size_t requiredSize = (dataSize + nbSlot - 1) / nbSlot;
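+    // e.g. 10 Int4 values (nbBits = 4): nbSlot = 2, requiredSize = (10 + 1) / 2 = 5 bytes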
+
+    return requiredSize;
+}
\ No newline at end of file
diff --git a/src/recipes/AdaptToBackend.cpp b/src/recipes/AdaptToBackend.cpp
index e625a52f6545c3b2b34f85745fd88087a1b9883b..bb4222c492f6b0bff17f7de68decd3e8c77efba5 100644
--- a/src/recipes/AdaptToBackend.cpp
+++ b/src/recipes/AdaptToBackend.cpp
@@ -33,6 +33,7 @@ void Aidge::adaptToBackend(std::shared_ptr<GraphView> graphView) {
             Log::info("Adapted node {} (of type {}) to backend {}",
                 node->name(), node->type(), impl->backend());
             AIDGE_ASSERT(GraphView::replace({node}, {adaptedNode}), "Unable to replace adapted node!");
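+            // If the adapted node is a meta operator, expand it in place so that
+            // its internal micro-graph becomes part of the graph view.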
+            expandMetaOp(adaptedNode);
         }
     }
 }
diff --git a/src/recipes/ApplyWeightInterleaving.cpp b/src/recipes/ApplyWeightInterleaving.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b9c042a538bc1ece754c5f659048e9c5f6c0d249
--- /dev/null
+++ b/src/recipes/ApplyWeightInterleaving.cpp
@@ -0,0 +1,119 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/WeightInterleaving.hpp"
+#include "aidge/operator/Transpose.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+
+
+
+void Aidge::applyWeightInterleaving(std::shared_ptr<Node> node){
+    auto weightProducer = node->getParent(1);
+    AIDGE_ASSERT(weightProducer, "Cannot apply weight interleaving on {}: no weight producer is linked to it.", node->name());
+
+    auto weightTensor = std::make_shared<Aidge::Tensor>(std::static_pointer_cast<Aidge::OperatorTensor>(weightProducer->getOperator())->getOutput(0)->clone());
+    // auto backend = node->getOperator()->backend();
+    // Cover the case of Generic Operators
+    auto backend = node->getOperator()->backend().empty() ? "cpu" : node->getOperator()->backend();
+
+    const Aidge::DataType weightDataType = weightTensor->dataType();
+
+    // 1 - Apply the NHWC data format to match the custom kernel implementation for ARM Cortex-M
+    // Issue: if the data format is Default, setting it to NHWC won't permute the dimensions
+    // Fix: if the data format is Default, set it to NCHW first, then to NHWC
+    
+    std::shared_ptr<Tensor> transposedWeightTensor;
+
+    // Case 4D tensor (conv)
+    if (weightTensor->nbDims() == 4)
+    {
+        if (weightTensor->dataFormat() == Aidge::DataFormat::Default) {
+            weightTensor->setDataFormat(Aidge::DataFormat::NCHW);
+        }
+        
+        // Apply permutation for NHWC format
+        if (weightTensor->dataFormat() != Aidge::DataFormat::NHWC) {
+            weightTensor->setDataFormat(Aidge::DataFormat::NHWC);
+        }
+
+        transposedWeightTensor = weightTensor;
+        
+    }
+    else if (weightTensor->nbDims() == 2)
+    {
+        std::shared_ptr<Node> myTranspose = Transpose({1, 0});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
+        op->associateInput(0,weightTensor);
+        op->setDataType(weightDataType);
+        op->setBackend("cpu");
+        myTranspose->forward();
+
+        transposedWeightTensor = op->getOutput(0);
+        transposedWeightTensor->setDataFormat(Aidge::DataFormat::NHWC);
+
+    } else {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Cannot transpose {} weights.", node->name());
+    }
+    
+    // 2 - Apply weight interleaving
+    // Instantiate the WeightInterleaving operator
+    auto WIOp = WeightInterleaving_Op();
+
+    // Configure and forward the WeightInterleaving op
+    WIOp.associateInput(0, transposedWeightTensor);
+
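+    // The interleaved output data type encodes how many low-precision values are
+    // packed per byte: two 4-bit or 3-bit values (Dual_*), four 2-bit values
+    // (Quad_*) or eight binary values (Octo_Binary).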
+    switch (weightDataType) {
+        case Aidge::DataType::Int4:
+            WIOp.setDataType(Aidge::DataType::Dual_Int4);
+            break;
+        case Aidge::DataType::UInt4:
+            WIOp.setDataType(Aidge::DataType::Dual_UInt4);
+            break;
+        case Aidge::DataType::Int3:
+            WIOp.setDataType(Aidge::DataType::Dual_Int3);
+            break;
+        case Aidge::DataType::UInt3:
+            WIOp.setDataType(Aidge::DataType::Dual_UInt3);
+            break;
+        case Aidge::DataType::Int2:
+            WIOp.setDataType(Aidge::DataType::Quad_Int2);
+            break;
+        case Aidge::DataType::UInt2:
+            WIOp.setDataType(Aidge::DataType::Quad_UInt2);
+            break;
+        case Aidge::DataType::Binary:
+            WIOp.setDataType(Aidge::DataType::Octo_Binary);
+            break;
+        default:
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Data type {} not supported for weight interleaving.", weightDataType);
+    }
+
+    WIOp.setDataFormat(Aidge::DataFormat::NHWC);
+    WIOp.setBackend(backend);
+
+    WIOp.forward();
+
+    // 3 - Replace the Weight Producer
+    auto newProducer = {Producer(WIOp.getOutput(0), weightProducer->name())};
+    auto oldProducer = {weightProducer};
+
+    GraphView::replace(oldProducer, newProducer);
+    
+}
\ No newline at end of file
diff --git a/src/recipes/ConstantFolding.cpp b/src/recipes/ConstantFolding.cpp
index 40b0bda766ab243805349b13e93391c5a60df63a..613393756f7f6b7104118ea97593e3130055ceeb 100644
--- a/src/recipes/ConstantFolding.cpp
+++ b/src/recipes/ConstantFolding.cpp
@@ -36,6 +36,7 @@ void Aidge::constantFolding(std::shared_ptr<GraphView> graph) {
         for (const auto& node : candidates) {
             bool foldable = true;
             auto replaceGraph = std::make_shared<GraphView>();
+            size_t i = 0;
             for (const auto& input : node->inputs()) {
                 if (input.first) {
                     if (input.first->type() != Producer_Op::Type) {
@@ -53,6 +54,13 @@ void Aidge::constantFolding(std::shared_ptr<GraphView> graph) {
 
                     replaceGraph->add(input.first, false);
                 }
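+                // A missing mandatory (non-optional) input cannot be evaluated at
+                // compile time, so the node is not foldable.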
+                else if (node->inputCategory(i) != InputCategory::OptionalData
+                    && node->inputCategory(i) != InputCategory::OptionalParam)
+                {
+                    foldable = false;
+                    break;
+                }
+                ++i;
             }
 
             if (foldable) {
diff --git a/src/recipes/ExpandMetaOps.cpp b/src/recipes/ExpandMetaOps.cpp
index 16f0b4c52f394e32e24fa49951c39a7c2cb35162..459a1ca852c486e83af5b1ab18893fa475caecf8 100644
--- a/src/recipes/ExpandMetaOps.cpp
+++ b/src/recipes/ExpandMetaOps.cpp
@@ -14,6 +14,21 @@
 #include "aidge/recipes/Recipes.hpp"
 #include "aidge/operator/MetaOperator.hpp"
 
+bool Aidge::expandMetaOp(std::shared_ptr<Node> node) {
+    auto metaOp = std::dynamic_pointer_cast<MetaOperator_Op>(node->getOperator());
+
+    if (metaOp != nullptr) {
+        // Replace meta op by its micro-graph
+        // graph will be updated accordingly in GraphView::replace()
+        auto g = std::make_shared<GraphView>();
+        g->add(node, false);
+        GraphView::replace(g, metaOp->getMicroGraph());
+        return true;
+    }
+
+    return false;
+}
+
 void Aidge::expandMetaOps(std::shared_ptr<GraphView> graph, bool recursive) {
     bool found = false;
     const auto nodes = graph->getNodes();
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 50c8f561c1732d6f7f37ae5b8d6f03c4e135939c..0fd9d7b4429412c52cfbfd57c581eed19f242169 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -117,7 +117,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
 
             auto prod = addProducer(metaNode, inputIdx, {convNbOutChannels}, "b");
             // Add the new bias node to the same views as the meta node
-            for (auto g : metaNode->views()) {
+            for (auto& g : metaNode->views()) {
                 g->add(prod);
             }
         }
@@ -126,12 +126,12 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
             if (convNode->input(1).first) {
                 // Add the new bias node to the same views as the weights node
                 // if possible
-                for (auto g : convNode->input(1).first->views()) {
+                for (auto& g : convNode->input(1).first->views()) {
                     g->add(prod);
                 }
             }
             else {
-                for (auto g : convNode->views()) {
+                for (auto& g : convNode->views()) {
                     g->add(prod);
                 }
             }
@@ -193,8 +193,8 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::GraphView> graphView) {
     auto matches = SinglePassGraphMatching(graphView).match("(Conv2D|ConvDepthWise2D|PaddedConv2D|PaddedConvDepthWise2D)->BatchNorm2D");
 
     for (auto match : matches) {
-        fmt::println("Match !");
         auto rootNode = match.graph->rootNode();
         fuseBatchNorm(rootNode, *rootNode->getChildren().begin());
     }
+    Log::info("[\033[1m\033[3mFuseBatchNorm\033[0m recipe completed.");
 }
diff --git a/src/recipes/MatMulToFC.cpp b/src/recipes/MatMulToFC.cpp
index 8d902c680b8fa0d30a873b6f355734ce19d608f5..c35b2a64a6bf1186ba43b5ffb696302d0f2c1679 100644
--- a/src/recipes/MatMulToFC.cpp
+++ b/src/recipes/MatMulToFC.cpp
@@ -71,7 +71,7 @@ void Aidge::matMulToFC(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     {
         // If both inputs are producers, there is an ambiguity, but both options
         // result in a correct solution.
-        Log::notice("Notice: both MatMul inputs are Producers, assume data at input#0 and weights at input#1.");
+        Log::notice("both MatMul inputs are Producers, assume data at input#0 and weights at input#1.");
         weight = matmulNode->getParent(1);
     }
     AIDGE_ASSERT(weight != nullptr, "Could not deduce weight input for MatMul operator.");
diff --git a/src/recipes/RemoveFlatten.cpp b/src/recipes/RemoveFlatten.cpp
index bf80ab51749953a5b72d0e01f186265fdbb72e81..5a04746e19f18e65ea601d823cbc9f0e7de7199c 100644
--- a/src/recipes/RemoveFlatten.cpp
+++ b/src/recipes/RemoveFlatten.cpp
@@ -14,13 +14,8 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/recipes/Recipes.hpp"
-
-
-//Graph Regex
-// #include "aidge/graphRegex/GraphRegex.hpp"
 #include "aidge/graph/Matching.hpp"
 
-
 namespace Aidge {
     void removeFlatten(std::shared_ptr<GraphView> graphView){
         const auto matches = SinglePassGraphMatching(graphView).match(
diff --git a/src/recipes/ToGenericOp.cpp b/src/recipes/ToGenericOp.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5f151d8904a21ca49c96dbe089fdc96cd77e7501
--- /dev/null
+++ b/src/recipes/ToGenericOp.cpp
@@ -0,0 +1,23 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+void Aidge::toGenericOp(std::shared_ptr<Node> node) {
+    auto newGenOp = {GenericOperator(node->type(),  std::dynamic_pointer_cast<Aidge::OperatorTensor>(node->getOperator()), node->name())};
+    auto OldOp = {node};
+    GraphView::replace(OldOp, newGenOp);
+}
diff --git a/src/recipes/removeConstantOfShape.cpp b/src/recipes/removeConstantOfShape.cpp
index 5e84f7b494815ecb5a8937bb6f76ba1de80ad3f9..e743050c2c0f13513f639a0690943e0d934f947d 100644
--- a/src/recipes/removeConstantOfShape.cpp
+++ b/src/recipes/removeConstantOfShape.cpp
@@ -34,9 +34,6 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-// Graph Regex
-#include "aidge/graphRegex/GraphRegex.hpp"
-
 namespace Aidge {
 
 size_t removeConstantOfShape(std::shared_ptr<GraphView> graph_view) {
diff --git a/src/scheduler/MemoryManager.cpp b/src/scheduler/MemoryManager.cpp
index ba805f919a607e0b2ae3272d173aa11360548fa7..05f461b82f16b6af4ed412b7336aa2328bcafbe1 100644
--- a/src/scheduler/MemoryManager.cpp
+++ b/src/scheduler/MemoryManager.cpp
@@ -634,152 +634,6 @@ void Aidge::MemoryManager::tick()
     ++mClock;
 }
 
-void Aidge::MemoryManager::log(const std::string& fileName) const
-{
-    auto memData = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen(fileName.c_str(), "w"), &std::fclose);
-
-    if (!memData) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error,
-            "Could not create memory layout log file: {}", fileName);
-    }
-
-    auto gnuplot = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((fileName + "_plot.gnu").c_str(), "w"), &std::fclose);
-
-    if (!gnuplot) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error,
-            "Could not create memory layout log file: {}", (fileName + "_plot.gnu"));
-    }
-
-    const Clock_T maxLifetime = getMaxLifetime();
-    const unsigned int peakUsage = getPeakUsage();
-
-    fmt::print(gnuplot.get(), "#!/usr/bin/gnuplot\n");
-    fmt::print(gnuplot.get(), "set term pngcairo size 1280,768 noenhanced\n");
-    fmt::print(gnuplot.get(), "set output \"{}\"\n", fileName + "_plot.png");
-    fmt::print(gnuplot.get(), "set xrange [{}:{}]\n", 0, maxLifetime + 1);
-    fmt::print(gnuplot.get(), "set yrange [{}:{}]\n", 0, 1.05 * (peakUsage / 1024.0));
-    fmt::print(gnuplot.get(), "set xlabel \"Time\"\n");
-    fmt::print(gnuplot.get(), "set ylabel \"Memory usage (KWords)\"\n");
-    fmt::print(gnuplot.get(), "set grid\n");
-    fmt::print(gnuplot.get(), "set xtics 1\n");
-    fmt::print(gnuplot.get(), "unset key\n");
-    fmt::print(gnuplot.get(), "set palette rgbformulae 30,31,32\n");
-    fmt::print(gnuplot.get(), "unset colorbox\n");
-    fmt::print(gnuplot.get(), "N={}\n", mMemPlanes.size() + 1);
-
-    unsigned int objectId = 1;
-    unsigned int labelId = 1;
-
-    for (std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >
-        ::const_iterator it = mMemPlanes.begin(), itEnd = mMemPlanes.end();
-        it != itEnd; ++it)
-    {
-        const std::string name = (*it).first->name();
-        fmt::print(memData.get(), "{}\n", name);
-
-        double minX = -1;
-        unsigned int maxY = 0;
-
-        for (std::vector<MemoryPlane>::const_iterator itPlanes
-             = (*it).second.begin(), itPlanesBegin = (*it).second.begin(),
-            itPlanesEnd = (*it).second.end(); itPlanes != itPlanesEnd;
-            ++itPlanes)
-        {
-            const unsigned int contiguousOffset
-                = (*itPlanes).getContiguousOffset();
-            const unsigned int contiguousSize = (*itPlanes).getContiguousSize();
-            const unsigned int wrappedOffset = (*itPlanes).getWrappedOffset();
-            const unsigned int wrappedSize = (*itPlanes).getWrappedSize();
-
-            const Clock_T allocated = (*itPlanes).allocated;
-            const Clock_T released = (*itPlanes).memSpace->released;
-            const bool isReleased = (released >= 0
-                                && (*itPlanes).memSpace->dependencies.empty());
-
-            fmt::print(memData.get(), "  {} {} ({:#08x}U) -> {} ({:#08x}U)",
-                (itPlanes - itPlanesBegin), contiguousOffset, contiguousOffset,
-                (contiguousOffset + contiguousSize), (contiguousOffset + contiguousSize));
-
-            if (wrappedSize > 0) {
-                fmt::print(memData.get(), " + {} ({:#08x}U) -> {} ({:#08x}U)",
-                    wrappedOffset, wrappedOffset,
-                    (wrappedOffset + wrappedSize), (wrappedOffset + wrappedSize));
-            }
-
-            fmt::print(memData.get(), " [{}] @ {}", (*itPlanes).getSize(), allocated);
-
-            if (isReleased) {
-                fmt::print(memData.get(), " to {}", released);
-            }
-
-            fmt::print(memData.get(), "\n");
-
-            // Gnuplot
-            const double startX = allocated;
-
-            if (startX < minX || minX < 0) {
-                minX = startX;
-                maxY = contiguousOffset + contiguousSize;
-            }
-
-            if ((*itPlanes).size != (*itPlanes).stride) {
-                for (unsigned int offset = contiguousOffset;
-                    offset < contiguousOffset + contiguousSize;
-                    offset += (*itPlanes).stride)
-                {
-                    fmt::print(gnuplot.get(), "set object {} rectangle from {},{} to {},{} fc palette frac ({} * 1./N)\n",
-                        (allocated * 100 + objectId), startX, (offset / 1024.0),
-                        (((isReleased) ? released : maxLifetime) + 1),
-                        (std::min((offset + (*itPlanes).size),
-                                        contiguousOffset + contiguousSize) / 1024.0),
-                        labelId);
-                    ++objectId;
-                }
-            }
-            else {
-                fmt::print(gnuplot.get(), "set object {} rectangle from {},{} to {},{} fc palette frac ({} * 1./N)\n",
-                    (allocated * 100 + objectId), startX, (contiguousOffset / 1024.0),
-                    (((isReleased) ? released : maxLifetime) + 1),
-                    ((contiguousOffset + contiguousSize) / 1024.0),
-                    labelId);
-                ++objectId;
-            }
-
-            if (wrappedSize > 0) {
-                fmt::print(gnuplot.get(), "set object {} rectangle from {},{} to {},{} fc palette frac ({} * 1./N)\n",
-                    (allocated * 100 + objectId), startX, (wrappedOffset / 1024.0),
-                    (((isReleased) ? released : maxLifetime) + 1),
-                    ((wrappedOffset + contiguousSize) / 1024.0),
-                    labelId);
-                ++objectId;
-
-                fmt::print(gnuplot.get(), "set arrow from {},{} to {},{} nohead\n",
-                    startX, (contiguousOffset / 1024.0),
-                    (startX + 0.1), (contiguousOffset / 1024.0));
-
-                fmt::print(gnuplot.get(), "set arrow from {},{} to {},{} nohead\n",
-                    (startX + 0.05), ((contiguousOffset + contiguousSize) / 1024.0),
-                    (startX + 0.05), (wrappedOffset / 1024.0));
-            }
-        }
-
-        fmt::print(gnuplot.get(), "set label {} '{}' at {},{} rotate by 30 font \",8\" offset char 0.5,0.5\n",
-            labelId, name, minX, (maxY / 1024.0));
-        ++labelId;
-
-        fmt::print(memData.get(), "\n");
-    }
-
-    fmt::print(gnuplot.get(), "set arrow from 0,{} to {},{} nohead lc rgb \"red\"\n",
-        (peakUsage / 1024.0), (maxLifetime + 1),
-        (peakUsage / 1024.0));
-
-    fmt::print(gnuplot.get(), "set label {} 'Peak usage = {} KWords' at 0,{} textcolor rgb \"red\" offset char 0.5,0.5\n",
-        labelId, (peakUsage / 1024.0), (peakUsage / 1024.0));
-
-    fmt::print(gnuplot.get(), "plot 0\n");
-}
-
 unsigned int Aidge::MemoryManager::onStack(unsigned int size)
 {
     unsigned int offset = 0;
diff --git a/src/scheduler/ProdConso.cpp b/src/scheduler/ProdConso.cpp
index a3bff53c3643a5da361dec5944f47a27f148a995..0e20796febb86f7e0e795f905222aa621609cc56 100644
--- a/src/scheduler/ProdConso.cpp
+++ b/src/scheduler/ProdConso.cpp
@@ -9,13 +9,19 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <string>
-
 #include "aidge/scheduler/ProdConso.hpp"
+
+#include <algorithm>  // std::fill
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Elts.hpp"
 #include "aidge/operator/Operator.hpp"
+#include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
 
 Aidge::ProdConso::ProdConso(const Operator& op, bool inPlace):
     mOp(op),
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 2e9dc034ef0bf2b0be2ee27c26b1995a2d0e4244..bbbc3d8076cc1ff5fe25e9f60e4827b94ef0de4f 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -33,6 +33,7 @@
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
+#include "aidge/operator/Concat.hpp"
 #include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 
@@ -426,6 +427,83 @@ void Aidge::Scheduler::generateEarlyLateScheduling(std::vector<StaticSchedulingE
     }
 }
 
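+/**
+ * @brief Detect repeated sub-sequences in a static schedule and factorize them.
+ * For each starting position, every candidate sequence length is tried and the
+ * candidate maximizing (sequence length * number of repetitions) is kept,
+ * provided it repeats at least minRepeat times; otherwise the element is
+ * emitted alone.
+ * @return List of (sequence, repetition count) pairs covering the whole schedule.
+ */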
+std::vector<std::pair<std::vector<Aidge::Scheduler::StaticSchedulingElement*>, size_t>>
+Aidge::Scheduler::getFactorizedScheduling(const std::vector<StaticSchedulingElement*>& schedule, size_t minRepeat) const
+{
+    std::vector<std::pair<std::vector<StaticSchedulingElement*>, size_t>> sequences;
+    size_t offset = 0;
+
+    for (size_t i = 0; i < schedule.size(); ) {
+        // Find all the possible repetitive sequences starting from this element
+        std::vector<StaticSchedulingElement*> seq;
+        std::vector<std::pair<std::vector<StaticSchedulingElement*>, size_t>> longestSeq;
+        std::vector<size_t> longestSeqOffset;
+
+        for (size_t k = i; k < schedule.size(); ++k) {
+            // For each sequence length, starting from 1...
+            seq.push_back(new StaticSchedulingElement(
+                schedule[k]->node,
+                schedule[k]->early - offset,
+                schedule[k]->late - offset));
+
+            size_t start = k + 1;
+            size_t nbRepeats = 1;
+            bool repeat = true;
+            const auto seqOffset = (start < schedule.size()) ? schedule[start]->early - offset - seq[0]->early : 0;
+
+            do {
+                // Count the number of consecutive sequences (repetitions)
+                for (size_t r = 0; r < seq.size(); ++r) {
+                    if (start + r >= schedule.size()
+                        || schedule[start + r]->node != seq[r]->node
+                        || schedule[start + r]->early - offset != seq[r]->early + seqOffset * nbRepeats
+                        || schedule[start + r]->late - offset != seq[r]->late + seqOffset * nbRepeats)
+                    {
+                        repeat = false;
+                        break;
+                    }
+                }
+
+                if (repeat) {
+                    start += seq.size();
+                    ++nbRepeats;
+                }
+            }
+            while (repeat);
+
+            if (nbRepeats >= minRepeat) {
+                // If repetitions exist for this sequence length, add it to the list
+                longestSeq.push_back(std::make_pair(seq, nbRepeats));
+                longestSeqOffset.push_back(seqOffset);
+            }
+            else if (k == i) {
+                // Ensure that at least the current element is in the list if no
+                // repetition is found
+                longestSeq.push_back(std::make_pair(seq, 1));
+                longestSeqOffset.push_back(0);
+            }
+        }
+
+        // Select the candidate with the best factorization,
+        // i.e. the one that maximizes the product (sequence length * number of repetitions)
+        size_t maxS = 0;
+        size_t maxFactorization = 0;
+        for (size_t s = 0; s < longestSeq.size(); ++s) {
+            const auto factor = longestSeq[s].first.size() * longestSeq[s].second;
+            if (factor > maxFactorization) {
+                maxFactorization = factor;
+                maxS = s;
+            }
+        }
+
+        sequences.push_back(longestSeq[maxS]);
+        i += longestSeq[maxS].first.size() * longestSeq[maxS].second;
+        offset += longestSeqOffset[maxS] * (longestSeq[maxS].second - 1);
+    }
+
+    return sequences;
+}
+
 void Aidge::Scheduler::resetScheduling() {
     for (auto node : mGraphView->getNodes()) {
         node->getOperator()->resetConsummerProducer();
@@ -475,8 +553,7 @@ Aidge::MemoryManager Aidge::Scheduler::generateMemory(bool incProducers, bool wr
                 std::size_t length = 1;
                 std::size_t count = 1;
 
-                if (op->getOutput(outputIdx) && op->getOutput(outputIdx)->dims().size() > 3) {
-                    // If it is possible, assume a NCHW layout
+                if (op->getOutput(outputIdx) && op->getOutput(outputIdx)->dataFormat() == DataFormat::NHWC) {
                     size = op->getOutput(outputIdx)->dims().end()[-3];
                     stride = size;
                     length = op->getOutput(outputIdx)->dims().end()[-1];
@@ -561,6 +638,235 @@ Aidge::MemoryManager Aidge::Scheduler::generateMemory(bool incProducers, bool wr
     return memManager;
 }
 
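+/**
+ * @brief Same as generateMemory(), except that when a node's only child is a
+ * Concat whose parents each have a single child, the Concat output plane is
+ * allocated directly and every parent is mapped at its own offset inside that
+ * plane, so the concatenation itself requires no further allocation.
+ */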
+Aidge::MemoryManager Aidge::Scheduler::generateMemoryAutoConcat(bool incProducers, bool wrapAroundBuffer) const {
+    MemoryManager memManager;
+    std::map<NodePtr, MemoryManager::MemoryPlane> concatMemPlane;
+
+    for (std::size_t step = 0; step < mStaticSchedule.size(); ++step) {
+        // AsLateAsPossible ensures that, when a node's child is a Concat, the parents
+        // of all the Concat's parents have already been memory mapped!
+        for (const auto& node : getStaticScheduling(step, EarlyLateSort::AsLateAsPossible)) {
+            if (!incProducers && node->type() == Producer_Op::Type) {
+                memManager.releaseDependencies(node);
+                continue;
+            }
+
+            auto itConcat = concatMemPlane.find(node);
+            if (itConcat != concatMemPlane.end()) {
+                // Skip Concat
+                AIDGE_INTERNAL_ASSERT(itConcat->first->type() == Concat_Op::Type);
+                concatMemPlane.erase(itConcat);
+                continue;
+            }
+            itConcat = concatMemPlane.end();
+
+            auto childs = node->getChildren();
+            AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor,
+                "Operator must be of Tensor type for node {} (of type {}).",
+                node->name(), node->type());
+            const auto op = std::static_pointer_cast<OperatorTensor>(node->getOperator());
+
+            std::shared_ptr<Node> concat = nullptr;
+            // If the only child is a concatenation, check if we can allocate
+            // the concatenation result directly and skip allocation for this 
+            // node output:
+            if (childs.size() == 1 && (*childs.begin())->type() == Concat_Op::Type) {
+                concat = *childs.begin();
+
+                for (const auto& concatParent : concat->getParents()) {
+                    if (concatParent->getChildren().size() > 1) {
+                        // not possible: at least one of the Concat parents has
+                        // multiple children.
+                        concat = nullptr;
+                        break;
+                    }
+                }
+            }
+
+            if (concat) {
+                itConcat = concatMemPlane.find(concat);
+            }
+
+            std::vector<const MemoryManager::MemoryPlane*> wrapAroundMemPlane;
+
+            // Allocate a memory plane for each node's output
+            AIDGE_INTERNAL_ASSERT(!concat || node->nbOutputs() == 1);
+            for (IOIndex_t outputIdx = 0; outputIdx < node->nbOutputs(); ++outputIdx) {
+                auto requiredSize = op->getRequiredMemory(outputIdx, {});
+                auto outputDims = (op->getOutput(outputIdx)) ? op->getOutput(outputIdx)->dims() : std::vector<DimSize_t>();
+                auto outputFormat = (op->getOutput(outputIdx)) ? op->getOutput(outputIdx)->dataFormat() : DataFormat::Default;
+
+                // If concat is not nullptr, we directly allocate the concatenation result
+                // Must check that we are on the right output too.
+                if (concat && node->getChildren(outputIdx).size() == 1) {
+                    const auto concatOp = std::static_pointer_cast<OperatorTensor>(concat->getOperator());
+                    requiredSize = concatOp->getRequiredMemory(0, {});
+                    outputDims = (concatOp->getOutput(0)) ? concatOp->getOutput(0)->dims() : std::vector<DimSize_t>();
+                    outputFormat = (concatOp->getOutput(0)) ? concatOp->getOutput(0)->dataFormat() : DataFormat::Default;
+                }
+
+                AIDGE_ASSERT(requiredSize.type == Elts_t::Data,
+                    "Cannot generate memory with token-based producer-consumer model for node {} (of type {}).",
+                    node->name(), node->type());
+
+                // By default, specifies a fully monolithic memory block
+                std::size_t size = requiredSize.data;
+                std::size_t stride = 0;
+                std::size_t length = 1;
+                std::size_t count = 1;
+
+                if (outputFormat == DataFormat::NHWC) {
+                    size = op->getOutput(outputIdx)->dims().end()[-3];
+                    stride = outputDims.end()[-3];
+                    length = outputDims.end()[-1];
+                    count = outputDims.end()[-2];
+
+                    AIDGE_INTERNAL_ASSERT(stride >= size);
+                    AIDGE_INTERNAL_ASSERT(length == op->getOutput(outputIdx)->dims().end()[-1]);
+                    AIDGE_INTERNAL_ASSERT(count == op->getOutput(outputIdx)->dims().end()[-2]);
+                }
+
+                // Check if wrap around buffer is possible for this node
+                // (re-using previous node outputs memory for this node outputs).
+                // => only if this node is the only child of its parent(s)
+                std::size_t wrapAroundSize = 0;
+                std::size_t wrapAroundExtra = 0;
+                wrapAroundMemPlane.push_back(nullptr); // default value of wrapAroundMemPlane[outputIdx]
+
+                // Select the best parent among all allocable nodes for
+                // reallocation, i.e. the one with the most memory (in order
+                // to minimize the reallocation size).
+                const auto allocableNodes = (concat) ? concat->getParents() : std::vector<NodePtr>{node};
+                for (const auto& allocableNode : allocableNodes) {
+                    IOIndex_t inputIdx = 0;
+                    for (const auto& parent : allocableNode->dataInputs()) {
+                        if (parent.first && parent.first->getChildren(parent.second).size() == 1
+                            // there might be no existing plane if the parent was
+                            // not yet scheduled (because it may be a recurrent connection)
+                            && memManager.getNbPlanes(parent.first) >= parent.first->nbOutputs()
+                            // memSpace should not be already released
+                            && memManager.getPlanes(parent.first).end()[-parent.first->nbOutputs()+parent.second].memSpace->released == -1)
+                        {
+                            const auto requiredData = allocableNode->getOperator()->getNbRequiredData(inputIdx);
+                            const auto requiredProtected = allocableNode->getOperator()->getNbRequiredProtected(inputIdx);
+                            AIDGE_ASSERT(requiredData.type == Elts_t::Data && requiredProtected.type == Elts_t::Data,
+                                "Cannot generate memory with token-based producer-consumer model for node {} (of type {}).",
+                                node->name(), node->type());
+
+                            const bool isWrappable = (requiredProtected.data < requiredData.data);
+                            const MemoryManager::MemoryPlane& memPlane
+                                = (concat && itConcat != concatMemPlane.end())
+                                    ? itConcat->second
+                                    : memManager.getPlanes(parent.first).end()[-parent.first->nbOutputs()+parent.second];
+
+                            if (isWrappable || !memManager.isWrapAround(
+                                        memPlane.memSpace,
+                                        memPlane.getFinalOffset()
+                                            - memPlane.memSpace->offset,
+                                        requiredSize.data))
+                            {
+                                if (memPlane.getSize() > wrapAroundSize + requiredProtected.data
+                                    && std::find(wrapAroundMemPlane.begin(), wrapAroundMemPlane.end(), &memPlane) == wrapAroundMemPlane.end())
+                                {
+                                    wrapAroundSize = memPlane.getSize() - requiredProtected.data;
+                                    if (requiredSize.data > wrapAroundSize) {
+                                        wrapAroundExtra = requiredSize.data - wrapAroundSize;
+                                    }
+                                    wrapAroundMemPlane[outputIdx] = &memPlane;
+                                }
+
+                                if (wrapAroundExtra == 0) {
+                                    break;
+                                }
+                            }
+                        }
+                        ++inputIdx;
+                    }
+                }
+
+                size_t concatSize = size;
+                size_t concatOffset = 0;
+
+                if (concat) {
+                    // Dependencies should be the Concat node's *children*, not the Concat node itself
+                    childs = concat->getChildren();
+
+                    // Compute concatOffset
+                    for (auto concatParent : concat->getParents()) {
+                        const auto parentOp = std::static_pointer_cast<OperatorTensor>(concatParent->getOperator());
+                        const auto parentRequiredSize = parentOp->getRequiredMemory(outputIdx, {});
+                        const auto parentOutputDims = (parentOp->getOutput(outputIdx)) ? parentOp->getOutput(outputIdx)->dims() : std::vector<DimSize_t>();
+                        const auto parentOutputFormat = (parentOp->getOutput(outputIdx)) ? parentOp->getOutput(outputIdx)->dataFormat() : DataFormat::Default;
+
+                        if (concatParent == node) {
+                            if (parentOutputFormat != DataFormat::NHWC) {
+                                concatSize = parentRequiredSize.data;
+                            }
+                            break;
+                        }
+                        else {
+                            // By default, specifies a fully monolithic memory block
+                            std::size_t parentSize = parentRequiredSize.data;
+
+                            if (parentOutputFormat == DataFormat::NHWC) {
+                                parentSize = parentOutputDims.end()[-3];
+                            }
+
+                            concatOffset += parentSize;
+                        }
+                    }
+
+                    // Size in reallocate() is counted from the offset, not from 0,
+                    // meaning the offset must be subtracted to obtain the correct
+                    // total size without excess.
+                    if (concatOffset > 0) {
+                        concatSize -= concatOffset;
+                    }
+                }
+
+                // MemoryPlane to (re)use
+                const MemoryManager::MemoryPlane& memPlane
+                    = (concat && itConcat != concatMemPlane.end())
+                        ? itConcat->second :
+                      (wrapAroundBuffer && wrapAroundSize > 0)
+                        ? (*wrapAroundMemPlane[outputIdx]) :
+                            memManager.allocate(size, childs, stride, length, count);
+
+                if (wrapAroundBuffer && wrapAroundSize > 0) {
+                    memManager.reallocate(memPlane,
+                        node, concatOffset,
+                        concatSize, true, wrapAroundExtra, childs, stride, length, count);
+                }
+                else {
+                    memManager.reallocate(memPlane.memSpace,
+                        node, memPlane.offset + concatOffset,
+                        concatSize, false, 0, childs, stride, length, count);
+                }
+
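+                // First Concat parent encountered: register the Concat memory
+                // plane so that its other parents and the Concat node itself
+                // reuse it (the Concat is then skipped later in the schedule).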
+                if (concat && itConcat == concatMemPlane.end()) {
+                    concatMemPlane.emplace(concat, memPlane);
+
+                    if (wrapAroundBuffer && wrapAroundSize > 0) {
+                        memManager.reallocate(memPlane,
+                            concat, 0,
+                            size, true, wrapAroundExtra, childs, stride, length, count);
+                    }
+                    else {
+                        memManager.reallocate(memPlane.memSpace,
+                            concat, memPlane.offset,
+                            size, false, 0, childs, stride, length, count);
+                    }
+                }
+            }
+
+            memManager.releaseDependencies(node);
+            memManager.tick();
+        }
+    }
+
+    return memManager;
+}
+
 void Aidge::Scheduler::connectInputs(const std::vector<std::shared_ptr<Aidge::Tensor>>& data){
     // This version of connect inputs only connects tensor inputs in input data producers.
     auto inputNodes = mGraphView->getOrderedInputs();
@@ -640,8 +946,14 @@ void Aidge::Scheduler::saveStaticSchedulingDiagram(const std::string& fileName)
                 // Mermaid does not allow : character in task title
                 std::replace(name.begin(), name.end(), ':', '_');
 
-                fmt::print(fp.get(), "{} :{}, {}\n",
-                            name, element->early, element->late);
+                if (element->early == element->late) {
+                    fmt::print(fp.get(), "{} :milestone, {}, {}\n",
+                                name, element->early, element->late);
+                }
+                else {
+                    fmt::print(fp.get(), "{} :{}, {}\n",
+                                name, element->early, element->late);
+                }
             }
         }
     }
@@ -649,11 +961,72 @@ void Aidge::Scheduler::saveStaticSchedulingDiagram(const std::string& fileName)
     fmt::print(fp.get(), "\n");
 }
 
-std::vector<std::shared_ptr<Aidge::Node>> Aidge::Scheduler::getStaticScheduling(std::size_t step) const {
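+/**
+ * @brief Save a Mermaid Gantt diagram of the static scheduling in which the
+ * repeated sub-sequences found by getFactorizedScheduling() are grouped into
+ * sections annotated with their repetition count.
+ */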
+void Aidge::Scheduler::saveFactorizedStaticSchedulingDiagram(const std::string& fileName, size_t minRepeat) const {
+    auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((fileName + ".mmd").c_str(), "w"), &std::fclose);
+
+    if (!fp) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "Could not create scheduling diagram log file: {}", fileName + ".mmd");
+    }
+
+    fmt::print(fp.get(), "gantt\ndateFormat x\naxisFormat %Q\n\n");
+
+    if (!mStaticSchedule.empty()) {
+        const std::map<std::shared_ptr<Node>, std::string> namePtrTable
+            = mGraphView->getRankedNodesName("{0} ({1}#{3})");
+
+        for (const auto& schedule : mStaticSchedule) {
+            const auto factorizedSchedule = getFactorizedScheduling(schedule, minRepeat);
+
+            size_t seq = 0;
+            for (const auto& sequence : factorizedSchedule) {
+                if (sequence.second > 1) {
+                    fmt::print(fp.get(), "section seq#{} (x{})\n", seq, sequence.second);
+                }
+                else {
+                    fmt::print(fp.get(), "section seq#{}\n", seq);
+                }
+
+                for (const auto& element : sequence.first) {
+                    auto name = namePtrTable.at(element->node);
+                    // Mermaid does not allow : character in task title
+                    std::replace(name.begin(), name.end(), ':', '_');
+                    std::string tag = ":";
+
+                    if (element->early == element->late) {
+                        tag += "milestone, ";
+                    }
+
+                    if (sequence.second > 1) {
+                        tag += "active, ";
+                    }
+
+                    fmt::print(fp.get(), "{} {}{}, {}\n",
+                                name, tag, element->early, element->late);
+                }
+                ++seq;
+            }
+        }
+    }
+
+    fmt::print(fp.get(), "\n");
+}
+
+std::vector<std::shared_ptr<Aidge::Node>> Aidge::Scheduler::getStaticScheduling(std::size_t step, EarlyLateSort sorting) const {
     AIDGE_ASSERT(!mStaticSchedule.empty(), "Scheduler::getStaticScheduling(): static scheduling is empty, did you generate scheduling first?");
     AIDGE_ASSERT(step < mStaticSchedule.size(), "Scheduler::getStaticScheduling(): no static scheduling at step {} (available steps: {})", mStaticSchedule.size(), step);
 
-    const auto& staticSchedule = mStaticSchedule.at(step);
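+    // Work on a copy of the step's elements so that sorting does not modify
+    // the stored static schedule.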
+    std::deque<StaticSchedulingElement*> staticSchedule(mStaticSchedule.at(step).begin(), mStaticSchedule.at(step).end());
+
+    if (sorting == EarlyLateSort::AsSoonAsPossible) {
+        std::stable_sort(staticSchedule.begin(), staticSchedule.end(),
+            [](const auto& lhs, const auto& rhs) { return ((lhs->early < rhs->early) || (lhs->early == rhs->early && lhs->late < rhs->late)); });
+    }
+    else if (sorting == EarlyLateSort::AsLateAsPossible) {
+        std::stable_sort(staticSchedule.begin(), staticSchedule.end(),
+            [](const auto& lhs, const auto& rhs) { return ((lhs->late < rhs->late) || (lhs->late == rhs->late && lhs->early < rhs->early)); });
+    }
+
     std::vector<std::shared_ptr<Node>> schedule;
     std::transform(staticSchedule.begin(), staticSchedule.end(), std::back_inserter(schedule), [](const auto& v) { return v->node; });
     return schedule;
@@ -750,7 +1123,7 @@ Aidge::Scheduler::getPriorProducersConsumers(const std::shared_ptr<Node>& node)
                     prior.requiredProducers.insert(parent.first);
                     prior.priorConsumers.insert(node);
                 }
-                else if (parent.first->type() == Memorize_Op::Type) {
+                else if (parent.first->type() == Memorize_Op::Type && parent.second == 1) {
                     // Break cycles
                     return PriorProducersConsumers(); // not scheduled
                 }
diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp
index 136fcc16fe5f83f684f0686fe64105023f8cc688..b4c64d527d361db31e46c6b6dbe6d3b4ebf6765b 100644
--- a/src/utils/Log.cpp
+++ b/src/utils/Log.cpp
@@ -10,74 +10,186 @@
  ********************************************************************************/
 
 #include "aidge/utils/Log.hpp"
-#include "aidge/utils/ErrorHandling.hpp"
 
-#include <cstdlib>
+#include <fmt/core.h>
 
-#include <fmt/color.h>
-#include <fmt/chrono.h>
+#include <cstdlib>  // std::getenv
+#include <memory>
+#include <string>
+#include <vector>
 
-Aidge::Log::Level Aidge::Log::mConsoleLevel = []() {
-    const char* logLevel = std::getenv("AIDGE_LOGLEVEL_CONSOLE");
-    if (logLevel != nullptr) {
-        for (std::size_t i = 0; i < size(EnumStrings<Log::Level>::data); ++i) {
-            if (std::string(logLevel) == EnumStrings<Log::Level>::data[i]) {
-                return static_cast<Log::Level>(i);
-            }
-        }
+namespace Aidge {
+
+/**
+ * @brief Initialize console log level from environment. If compile mode is
+ * DEBUG, then the default level is Log::Level::Debug, else it is
+ * Log::Level::Notice.
+ */
+Log::Level Log::mConsoleLevel = []() {
+#ifndef NDEBUG
+    constexpr Level defaultLevel = Level::Debug;
+#else
+    constexpr Level defaultLevel = Level::Notice;
+#endif
+    if (const char* level = std::getenv("AIDGE_LOGLEVEL_CONSOLE")) {
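+        // Only the first character is checked: "DEBUG", "Debug" or "D" all
+        // select the Debug level; unknown values fall back to the default.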
+        return level[0] == 'D' ? Debug :
+               level[0] == 'I' ? Info :
+               level[0] == 'N' ? Notice :
+               level[0] == 'W' ? Warn :
+               level[0] == 'E' ? Error :
+               level[0] == 'F' ? Fatal : defaultLevel;
     }
-    return Info;
+    return defaultLevel;
 }();
-bool Aidge::Log::mConsoleColor = []() {
-    const char* logColor = std::getenv("AIDGE_LOG_COLOR");
-    if (logColor == nullptr)
-        return true;
-    auto logColorStr = std::string(logColor);
-    if (logColorStr == "off" || logColorStr == "OFF" ||
-        logColorStr == "0")
-        return false;
-    return true;
+
+/**
+ * @brief Initialize color setting from environment or default to enabled
+ */
+bool Log::mConsoleColor = []() {
+    const char* color = std::getenv("AIDGE_LOG_COLOR");
+    return !color || (std::string(color) != "off" &&
+                     std::string(color) != "OFF" &&
+                     std::string(color) != "0");
 }();
-Aidge::Log::Level Aidge::Log::mFileLevel = []() {
-    const char* logLevel = std::getenv("AIDGE_LOGLEVEL_FILE");
-    if (logLevel != nullptr) {
-        for (std::size_t i = 0; i < size(EnumStrings<Log::Level>::data); ++i) {
-            if (std::string(logLevel) == EnumStrings<Log::Level>::data[i]) {
-                return static_cast<Log::Level>(i);
-            }
-        }
+
+/**
+ * @brief Initialize file log level from environment. If compile mode is DEBUG,
+ * then the default level is Log::Level::Debug, else it is Log::Level::Notice.
+ */
+Log::Level Log::mFileLevel = []() {
+#ifndef NDEBUG
+    constexpr Level defaultLevel = Level::Debug;
+#else
+    constexpr Level defaultLevel = Level::Notice;
+#endif
+    if (const char* level = std::getenv("AIDGE_LOGLEVEL_FILE")) {
+        return level[0] == 'D' ? Debug :
+               level[0] == 'I' ? Info :
+               level[0] == 'N' ? Notice :
+               level[0] == 'W' ? Warn :
+               level[0] == 'E' ? Error :
+               level[0] == 'F' ? Fatal : defaultLevel;
     }
-    return Debug;
+    return defaultLevel;
 }();
-std::string Aidge::Log::mFileName = []() {
-    const char* logFile = std::getenv("AIDGE_LOG_FILE");
-    if (logFile != nullptr) {
-        return std::string(logFile);
-    }
-    return std::string();
+
+/**
+ * @brief Initialize file path from environment
+ */
+std::string Log::mFileName = []() {
+    const char* file = std::getenv("AIDGE_LOG_FILE");
+    return file ? std::string(file) : std::string();
 }();
-std::unique_ptr<FILE, Aidge::Log::fcloseDeleter> Aidge::Log::mFile {nullptr, Aidge::Log::fcloseDeleter()};
-std::vector<std::string> Aidge::Log::mContext;
 
-void Aidge::Log::log(Level level, const std::string& msg) {
-    if (level >= mConsoleLevel) {
-        // Apply log level style only for console.
-        // Styles that were already applied to msg with fmt are kept also in
-        // the log file.
-        const auto modifier
-            = !mConsoleColor ? fmt::text_style()
-            : (level == Debug) ? fmt::fg(fmt::color::gray)
-            : (level == Notice) ? fmt::fg(fmt::color::medium_purple)
-            : (level == Warn) ? fmt::fg(fmt::color::orange)
-            : (level == Error) ? fmt::fg(fmt::color::red)
-            : (level == Fatal) ? fmt::bg(fmt::color::red)
-            : fmt::text_style();
+std::unique_ptr<FILE, Log::fcloseDeleter> Log::mFile{nullptr};
+std::vector<std::string> Log::mContext;
+int Log::mFloatingPointPrecision = 5;
 
+/**
+ * @brief Internal logging implementation
+ * @param level Severity level of the message
+ * @param msg The message to log
+ */
+void Log::log(Level level, const std::string& msg) {
+    /**
+     * @brief Helper function to wrap text to a specified width, respecting
+     * explicit line breaks (\n) and accounting for ANSI escape sequences.
+     */
+    const auto wrapText = [](const std::string& text, const std::size_t width) -> std::vector<std::string> {
+        std::vector<std::string> wrappedLines;
+        std::size_t start = 0;
+
+        while (start < text.size()) {
+            std::size_t lineWidth = 0;
+            std::size_t current = start;
+
+            while (current < text.size() && lineWidth < width) {
+                if (text[current] == '\033') {
+                    // Found ANSI escape sequence, skip until 'm'
+                    std::size_t ansiEnd = text.find('m', current);
+                    if (ansiEnd != std::string::npos) {
+                        // Skip the escape sequence without counting it
+                        // against the allowed line width
+                        current = ansiEnd + 1;
+                    } else {
+                        // Malformed sequence, treat as normal characters
+                        ++current;
+                    }
+                } else if (text[current] == '\n') {
+                    // Handle explicit line break
+                    break;
+                } else {
+                    // Normal character, increase line width
+                    ++lineWidth;
+                    ++current;
+                }
+            }
+
+            // Find the end of the current line
+            std::size_t lineEnd = current;
+
+            // Adjust for spaces if needed
+            if (lineEnd < text.size() && text[lineEnd] != '\n') {
+                std::size_t lastSpace = text.rfind(' ', lineEnd);
+                if (lastSpace != std::string::npos && lastSpace > start) {
+                    lineEnd = lastSpace;
+                }
+            }
+
+            // Add the wrapped line to the result
+            wrappedLines.push_back(text.substr(start, lineEnd - start));
+
+            start = ++lineEnd;
+        }
+
+        return wrappedLines;
+    };
+
+
+    // Get the terminal color for the log level
+    const auto getColor = [](Level lvl) {
+        switch (lvl) {
+            case Debug:   return fmt::terminal_color::green;
+            case Info:    return fmt::terminal_color::blue;
+            case Notice:  return fmt::terminal_color::bright_blue;
+            case Warn:    return fmt::terminal_color::yellow;
+            case Error:   return fmt::terminal_color::red;
+            case Fatal:   return fmt::terminal_color::bright_magenta;
+            default:      return fmt::terminal_color::white;
+        }
+    };
+
+    // Get the string representation of the log level
+    const auto levelStr = EnumStrings<Level>::data[static_cast<std::size_t>(level)];
+    const std::size_t levelIndentSizes[6] = {10, 9, 11, 12, 10, 10};
+    const std::size_t width = 80 - levelIndentSizes[static_cast<std::size_t>(level)];
+
+    if (level >= mConsoleLevel) {
         for (const auto& context : mContext) {
             fmt::println("Context: {}", context);
         }
 
-        fmt::println("{}", fmt::styled(msg, modifier));
+        // Wrap the message and print each line
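+        // Format: "[LEVEL] - text" for the first line, "[LEVEL]   text" for
+        // continuation lines, with the level tag colored when mConsoleColor is set.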
+        auto wrappedLines = wrapText(msg, width);
+        for (std::size_t i = 0; i < wrappedLines.size(); ++i) {
+            if (mConsoleColor) {
+                if (i == 0) {
+                    fmt::print("[");
+                    fmt::print(fg(getColor(level)), "{}", levelStr);
+                    fmt::print("] - {}\n", wrappedLines[i]);
+                } else {
+                    fmt::print("[");
+                    fmt::print(fg(getColor(level)), "{}", levelStr);
+                    fmt::print("]   {}\n", wrappedLines[i]);
+                }
+            } else {
+                if (i == 0) {
+                    fmt::print("[{}] - {}\n", levelStr, wrappedLines[i]);
+                } else {
+                    fmt::print("[{}]   {}\n", levelStr, wrappedLines[i]);
+                }
+            }
+        }
     }
 
     if (level >= mFileLevel && !mFileName.empty()) {
@@ -86,25 +198,45 @@ void Aidge::Log::log(Level level, const std::string& msg) {
         }
 
         for (const auto& context : mContext) {
-            fmt::println("Context: {}", context);
+            fmt::println(mFile.get(), "Context: {}", context);
         }
 
-        fmt::println(mFile.get(), "{}", msg);
+        auto wrappedLines = wrapText(msg, width);
+        for (std::size_t i = 0; i < wrappedLines.size(); ++i) {
+            if (i == 0) {
+                fmt::println(mFile.get(), "{}: {}", levelStr, wrappedLines[i]);
+            } else {
+                fmt::println(mFile.get(), "{}   {}", levelStr, wrappedLines[i]);
+            }
+        }
     }
 }
 
-void Aidge::Log::initFile(const std::string& fileName) {
-    FILE* rawFile = std::fopen(fileName.c_str(), "a");
 
-    if (!rawFile) {
-        mFileName.clear(); // prevents AIDGE_THROW_OR_ABORT() to try to log into file
-        AIDGE_THROW_OR_ABORT(std::runtime_error,
-            "Could not create log file: {}", fileName);
-    }
 
-    // Assign the new FILE* with the deleter
-    mFile = std::unique_ptr<FILE, fcloseDeleter>(rawFile, fcloseDeleter());
+/**
+ * @brief Initialize or re-initialize the log file
+ * @param fileName Path to the log file
+ * @throw std::runtime_error if file cannot be opened
+ */
+void Log::initFile(const std::string& fileName) {
+    if (FILE* file = std::fopen(fileName.c_str(), "a")) {
+        mFile.reset(file);
+    } else {
+        mFileName.clear();
+        throw std::runtime_error(
+            fmt::format("Could not create log file: {}", fileName));
+    }
+}
 
-    const std::time_t t = std::time(nullptr);
-    fmt::println(mFile.get(), "###### {:%Y-%m-%d %H:%M:%S} ######", fmt::localtime(t));
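+/**
+ * @brief Set the log file name. If it differs from the current one, the
+ * previously opened file (if any) is closed and, when the new name is not
+ * empty, the new file is opened immediately through initFile().
+ */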
+void Log::setFileName(const std::string& fileName) {
+    if (fileName != mFileName) {
+        mFileName = fileName;
+        mFile.reset();
+        if (!fileName.empty()) {
+            initFile(fileName);
+        }
+    }
 }
+
+} // namespace Aidge
diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt
index 2c0c746a412643c568ff07d8c50b5f5d8cbf72d5..cf7e896da87c9b08c08325874e78ad7957ec27da 100644
--- a/unit_tests/CMakeLists.txt
+++ b/unit_tests/CMakeLists.txt
@@ -1,4 +1,8 @@
-find_package(Catch2 3.7.1)
+# Catch2 configuration
+set(CATCH2_MIN_VERSION 3.3.0)
+
+# Try to find system installed Catch2
+find_package(Catch2 ${CATCH2_MIN_VERSION} QUIET)
 
 if(NOT Catch2_FOUND)
     message(STATUS "Catch2 not found in system, retrieving from git")
@@ -7,64 +11,95 @@ if(NOT Catch2_FOUND)
     FetchContent_Declare(
       Catch2
       GIT_REPOSITORY https://github.com/catchorg/Catch2.git
-      GIT_TAG        v3.7.1 # or a later release
+      GIT_TAG        devel # development branch (latest)
     )
-
     FetchContent_MakeAvailable(Catch2)
+    message(STATUS "Fetched Catch2 version ${Catch2_VERSION}")
+else()
+    message(STATUS "Using system Catch2 version ${Catch2_VERSION}")
 endif()
 
+# Get all source files
 file(GLOB_RECURSE src_files "*.cpp")
 
-#file(GLOB_RECURSE src_files "graphRegex/Test_GraphRegex.cpp")
-
+# Create test executable
 add_executable(tests${module_name} ${src_files})
 
+# Set C++14 standard
 target_compile_features(tests${module_name} PRIVATE cxx_std_14)
 
 set(FORCE_CI TRUE)
-if (NOT(FORCE_CI))
-
-if (DOSANITIZE STREQUAL "ON")
-set(SANITIZE_FLAGS -fsanitize=address,leak,undefined,float-divide-by-zero -fno-omit-frame-pointer)
-#TODO sanitizer seems buggy in some situations with msvc, leading to linker errors, temporarily inactivating it
-#set(SANITIZE_MSVC_FLAGS)
-set(SANITIZE_MSVC_FLAGS /fsanitize=address)
-target_compile_definitions(tests${module_name} PUBLIC _DISABLE_VECTOR_ANNOTATION)
-else()
-set(SANITIZE_FLAGS)
-set(SANITIZE_MSVC_FLAGS)
-endif()
+# Compiler flags and options
+if(NOT(FORCE_CI))
+    # Sanitization configuration
+    if(DOSANITIZE STREQUAL "ON")
+        set(SANITIZE_FLAGS
+            -fsanitize=address,leak,undefined,float-divide-by-zero
+            -fno-omit-frame-pointer
+        )
+        set(SANITIZE_MSVC_FLAGS /fsanitize=address)
+
+        # Temporary workaround for MSVC sanitizer issues
+        target_compile_definitions(tests${module_name} PUBLIC _DISABLE_VECTOR_ANNOTATION)
+    else()
+        set(SANITIZE_FLAGS "")
+        set(SANITIZE_MSVC_FLAGS "")
+    endif()
+
+    # Strict aliasing for better type safety
+    set(STRICT_ALIASING_FLAGS -fstrict-aliasing -Wstrict-aliasing=2)
+
+    # Compiler options for different toolchains
+    target_compile_options(tests${module_name} PUBLIC
+        $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
+        -fvisibility=hidden>  # Hide symbols by default
+    ) # -fvisibility=hidden required by Pybind11
+
+    # Common warning and error flags
+    target_compile_options(tests${module_name} PRIVATE
+        $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
+        -Wall -Wextra -Wold-style-cast -pedantic -Werror=narrowing -Wshadow
+        $<$<BOOL:${WERROR}>:-Werror>
+        ${SANITIZE_FLAGS}
+        >
+    )
+    # Strict aliasing for GCC
+    target_compile_options(tests${module_name} PRIVATE
+        $<$<CXX_COMPILER_ID:GNU>:${STRICT_ALIASING_FLAGS}>
+    )
 
-set(STRICT_ALIASING_FLAGS -fstrict-aliasing -Wstrict-aliasing=2)
-
-# -fvisibility=hidden required by pybind11
-target_compile_options(tests${module_name} PUBLIC
-    $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-    -fvisibility=hidden>)
-target_compile_options(tests${module_name} PRIVATE
-$<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
--Wall -Wextra -Wold-style-cast -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror> ${SANITIZE_FLAGS}>)
-target_compile_options(tests${module_name} PRIVATE
-$<$<CXX_COMPILER_ID:GNU>:${STRICT_ALIASING_FLAGS}>)
-target_compile_options(${module_name} PRIVATE
-$<$<CXX_COMPILER_ID:MSVC>:
-/W4 /DWIN32 /D_WINDOWS /GR /EHsc /MP /Zc:__cplusplus /Zc:preprocessor /permissive- ${SANITIZE_MSVC_FLAGS}>)
-if (DOSANITIZE STREQUAL "ON")
-  target_compile_options(${module_name} PRIVATE $<$<CXX_COMPILER_ID:MSVC>:/MDd>)
-endif()
-# TODO FIXME: I'm not sure it's a good idea to propagate this option but, at this point, it was the only way that worked to silence C4477
-target_compile_options(${module_name} PUBLIC $<$<CXX_COMPILER_ID:MSVC>: /wd4477>)
+    # MSVC-specific settings
+    target_compile_options(tests${module_name} PRIVATE
+        $<$<CXX_COMPILER_ID:MSVC>:
+        /W4 /DWIN32 /D_WINDOWS /GR /EHsc /MP
+        /Zc:__cplusplus /Zc:preprocessor /permissive-
+        ${SANITIZE_MSVC_FLAGS}
+        >
+    )
 
-target_link_options(tests${module_name} PUBLIC $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:${SANITIZE_FLAGS}>)
-#target_link_options(tests${module_name} PUBLIC $<$<CXX_COMPILER_ID:MSVC>:${SANITIZE_MSVC_FLAGS}>)
+    # Use the MSVC debug runtime library when building with the sanitizer
+    if(DOSANITIZE STREQUAL "ON")
+        target_compile_options(tests${module_name} PRIVATE $<$<CXX_COMPILER_ID:MSVC>:/MDd>)
+    endif()
 
+    # TODO FIXME: propagating this option is debatable, but it is currently
+    # the only way to silence warning C4477
+    target_compile_options(tests${module_name} PUBLIC $<$<CXX_COMPILER_ID:MSVC>: /wd4477>)
 endif()
 
-target_link_libraries(tests${module_name} PRIVATE ${module_name})
-
-target_link_libraries(tests${module_name} PRIVATE Catch2::Catch2WithMain)
+# Link libraries
+target_link_libraries(tests${module_name} PRIVATE
+    ${module_name}
+    Catch2::Catch2WithMain
+)
 
+# Setup testing
 list(APPEND CMAKE_MODULE_PATH ${catch2_SOURCE_DIR}/extras)
 include(CTest)
 include(Catch)
+
+# Discover and add tests
 catch_discover_tests(tests${module_name})
+
+# Set test configuration for CTest
+set(CTEST_CONFIGURATION_TYPE ${CMAKE_BUILD_TYPE})
\ No newline at end of file
diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
index 6c4b14602aed98ff5736d2cf30ba642f9e7ec57b..bfdc1a6b9c058b348942e9c29a77ac4d6db5086f 100644
--- a/unit_tests/data/Test_Tensor.cpp
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -267,7 +267,7 @@ TEST_CASE("[core/data] Tensor(getter/setter)", "[Tensor][Getter][Setter]") {
     //////////////
     // backend
         // getAvailableBackends()
-        REQUIRE(Tensor::getAvailableBackends() == std::set<std::string>({"cpu"}));
+        REQUIRE(Tensor::getAvailableBackends() == std::set<std::string>({"cpu", "dummy"}));
 
         // setBackend()
         REQUIRE_NOTHROW(T.setBackend("cpu", 0));
diff --git a/unit_tests/graphRegex/Test_Fsm.cpp b/unit_tests/graphRegex/Test_Fsm.cpp
deleted file mode 100644
index c011a50455e9e21f3df66c3ed46a835bed5346b3..0000000000000000000000000000000000000000
--- a/unit_tests/graphRegex/Test_Fsm.cpp
+++ /dev/null
@@ -1,195 +0,0 @@
-#include <memory>
-
-#include <catch2/catch_test_macros.hpp>
-
-#include "aidge/nodeTester/ConditionalInterpreter.hpp"
-
-#include "aidge/graphRegex/matchFsm/FsmNode.hpp"
-#include "aidge/graphRegex/matchFsm/FsmEdge.hpp"
-#include "aidge/graphRegex/matchFsm/FsmGraph.hpp"
-#include "aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp"
-
-using namespace Aidge;
-
-TEST_CASE("matchFSM", "FsmEdge") {
-
-   
-    std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
-    std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
-    std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("A","true==true");
-    FsmEdgeUnique EdgeToTest(nodeA,nodeB,toTest);
-
-    SECTION("FsmEdgeUnique constructor") {
-        REQUIRE(EdgeToTest.getSourceNode() == nodeA);
-        REQUIRE(EdgeToTest.getDestNode() == nodeB);
-        REQUIRE(EdgeToTest.isCommon() == false);
-    }
-    
-    SECTION("FsmEdgeCommon constructor") {
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("A","true==true");
-
-        FsmEdgeCommon EdgeToTest(nodeA,nodeB,toTest,"A");
-
-        REQUIRE(EdgeToTest.getSourceNode() == nodeA);
-        REQUIRE(EdgeToTest.getDestNode() == nodeB);
-        REQUIRE(EdgeToTest.isCommon() == true);
-    }
-
-    SECTION("FsmEdgeRef constructor") {
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("A","true==true");
-
-        FsmEdgeRef EdgeToTest(nodeA,nodeB,0,-1);
-
-        REQUIRE(EdgeToTest.getSourceNode() == nodeA);
-        REQUIRE(EdgeToTest.getDestNode() == nodeB);
-        REQUIRE(EdgeToTest.isCommon() == false);
-    }
-          
-    SECTION("FsmEdgeEmpty constructor") {
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("A","true==true");
-
-        FsmEdgeEmpty EdgeToTest(nodeA,nodeB);
-
-        REQUIRE(EdgeToTest.getSourceNode() == nodeA);
-        REQUIRE(EdgeToTest.getDestNode() == nodeB);
-        REQUIRE(EdgeToTest.isCommon() == false);
-    }
-
-
-    SECTION("FsmEdgeFactory"){
-
-    std::map<std::string, std::shared_ptr<ConditionalInterpreter>> allTest = {
-        {"A",std::make_shared<ConditionalInterpreter>("A","true==true")},
-        {"B",std::make_shared<ConditionalInterpreter>("B","true==true")},
-        {"C",std::make_shared<ConditionalInterpreter>("C","true==true")}
-    };
-
-// make(std::shared_ptr<FsmNode> source, std::shared_ptr<FsmNode> dest, 
-//     FsmEdgeTypes type,std::map<std::string, const std::shared_ptr<ConditionalInterpreter>> allTest,
-//     const std::string& lexeme = "");
-
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(true,false);
-//     EMPTY = 0,
-//     REF,
-//     COMMON,
-//     UNIQUE
-
-        std::shared_ptr<FsmEdge> edgeE = FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::EMPTY,allTest,"");
-        std::shared_ptr<FsmEdge> edgeU = FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::UNIQUE,allTest,"A");
-        std::shared_ptr<FsmEdge> edgeC = FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::COMMON,allTest,"A#");
-        std::shared_ptr<FsmEdge> edgeR = FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::REF,allTest,"(0,1)");
-
-        //test detection of bad syntax lexem
-        REQUIRE_THROWS(FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::EMPTY,allTest,"A"));
-        REQUIRE_THROWS(FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::UNIQUE,allTest,"A#"));
-        REQUIRE_THROWS(FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::COMMON,allTest,"A"));
-        REQUIRE_THROWS(FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::REF,allTest,"A"));
-
-        REQUIRE(edgeE->getSourceNode() == nodeA);
-        REQUIRE(edgeE->getDestNode() == nodeB);
-    }
-
-    SECTION("graph constructor") {
-        //make the nodes 
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,false);
-        std::shared_ptr<FsmNode>  nodeC = std::make_shared<FsmNode>(false,true);
-
-        //make the edges
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("A","true==true");
-        std::shared_ptr<FsmEdge> edgeAB =  std::make_shared<FsmEdgeUnique>(nodeA,nodeB,toTest);
-        std::shared_ptr<FsmEdge> edgeBC =  std::make_shared<FsmEdgeUnique>(nodeB,nodeC,toTest);
- 
-        std::shared_ptr<FsmGraph> graph =  std::make_shared<FsmGraph>("");
-
-        graph->addEdge(edgeAB);
-        graph->addEdge(edgeBC);
-        
-
-        REQUIRE(graph->getValidNodes() == std::set<std::shared_ptr<FsmNode>>{nodeA});
-        REQUIRE(graph->getStartNodes() == std::vector<std::shared_ptr<FsmNode>>{nodeC});
-    }
-
-
-    SECTION("graph merge") {
-
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("A","true==true");
-
-        //make the nodes 
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,false);
-        std::shared_ptr<FsmNode>  nodeC = std::make_shared<FsmNode>(true,false);
-
-        //make the edges
-        
-        std::shared_ptr<FsmEdge> edgeAB =  std::make_shared<FsmEdgeUnique>(nodeA,nodeB,toTest);
-        std::shared_ptr<FsmEdge> edgeBC =  std::make_shared<FsmEdgeUnique>(nodeB,nodeC,toTest);
- 
-        std::shared_ptr<FsmGraph> graph =  std::make_shared<FsmGraph>("");
-        graph->addEdge(edgeAB);
-        graph->addEdge(edgeBC);
-
-        REQUIRE(graph->getValidNodes() == std::set<std::shared_ptr<FsmNode>>{nodeC});
-        REQUIRE(graph->getStartNodes() == std::vector<std::shared_ptr<FsmNode>>{nodeA});
-        REQUIRE(graph->getNodes() == std::set<std::shared_ptr<FsmNode>>{nodeA,nodeB,nodeC});
-
-                //make the nodes 
-        std::shared_ptr<FsmNode>  node2A = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<FsmNode>  node2B = std::make_shared<FsmNode>(false,false);
-        std::shared_ptr<FsmNode>  node2C = std::make_shared<FsmNode>(true,false);
-
-
-        std::shared_ptr<FsmEdge> edge2AB =  std::make_shared<FsmEdgeUnique>(node2A,node2B,toTest);
-        std::shared_ptr<FsmEdge> edge2BC =  std::make_shared<FsmEdgeUnique>(node2B,node2C,toTest);
- 
-        std::shared_ptr<FsmGraph> graph2 =  std::make_shared<FsmGraph>("");
-
-
-        graph2->addEdge(edge2AB);
-        graph2->addEdge(edge2BC);
-
-        
-        REQUIRE(graph2->getValidNodes() == std::set<std::shared_ptr<FsmNode>>{node2C});
-        REQUIRE(graph2->getStartNodes() == std::vector<std::shared_ptr<FsmNode>>{node2A});
-        REQUIRE(graph2->getNodes() == std::set<std::shared_ptr<FsmNode>>{node2A,node2B,node2C});
-
-
-        graph->mergeOneStartOneValid(graph2);
-
-        REQUIRE(graph->getValidNodes() == std::set<std::shared_ptr<FsmNode>>{node2C});
-        REQUIRE(graph->getStartNodes() == std::vector<std::shared_ptr<FsmNode>>{nodeA});
-        REQUIRE(graph->getNodes() == std::set<std::shared_ptr<FsmNode>>{nodeA,nodeB,nodeC,node2B,node2C});
-    }
-
-
-
-
-}
-
-// TEST_CASE("matchFSM", "FsmGraph") {
-
-//     SECTION("FsmEdgeUnique constructor") {
-//         //make the nodes 
-//         std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
-//         std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
-
-//         //make the edges
-//         std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
-//         std::shared_ptr<FsmEdgeUnique> edge =  std::make_shared<FsmEdgeUnique>(nodeA,nodeB,toTest);
- 
-//         std::shared_ptr<FsmGraph> graph =  std::make_shared<FsmGraph>("");
-
-//         graph->addEdge(edge);
-        
-
-
-//     }
-
-// }
\ No newline at end of file
diff --git a/unit_tests/graphRegex/Test_FsmMatch.cpp b/unit_tests/graphRegex/Test_FsmMatch.cpp
deleted file mode 100644
index 6229ec62af96802ebdfe871e6058ab1791cf80fd..0000000000000000000000000000000000000000
--- a/unit_tests/graphRegex/Test_FsmMatch.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-
-#include <catch2/catch_test_macros.hpp>
-
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/operator/Conv.hpp"
-#include "aidge/operator/GenericOperator.hpp"
-#include "aidge/operator/Producer.hpp"
-
-
-#include "aidge/graphRegex/GraphFsmInterpreter.hpp"
-
-
-using namespace Aidge;
-TEST_CASE("FsmMatch") {
-
-    SECTION("Construction") {
-        std::vector<std::shared_ptr<ConditionalInterpreter>> allTest = {
-            std::make_shared<ConditionalInterpreter>("A","isConv($)==true"),
-            std::make_shared<ConditionalInterpreter>("B","isConv($)==true"),
-            std::make_shared<ConditionalInterpreter>("C","true==true")
-        };
-
-        allTest[0]->insertLambda("isConv",+[](NodePtr NodeOp){return NodeOp->type() == "Conv";});
-        allTest[1]->insertLambda("isConv",+[](NodePtr NodeOp){return NodeOp->type() == "Conv";});
-
-        std::shared_ptr<GraphFsmInterpreter>  fsmGenerator = std::make_shared<GraphFsmInterpreter>("A->A",allTest);
-        std::shared_ptr<FsmGraph> fsm = fsmGenerator->interpret();
-
-
-
-        //REQUIRE(fsm->getNodes().size() == 3);
-        //REQUIRE(fsm->getStartNodes().size() == 1);
-
-
-
-        std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
-
-        g1->add(conv);
-        g1->addChild(conv1, "c");
-
-
-        REQUIRE(allTest[0]->test(conv) == true);
-        REQUIRE(allTest[1]->test(conv) == true);
-
-        std::vector<std::shared_ptr<Node>> startNodes = {conv};
-
-        auto result = fsm->test(startNodes);
-
-        REQUIRE( result[0]->getAll() == std::set<NodePtr>{conv,conv1});
-    }
-
-
-    SECTION("2 branches graph"){
-
-        std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Fc", 1, 0, 1, "c2");
-
-        g1->add(conv);
-        g1->addChild(conv1,conv);
-        g1->addChild(conv2,conv);
-
-        REQUIRE(g1->getNodes() == std::set<std::shared_ptr<Node>>({conv,conv1,conv2}));
-        REQUIRE(g1->inputNodes() == std::set<std::shared_ptr<Node>>({conv}));
-        REQUIRE(g1->outputNodes() == std::set<std::shared_ptr<Node>>({conv1,conv2}));
-
-
-        /////////////
-
-        std::vector<std::shared_ptr<ConditionalInterpreter>> allTest = {
-            std::make_shared<ConditionalInterpreter>("A","isConv($)==true"),
-            std::make_shared<ConditionalInterpreter>("B","isFc($)==true")
-        };
-        allTest[0]->insertLambda("isConv",+[](NodePtr NodeOp){return NodeOp->type() == "Conv";});
-        allTest[1]->insertLambda("isFc",+[](NodePtr NodeOp){return NodeOp->type() == "Fc";});
-
-        std::shared_ptr<GraphFsmInterpreter>  fsmGenerator = std::make_shared<GraphFsmInterpreter>("A#->A; A#->B",allTest);
-        std::shared_ptr<FsmGraph> fsm = fsmGenerator->interpret();
-
-        std::vector<std::shared_ptr<Node>> startNodes = {conv,conv};
-        auto result = fsm->test(startNodes);
-
-        REQUIRE( result[0]->getAll() == std::set<NodePtr>{conv,conv1,conv2});
-
-    }
-
-}
diff --git a/unit_tests/graphRegex/Test_GraphFsmInterpreter.cpp b/unit_tests/graphRegex/Test_GraphFsmInterpreter.cpp
deleted file mode 100644
index e789677d44efa68071017a9832fa01b5ed340f75..0000000000000000000000000000000000000000
--- a/unit_tests/graphRegex/Test_GraphFsmInterpreter.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-
-#include <catch2/catch_test_macros.hpp>
-
-#include "aidge/graphRegex/GraphFsmInterpreter.hpp"
-
-
-using namespace Aidge;
-TEST_CASE("GraphFsmInterpreter", "GraphFsmInterpreter") {
-
-    SECTION("Construction") {
-        std::vector<std::shared_ptr<ConditionalInterpreter>> allTest = {
-            std::make_shared<ConditionalInterpreter>("A","true==true"),
-            std::make_shared<ConditionalInterpreter>("B","true==true"),
-            std::make_shared<ConditionalInterpreter>("C","true==true")
-        };
-
-        //GraphFsmInterpreter("A->B",allTest);
-        std::shared_ptr<GraphFsmInterpreter>  fsmGenerator = std::make_shared<GraphFsmInterpreter>("A->B",allTest);
-        std::shared_ptr<FsmGraph> fsm = fsmGenerator->interpret();
-
-        
-        REQUIRE(fsm->getNodes().size() == 3);
-        REQUIRE(fsm->getStartNodes().size() == 1);
-        REQUIRE(fsm->getEdge().size() == 2);
-
-        for(auto node : fsm->getNodes()){
-            if(node->isValid()){
-                REQUIRE(node->getEdges().size() == 0);
-            }else{
-                REQUIRE(node->getEdges().size() == 1);
-            }
-           
-        }
-
-        
-    }
-
-
-
-
-
-}
\ No newline at end of file
diff --git a/unit_tests/graphRegex/Test_GraphLexer.cpp b/unit_tests/graphRegex/Test_GraphLexer.cpp
deleted file mode 100644
index 615c052041e40c2941054b7ddcc19229c0994f0d..0000000000000000000000000000000000000000
--- a/unit_tests/graphRegex/Test_GraphLexer.cpp
+++ /dev/null
@@ -1,118 +0,0 @@
-#include <catch2/catch_test_macros.hpp>
-#include "aidge/graphRegex/GraphLexer.hpp"
-#include "aidge/graphRegex/GraphRegexTypes.hpp"
-
-#include "aidge/utilsParsing/ParsingToken.hpp"
-
-
-#include <iostream>
-#include <map>
-#include <functional>
-
-using namespace Aidge;
-
-// NEXT
-// QOM
-// QZM
-// KEY
-// CKEY
-// SEP
-// LPAREN
-// RPAREN
-
-TEST_CASE("GraphRegex", "Lexer") {
-    SECTION("RandomGenerateTest") {
-
-        std::map<gRegexTokenTypes, std::function<std::pair<std::string, std::string>()>> LexerTestMap{
-            {gRegexTokenTypes::NEXT,      +[](){return std::pair<std::string, std::string>("-> ","");}},
-            {gRegexTokenTypes::QOM,       +[](){return std::pair<std::string, std::string>("+ ","");}},
-            {gRegexTokenTypes::QZM,       +[](){return std::pair<std::string, std::string>("* ","");}},
-            {gRegexTokenTypes::SEP,      +[](){return std::pair<std::string, std::string>("; ","");}},
-
-
-
-            {gRegexTokenTypes::KEY,   +[](){
-                std::size_t keyLen = (std::rand() % 20)+1;
-                const std::string characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890_";
-                std::size_t randomIndex = std::rand() % characters.size();
-                std::string key;
-                for (std::size_t i = 0; i < keyLen; ++i) {
-                    key += characters[randomIndex];
-                    randomIndex = std::rand() % characters.size();
-                }
-
-                return std::pair<std::string, std::string>(key+" ",key);}
-            },
-
-           {gRegexTokenTypes::CKEY,   +[](){
-                std::size_t keyLen = (std::rand() % 20)+1;
-                const std::string characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890_";
-                const std::string num = "1234567890";
-
-                std::size_t randomIndex = std::rand() % characters.size();
-                std::size_t randomNum = std::rand() % num.size();
-                std::string key;
-                std::string idx;
-
-                for (std::size_t i = 0; i < keyLen; ++i) {
-                    key += characters[randomIndex];
-                    idx += num[randomNum];
-                    randomIndex = std::rand() % characters.size();
-                    randomNum = std::rand() % num.size();
-                }
-
-                return std::pair<std::string, std::string>(key+"#"+idx+" ",key+"#"+idx);}
-            },
-
-            {gRegexTokenTypes::LPAREN,   +[](){return std::pair<std::string, std::string>("( ","");}},
-            {gRegexTokenTypes::RPAREN,   +[](){return std::pair<std::string, std::string>(") ","");}}
-            //{gRegexTokenTypes::STOP,     +[](){return std::pair<std::string, std::string>("","");}}
-        };
-
-
-        //////////////////
-        //TEST GENERATOR
-        //////////////////
-        const std::size_t numRandomElements = 10000;
-        std::vector<std::tuple<gRegexTokenTypes, std::string>> testVector;
-
-        std::string testString;
-
-        for (std::size_t i = 0; i < numRandomElements; ++i) {
-
-            int randomIndex = std::rand() % LexerTestMap.size();
-            // Get an iterator to the random element in the map
-            auto it = std::next(LexerTestMap.begin(), randomIndex);
-            // Access the random key and lambda value separately using structured binding
-            gRegexTokenTypes randomKey = it->first;
-
-            std::function<std::pair<std::string, std::string>()> randomValue = it->second;
-            std::pair<std::string, std::string> result = randomValue();
-        
-            testString += result.first;
-            testVector.emplace_back(randomKey, result.second);
-
-       
-        }
-
-        GraphLexer graphLexer = GraphLexer(testString);
-
-        for (std::tuple<gRegexTokenTypes, std::string> testToken : testVector) {
-            gRegexTokenTypes tokenToFind = std::get<0>(testToken);
-            std::string lexemToFind = std::get<1>(testToken);
-            std::shared_ptr<ParsingToken<gRegexTokenTypes>> token = graphLexer.getNextToken();
-
-
-            std::ostringstream errorMessage;
-            errorMessage << "\n we want :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
-
-            CAPTURE(errorMessage.str());
-            REQUIRE(token->getLexeme() == lexemToFind);
-            REQUIRE(token->getType() == tokenToFind);
-        }
-        std::shared_ptr<ParsingToken<gRegexTokenTypes>> token = graphLexer.getNextToken();
-        REQUIRE(token->getType() == gRegexTokenTypes::STOP);
-    }
-
-
-}
\ No newline at end of file
diff --git a/unit_tests/graphRegex/Test_GraphParser.cpp b/unit_tests/graphRegex/Test_GraphParser.cpp
deleted file mode 100644
index 857caa06f4e5fa383e79ea22bfe1ca28ac0973c8..0000000000000000000000000000000000000000
--- a/unit_tests/graphRegex/Test_GraphParser.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-
-#include <catch2/catch_test_macros.hpp>
-#include "aidge/graphRegex/GraphParser.hpp"
-#include "aidge/utilsParsing/AstNode.hpp"
-#include <iostream>
-
-
-using namespace Aidge;
-
-    //generative function ,
-    std::string domain();
-    std::string exp() {
-        int randomValue = std::rand() % 3;
-        switch (randomValue) {
-        case 0:
-            return "A";
-        case 1 :
-            return "A#";
-        default:
-             return domain();
-
-        }
-    }
-
-    std::string seq() {
-        int randomValue = std::rand() % 2;
-        switch (randomValue) {
-        case 0:
-            return exp();
-        default:
-             return  exp()+"->"+seq();
-        }
-    }
-
-    std::string domain() {
-        int randomValue = std::rand() % 2;
-
-        switch (randomValue) {
-            // case 0:
-            //     return  seq();
-            // case 1:
-            //     return  seq() + "->" +domain();
-
-            case 0:
-                return  "("+ seq() +")*";
-            default:
-                return  "("+ seq() +")+";
-
-            // case 4:
-            //     return  "("+ domain() +")*" + "->" +domain();
-            // default:
-            //     return  "("+ domain() +")+" + "->" +domain();
-        }
-    }
-
-    std::string allExpr() {
-        int randomValue = std::rand() % 2;
-        switch (randomValue) {
-            case 0:
-                return  seq();
-            default :
-                return  seq()+ ";" +allExpr();
-        }
-    }
-
-/*
-exp : KEY(QOM | QZM)?  | CKEY | domain
-seq :exp (NEXT seq)* 
-domain :  LPAREN seq RPAREN (QOM | QZM) 
-allExpr: seq (SEP allExpr)*
-*/
-TEST_CASE("GraphParser", "Test_GraphParser") {
-
-    SECTION("Empty") {
-        for (int i = 0; i < 100; ++i) {
-            const std::string test = allExpr();
-            std::cout << test <<"\n";
-            GraphParser graphParser = GraphParser(test);
-            std::shared_ptr<AstNode<gRegexTokenTypes>> tree = graphParser.parse();
-        }
-    }
-}
\ No newline at end of file
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
deleted file mode 100644
index 79e471d44a49dfb52fd5eb4aa1ed2dc4ab8dc0bb..0000000000000000000000000000000000000000
--- a/unit_tests/graphRegex/Test_GraphRegex.cpp
+++ /dev/null
@@ -1,194 +0,0 @@
-
-#include <catch2/catch_test_macros.hpp>
-#include "aidge/graphRegex/GraphRegex.hpp"
-
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/operator/Add.hpp"
-#include "aidge/operator/FC.hpp"
-#include "aidge/operator/MatMul.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/recipes/Recipes.hpp"
-
-#include "aidge/operator/Conv.hpp"
-#include "aidge/operator/GenericOperator.hpp"
-
-using namespace Aidge;
-
-TEST_CASE("GraphRegexUser") {
-
-
-    SECTION("Match using custom lambda") {
-
-        std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
-        std::shared_ptr<Node> fc = GenericOperator("FC", 1, 0, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
-        std::shared_ptr<Node> fc2 = GenericOperator("FC", 1, 0, 1, "c3");
-
-        g1->add(conv);
-        g1->addChild(fc, "c");
-        g1->addChild(conv2, "c1");
-        g1->addChild(fc2, "c2");
-
-        ///
-        std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
-        sut->setNodeKey("C",+[](NodePtr NodeOp){return NodeOp->type() == "FC";});
-
-        sut->setNodeKey("A","C($)==True");
-        sut->addQuery("A");
-        auto match = sut->match(g1);
-        REQUIRE(match.size() == 2);
-
-    }
-
-
-    SECTION("INIT") {
-
-        const std::string query = "Conv->FC";
-
-        std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
-
-        std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
-        std::shared_ptr<Node> fc = GenericOperator("FC", 1, 0, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
-        std::shared_ptr<Node> fc2 = GenericOperator("FC", 1, 0, 1, "c3");
-
-        g1->add(conv);
-        g1->addChild(fc, "c");
-        g1->addChild(conv2, "c1");
-        g1->addChild(fc2, "c2");
-
-
-        sut->setKeyFromGraph(g1);
-        sut->addQuery(query);
-
-        for (const auto& solution : sut->match(g1)) {
-
-            REQUIRE(solution->getQuery() == query);
-            if(solution->getStartNode() == std::vector<NodePtr>{conv}){
-                REQUIRE(solution->at("Conv") == std::set<NodePtr>{conv} );
-                REQUIRE(solution->at("FC") == std::set<NodePtr>{fc} );
-            }else if (solution->getStartNode() == std::vector<NodePtr>{conv2})
-            {
-                REQUIRE(solution->at("Conv") == std::set<NodePtr>{conv2} );
-                REQUIRE(solution->at("FC") == std::set<NodePtr>{fc2} );
-            }
-        }
-        //REQUIRE( sut->match(g1)[1]->getAll() == std::set<NodePtr>{conv,fc});
-
-    }
-
-   SECTION("2 query") {
-        std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
-
-        std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
-        std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 0, 1, "c3");
-
-        g1->add(conv);
-        g1->addChild(conv1, "c");
-        g1->addChild(conv2, "c1");
-        g1->addChild(conv3, "c2");
-
-
-        sut->setKeyFromGraph(g1);
-
-        const std::string query = "Conv->Conv";
-        const std::string query2 = "Conv->FC";
-
-        sut->setNodeKey("FC","getType($) =='FC'");
-
-        sut->addQuery(query);
-        sut->addQuery(query2);
-
-
-        for (const auto& solution : sut->match(g1)) {
-            REQUIRE(solution->getQuery() == query);
-        }
-
-    }
-
-
-   SECTION("Not define node Test") {
-
-        //test if the FC is not define only match query not query2
-        std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
-
-        std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
-        std::shared_ptr<Node> conv3 = GenericOperator("FC", 1, 0, 1, "c3");
-
-        g1->add(conv);
-        g1->addChild(conv1, "c");
-        g1->addChild(conv2, "c1");
-        g1->addChild(conv3, "c2");
-
-
-        //sut->setKeyFromGraph(g1);
-
-        const std::string query = "Conv->Conv";
-        const std::string query2 = "Conv->FC";
-
-        sut->setNodeKey("Conv","getType($) =='Conv'");
-
-        sut->addQuery(query);
-        sut->addQuery(query2);
-
-
-        for (const auto& solution : sut->match(g1)) {
-            REQUIRE(solution->getQuery() == query);
-        }
-
-    }
-
-
-    SECTION("Applied Recipes"){
-
-      // generate the original GraphView
-        auto matmul0 = MatMul("matmul0");
-        auto add0 = Add("add0");
-        auto matmul1 = MatMul("matmul1");
-        auto add1 = Add("add1");
-
-        auto b0 = Producer({5}, "B0");
-        auto w0 = Producer({5, 5}, "W0");
-        auto b1 = Producer({5}, "B1");
-        auto w1 = Producer({5,5},"W1");
-        auto input = Producer({2,5}, "input");
-
-        input->addChild(matmul0, 0, 1);
-        w0->addChild(matmul0, 0, 0);
-
-        matmul0->addChild(add0, 0, 0);
-        b0->addChild(add0, 0, 1);
-
-        add0->addChild(matmul1, 0, 1);
-        w1->addChild(matmul1, 0, 0);
-
-        matmul1->addChild(add1, 0, 0);
-        b1->addChild(add1, 0, 1);
-
-        auto fc = GenericOperator("FC", 1, 0, 1, "fc1");
-        auto fl = GenericOperator("Flatten", 1, 0, 1, "flatten0");
-        add1->addChild(fl, 0, 0);
-        fl->addChild(fc, 0, 0);
-        auto g = std::make_shared<GraphView>();
-        g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1, fl, fc});
-
-        matMulToFC(g);
-        removeFlatten(g);
-        std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
-        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1,fl,fc}));
-	    //REQUIRE(newNodes.size() == 6);
-
-
-    }
-
-}
diff --git a/unit_tests/graphRegex/Test_examples.cpp b/unit_tests/graphRegex/Test_examples.cpp
deleted file mode 100644
index 0ccc05b5a957673167a16643b22bf047fc80f43f..0000000000000000000000000000000000000000
--- a/unit_tests/graphRegex/Test_examples.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <catch2/catch_test_macros.hpp>
-#include <set>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/graph/OpArgs.hpp"
-#include "aidge/operator/ReLU.hpp"
-#include "aidge/operator/MetaOperatorDefs.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/graphRegex/GraphRegex.hpp"
-#include "aidge/recipes/Recipes.hpp"
-
-namespace Aidge {
-
-
-TEST_CASE("Examples", "[GraphMatching]") {
-    auto g1 = Sequential({
-        Producer({16, 3, 512, 512}, "dataProvider"),
-        Conv(3, 4, {5, 5}, "conv1"),
-        ReLU(),
-        PaddedConv(4, 8, {5, 5}, "conv2", {1, 1}, {2, 2, 2, 2}),
-        ReLU(),
-        PaddedConv(8, 16, {5, 5}, "conv3", {1, 1}, {2, 2, 2, 2}),
-        ReLU()
-    });
-
-    expandMetaOps(g1);
-    g1->save("Test_examples");
-
-    auto regex = std::make_shared<GraphRegex>();
-    regex->setKeyFromGraph(g1);
-    regex->addQuery("Pad2D->Conv2D->ReLU");
-    // Won't work, wrong number of matches:
-    //regex->addQuery("Pad*->Conv->ReLU*");
-
-    const auto match = regex->match(g1);
-    REQUIRE(match.size() == 2);
-
-    for (const auto& solution : match) {
-        REQUIRE(solution->getAll().size() == 3);
-    }
-}
-
-}  // namespace Aidge
diff --git a/unit_tests/graphRegex/Test_graphRegexAST.cpp b/unit_tests/graphRegex/Test_graphRegexAST.cpp
deleted file mode 100644
index f9c7a7c5dc8ab103b3b566f2df77883b0b1966f1..0000000000000000000000000000000000000000
--- a/unit_tests/graphRegex/Test_graphRegexAST.cpp
+++ /dev/null
@@ -1,71 +0,0 @@
-#include <catch2/catch_test_macros.hpp>
-#include "aidge/graphRegex/GraphStrInterpreter.hpp"
-
-
-using namespace Aidge;
-TEST_CASE("GraphStrInterpreter") {
-
-
-
-    std::vector<std::string> tests = {
-           
-
-            //sequ
-            "A;",
-            "A->B",
-            "A->B->C",
-            //seq and common
-            "A#",
-            "A#->B",
-            "A#->B#",
-            "A#->B#->C",
-            "A#->B#->C#",
-            "A->B#->C",
-            //sequ quantif +
-            "A+",
-            "A+->B+",
-            "A->B+->C",
-            //sequ quantif *
-            "A*",
-            "A*->B*",
-            "A->B*->C",
-
-            //sequ quantif 
-            "A*",
-            "A*->B+",
-            "A+->B*->C",
-            //others
-
-            "(A#->B->C#)+",
-            "(A#->B)+;A#->B->C",
-            "B+->B->B",
-            "B#->R*",
-            "(B#->R)*",
-            "A->C->B#->B;B#->R",
-            "B#->R",
-            "A->C#;A->C#;A->C#;A->C#;A->C#;A->C#",
-            "B#->R;B#->R",
-            "A# -> C -> B#; B#->A#",
-            
-        // Add more test cases here
-    };
-
-    SECTION("AST Regex bijection") {
-
-        for (const std::string& test : tests) {
-            std::shared_ptr<GraphStrInterpreter>  strGenerator = std::make_shared<GraphStrInterpreter>(test);
-            std::string astString = strGenerator->interpret();
-            //suppress space in the test because erase in the AST
-            std::string testNoS = test;
-            testNoS.erase(std::remove_if(testNoS.begin(), testNoS.end(), ::isspace), testNoS.end());
-            //if the last char is ; (SEP) it will not in the AST and it's not a bug erase it
-            if (!testNoS.empty() && testNoS.back() == ';') {
-                // Remove the last character
-                testNoS.pop_back();
-            }
-            //test
-            REQUIRE(astString == testNoS);
-        }
-
-    }
-}
\ No newline at end of file
diff --git a/unit_tests/nodeTester/Test_ConditionalInterpreter.cpp b/unit_tests/nodeTester/Test_ConditionalInterpreter.cpp
deleted file mode 100644
index ec068358a34567e57c417a664284bd1db76d7a69..0000000000000000000000000000000000000000
--- a/unit_tests/nodeTester/Test_ConditionalInterpreter.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-
-#include <catch2/catch_test_macros.hpp>
-#include "aidge/nodeTester/ConditionalInterpreter.hpp"
-#include "aidge/operator/GenericOperator.hpp"
-
-
-using namespace Aidge;
-
-
-
-TEST_CASE("ConditionalInterpreter", "ConditionalInterpreter") {
-
-    SECTION("custom Lambda") {
-
-        
-        ConditionalInterpreter conditionalParserB = ConditionalInterpreter("A"," bad($) == false ");
-        ConditionalInterpreter conditionalParserG = ConditionalInterpreter("A"," good($) == true ");
-
-
-        conditionalParserB.insertLambda("bad",+[](NodePtr NodeOp){return NodeOp->name() == "ZZ";});
-        conditionalParserG.insertLambda("good",+[](NodePtr NodeOp){return NodeOp->name() == "Gop1";});
-        std::shared_ptr<Node> nodeOp = GenericOperator("conv", 0, 0, 0, "Gop1");
-
-        REQUIRE(conditionalParserB.test(nodeOp) == true);
-        REQUIRE(conditionalParserG.test(nodeOp) == true);
-    }
-    
-
-     ConditionalInterpreter conditionalParserT = ConditionalInterpreter("A","isConv($)==true");
-     conditionalParserT.insertLambda("isConv",+[](NodePtr NodeOp){return NodeOp->type() == "Conv";});
-     std::shared_ptr<Node> zz = GenericOperator("conv", 0, 0, 0, "Gop1");
-     conditionalParserT.test(zz);
-
-    SECTION("Lambdas") {
-        ConditionalInterpreter conditionalParser = ConditionalInterpreter("OP_test","getType($) =='Conv' || getType($) =='FC' ");
-
-        std::shared_ptr<Node> A = GenericOperator("Conv", 0, 0, 0, "A");
-        REQUIRE(conditionalParser.test(A) == true);
-
-        std::shared_ptr<Node> B = GenericOperator("FC", 0, 0, 0, "B");
-        REQUIRE(conditionalParser.test(B) == true);
-
-
-        std::shared_ptr<Node> C = GenericOperator("A", 0, 0, 0, "C");
-        conditionalParser.test(C);
-        REQUIRE(conditionalParser.test(C) == false);
-    }
-
-    SECTION("syntax error") {
-
-        const std::string test = "'A' == 'A' ,&& ";
-        REQUIRE_THROWS_AS( ConditionalInterpreter("A",test), std::runtime_error);
-  
-    }
-
-
-    SECTION("test false int ") {
-
-        const std::string test = " 10 == 11 " ;
-        ConditionalInterpreter conditionalParser = ConditionalInterpreter("A",test);
-        std::shared_ptr<Node> nodeOp = GenericOperator("conv", 0, 0, 0, "Gop1");
-        bool result = conditionalParser.test(nodeOp);
-        REQUIRE(result == false);
-    }
-
-    SECTION("test true int ") {
-        const std::string test = " 42 == 42 " ;
-        ConditionalInterpreter conditionalParser = ConditionalInterpreter("A",test);
-        std::shared_ptr<Node> nodeOp = GenericOperator("conv", 0, 0, 0, "Gop1");
-        bool result = conditionalParser.test(nodeOp);
-        REQUIRE(result == true);
-    }
-    
-    SECTION("test false str ") {
-        const std::string test = " 'toto' == 'Corgi' " ;
-        ConditionalInterpreter conditionalParser = ConditionalInterpreter("A",test);
-        std::shared_ptr<Node> nodeOp = GenericOperator("conv", 0, 0, 0, "Gop1");
-        bool result = conditionalParser.test(nodeOp);
-        REQUIRE(result == false);
-    }
-
-    SECTION("test true str ") {
-
-        const std::string test = " 'Corgi' == 'Corgi' " ;
-        ConditionalInterpreter conditionalParser = ConditionalInterpreter("A",test);
-        std::shared_ptr<Node> nodeOp = GenericOperator("conv", 0, 0, 0, "Gop1");
-        bool result = conditionalParser.test(nodeOp);
-        REQUIRE(result == true);
-    }
-
-}
\ No newline at end of file
diff --git a/unit_tests/nodeTester/Test_ConditionalLexer.cpp b/unit_tests/nodeTester/Test_ConditionalLexer.cpp
deleted file mode 100644
index d79824e2e53d0c9621fdc6847ebca00faba03af4..0000000000000000000000000000000000000000
--- a/unit_tests/nodeTester/Test_ConditionalLexer.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-#include <catch2/catch_test_macros.hpp>
-#include "aidge/nodeTester/ConditionalLexer.hpp"
-#include "aidge/utilsParsing/ParsingToken.hpp"
-
-#include <iostream>
-#include <map>
-#include <functional>
-
-using namespace Aidge;
-
-TEST_CASE("nodeTester", "Lexer") {
-    SECTION("RandomGenerateTest") {
-
-        std::map<ConditionalTokenTypes, std::function<std::pair<std::string, std::string>()>> LexerTestMap{
-            {ConditionalTokenTypes::AND,      +[](){return std::pair<std::string, std::string>("&& ","");}},
-            {ConditionalTokenTypes::OR,       +[](){return std::pair<std::string, std::string>("|| ","");}},
-            {ConditionalTokenTypes::EQ,       +[](){return std::pair<std::string, std::string>("== ","");}},
-            {ConditionalTokenTypes::NEQ,      +[](){return std::pair<std::string, std::string>("!= ","");}},
-
-            {ConditionalTokenTypes::KEY,      +[](){return std::pair<std::string, std::string>("A ","A");}},
-
-            {ConditionalTokenTypes::BOOL,     +[](){
-                std::size_t keyLen = (std::rand() % 2);
-                const std::vector<std::string> characters = {"true","false"};
-          
-                return std::pair<std::string, std::string>(characters[keyLen]+" ",characters[keyLen]);}
-            },
-
-            {ConditionalTokenTypes::INTEGER,  +[](){
-                std::size_t keyLen = (std::rand() % 20)+1;
-                const std::string characters = "1234567890";
-                std::size_t randomIndex = std::rand() % characters.size();
-                std::string key;
-                for (std::size_t i = 0; i < keyLen; ++i) {
-                    key += characters[randomIndex];
-                    randomIndex = std::rand() % characters.size();
-                }
-                return std::pair<std::string, std::string>(key+" ",key);}
-            },
-
-            {ConditionalTokenTypes::FLOAT,    +[](){
-                std::size_t keyLen = (std::rand() % 20)+2;
-                const std::string characters = "1234567890";
-                std::size_t randomIndex = std::rand() % characters.size();
-                std::string key;
-                for (std::size_t i = 0; i < keyLen/2; ++i) {
-                    key += characters[randomIndex];
-                    randomIndex = std::rand() % characters.size();
-                }
-                key += ".";
-                for (std::size_t i = 0; i < keyLen/2; ++i) {
-                    key += characters[randomIndex];
-                    randomIndex = std::rand() % characters.size();
-                }
-                return std::pair<std::string, std::string>(key+" ",key);}
-            },
-
-            {ConditionalTokenTypes::STRING,   +[](){
-                std::size_t keyLen = (std::rand() % 20)+1;
-                const std::string characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890 ";
-                std::size_t randomIndex = std::rand() % characters.size();
-                std::string key;
-                for (std::size_t i = 0; i < keyLen; ++i) {
-                    key += characters[randomIndex];
-                    randomIndex = std::rand() % characters.size();
-                }
-
-                return std::pair<std::string, std::string>("'"+key+"' ",key);}
-            },
-
-            {ConditionalTokenTypes::LAMBDA,   +[](){
-
-                std::size_t keyLen = (std::rand() % 20)+1;
-                const std::string characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";
-                const std::string Startchar = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
-
-                std::size_t randomIndex = std::rand() % characters.size();
-                std::size_t randomStartIndex = std::rand() % Startchar.size();
-
-                std::string key;
-                key += Startchar[randomStartIndex];
-
-                for (std::size_t i = 0; i < keyLen; ++i) {
-                    key += characters[randomIndex];
-                    randomIndex = std::rand() % characters.size();
-                }
-
-                return std::pair<std::string, std::string>(key+"( ",key);}
-            },
-
-            {ConditionalTokenTypes::ARGSEP,   +[](){return std::pair<std::string, std::string>(", ","");}},
-            {ConditionalTokenTypes::NODE,     +[](){return std::pair<std::string, std::string>("$ ","");}},
-            {ConditionalTokenTypes::LPAREN,   +[](){return std::pair<std::string, std::string>("( ","");}},
-            {ConditionalTokenTypes::RPAREN,   +[](){return std::pair<std::string, std::string>(") ","");}}
-            //{ConditionalTokenTypes::STOP,     +[](){return std::pair<std::string, std::string>("","");}}
-        };
-
-
-        //////////////////
-        //TEST GENERATOR
-        //////////////////
-        const std::size_t numRandomElements = 100;
-        std::vector<std::tuple<ConditionalTokenTypes, std::string>> testVector;
-
-        std::string testString;
-
-        for (std::size_t i = 0; i < numRandomElements; ++i) {
-
-            int randomIndex = std::rand() % LexerTestMap.size();
-            // Get an iterator to the random element in the map
-            auto it = std::next(LexerTestMap.begin(), randomIndex);
-            // Access the random key and lambda value separately using structured binding
-            ConditionalTokenTypes randomKey = it->first;
-
-            std::function<std::pair<std::string, std::string>()> randomValue = it->second;
-            std::pair<std::string, std::string> result = randomValue();
-        
-            testString += result.first;
-            testVector.emplace_back(randomKey, result.second);
-
-       
-        }
-
-        ConditionalLexer conditionalLexer = ConditionalLexer(testString);
-
-        for (std::tuple<ConditionalTokenTypes, std::string> testToken : testVector) {
-            ConditionalTokenTypes tokenToFind = std::get<0>(testToken);
-            std::string lexemToFind = std::get<1>(testToken);
-            std::shared_ptr<ParsingToken<ConditionalTokenTypes>> token = conditionalLexer.getNextToken();
-
-
-            std::ostringstream errorMessage;
-            errorMessage << "\n we want :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
-
-            CAPTURE(errorMessage.str());
-            REQUIRE(token->getLexeme() == lexemToFind);
-            REQUIRE(token->getType() == tokenToFind);
-        }
-        std::shared_ptr<ParsingToken<ConditionalTokenTypes>> token = conditionalLexer.getNextToken();
-        REQUIRE(token->getType() == ConditionalTokenTypes::STOP);
-    }
-
-
-}
\ No newline at end of file
diff --git a/unit_tests/nodeTester/Test_ConditionalParser.cpp b/unit_tests/nodeTester/Test_ConditionalParser.cpp
deleted file mode 100644
index 56adb92b41745001e1790e087f07369918794c5d..0000000000000000000000000000000000000000
--- a/unit_tests/nodeTester/Test_ConditionalParser.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-
-#include <catch2/catch_test_macros.hpp>
-#include "aidge/nodeTester/ConditionalParser.hpp"
-#include "aidge/utilsParsing/AstNode.hpp"
-
-using namespace Aidge;
-
-    std::string gVal() {
-        int randomValue = std::rand() % 5;
-        switch (randomValue) {
-        case 0:
-            return std::to_string(std::rand() % 101);
-
-        case 1:
-            return std::to_string(std::rand() % 101)+"."+std::to_string(std::rand() % 101);
-
-        case 2:
-             return " 'toto' ";
-        case 3:
-             return " A ";
-
-        case 4:
-             return " A(10) ";
-
-        default:
-             return " true ";
-
-        }
-    }
-
-    std::string gExpr() ;
-    std::string gCmpr() {
-        int randomValue = std::rand() % 3;
-        switch (randomValue) {
-            case 0:
-                return  gVal() + " == " +gVal();
-            case 1:
-                return  "("+ gExpr() +")";
-            default:
-                return  gVal() + " != " +gVal();
-
-        }
-
-
-        return gVal() + " == " +gVal();
-    }
-
-    std::string gExpr() {
-        std::string out = gCmpr();
-        int iterations = std::rand() % 100;
-        for (int i = 0; i < iterations; ++i) {
-            int randomValue = std::rand() % 2;
-            switch (randomValue) {
-                case 0:
-                    return  out +" && " + gCmpr();
-                    break;
-                default:
-                    return  out +" || " + gCmpr();
-                    break;
-            }
-        }
-        return out;
-    }
-
-
-TEST_CASE("ConditionalParser", "ConditionalParser") {
-
-    SECTION("Empty") {
-        for (int i = 0; i < 100; ++i) {
-            const std::string test = gExpr();
-            ConditionalParser conditionalParser = ConditionalParser(test);
-            std::shared_ptr<AstNode<ConditionalTokenTypes>> tree = conditionalParser.parse();
-        }
-    }
-}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_ConcatImpl.cpp b/unit_tests/operator/Test_ConcatImpl.cpp
index fcdf3e8cc1bc07493cfa84608f200f9f334a29cc..677f78e54f850001ab648bfd03c3415b212ba3f2 100644
--- a/unit_tests/operator/Test_ConcatImpl.cpp
+++ b/unit_tests/operator/Test_ConcatImpl.cpp
@@ -33,22 +33,27 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
         std::shared_ptr<Tensor> input4 = std::make_shared<Tensor>(Array1D<int,5>{{ 11, 12, 13, 14, 15 }});
         std::shared_ptr<Tensor> input5 = std::make_shared<Tensor>(Array1D<int,6>{{ 16, 17, 18, 19, 20, 21 }});
 
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,20>{
-            { 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20,21 }});
-
-        auto myConcat = Concat(5, 0);
-        myConcat->getOperator()->associateInput(0, input1);
-        myConcat->getOperator()->associateInput(1, input2);
-        myConcat->getOperator()->associateInput(2, input3);
-        myConcat->getOperator()->associateInput(3, input4);
-        myConcat->getOperator()->associateInput(4, input5);
-        myConcat->getOperator()->setBackend("cpu");
-        myConcat->getOperator()->setDataType(DataType::Int32);
-        myConcat->forward();
-
-        std::static_pointer_cast<Tensor>(myConcat->getOperator()->getRawOutput(0))->print();
-
-        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
+        Tensor expectedOutput = Array1D<int,20>{
+            { 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20,21 }};
+
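+        // Concat_Op(5, 0): concatenate the 5 one-dimensional inputs along axis 0.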
+        std::shared_ptr<Concat_Op> op = std::make_shared<Concat_Op>(5,0);
+        op->associateInput(0, input1);
+        op->associateInput(1, input2);
+        op->associateInput(2, input3);
+        op->associateInput(3, input4);
+        op->associateInput(4, input5);
+        op->setBackend("cpu");
+        op->setDataType(DataType::Int32);
+        fmt::print("{}\n", *(op->getInput(0)));
+        fmt::print("{}\n", *(op->getInput(1)));
+        fmt::print("{}\n", *(op->getInput(2)));
+        fmt::print("{}\n", *(op->getInput(3)));
+        fmt::print("{}\n", *(op->getInput(4)));
+        op->forward();
+
+        fmt::print("res: {}\n", *(op->getOutput(0)));
+
+        REQUIRE(*(op->getOutput(0)) == expectedOutput);
     }
     SECTION("Concat 4D inputs on 1st axis") {
         std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
@@ -75,7 +80,7 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
             }                                       //
         });                                         //
 
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
+        Tensor expectedOutput = Array4D<int,3,3,3,2> {
             {                                       //
                 {                                   //
                     {{20, 47},{21, 48},{22, 49}},   //
@@ -93,18 +98,19 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
                     {{44, 71},{45, 72},{46, 73}}    //
                 }                                   //
             }                                       //
-        });                                         //
+        };                                         //
 
         auto myConcat = Concat(2, 0);
-        myConcat->getOperator()->associateInput(0, input1);
-        myConcat->getOperator()->associateInput(1, input2);
-        myConcat->getOperator()->setBackend("cpu");
-        myConcat->getOperator()->setDataType(DataType::Int32);
+        std::shared_ptr<Concat_Op> op = std::static_pointer_cast<Concat_Op>(myConcat->getOperator());
+        op->associateInput(0, input1);
+        op->associateInput(1, input2);
+        op->setBackend("cpu");
+        op->setDataType(DataType::Int32);
         myConcat->forward();
 
-        std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0)->print();
+        fmt::print("res: {}\n", *(op->getOutput(0)));
 
-        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == expectedOutput);
     }
 
     SECTION("Concat 4D inputs on 3rd axis") {
@@ -127,7 +133,7 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
             }
         });
 
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,1,3,9,2> {
+        Tensor expectedOutput = Array4D<int,1,3,9,2> {
             {                                                                                             //
                 {                                                                                         //
                     {{20, 47},{21, 48},{22, 49},{29, 56},{30, 57},{31, 58},{38, 65},{39, 66},{40, 67}},   //
@@ -135,17 +141,18 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
                     {{26, 53},{27, 54},{28, 55},{35, 62},{36, 63},{37, 64},{44, 71},{45, 72},{46, 73}}    //
                 },                                                                                        //
             }                                                                                             //
-        });                                                                                               //
+        };                                                                                               //
 
         auto myConcat = Concat(2, 2);
-        myConcat->getOperator()->associateInput(0, input1);
-        myConcat->getOperator()->associateInput(1, input2);
-        myConcat->getOperator()->setBackend("cpu");
-        myConcat->getOperator()->setDataType(DataType::Int32);
+        std::shared_ptr<Concat_Op> op = std::static_pointer_cast<Concat_Op>(myConcat->getOperator());
+        op->associateInput(0, input1);
+        op->associateInput(1, input2);
+        op->setBackend("cpu");
+        op->setDataType(DataType::Int32);
         myConcat->forward();
 
         std::static_pointer_cast<Tensor>(myConcat->getOperator()->getRawOutput(0))->print();
 
-        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == expectedOutput);
     }
 }
diff --git a/unit_tests/operator/Test_Expand.cpp b/unit_tests/operator/Test_Expand.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5758c6dbf48e6975ddc86cafa53e16dea1ef95cc
--- /dev/null
+++ b/unit_tests/operator/Test_Expand.cpp
@@ -0,0 +1,157 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <cstddef> // std::size_t
+#include <memory>
+#include <random>  // std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Expand.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+////////////////////////////////////////////////////////////////////////////////////////
+//                                  DISCLAIMER
+// Since this is a broadcastable operator, most of these tests have been
+// adapted from the Mul tests
+////////////////////////////////////////////////////////////////////////////////////////
+
+TEST_CASE("[core/operator] Expand_Op(forwardDims)", "[Expand][forwardDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    auto rd = Catch::Generators::Detail::getSeed;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Expand Operator
+    std::shared_ptr<Node> expand = Expand("expand_node");
+    auto op = std::static_pointer_cast<Expand_Op>(expand->getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> dataTensor = std::make_shared<Tensor>();
+    op->associateInput(0, dataTensor);
+    // input_1
+    std::shared_ptr<Tensor> shapeTensor = std::make_shared<Tensor>();
+    op->associateInput(1, shapeTensor);
+
+    shapeTensor->setDataType(DataType::Int64);
+    shapeTensor->setBackend("cpu");
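+
+    // Input 0 carries the data to broadcast; input 1 is a 1-D Int64 shape
+    // tensor. forwardDims() broadcasts the data dims against that shape.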
+
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::forwardDims()``
+     */
+    SECTION("Scalar / Scalar") {
+        // input_0
+        dataTensor->resize({});
+
+        // input_1
+        shapeTensor->resize({});
+
+        REQUIRE_THROWS(op->forwardDims(true));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        dataTensor->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // input_1
+            const std::size_t nbDims = nbDimsDist(gen);
+            shapeTensor->resize({nbDims});
+            std::vector<std::size_t> expandShape(nbDims);
+            for (std::size_t i = 0; i < nbDims; ++i) {
+                expandShape[i] = dimsDist(gen);
+            }
+            shapeTensor->getImpl()->setRawPtr(expandShape.data(),
+                                              expandShape.size());
+
+            REQUIRE_NOTHROW(op->forwardDims(true));
+            REQUIRE((op->getOutput(0)->dims()) == expandShape);
+        }
+    }
+    SECTION("Failure when shape to expand tensor has not 1 dimension.") {
+
+        // input_0
+        const std::size_t nbDims = nbDimsDist(gen);
+        std::vector<std::size_t> dims(nbDims);
+        for (std::size_t i = 0; i < nbDims; ++i) {
+            dims[i] = dimsDist(gen);
+        }
+        dataTensor->resize(dims);
+
+        // input_1
+        shapeTensor->resize({});
+
+        REQUIRE_THROWS(op->forwardDims(true));
+    }
+    SECTION("+1-D / +1-D") {
+        // same size
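+        // When the requested shape equals the data dims, the output keeps those dims.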
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nbDims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nbDims);
+            for (std::size_t i = 0; i < nbDims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            dataTensor->resize(dims0);
+            shapeTensor->resize({dims0.size()});
+            shapeTensor->getImpl()->setRawPtr(dims0.data(), dims0.size());
+            REQUIRE_NOTHROW(op->forwardDims(true));
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+
+        // broadcast
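+        // Some requested dims are randomly set to 1 and leading dims may be
+        // dropped; the expanded output must still match the full data dims.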
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nbDims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nbDims);
+            for (std::size_t i = 0; i < nbDims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nbDims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(
+                dims1.cbegin(),
+                dims1.cbegin() + std::min(nbDimsDist(gen), nbDims - 1));
+
+            dataTensor->resize(dims0);
+            shapeTensor->resize({dims1.size()});
+            shapeTensor->getImpl()->setRawPtr(dims1.data(), dims1.size());
+
+            REQUIRE_NOTHROW(op->forwardDims(true));
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+
+            // input_1 - wrong:
+            // every requested dim is made incompatible with the data dims
+            std::vector<std::size_t> dims1Wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1Wrong[i];
+            }
+            shapeTensor->resize({dims1Wrong.size()});
+            shapeTensor->getImpl()->setRawPtr(dims1Wrong.data(),
+                                              dims1Wrong.size());
+            REQUIRE(dims0 != dims1Wrong);
+            REQUIRE_THROWS(op->forwardDims(true));
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_FlattenImpl.cpp b/unit_tests/operator/Test_FlattenImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1990c0b61645722526c59c5e7a136b33e8d87b7a
--- /dev/null
+++ b/unit_tests/operator/Test_FlattenImpl.cpp
@@ -0,0 +1,98 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Flatten.hpp"
+
+#include <memory>
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Flatten(forward)") {
+    std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int32_t,1,2,3,5> {
+        {
+            {
+                {
+                    { 1,  2,  3,  4,  5},
+                    { 6,  7,  8,  9, 10},
+                    {11, 12, 13, 14, 15}
+                },
+                {
+                    {16, 17, 18, 19, 20},
+                    {21, 22, 23, 24, 25},
+                    {26, 27, 28, 29, 30}
+                }
+            }
+        }
+    });
+
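+    // Flatten(axis) reshapes the input into a 2-D tensor of dims
+    // {prod(dims[:axis]), prod(dims[axis:])}: for this {1,2,3,5} input,
+    // axis 1 (default) gives {1,30}, axis 2 gives {2,15}, axis 4 gives {30,1}.
+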
+    SECTION("Default (axis = 1)") {
+        std::shared_ptr<Node> myFlatten = Flatten();
+        auto op = std::static_pointer_cast<OperatorTensor>(myFlatten -> getOperator());
+        op->associateInput(0, input);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myFlatten->forward();
+
+        auto expectedOutput = input->clone();
+        expectedOutput.resize({1, input->size()});
+
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput.dims());
+        REQUIRE(*(op->getOutput(0)) == expectedOutput);
+    }
+
+    SECTION("Axis = 0") {
+        std::shared_ptr<Node> myFlatten = Flatten(0);
+        auto op = std::static_pointer_cast<OperatorTensor>(myFlatten -> getOperator());
+        op->associateInput(0, input);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myFlatten->forward();
+
+        auto expectedOutput = input->clone();
+        expectedOutput.resize({1, input->size()});
+
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput.dims());
+        REQUIRE(*(op->getOutput(0)) == expectedOutput);
+    }
+
+    SECTION("Axis = 2") {
+        std::shared_ptr<Node> myFlatten = Flatten(2);
+        auto op = std::static_pointer_cast<OperatorTensor>(myFlatten -> getOperator());
+        op->associateInput(0, input);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myFlatten->forward();
+
+        auto expectedOutput = input->clone();
+        expectedOutput.resize({2, input->size() / 2});
+
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput.dims());
+        REQUIRE(*(op->getOutput(0)) == expectedOutput);
+    }
+
+    SECTION("Axis = 4") {
+        std::shared_ptr<Node> myFlatten = Flatten(4);
+        auto op = std::static_pointer_cast<OperatorTensor>(myFlatten -> getOperator());
+        op->associateInput(0, input);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myFlatten->forward();
+
+        auto expectedOutput = input->clone();
+        expectedOutput.resize({input->size(), 1});
+
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput.dims());
+        REQUIRE(*(op->getOutput(0)) == expectedOutput);
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_MatMul_Op.cpp b/unit_tests/operator/Test_MatMul_Op.cpp
index 876c1ac764efe54475f6d45982acca76aacb7528..0c7bc503fc69cd93d8f4aa385da922954349c29d 100644
--- a/unit_tests/operator/Test_MatMul_Op.cpp
+++ b/unit_tests/operator/Test_MatMul_Op.cpp
@@ -9,13 +9,15 @@
  *
  ********************************************************************************/
 
-#include <catch2/catch_test_macros.hpp>
-#include <catch2/generators/catch_generators_random.hpp>
 #include <cstddef>  // std::size_t
 #include <memory>
 #include <random>   // std::mt19937, std::uniform_int_distribution
 #include <vector>
 
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <fmt/core.h>
+
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
@@ -27,7 +29,8 @@ TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dist(1, 10);
 
-    std::cerr << "Test case start, random " << dist(gen) << " " << rd() << std::endl;
+    fmt::print(stderr, "Test case start, random {} {}\n", dist(gen), rd());
+
     // Create MatMul Operator
     std::shared_ptr<Node> myMatMul = MatMul();
     auto op = std::static_pointer_cast<OperatorTensor>(myMatMul -> getOperator());
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index 6711e1524fe0595b4effd68397f8cb684df590a9..042b04f01bdc1430c8b9f1b9df6951f12b821ed1 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -23,6 +23,7 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Testing.hpp"
 #include "aidge/recipes/Recipes.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 using namespace Aidge;
 
@@ -145,4 +146,17 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
 
         REQUIRE(g->getNodes().size() == 33);
     }
+
+    SECTION("Leaky") {
+        auto myLeaky = Leaky(10, 1.0, 0.9);
+        auto op = std::static_pointer_cast<OperatorTensor>(myLeaky->getOperator());
+
+        auto inputs = myLeaky->inputs();
+
+        // Two memorize nodes + real data input
+        REQUIRE(myLeaky->nbInputs() == 3);
+        // Outputs for spike and memory, plus the 2 Memorize nodes
+        REQUIRE(myLeaky->nbOutputs() == 4);
+        REQUIRE(true);
+    }
 }
diff --git a/unit_tests/operator/Test_Operator.cpp b/unit_tests/operator/Test_Operator.cpp
index 6bd12c51ef367ad1cf1859afc56af8a21a706237..b726b83a12f9b3760a643804330264981ef87e70 100644
--- a/unit_tests/operator/Test_Operator.cpp
+++ b/unit_tests/operator/Test_Operator.cpp
@@ -9,13 +9,13 @@
  *
  ********************************************************************************/
 
-#include <catch2/catch_test_macros.hpp>
 #include <cstddef>
-#include <iostream>
 #include <memory>
 #include <string>
 #include <vector>
 
+#include <catch2/catch_test_macros.hpp>
+
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Add.hpp"
diff --git a/unit_tests/operator/Test_ShapeImpl.cpp b/unit_tests/operator/Test_ShapeImpl.cpp
index 45df89df061148fbfb892112e3c6d01edf27ffb4..56ca3eaa1669b66312f3d96a9e4d3ddcd96a54e9 100644
--- a/unit_tests/operator/Test_ShapeImpl.cpp
+++ b/unit_tests/operator/Test_ShapeImpl.cpp
@@ -42,12 +42,11 @@ TEST_CASE("[cpu/operator] Shape(forward)", "[Shape][CPU]") {
             {1, 2, 3, 5}
         });
 
-        std::shared_ptr<Node> myShape = Shape();
-        auto op = std::static_pointer_cast<OperatorTensor>(myShape -> getOperator());
-        op->associateInput(0,input);
+        std::shared_ptr<Shape_Op> op = std::make_shared<Shape_Op>();
+        op->associateInput(0, input);
         op->setDataType(DataType::Int32);
         op->setBackend("cpu");
-        myShape->forward();
+        op->forward();
 
         REQUIRE(*(op->getOutput(0)) == *expectedOutput);
 
@@ -83,4 +82,4 @@ TEST_CASE("[cpu/operator] Shape(forward)", "[Shape][CPU]") {
         REQUIRE(*(op->getOutput(0)) == *expectedOutput);
 
     }
-}
\ No newline at end of file
+}
diff --git a/unit_tests/operator/Test_SliceImpl.cpp b/unit_tests/operator/Test_SliceImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b856535f373fb498324674a7ab169da83f1746d2
--- /dev/null
+++ b/unit_tests/operator/Test_SliceImpl.cpp
@@ -0,0 +1,278 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Slice.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
+    SECTION("1D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
+            {0, 1, -2,-3, 4,-5,-6, 7, 8, 9}
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,3> {
+            {0, 1, -2}
+        });
+        std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,1>{{0}});
+        std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,1>{{3}});
+        std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,1>{{0}});
+
+        std::shared_ptr<Node> mySlice = Slice();
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->associateInput(1,starts);
+        mySlice->getOperator()->associateInput(2,ends);
+        mySlice->getOperator()->associateInput(3,axes);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        mySlice->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("2D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array2D<int,2,10> {
+            {
+                { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<int,2,3> {
+            {
+                {-5,-6, 7},
+                {-5,-6, 7}
+            }
+        });
+        std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,2>{{0,5}});
+        std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,2>{{2,8}});
+        std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,2>{{0,1}});
+
+        std::shared_ptr<Node> mySlice = Slice();
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice->getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->associateInput(1,starts);
+        mySlice->getOperator()->associateInput(2,ends);
+        mySlice->getOperator()->associateInput(3,axes);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        mySlice->forward();
+        // op->getOutput(0)->print();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("3D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<int,2,2,10> {
+            {
+                {
+                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                },
+                {
+                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,1,1,3> {
+            {
+                {
+                    { 4,-5,-6}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,3>{{0,1,4}});
+        std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,3>{{1,2,7}});
+        std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,3>{{0,1,2}});
+
+        std::shared_ptr<Node> mySlice = Slice();
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice->getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->associateInput(1,starts);
+        mySlice->getOperator()->associateInput(2,ends);
+        mySlice->getOperator()->associateInput(3,axes);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        mySlice->forward();
+        // op->getOutput(0)->print();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("4D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
+            {
+                {
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    }
+                },
+                {
+                    {
+                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
+            {
+                {
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    }
+                },
+                {
+                    {
+                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,4>{{0,0,0,0}});
+        std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,4>{{2,2,2,10}});
+        std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,4>{{0,1,2,3}});
+
+        std::shared_ptr<Node> mySlice = Slice();
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice->getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->associateInput(1,starts);
+        mySlice->getOperator()->associateInput(2,ends);
+        mySlice->getOperator()->associateInput(3,axes);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        mySlice->forward();
+        // op->getOutput(0)->print();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("Attributes instead of inputs") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
+            {
+                {
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    }
+                },
+                {
+                    {
+                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,1,1,1,5> {
+            {
+                {
+                    {
+                        { 0, 1, 2,-3, 4}
+                    }
+                }
+            }
+        });
+
+        std::shared_ptr<Node> mySlice = Slice({0,0,0,0}, {1,1,1,5}, {0,1,2,3}, {1,1,1,1});
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice->getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        mySlice->forward();
+        // op->getOutput(0)->print();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("Different Steps") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<int,4,2,8> {
+            {
+                {
+                    { 0, 1, 2,-3, 4,-5,-6,7},
+                    {-5, 4, 2,-3, 4,-5,-6,-7}
+                },
+                {
+                    { 10, 11, 12,-13, 14,-15,-16,17},
+                    {-15, 14, 12,-13, 14,-15,-16,-17}
+                },
+                {
+                    { 20, 21, 22,-23, 24,-25,-26,27},
+                    {-25, 24, 22,-23, 24,-25,-26,-27}
+                },
+                {
+                    { 30, 31, 32,-33, 34,-35,-36,37},
+                    {-35, 34, 32,-33, 34,-35,-36,-37}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,2,1,3> {
+            {
+                {
+                    { 7, 4, 1}
+                },
+                {
+                    { 27, 24, 21}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> mySlice = Slice({0,0,7}, {4,1,0}, {0,1,2}, {2,1,-3});
+        // on Axis 0: from 0 to 4 by step of 2
+        // on Axis 1: from 0 to 1 by step of 1
+        // on Axis 2: from 7 to 0 by step of -3 (reverse the order of elements)
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice->getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        mySlice->forward();
+        // op->getOutput(0)->print();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+}
diff --git a/unit_tests/operator/Test_Squeeze_Op.cpp b/unit_tests/operator/Test_Squeeze_Op.cpp
index 660e970dd65bb7c4b9a52b0ccb62350ca355d243..41822742c36bfcbb62c4be3784b9028b39ab1cb2 100644
--- a/unit_tests/operator/Test_Squeeze_Op.cpp
+++ b/unit_tests/operator/Test_Squeeze_Op.cpp
@@ -11,30 +11,31 @@
 
 #include "aidge/operator/Squeeze.hpp"
 
-#include <aidge/utils/Types.h>
-#include <algorithm>
+#include <algorithm>  // std::copy_if, std::count_if, std::generate, std::sort, std::transform
 #include <array>
-#include <catch2/catch_test_macros.hpp>
-#include <catch2/generators/catch_generators_random.hpp>
 #include <chrono>
 #include <cmath>
 #include <cstddef> // std::size_t
-#include <cstdint> // std::uint16_t
-#include <fmt/core.h>
-#include <iostream>
-#include <iterator>
+#include <cstdint> // std::int8_t, std::uint16_t
+#include <functional>  // std::multiplies
+#include <iterator>  // std::back_inserter
 #include <memory>
 #include <numeric> // std::accumulate
-#include <ostream>
 #include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
 #include <vector>
 
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <fmt/core.h>
+
+#include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/TensorUtils.hpp"
+#include "aidge/utils/Types.h"
 
 namespace Aidge {
 TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
-  Log::setConsoleLevel(Log::Notice);
   constexpr std::uint16_t NB_TRIALS = 10;
   // Create a random number generator
   auto random_seed = Catch::Generators::Detail::getSeed;
@@ -74,7 +75,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
   SECTION("ERROR : nb_dims_to_squeeze>input.size()") {
     constexpr size_t nb_dims_to_squeeze = 100;
 
-    std::vector<int8_t> dims_to_squeeze(nb_dims_to_squeeze);
+    std::vector<std::int8_t> dims_to_squeeze(nb_dims_to_squeeze);
     std::generate(dims_to_squeeze.begin(), dims_to_squeeze.end(),
                   [&gen, &idx_dims_to_squeeze_dist]() {
                     return idx_dims_to_squeeze_dist(gen);
@@ -102,7 +103,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
     SECTION("axes is given via attribute") {
       SECTION("Squeeze a 1-sized-axis") {
         int8_t nb_dims = 4;
-        std::shared_ptr<Node> squeeze_node = Squeeze(std::vector<int8_t>({0}));
+        std::shared_ptr<Node> squeeze_node = Squeeze(std::vector<std::int8_t>({0}));
         auto op = std::static_pointer_cast<OperatorTensor>(
             squeeze_node->getOperator());
         op->associateInput(0, input_T);
@@ -131,7 +132,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
       }
       SECTION("Squeeze a non-1-Sized axis") {
         int8_t nb_dims = 4;
-        std::shared_ptr<Node> squeeze_node = Squeeze(std::vector<int8_t>({3}));
+        std::shared_ptr<Node> squeeze_node = Squeeze(std::vector<std::int8_t>({3}));
         auto op = std::static_pointer_cast<OperatorTensor>(
             squeeze_node->getOperator());
         op->associateInput(0, input_T);
@@ -228,7 +229,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
       CHECK(op->getOutput(0)->dims() == dims_out);
 
       int nb_ones = std::count_if(dims_in.begin(), dims_in.end(),
-                                  [](int8_t dim) { return dim == 1; });
+                                  [](std::int8_t dim) { return dim == 1; });
       CHECK((op->getInput(0)->dims().size() -
              op->getOutput(0)->dims().size()) == nb_ones);
     }
@@ -310,7 +311,6 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
 }
 
 TEST_CASE("[core/operator] Squeeze(forward)", "[Squeeze][forward]") {
-  Log::setConsoleLevel(Log::Notice);
   constexpr std::uint16_t NB_TRIALS = 10;
   // Create a random number generator
   auto random_seed = Catch::Generators::Detail::getSeed;
@@ -333,7 +333,6 @@ TEST_CASE("[core/operator] Squeeze(forward)", "[Squeeze][forward]") {
   std::chrono::time_point<std::chrono::system_clock> end;
   std::chrono::duration<double, std::micro> duration{};
 
-  Log::setConsoleLevel(Log::Notice);
   int number_of_operation{0};
   for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
     // Create the Operator
@@ -444,12 +443,9 @@ TEST_CASE("[core/operator] Squeeze(forward)", "[Squeeze][forward]") {
 
         delete[] array_in;
       }
-      std::cout << "Squeeze total execution time : " << duration.count() << "µs"
-                << std::endl;
-      std::cout << "Number of operations : " << number_of_operation
-                << std::endl;
-      std::cout << "Operation / µs = " << number_of_operation / duration.count()
-                << std::endl;
+    Log::info("GlobalAveragePooling total execution time: {}µs\n", duration.count());
+    Log::info("Number of operations : {}\n", number_of_operation);
+    Log::info("Operation / µs = {}\n", number_of_operation / duration.count());
     }
   }
 }
diff --git a/unit_tests/operator/Test_Unsqueeze_Op.cpp b/unit_tests/operator/Test_Unsqueeze_Op.cpp
index a436ab5a54e2f66fe87bfc28157d691cf6548dd4..5cedcfdfa9c7608aa99426f7c4b496537c43c324 100644
--- a/unit_tests/operator/Test_Unsqueeze_Op.cpp
+++ b/unit_tests/operator/Test_Unsqueeze_Op.cpp
@@ -9,22 +9,24 @@
  *
  ********************************************************************************/
 
-#include <algorithm>
-#include <chrono>
-#include <cmath>
-#include <cstddef> // std::size_t
-#include <cstdint> // std::uint16_t
-#include <fmt/core.h>
-#include <iostream>
+#include <algorithm>  // std::adjacent_find, std::all_of, std::sort, std::transform
+#include <array>
+#include <chrono>      // std::micro, std::chrono::time_point,
+                       // std::chrono::system_clock
+#include <cstddef>     // std::size_t
+#include <cstdint>     // std::int8_t, std::uint16_t
+#include <functional>  // std::multiplies
 #include <memory>
-#include <numeric> // std::accumulate
-#include <ostream>
-#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
+#include <numeric>     // std::accumulate
+#include <random>      // std::random_device, std::mt19937
+                       // std::uniform_int_distribution, std::uniform_real_distribution
 #include <vector>
 
 #include <catch2/catch_test_macros.hpp>
 #include <catch2/generators/catch_generators_random.hpp>
+#include <fmt/core.h>
 
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Unsqueeze.hpp"
@@ -261,7 +263,7 @@ TEST_CASE("[core/operator] Unsqueeze(forward)", "[Unsqueeze][forward]") {
   // Create a random number generator
   std::random_device rd;
   auto random_seed = rd();
-  std::cout << "True random seed : " << random_seed << std::endl;
+  fmt::print("True random seed: {}\n", random_seed);
   std::mt19937 gen(random_seed);
   // Random float distribution between 0 and 1
   std::uniform_real_distribution<float> valueDist(0.1f, 1.1f);
@@ -371,11 +373,9 @@ TEST_CASE("[core/operator] Unsqueeze(forward)", "[Unsqueeze][forward]") {
         delete[] array_in;
       }
     }
-    std::cout << "Unsqueeze total execution time : " << duration.count() << "µs"
-              << std::endl;
-    std::cout << "Number of operations : " << number_of_operation << std::endl;
-    std::cout << "Operation / µs = " << number_of_operation / duration.count()
-              << std::endl;
+    Log::info("GlobalAveragePooling total execution time: {}µs\n", duration.count());
+    Log::info("Number of operations : {}\n", number_of_operation);
+    Log::info("Operation / µs = {}\n", number_of_operation / duration.count());
   }
 }
 
diff --git a/unit_tests/recipes/Test_AdaptToBackend.cpp b/unit_tests/recipes/Test_AdaptToBackend.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1238d1dc448f1d6cbf10245b03351c477d063141
--- /dev/null
+++ b/unit_tests/recipes/Test_AdaptToBackend.cpp
@@ -0,0 +1,117 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <set>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/Transpose.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/recipes/Recipes.hpp"
+#include "aidge/scheduler/SequentialScheduler.hpp"
+
+namespace Aidge {
+
+////////////////////////////////////////////////////////////////////////////////
+// Create a dummy implementation
+template <class Op>
+class OperatorImpl_dummy : public OperatorImpl,
+    public Registrable<OperatorImpl_dummy<Op>, ImplSpec, Impl<void(), void()>>
+{
+public:
+    OperatorImpl_dummy(const Op& op) : OperatorImpl(op, "dummy") {}
+
+    static std::unique_ptr<OperatorImpl_dummy<Op>> create(const Op& op) {
+        return std::make_unique<OperatorImpl_dummy<Op>>(op);
+    }
+
+    virtual std::shared_ptr<ProdConso> getProdConso() const override {
+        const auto impl = Registrar<OperatorImpl_dummy>::create(getBestMatch(getRequiredSpec()));
+        return impl.prodConso(mOp);
+    }
+
+    virtual std::vector<ImplSpec> getAvailableImplSpecs() const override {
+        std::set<ImplSpec> implSpecsSet = Registrar<OperatorImpl_dummy>::getKeys();
+        return std::vector<ImplSpec>(implSpecsSet.begin(), implSpecsSet.end());
+    }
+
+    void forward() override {
+        fmt::println("forward: {}", mOp.type());
+    }
+};
+
+// Register it
+using Conv2D_Op_Impl_dummy = OperatorImpl_dummy<Conv_Op<2>>;
+REGISTRAR(Conv2D_Op_Impl_dummy,
+    {{ // Inputs
+        {DataType::Any, DataFormat::NHWC},
+        {DataType::Any, DataFormat::NHWC},
+        {DataType::Any, DataFormat::Default}},
+    { // Outputs
+        {DataType::Float32, DataFormat::NHWC}}},
+    {ProdConso::inPlaceModel, nullptr, nullptr});
+
+using Conv2D_Op = Conv_Op<2>;
+REGISTRAR(Conv2D_Op, "dummy", OperatorImpl_dummy<Conv2D_Op>::create);
+
+using ReLU_Op_Impl_dummy = OperatorImpl_dummy<ReLU_Op>;
+REGISTRAR(ReLU_Op_Impl_dummy,
+    {{DataType::Any, DataFormat::Default}},
+    {ProdConso::inPlaceModel, nullptr, nullptr});
+
+REGISTRAR(ReLU_Op, "dummy", OperatorImpl_dummy<ReLU_Op>::create);
+
+REGISTRAR(Tensor, {"dummy", DataType::Float32}, Registrar<Tensor>::create({"cpu", DataType::Float32}));
+////////////////////////////////////////////////////////////////////////////////
+
+
+TEST_CASE("[cpu/recipes] AdaptToBackend", "[AdaptToBackend][recipes]") {
+    auto g1 = Sequential({
+        Producer({1, 3, 22, 22}, "dataProvider"),
+        Conv(3, 4, {3, 3}, "conv1"),
+        ReLU("relu1"),
+        Conv(4, 8, {3, 3}, "conv2"),
+        ReLU("relu2"),
+        Conv(8, 10, {1, 1}, "conv3")
+    });
+
+    g1->setBackend("dummy");
+    auto convOp = std::static_pointer_cast<Conv2D_Op>(g1->getNode("conv1")->getOperator());
+    REQUIRE(convOp->getInput(0)->dataFormat() == DataFormat::Default);
+    REQUIRE(convOp->getInput(1)->dataFormat() == DataFormat::Default);
+    REQUIRE(convOp->getOutput(0)->dataFormat() == DataFormat::Default);
+
+    g1->save("adapttobackend_before", true);
+    adaptToBackend(g1);
+    g1->save("adapttobackend_after", true);
+
+    auto matches = SinglePassGraphMatching(g1).match("Conv2D#<-Transpose<-Producer;Conv2D#<1-Transpose<-Producer;Conv2D#<2-Producer;Conv2D#->Transpose#->ReLU");
+    REQUIRE(matches.size() == 1);
+    convOp = std::static_pointer_cast<Conv2D_Op>(matches.begin()->graph->rootNode()->getOperator());
+    auto outTransOp = std::static_pointer_cast<Transpose_Op>(matches.begin()->anchors.at("Transpose").at("#")->getOperator());
+    REQUIRE(convOp->getInput(0)->dataFormat() == DataFormat::NHWC);
+    REQUIRE(convOp->getInput(1)->dataFormat() == DataFormat::NHWC);
+    REQUIRE(convOp->getOutput(0)->dataFormat() == DataFormat::NHWC);
+    REQUIRE(outTransOp->getOutput(0)->dataFormat() == DataFormat::Default);
+
+    // TODO: uncomment when support of NHWC will be implemented in Conv_Op::forwardDims()
+    //REQUIRE(g1->forwardDims());
+    //g1->save("adapttobackend_after_forwarddims", true);
+
+    //SequentialScheduler sched(g1);
+    //sched.forward();
+}
+
+}  // namespace Aidge
diff --git a/unit_tests/recipes/Test_FuseToMetaOps.cpp b/unit_tests/recipes/Test_FuseToMetaOps.cpp
index 7bb3ae63add6568f7de08a996b27a495a644cf46..3fe0fee1c8e520c99a3e056c06bc0fa034c6c878 100644
--- a/unit_tests/recipes/Test_FuseToMetaOps.cpp
+++ b/unit_tests/recipes/Test_FuseToMetaOps.cpp
@@ -34,7 +34,6 @@ TEST_CASE("[cpu/recipes] FuseToMetaOps", "[FuseToMetaOps][recipes]") {
     });
     g1->save("FuseToMetaOps_before");
 
-    // FIXME: GraphRegex also matches the Conv Producers, which are not in the query!
     const auto nbFused = fuseToMetaOps(g1, "Conv2D->ReLU", "ConvReLU");
     g1->save("FuseToMetaOps_after", true);
 
diff --git a/unit_tests/recipes/Test_ToGenericOp.cpp b/unit_tests/recipes/Test_ToGenericOp.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cb75fdb1072dee476c88c1f6d502a792b2e6abd9
--- /dev/null
+++ b/unit_tests/recipes/Test_ToGenericOp.cpp
@@ -0,0 +1,94 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <memory>
+#include <set>
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+namespace Aidge {
+
+TEST_CASE("[graph/convert] toGenericOp", "[toGenericOp][recipies]") {
+    // Create a convolution operator
+    std::shared_ptr<GraphView> g =
+                Sequential({
+                    Conv(1, 3, {3, 3}, "conv1"),
+                    ReLU(),
+                    Conv(3, 4, {1, 1}, "conv2"),
+                    ReLU(),
+                    Conv(4, 3, {1, 1}, "conv3"),
+                    ReLU(),
+                    FC(2028, 256, false, "fc1"),
+                    ReLU(),
+                    FC(256, 10, false, "fc2")});
+
+    // NCHW - MNIST DATA like
+    g->forwardDims({{5, 1, 28, 28}});
+
+    SECTION("Test Operator to Generic Operator") {
+        auto convOp = g->getNode("conv2");
+
+        // Convert to GenericOperator
+        toGenericOp(convOp);
+
+        auto newGenOp = g->getNode("conv2");
+
+        // Ensure the conversion
+        REQUIRE(newGenOp->type() == "Conv2D");
+
+        const auto convOpAttr = convOp->getOperator()->attributes()->getAttrs();
+        const auto newGenOpAttr = (newGenOp->getOperator()->attributes()->getAttrs());
+        REQUIRE((!(newGenOpAttr < convOpAttr) && !(convOpAttr < newGenOpAttr)));
+    }
+
+    SECTION("Test MetaOperator to Generic Operator") {
+
+        const auto nbFused = fuseToMetaOps(g, "Conv2D->ReLU->FC", "ConvReLUFC");
+
+        REQUIRE(nbFused == 1);
+
+        std::shared_ptr<Node> metaOpNode;
+
+        const auto nodes = g->getNodes(); // the node set of g gets modified in the loop!
+        for (const auto& nodePtr : nodes)
+        {
+            if (nodePtr->type() == "ConvReLUFC")
+            {
+                nodePtr->setName("ConvReLUFC_0");
+                metaOpNode = nodePtr;
+                // Convert to GenericOperator
+                toGenericOp(nodePtr);
+            }
+        }
+
+        REQUIRE(metaOpNode);
+        REQUIRE(!metaOpNode->getOperator()->isAtomic());
+        auto newGenOp = g->getNode("ConvReLUFC_0");
+
+        // Ensure the conversion
+        REQUIRE(newGenOp->type() == "ConvReLUFC");
+        REQUIRE(std::dynamic_pointer_cast<GenericOperator_Op>(newGenOp->getOperator()));
+
+        const auto metaOpAttr = *std::static_pointer_cast<DynamicAttributes>(metaOpNode->getOperator()->attributes());
+        const auto newGenOpAttr = *std::static_pointer_cast<DynamicAttributes>(newGenOp->getOperator()->attributes());
+        REQUIRE((!(newGenOpAttr < metaOpAttr) && !(metaOpAttr < newGenOpAttr)));
+
+    }
+
+}
+
+} // namespace Aidge
diff --git a/unit_tests/scheduler/Test_MemoryManager.cpp b/unit_tests/scheduler/Test_MemoryManager.cpp
index a4941203644b7ba291682f3932926a36fa83b745..b6cedfac47d53e0f8ab464fad8a2f6cc6c8dcc15 100644
--- a/unit_tests/scheduler/Test_MemoryManager.cpp
+++ b/unit_tests/scheduler/Test_MemoryManager.cpp
@@ -136,7 +136,6 @@ TEST_CASE("allocate1", "[MemoryManager]") {
     REQUIRE(memManager.getPlanes(node4).back().memSpace->allocated == 3);
     REQUIRE(memManager.getPlanes(node4).back().memSpace->released == 4);
 
-    memManager.log("MemoryManager_allocate1.log");
 }
 
 TEST_CASE("allocate2", "[MemoryManager]") {
@@ -281,7 +280,6 @@ TEST_CASE("allocate2", "[MemoryManager]") {
     REQUIRE(memManager.getPlanes(node4).back().memSpace->allocated == 3);
     REQUIRE(memManager.getPlanes(node4).back().memSpace->released == 4);
 
-    memManager.log("MemoryManager_allocate2.log");
 }
 
 TEST_CASE("allocate3", "[MemoryManager]") {
@@ -438,7 +436,6 @@ TEST_CASE("allocate3", "[MemoryManager]") {
     REQUIRE(memManager.getPlanes(node4).back().memSpace->allocated == 0);
     REQUIRE(memManager.getPlanes(node4).back().memSpace->released == 4);
 
-    memManager.log("MemoryManager_allocate3.log");
 }
 
 TEST_CASE("allocate3_wrapAround", "[MemoryManager]") {
@@ -595,5 +592,4 @@ TEST_CASE("allocate3_wrapAround", "[MemoryManager]") {
     REQUIRE(memManager.getPlanes(node4).back().memSpace->allocated == 0);
     REQUIRE(memManager.getPlanes(node4).back().memSpace->released == 4);
 
-    memManager.log("MemoryManager_allocate3_wrapAround.log");
 }
diff --git a/unit_tests/utils/Test_Log.cpp b/unit_tests/utils/Test_Log.cpp
index 3d8e672b84f5055a12185c3684c34bd888f0545b..1c44e232f9c8ee05bf6f8147d7789131c5a0d803 100644
--- a/unit_tests/utils/Test_Log.cpp
+++ b/unit_tests/utils/Test_Log.cpp
@@ -17,15 +17,22 @@
 
 using namespace Aidge;
 
-TEST_CASE("[core/log] Log") {
+TEST_CASE("[core/log] Log", "[Logger]") {
     SECTION("TestLog") {
-        Log::setConsoleLevel(Log::Debug);
-        Log::debug("debug");
+        Log::setConsoleLevel(Log::Level::Debug);
+        Log::debug("this is a debug message");
         Log::debug("{}", fmt::styled("green debug", fmt::fg(fmt::color::green)));
-        Log::info("info");
-        Log::notice("notice");
-        Log::warn("warn");
-        Log::error("error");
+        Log::info("this is an info message");
+        Log::notice("this is a notice message");
+        Log::warn("this is a warn message");
+        Log::error("this is an error message");
+        AIDGE_LOG_CONTEXT("Testing the context of the logger");
         Log::fatal("fatal");
+        Log::debug("Now debug messages are supposed to [{}].", fmt::styled("appear", fmt::emphasis::italic));
+        Log::info("Current consol level is {}", Log::getConsoleLevel());
+        Log::setConsoleLevel(Log::Level::Warn);
+        Log::notice("This message should not appear.");
+
+
     }
 }
diff --git a/version.txt b/version.txt
index 1d0ba9ea182b0f7354f3daf12120744ec5e0c2f8..8f0916f768f0487bcf8d33827ce2c8dcecb645c1 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.4.0
+0.5.0