diff --git a/CMakeLists.txt b/CMakeLists.txt
index 499c2971cb60f979e72419cf65b9897d0613bf0a..beec9fbb427afadccef156139bd277d743e6999f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -5,7 +5,7 @@ file(STRINGS "${CMAKE_SOURCE_DIR}/version.txt" version)
 
 project(aidge_core
         VERSION ${version}
-        DESCRIPTION "Core algorithms for operators and graph of the AIDGE framework" 
+        DESCRIPTION "Core algorithms for operators and graph of the AIDGE framework"
         LANGUAGES CXX)
 message(STATUS "Project name: ${CMAKE_PROJECT_NAME}")
 message(STATUS "Project version: ${version}")
@@ -14,6 +14,9 @@ add_definitions(-DPROJECT_VERSION="${version}")
 message(STATUS "Project name: ${CMAKE_PROJECT_NAME}")
 message(STATUS "Project version: ${version}")
 
+# helper for LSP users
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
+
 # Note : project name is {project} and python module name is also {project}
 set(module_name _${CMAKE_PROJECT_NAME}) # target name
 set(pybind_module_name ${CMAKE_PROJECT_NAME}) # name of submodule for python bindings
@@ -26,6 +29,7 @@ option(TEST "Enable tests" ON)
 option(COVERAGE "Enable coverage" OFF)
 option(ENABLE_ASAN "Enable ASan (AddressSanitizer) for runtime analysis of memory use (over/underflow, memory leak, ...)" OFF)
 
+
 ##############################################
 # Import utils CMakeLists
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
@@ -36,22 +40,28 @@ endif()
 
 ##############################################
 # Find system dependencies
-Include(FetchContent)
-
 set(FMT_VERSION 10.2.1)
-message(STATUS "Retrieving fmt ${FMT_VERSION} from git")
-FetchContent_Declare(
-    fmt
-    GIT_REPOSITORY https://github.com/fmtlib/fmt.git
-    GIT_TAG        ${FMT_VERSION} # or a later release
-)
 
-set(FMT_SYSTEM_HEADERS ON)
-FetchContent_MakeAvailable(fmt)
-set_property(TARGET fmt PROPERTY POSITION_INDEPENDENT_CODE ON)
+find_package(fmt ${FMT_VERSION} QUIET)
 
-find_package(Threads REQUIRED)
+if(NOT fmt_FOUND)
+    message(STATUS "fmt not found in system, retrieving from git")
+    include(FetchContent)
 
+    FetchContent_Declare(
+        fmt
+        GIT_REPOSITORY https://github.com/fmtlib/fmt.git
+        GIT_TAG        ${FMT_VERSION}
+    )
+
+    set(FMT_SYSTEM_HEADERS ON)
+    FetchContent_MakeAvailable(fmt)
+    set_property(TARGET fmt PROPERTY POSITION_INDEPENDENT_CODE ON)
+else()
+    message(STATUS "Found system fmt version ${fmt_VERSION}")
+endif()
+
+find_package(Threads REQUIRED)
 ##############################################
 # Create target and set properties
 
diff --git a/aidge_core/aidge_export_aidge/registry.py b/aidge_core/aidge_export_aidge/registry.py
index fe94a22399438b9a07673d21220ff1d0ba4a1dda..cfed0822ff2d8351af60cd211c190a802fe4a674 100644
--- a/aidge_core/aidge_export_aidge/registry.py
+++ b/aidge_core/aidge_export_aidge/registry.py
@@ -5,6 +5,3 @@ import aidge_core
 
 class ExportSerialize(ExportLib):
     _name="export_serialize"
-
-aidge_core.register_Tensor(["export_serialize", aidge_core.dtype.float32],
-                           aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.float32]))
diff --git a/aidge_core/export_utils/export_registry.py b/aidge_core/export_utils/export_registry.py
index 70c3e5fa47cb35cbf6611a5359e6a37e0f17620d..e5b6b2098cd760c4d425b96caf7b41cc8e82c46e 100644
--- a/aidge_core/export_utils/export_registry.py
+++ b/aidge_core/export_utils/export_registry.py
@@ -2,24 +2,36 @@ from typing import Dict, List
 import aidge_core
 from aidge_core.export_utils import ExportNode
 
+
 class classproperty:
-    """Helper class to define class properties,
-    Is equivalent to applying the decorator ``@property`` and ``@classmethod``.
-    But these two decorator are exclusive with python > 12.
-    See discussion https://discuss.python.org/t/add-a-supported-read-only-classproperty-decorator-in-the-stdlib/18090/12
+    """Helper class to define class-level properties.
+
+    Equivalent to applying both the ``@property`` and ``@classmethod`` decorators,
+    allowing methods to be accessed as class properties. These two decorators
+    are otherwise incompatible prior to Python 3.12.
+
+    See discussion: https://discuss.python.org/t/add-a-supported-read-only-classproperty-decorator-in-the-stdlib/18090/12
     """
 
     def __init__(self, fget):
+        """
+        :param fget: Function to be wrapped as a class property.
+        :type fget: Callable
+        """
         self.fget = fget
 
     def __get__(self, instance, owner):
         return self.fget(owner)
 
 
-# TODO: very naive implementation !
-# error handling should be added !
 class ExportLib(aidge_core.OperatorImpl):
-    """Aidge export lib, define a registry
+    """Aidge export library that manages a registry for operators and static files.
+
+    This class provides a structure for registering different operator types and
+    export nodes, facilitating access and management of these elements.
+
+    :ivar _name: The name of the export library, used for namespacing.
+    :ivar static_files: A dictionary mapping paths of static files to their target locations relative to the export root.
     """
     # PUBLIC
     # Lib name useful ?
@@ -30,12 +42,44 @@ class ExportLib(aidge_core.OperatorImpl):
     static_files: Dict[str, str] = {}
     # Main memory section
     mem_section = None
+    # Custom forward generation jinja file
+    forward_template: str = None
+    forward_header_template: str = None
     # PRIVATE
     # Registry of exportNode, class level dictionary, shared across all ExportLib
     _cls_export_node_registry = {}
 
     def __init__(self, operator):
         super(ExportLib, self).__init__(operator, self._name)
+        if self._name is None:
+            raise ValueError(f"ExportLib {self.__class__.__name__} does not define the attribute ``_name``.")
+
+        # TODO: This is a patch, setBackend method is used to set an ExportLib as an Implementation.
+        # But it also set the backend of the Tensor and we don't define a backend for Tensor.
+        # The following code block associate to the export backend the "cpu" implementation.
+        # This is tracked by the issue :
+        #         https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/issues/178
+
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.float32],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.float32]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.float16],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.float16]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.int8],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.int8]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.int16],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.int16]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.int32],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.int32]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.int64],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.int64]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.uint8],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.uint8]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.uint16],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.uint16]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.uint32],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.uint32]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.uint64],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.uint64]))
 
     @classproperty
     def _export_node_registry(cls) -> Dict[str, List['ExportNode']]:
@@ -46,25 +90,48 @@ class ExportLib(aidge_core.OperatorImpl):
         """
         return cls._cls_export_node_registry.setdefault(cls, {})
 
-    # Override the virtual OperatorImpl method, in order to provide available
-    # implementation specifications
-    def get_available_impl_specs(self):
+    def get_available_impl_specs(self) -> List[aidge_core.ImplSpec]:
+        """Override the virtual OperatorImpl method, in order to provide available
+        implementation specifications.
+
+        :return: List of implementation specification available for the type of operator.
+        :rtype: List[aidge_core.ImplSpec]
+        """
         if self.get_operator().type() in self._export_node_registry:
             spec_vec = [i for i, _ in self._export_node_registry[self.get_operator().type()]]
             return spec_vec
         else:
             return []
 
-    def get_export_node(self, spec: aidge_core.aidge_core.ImplSpec):
+    def get_export_node(self, spec: aidge_core.ImplSpec) -> ExportNode:
+        """Given an :py:class:`aidge_core.ImplSpec`, return the ExportNode that is the closest match.
+
+        :param spec: Implementation specification to match
+        :type spec: :py:class:`aidge_core.ImplSpec`
+        :return: The class ExportNode that is the closest match
+        :rtype: ExportNode
+        """
         for registered_spec, export_node in self._export_node_registry[self.get_operator().type()]:
             if registered_spec == spec:
 
                 return export_node
         return None
 
-    # Decorator to register kernels for this export
     @classmethod
     def register(cls, op_type, spec):
+        """Decorator to register an operator implementation for a specified operator type.
+
+        Registers an operator under a given operator type and specification,
+        adding it to the export library registry. This method supports both
+        single operator types (str) and lists of types (List[str]).
+
+        :param op_type: The operator type(s) to register.
+        :type op_type: Union[str, List[str]]
+        :param spec: Implementation specification for the operator.
+        :type spec: :py:class:`aidge_core.ImplSpec`
+        :return: A wrapper class that initializes the registered operator.
+        :rtype: Callable
+        """
         def decorator(operator):
             class Wrapper(operator):
                 def __init__(self, *args, **kwargs):
@@ -92,9 +159,20 @@ class ExportLib(aidge_core.OperatorImpl):
             return Wrapper
         return decorator
 
-    # Decorator to register kernels for this export
     @classmethod
     def register_metaop(cls, op_type, spec):
+        """Decorator to register a MetaOperator with the export library.
+
+        Registers a MetaOperator under a given operator type and specification. This decorator
+        is intended for operator types that are grouped as meta operators.
+
+        :param op_type: Operator type(s) to register as a ``MetaOperator``.
+        :type op_type: Union[str, List[str]]
+        :param spec: Implementation specification for the MetaOperator.
+        :type spec: aidge_core.ImplSpec
+        :return: A wrapper class that initializes the registered MetaOperator.
+        :rtype: Callable
+        """
         def decorator(operator):
             class Wrapper(operator):
                 def __init__(self, *args, **kwargs):
@@ -114,3 +192,38 @@ class ExportLib(aidge_core.OperatorImpl):
                 spec.attrs.add_attr("type", type_name) # MetaOperator specs need to verify the type
             return Wrapper
         return decorator
+
+
+    @classmethod
+    def register_generic(cls, op_type, spec):
+        """Decorator to register a GenericOperator with the export library.
+
+        Registers a GenericOperator under a given operator type and specification. This decorator
+        is intended for operator types that are grouped as generic operators.
+
+        :param op_type: Operator type(s) to register as a ``GenericOperator``.
+        :type op_type: Union[str, List[str]]
+        :param spec: Implementation specification for the GenericOperator.
+        :type spec: aidge_core.ImplSpec
+        :return: A wrapper class that initializes the registered GenericOperator.
+        :rtype: Callable
+        """
+        def decorator(operator):
+            class Wrapper(operator):
+                def __init__(self, *args, **kwargs):
+                    return operator(*args, **kwargs)
+            type_list = []
+            if isinstance(op_type, list):
+                type_list = op_type
+            elif isinstance(op_type, str):
+                type_list = [op_type]
+            else:
+                raise TypeError(f"Argument 'op_type' of register method should be of type 'List[str]' or 'str', got {type(op_type)}")
+            for type_name in type_list:
+                if (type_name not in cls._export_node_registry):
+                    cls._export_node_registry[type_name] = []
+                cls._export_node_registry[type_name].append((spec, operator))
+                aidge_core.register_GenericOperatorOp([cls._name, type_name], cls)
+                spec.attrs.add_attr("type", type_name) # GenericOperator specs need to verify the type
+            return Wrapper
+        return decorator
diff --git a/aidge_core/export_utils/generate_main.py b/aidge_core/export_utils/generate_main.py
index b7eee930669cc5f15f88714acf4884f5e30333b1..58946136fbdbeb35a0919d656efcc1de587e5136 100644
--- a/aidge_core/export_utils/generate_main.py
+++ b/aidge_core/export_utils/generate_main.py
@@ -2,7 +2,7 @@ import aidge_core
 from pathlib import Path
 from aidge_core.export_utils import generate_file, data_conversion
 
-def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView) -> None:
+def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView, inputs_tensor=None) -> None:
     """
     Generate a C++ file to manage the forward pass of a model using the given graph structure.
 
@@ -10,11 +10,15 @@ def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView) -> N
     and tensor sizes. It uses this data to populate a C++ file template (`main.jinja`), creating a file (`main.cpp`)
     that call the `model_forward` function, which handles data flow and processing for the exported model.
 
+    This function also generates files containing input tensors if they have been set.
+
     :param export_folder: Path to the folder where the generated C++ file (`main.cpp`) will be saved.
     :type export_folder: str
     :param graph_view: An instance of :py:class:`aidge_core.graph_view`, providing access to nodes and
                        ordered input/output data within the computational graph.
     :type graph_view: aidge_core.graph_view
+    :param inputs_tensor: **For future** argument to provide tensor to use in the main function, not implemented yet!
+    :type inputs_tensor: None
     :raises RuntimeError: If there is an inconsistency in the output arguments (names, data types, sizes),
                           indicating an internal bug in the graph representation.
     """
@@ -27,14 +31,23 @@ def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView) -> N
 
     for in_node, in_idx in gv_inputs:
         in_node_input, in_node_input_idx = in_node.input(in_idx)
-        inputs_name.append(f"{in_node.name()}_input_{in_idx}" if in_node_input is None else f"{in_node_input.name()}_output_{in_node_input_idx}")
+        in_name = f"{in_node.name()}_input_{in_idx}" if in_node_input is None else f"{in_node_input.name()}_output_{in_node_input_idx}"
+        inputs_name.append(in_name)
+        input_tensor = in_node.get_operator().get_input(in_idx)
+        if input_tensor is None or input_tensor.undefined() or not input_tensor.has_impl():
+            if inputs_tensor is not None:
+                aidge_core.Log.notice("No support for inputs_tensor argument yet.")
+                aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functional after code generation.")
+            else:
+                aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functional after code generation.")
+        else:
+            aidge_core.export_utils.generate_input_file(export_folder=export_folder, array_name=in_name, tensor=input_tensor)
+
     for out_node, out_id in gv_outputs:
         outputs_name.append(f"{out_node.name()}_output_{out_id}")
         out_tensor = out_node.get_operator().get_output(out_id)
         outputs_dtype.append(data_conversion.aidge2c(out_tensor.dtype()))
         outputs_size.append(out_tensor.size())
-        print(out_tensor.size())
-
 
     if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
             raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")
diff --git a/aidge_core/export_utils/node_export.py b/aidge_core/export_utils/node_export.py
index 479eaf01ff8c8e85a3bf83adac88f5ee7fe86857..5777814a0b10c49d0f75245bdc4e9681027bdfb8 100644
--- a/aidge_core/export_utils/node_export.py
+++ b/aidge_core/export_utils/node_export.py
@@ -7,8 +7,6 @@ from typing import List
 
 
 def get_chan(tensor: aidge_core.Tensor) -> int:
-    """Given a tensor return the number of channel
-    """
     dformat = tensor.dformat()
     dims = tensor.dims()
     if dformat == aidge_core.dformat.default:
@@ -89,39 +87,78 @@ def get_width(tensor: aidge_core.Tensor) -> int:
 class ExportNode(ABC):
     """Abstract class to interface node with export generation.
 
-    This class expose a dictionary ``attributes`` which contains all the information required to generate an export:
-    - All the attributes of the Aidge node are automatically fetch, the key to get an attribute is the attribute name in python format, example ``no_bias``
-    - **name**: Name of the Node, ``str``
-    - **node**: Aidge Node, :py:class:`aidge_core.Node`
-    - **nb_in**: Number of inputs, ``int``
-    - **in_name**: unique name for each input, if no input node the name is ``{node_name}_input_{in_id}``, if there is a parent, the name is ``{parent_name}_output_{out_id}``, ``list[str]``
-    - **in_dims**: A list of the dimension for each inputs, ``list[list[int]]``
-    - **in_node**: A list of Node associated for each inputs, ``list[aidge_core.Node]``
-    - **in_size**: A list of the size for each inputs, ``list[int]``
-    - **in_chan**: A list of channel for each inputs, deduced by the dataformat, ``list[int]``
-    - **in_height**: A list of height for each inputs, deduced by the dataformat, ``list[int]``
-    - **in_width**: A list of width for each inputs, deduced by the dataformat, ``list[int]``
-    - **in_dtype**: A list of type (Aidge format) for each input, ``List[:py:class:`aidge_core.dtype`]``
-    - **in_cdtype**: A list of type (C/C++ format) for each input, ``List[str]``
-    - **out_name**: unique name for each output, the name is ``{name}_output_{out_id}``, ``list[str]``
-    - **out_node**: A list of list of Node associated for each outputs, ``list[list[aidge_core.Node]]``
-    - **nb_out**: Number of outputs, ``int``
-    - **out_dims**: A list of the dimension for each inputs, ``list[list[int]]``
-    - **out_size**: A list of the size for each outputs, ``list[int]``
-    - **out_chan**: A list of channel for each outputs, deduced by the dataformat, ``list[int]``
-    - **out_height**: A list of height for each outputs, deduced by the dataformat, ``list[int]``
-    - **out_width**: A list of width for each outputs, deduced by the dataformat, ``list[int]``
-    - **out_dtype**: A list of type (Aidge format) for each output, ``List[:py:class:`aidge_core.dtype`]``
-    - **out_cdtype**: A list of type (C/C++ format) for each output, ``List[str]``
-    - **mem_info**: True if mem_info is available for this node, ``bool``
-    - **mem_info_size**: A list of memory size for each output, ``List[int]``
-    - **mem_info_offset**: A list of offset to access each output, ``List[int]``
-    - **mem_info_stride**: old N2D2
-    - **mem_info_length**: old N2D2
-    - **mem_info_cont_size**: old N2D2
-    - **mem_info_cont_offset**: old N2D2
-    - **mem_info_wrap_offset**: old N2D2
-    - **mem_info_wrap_size**: old N2D2
+    This class exposes a dictionary, ``attributes``, which contains all the information
+    required to generate an export for a given node, including input/output names,
+    dimensions, types, and optional memory information.
+
+    - All the attributes of the Aidge Operator are automatically fetch, the key to get an attribute is the attribute name in python format, example ``no_bias``
+
+    - **node** (aidge_core.Node): The Aidge Node instance associated with this ExportNode.
+
+    - **name** (str): Name of the node, typically set via the Aidge node.
+
+    - **nb_in** (int): Number of input connections for the node.
+
+    - **nb_out** (int): Number of output connections for the node.
+
+    - **in_name** (list[str]): Unique name for each input connection.
+
+      Format:
+        - If no input node: ``{node_name}_input_{in_id}``
+        - If there is a parent node: ``{parent_name}_output_{out_id}``
+
+    - **in_dims** (list[list[int]]): Dimensions of each input.
+
+    - **in_node** (list[aidge_core.Node]): List of associated input nodes.
+
+    - **in_size** (list[int]): Size of each input.
+
+    - **in_chan** (list[int]): Channels in each input, based on data format.
+
+    - **in_height** (list[int]): Height of each input, based on data format.
+
+    - **in_width** (list[int]): Width of each input, based on data format.
+
+    - **in_dtype** (list[:py:class:`aidge_core.dtype`]): Data type for each input (Aidge format).
+
+    - **in_cdtype** (list[str]): Data type for each input (C/C++ format).
+
+    - **out_name** (list[str]): Unique name for each output, formatted as ``{name}_output_{out_id}``.
+
+    - **out_node** (list[list[aidge_core.Node]]): Associated output nodes for each output.
+
+    - **out_dims** (list[list[int]]): Dimensions of each output.
+
+    - **out_size** (list[int]): Size of each output.
+
+    - **out_chan** (list[int]): Channels in each output, based on data format.
+
+    - **out_height** (list[int]): Height of each output, based on data format.
+
+    - **out_width** (list[int]): Width of each output, based on data format.
+
+    - **out_dtype** (list[:py:class:`aidge_core.dtype`]): Data type for each output (Aidge format).
+
+    - **out_cdtype** (list[str]): Data type for each output (C/C++ format).
+
+    - **mem_info** (bool): True if `mem_info` is available for this node.
+
+    - **mem_info_size** (list[int]): Memory size for each output, if applicable.
+
+    - **mem_info_offset** (list[int]): Offset to access each output, if applicable.
+
+    - **mem_info_stride** (list[int]): Stride for accessing each output.
+
+    - **mem_info_length** (list[int]): Length of each output.
+
+    - **mem_info_cont_size** (list[int]): Continuous size for each output.
+
+    - **mem_info_cont_offset** (list[int]): Continuous offset for each output.
+
+    - **mem_info_wrap_offset** (list[int]): Wrap offset for each output.
+
+    - **mem_info_wrap_size** (list[int]): Wrap size for each output.
+
     """
 
     @abstractmethod
@@ -260,7 +297,64 @@ class ExportNode(ABC):
             else:
                 raise RuntimeError(f"No output for {self.node.name()}")
 
+    # **Class Attributes:**
+
+    # - **config_template** (str): Path to the template defining how to export the node definition.
+    #   This template is required for exporting; if undefined, raises an error, if no config
+    #   template is required set this to an empty string.
+
+    # - **forward_template** (str): Path to the template for generating code to perform a forward pass
+    #   of the node. Required for exporting the forward pass; raises an error if undefined.
+
+    # - **include_list** (list[str]): List of include paths (e.g., "include/toto.hpp") to be added to
+    #   the generated export files. Must be defined before export; raises an error if undefined.
+
+    # - **kernels_to_copy** (list[str]): List of paths to kernel files that should be copied during
+    #   export. The kernels are copied to ``kernels_path``, and are automatically
+    #   added to the include list.
+
+    # - **kernels_path** (str): Path where all kernels are stored in the export, prefixed by the
+    #   `export_root`. Defaults to "include/kernels".
+
+    # - **config_path** (str): Path of the configuration folder where node definitions are exported.
+    #   Defaults to "include/layers".
+
+    # - **config_extension** (str): File extension for the configuration files, typically for header
+    #   files. Defaults to "h".
+
 class ExportNodeCpp(ExportNode):
+    """Class for exporting Aidge nodes with C++ code generation.
+
+    This subclass of :class:`ExportNode` defines specific templates,
+    configuration paths, and other attributes required to generate
+    C++ code for Aidge nodes, including header and source files
+    for node definitions and forward passes.
+
+    :var config_template: Path to the template defining how to export the node definition.
+        This template is required for exporting; if undefined, raises an error, if no config
+        template is required set this to an empty string.
+    :vartype config_template: str
+    :var forward_template: Path to the template for generating code to perform a forward pass
+        of the node. Required for exporting the forward pass; raises an error if undefined.
+    :vartype forward_template: str
+    :var include_list: List of include paths (e.g., "include/toto.hpp") to be added to
+        the generated export files. Must be defined before export; raises an error if undefined.
+    :vartype include_list: list[str]
+    :var kernels_to_copy: List of paths to kernel files that should be copied during
+        export. The kernels are copied to ``kernels_path``, and are automatically
+        added to the include list.
+    :vartype kernels_to_copy: list[str]
+    :var kernels_path: Path where all kernels are stored in the export, prefixed by the
+        `export_root`. Defaults to "include/kernels".
+    :vartype kernels_path: str
+    :var config_path: Path of the configuration folder where node definitions are exported.
+        Defaults to "include/layers".
+    :vartype config_path: str
+    :var config_extension: File extension for the configuration files, typically for header
+        files. Defaults to "h".
+    :vartype config_extension: str
+    """
+
     # Path to the template defining how to export the node definition
     config_template: str = None
     # Path to the template defining how to export the node definition
@@ -277,8 +371,20 @@ class ExportNodeCpp(ExportNode):
     config_path: str = "include/layers"
     # Config_folder_extension
     config_extension: str = "h"
+
+
     def export(self, export_folder: str):
-        """Define how to export the node definition.
+        """Defines how to export the node definition.
+
+        This method checks that `config_template`, `include_list`, and
+        `kernels_to_copy` are defined, then copies each kernel to the export folder,
+        appends their paths to the include list, and generates the configuration file
+        based on the `config_template`.
+
+        :param export_folder: Folder path where the files are exported.
+        :type export_folder: str
+        :return: List of include paths with the paths for kernels and configuration files.
+        :rtype: list[str]
         """
         if self.config_template is None:
             raise ValueError("config_template have not been defined")
@@ -313,7 +419,7 @@ class ExportNodeCpp(ExportNode):
         return self.include_list + kernel_include_list
 
     def forward(self):
-        """Define how to generate code to perform a forward pass.
+        """Generates code for a forward pass using the `forward_template`.
         """
         if self.forward_template is None:
             raise ValueError("forward_template have not been defined")
diff --git a/aidge_core/export_utils/scheduler_export.py b/aidge_core/export_utils/scheduler_export.py
index f1de6f823356b0d217dc7fd98db006a8c97e450c..90862578f04ecd0bdf5ee0bfd7643e7bc3d0a455 100644
--- a/aidge_core/export_utils/scheduler_export.py
+++ b/aidge_core/export_utils/scheduler_export.py
@@ -6,130 +6,203 @@ from aidge_core.export_utils import ExportLib, generate_file, copy_file
 from typing import List, Tuple
 
 
-def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib = None, memory_manager=None, memory_manager_args=None, labels=False) -> None:
-        graphview = scheduler.graph_view()
-        export_folder = Path().absolute() / export_folder_path
-
-        os.makedirs(str(export_folder), exist_ok=True)
-
-        dnn_folder = export_folder / "dnn"
-        os.makedirs(str(dnn_folder), exist_ok=True)
-
-        if memory_manager_args is None:
-            memory_manager_args = {}
-
-        if memory_manager is None:
-            raise ValueError("A memory manager is required (no default value yet).")
-        peak_mem, mem_info = memory_manager(
-            scheduler, **memory_manager_args)
-
-        # List of function call for forward.cpp
-        list_actions: List[str] = []
-        # List of headers for forward.cpp
-        list_configs: List[str] = []
-
-        inputs_name: List[str] = []
-        inputs_dtype: List[str] = []
-        outputs_name: List[str] = []
-        outputs_dtype: List[str] = []
-        outputs_size: List[int] = []
-
-        # List of aidge_core.Node ordered by scheduler
-        list_forward_nodes: List[aidge_core.Node] = scheduler.get_static_scheduling()
-
-        # If exportLib define use it
-        # else parse component in platform
-        # if export_lib is None:
-        #     raise ValueError("Export need an ExportLib.")
-        for node in list_forward_nodes:
-            if export_lib is not None:
-                aidge_core.Log.debug(f"Setting backend {export_lib._name} to {node.name()}[{node.type()}].")
-                node.get_operator().set_backend(export_lib._name)
-
-            op_impl = node.get_operator().get_impl()
-            if op_impl is None:
-                raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an implementation.")
-            if not isinstance(op_impl, ExportLib):
-                raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an exportable backend ({op_impl}).")
-
-            is_input:bool  = node in graphview.get_input_nodes()
-            is_output:bool = node in graphview.get_output_nodes()
-
-            if is_input:
-                # GraphView.get_inputs_nodes() returns the nodes that have an Input set to None or not in the graph
-                # However, some inputs are Optional and thus the node may not be an input of the graph!
-                # So we need ot check that all the inputs of the nodes or in the graph or not optional
-                # This is what the following code block is checking.
-                for idx, node_in in enumerate(node.inputs()):
-                    optional:bool = node.get_operator().input_category(idx) == aidge_core.InputCategory.OptionalData
-                    # Note: node_in is a Tuple(Node, out_idx)
-                    in_graph:bool = node_in[0] in graphview.get_nodes()
-                    is_input &= (in_graph or not optional)
-
-            # Get operator current specs
-            required_specs = op_impl.get_required_spec()
-            # Get specs of the implementation that match current specs
-            specs = op_impl.get_best_match(required_specs)
-            # Retrieve said implementation
-            export_node = op_impl.get_export_node(specs)
-
-            if export_node is None:
-                raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].")
-            # Instanciate ExportNode
-            op = export_node(node, mem_info[node])
-
-            # For configuration files
-            list_configs += op.export(dnn_folder)
-            # For forward file
-            list_actions += op.forward()
-            if is_input:
-                for idx, node_in in enumerate(node.inputs()):
-                    if node_in[0] not in graphview.get_nodes():
-                        inputs_name.append(op.attributes["in_name"][idx])
-                        inputs_dtype.append(
-                            op.attributes["in_cdtype"][idx]
-                        )
-            if is_output:
-                for idx in range(len(node.outputs())):
-                    outputs_name.append(op.attributes["out_name"][idx])
-                    outputs_dtype.append(
-                        op.attributes["out_cdtype"][idx]
-                    )
-                    outputs_size.append(op.attributes["out_size"][idx])
-
-        func_name = "model_forward"
-        ROOT = Path(__file__).resolve().parents[0]
-        generate_file(
-            str(dnn_folder / "src" / "forward.cpp"),
-            str(ROOT / "templates" / "forward.jinja"),
-            func_name=func_name,
-            headers=set(list_configs),
-            actions=list_actions,
-            mem_ctype=inputs_dtype[0],  # Legacy behavior ...
-            mem_section=export_lib.mem_section,
-            peak_mem=peak_mem,
-            inputs_name=inputs_name,
-            inputs_dtype=inputs_dtype,
-            outputs_name=outputs_name,
-            outputs_dtype=outputs_dtype
-        )
-
-        # Generate dnn API
-        generate_file(
-            str(dnn_folder / "include" / "forward.hpp"),
-            str(ROOT / "templates" / "forward_header.jinja"),
-            libraries=[],
-            func_name=func_name,
-            inputs_name=inputs_name,
-            inputs_dtype=inputs_dtype,
-            outputs_name=outputs_name,
-            outputs_dtype=outputs_dtype
-        )
-
-        if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
-            raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")
-
+def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib = None, memory_manager=None, memory_manager_args=None, test_mode=False) -> None:
+    """Exports an aidge_core.Scheduler to C++ code.
+
+    This function generates files for a given computation graph, including forward-pass functions,
+    configuration headers, and the main API entry point for the exported model. It requires a
+    memory manager to allocate resources, and optionally an `ExportLib` instance to handle backend
+    configurations for node operators.
+
+
+    1. **Export Preparation**:
+        - Initializes export and DNN folders, checking that required memory management functions are defined.
+        - Retrieves peak memory usage and memory details for each node using the `memory_manager`.
+
+    2. **Configuration Generation**:
+        - Iterates over nodes scheduled by `scheduler`, configuring backends if `export_lib` is specified.
+        - Exports configuration headers and forward-pass actions for each node by invoking `op.export()`
+            and `op.forward()`, appending these to `list_configs` and `list_actions`, respectively.
+        - Collects information on input and output nodes, including their names, data types, and sizes.
+
+    3. **Code Generation**:
+        - Defines the forward-pass function, `model_forward`, with inputs and outputs based on node attributes.
+        - Generates the following files:
+
+            - **forward.cpp**: Implements the model forward pass using templates, applying configurations
+            and actions for each node.
+
+            - **forward.hpp**: Exports the forward API, defining inputs and outputs.
+
+            - **main.cpp**: Main entry file, serving as the model's forward-pass interface.
+
+    4. **Static File Export (Optional)**:
+        - If `export_lib` is specified, static files are copied to the export folder based on `export_lib`
+        specifications.
+
+
+    :param scheduler: Scheduler instance managing the computation graph.
+                    Uses `graph_view` and `get_static_scheduling` methods
+                    to retrieve the computation graph layout and ordered nodes.
+    :type scheduler: aidge_core.Scheduler
+    :param export_folder_path: Path to the folder where the generated export files will be saved.
+                            Creates this folder, along with subdirectories for model and source files.
+    :type export_folder_path: str
+    :param export_lib: Library providing the backend implementation for node operators.
+                    Defaults to None. If provided, each node's backend is set to the library's name.
+    :type export_lib: ExportLib, optional
+    :param memory_manager: Required function for managing memory allocation. It should take
+                        `scheduler` and optional `memory_manager_args` as parameters, returning
+                        `peak_mem` (peak memory usage) and `mem_info` (memory details for each node).
+    :type memory_manager: callable
+    :param memory_manager_args: Additional arguments passed to `memory_manager`. Defaults to an empty dictionary.
+    :type memory_manager_args: dict, optional
+    :param test_mode: Additional argument which may be used during forward generation.
+    :type test_mode: bool, optional
+    """
+    graphview = scheduler.graph_view()
+    export_folder = Path().absolute() / export_folder_path
+
+    os.makedirs(str(export_folder), exist_ok=True)
+
+    dnn_folder = export_folder / "dnn"
+    os.makedirs(str(dnn_folder), exist_ok=True)
+
+    if memory_manager_args is None:
+        memory_manager_args = {}
+
+    if memory_manager is None:
+        raise ValueError("A memory manager is required (no default value yet).")
+    peak_mem, mem_info = memory_manager(
+        scheduler, **memory_manager_args)
+
+    # List of function call for forward.cpp
+    list_actions: List[str] = []
+    # List of headers for forward.cpp
+    list_configs: List[str] = []
+
+    inputs_name: List[str] = []
+    inputs_dtype: List[str] = []
+    outputs_name: List[str] = []
+    outputs_dtype: List[str] = []
+    outputs_size: List[int] = []
+
+    # List of aidge_core.Node ordered by scheduler
+    list_forward_nodes: List[aidge_core.Node] = scheduler.get_static_scheduling()
+
+    # If export_lib is defined, use it
+    # else parse component in platform
+    # if export_lib is None:
+    #     raise ValueError("Export need an ExportLib.")
+    for node in list_forward_nodes:
         if export_lib is not None:
-            # Copy all static files in the export
-            for source, destination in export_lib.static_files.items():
-                copy_file(source, str(export_folder / destination))
+            aidge_core.Log.debug(f"Setting backend {export_lib._name} to {node.name()}[{node.type()}].")
+            node.get_operator().set_backend(export_lib._name)
+
+        op_impl = node.get_operator().get_impl()
+        if op_impl is None:
+            raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an implementation.")
+        if not isinstance(op_impl, ExportLib):
+            raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an exportable backend ({op_impl}).")
+
+        is_input:bool  = node in graphview.get_input_nodes()
+        is_output:bool = node in graphview.get_output_nodes()
+
+        if is_input:
+            # GraphView.get_input_nodes() returns the nodes that have an Input set to None or not in the graph
+            # However, some inputs are Optional and thus the node may not be an input of the graph!
+            # So we need to check that every input of the node is either in the graph or optional
+            # This is what the following code block is checking.
+            for idx, node_in in enumerate(node.inputs()):
+                optional:bool = node.get_operator().input_category(idx) == aidge_core.InputCategory.OptionalData
+                # Note: node_in is a Tuple(Node, out_idx)
+                in_graph:bool = node_in[0] in graphview.get_nodes()
+                is_input &= (in_graph or not optional)
+
+        # Get operator current specs
+        required_specs = op_impl.get_required_spec()
+        # Get specs of the implementation that match current specs
+        specs = op_impl.get_best_match(required_specs)
+        # Retrieve said implementation
+        export_node = op_impl.get_export_node(specs)
+
+        if export_node is None:
+            raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].")
+        # Instantiate ExportNode
+        op = export_node(node, mem_info[node])
+
+        # For configuration files
+        list_configs += op.export(dnn_folder)
+        # For forward file
+        list_actions += op.forward()
+        if is_input:
+            for idx, node_in in enumerate(node.inputs()):
+                if node_in[0] not in graphview.get_nodes():
+                    inputs_name.append(op.attributes["in_name"][idx])
+                    inputs_dtype.append(
+                        op.attributes["in_cdtype"][idx]
+                    )
+        if is_output:
+            for idx in range(len(node.outputs())):
+                outputs_name.append(op.attributes["out_name"][idx])
+                outputs_dtype.append(
+                    op.attributes["out_cdtype"][idx]
+                )
+                outputs_size.append(op.attributes["out_size"][idx])
+
+    func_name = "model_forward"
+    ROOT = Path(__file__).resolve().parents[0]
+
+    forward_template = str(ROOT / "templates" / "forward.jinja")
+    if export_lib.forward_template != None:
+        forward_template = export_lib.forward_template
+
+    list_node_names = []
+    for node in list_forward_nodes:
+        if node.type() != "Producer":
+            list_node_names.append(node.name())
+
+    generate_file(
+        str(dnn_folder / "src" / "forward.cpp"),
+        forward_template,
+        func_name=func_name,
+        headers=set(list_configs),
+        actions=list_actions,
+        # Note: Graph may not have inputs, so we need to check with output
+        # In the future, we should remove this as it is not compatible
+        # with a mix precision approach.
+        mem_ctype=outputs_dtype[0],  # Legacy behavior ...
+        mem_section=export_lib.mem_section,
+        peak_mem=peak_mem,
+        inputs_name=inputs_name,
+        inputs_dtype=inputs_dtype,
+        outputs_name=outputs_name,
+        outputs_dtype=outputs_dtype,
+        test_mode=test_mode,
+        list_node_names=list_node_names
+    )
+
+    forward_header_template = str(ROOT / "templates" / "forward_header.jinja")
+    if export_lib.forward_header_template != None:
+        forward_header_template = export_lib.forward_header_template
+
+    # Generate dnn API
+    generate_file(
+        str(dnn_folder / "include" / "forward.hpp"),
+        forward_header_template,
+        libraries=[],
+        func_name=func_name,
+        inputs_name=inputs_name,
+        inputs_dtype=inputs_dtype,
+        outputs_name=outputs_name,
+        outputs_dtype=outputs_dtype,
+        test_mode=test_mode
+    )
+
+    if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
+        raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")
+
+    if export_lib is not None:
+        # Copy all static files in the export
+        for source, destination in export_lib.static_files.items():
+            copy_file(source, str(export_folder / destination))
diff --git a/aidge_core/export_utils/templates/main.jinja b/aidge_core/export_utils/templates/main.jinja
index d0c22719a5d9b9eaa15d3ef9ef86307a060b54be..697a97b53ef05eedddf5ac66f262581475e655c6 100644
--- a/aidge_core/export_utils/templates/main.jinja
+++ b/aidge_core/export_utils/templates/main.jinja
@@ -26,7 +26,7 @@ int main()
     {% endfor %}
 
     // Call the forward function
-    {{ func_name }}({{ inputs_name|join(", ") }}, &{{ outputs_name|join(", &") }});
+    {{ func_name }}({{ inputs_name|join(", ") }}{% if inputs_name %}, {% endif %}&{{ outputs_name|join(", &") }});
 
     // Print the results of each output
     {%- for o in range(outputs_name | length) %}
diff --git a/aidge_core/mem_info.py b/aidge_core/mem_info.py
index 1f2f48dd64aba276e70940d5bf461da6f50a4b38..1a800f9488905a4aed3a907deca8327979effbe4 100644
--- a/aidge_core/mem_info.py
+++ b/aidge_core/mem_info.py
@@ -42,6 +42,26 @@ def compute_default_mem_info(scheduler: aidge_core.Scheduler) -> Tuple[int, List
 
 
 def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder: Path, wrapping: bool = False) -> Tuple[int, List[dict]]:
+    """Generates optimized memory information for a computation graph managed by a scheduler.
+
+    This function analyzes the memory usage of a computation graph, determining the memory peak
+    and detailed memory management information for each node in the scheduler. It supports optional
+    wrapping of memory buffers and logs memory usage statistics to facilitate memory optimization.
+
+    :param scheduler: Scheduler instance that organizes the computation graph. It manages the
+                      nodes and facilitates memory planning by invoking its `generate_memory` method.
+    :type scheduler: aidge_core.Scheduler
+    :param stats_folder: Directory path to store memory statistics and plots generated by `mem_manager`.
+                         If provided as a string, it is converted to a `Path` object.
+    :type stats_folder: Path
+    :param wrapping: Boolean flag to enable or disable wrap-around buffer optimization.
+                     Defaults to `False`.
+    :type wrapping: bool, optional
+    :return: A tuple containing the peak memory size and a list of memory information for each
+             scheduled node. The memory information for each node includes details such as size,
+             offset, stride, length, count, and optional wrap-around details.
+    :rtype: Tuple[int, List[dict]]
+    """
     # The forward dims has to done outside the function
     # Also supposed the generation of the scheduler has been performed outside
     # Otherwise decomment the following line
diff --git a/aidge_core/simplify_graph.py b/aidge_core/simplify_graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..30ee04e6c62c1dfd465d425d1b430d2b5813383b
--- /dev/null
+++ b/aidge_core/simplify_graph.py
@@ -0,0 +1,56 @@
+import numpy as np
+import aidge_core
+
+def simplify_graph(graph: aidge_core.GraphView):
+    """
+    Simplify a graph loaded from ONNX.
+
+    :param graph: The GraphView to simplify.
+    :type graph: aidge_core.GraphView
+    """
+
+    def check_constant_producer(value):
+        def _check_constant_producer(node):
+            out = node.get_operator().get_output(0)
+            return (len(out) == 1 and np.isclose(out[0], value))
+        return _check_constant_producer
+
+    gm = aidge_core.SinglePassGraphMatching(graph)
+    gm.add_node_lambda("Constant_sqrt2", check_constant_producer(np.sqrt(2)))
+    gm.add_node_lambda("Constant_1", check_constant_producer(1))
+    gm.add_node_lambda("Constant_0_5", check_constant_producer(0.5))
+
+    # Linear [from PyTorch ONNX]
+    aidge_core.fuse_to_metaops(gm, "MatMul-*>Add", "Linear")
+
+    # LayerNorm [from PyTorch ONNX]
+    aidge_core.fuse_to_metaops(gm, "ReduceMean-*>Sub#1~>(Pow#1->ReduceMean-*>Add#1->Sqrt)-*>Div#1-*>Mul#1-*>Add#2;"
+                                   "Sub#1~*>Div#1;"
+                                   "Pow#1<1~Producer;"
+                                   "Add#1<*~Producer;"
+                                   "Mul#1<*~Producer;"
+                                   "Add#2<*~Producer;"
+                                   "Sub#1~>$", "LayerNorm")
+
+    # ScaledDotProductAttention [from PyTorch ONNX]
+    aidge_core.fuse_to_metaops(gm, "MatMul->Div#1->Softmax-*>MatMul;"
+                                   "Div#1<1~Producer", "ScaledDotProductAttention")
+
+    # MultiHeadAttention [from PyTorch ONNX]
+    aidge_core.fuse_to_metaops(gm, "ScaledDotProductAttention#1->Transpose->Reshape#1->Linear;"
+                                   "Reshape#1<1~Producer;"
+                                   "ScaledDotProductAttention#1<0-(Transpose<-Reshape#2<-Add#1);"
+                                   "ScaledDotProductAttention#1<1-(Transpose<-Reshape#3<-Add#2);"
+                                   "ScaledDotProductAttention#1<2-(Transpose<-Reshape#4<-Add#3);"
+                                   "Reshape#2<1~Producer;"
+                                   "Add#1<*-0-Split#1;"
+                                   "Add#2<*-1-Split#1;"
+                                   "Add#3<*-2-Split#1;"
+                                   "Split#1<-MatMul;"
+                                   "Split#1<1~Producer", "MultiHeadAttention")
+
+    # GeLU [from PyTorch ONNX]
+    aidge_core.fuse_to_metaops(gm, "Div#1->Erf->Add#1-*>Mul->Mul#2;"
+                                   "Div#1<1~Producer[Constant_sqrt2];"
+                                   "Add#1<*~Producer[Constant_1];"
+                                   "Mul#2<*~Producer[Constant_0_5]", "GeLU")
diff --git a/aidge_core/static_analysis.py b/aidge_core/static_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..c65a102a10601605cd2ca988a2ad3cf2cbd00e6e
--- /dev/null
+++ b/aidge_core/static_analysis.py
@@ -0,0 +1,195 @@
+import matplotlib
+import matplotlib.pyplot as plt
+from functools import partial
+import numpy as np
+import aidge_core
+
+class StaticAnalysisExt(aidge_core.StaticAnalysis):
+    def log_nb_params(self, filename, title=None, log_scale=False):
+        namePtrTable = self.get_graph().get_ranked_nodes_name("{0} ({1}#{3})");
+        nodes = self.get_graph().get_ordered_nodes()
+        series = []
+        legend = None
+
+        for node in nodes:
+            if node.type() == "Producer":
+                continue
+
+            name = namePtrTable[node]
+            series.append([name, self.get_nb_params(node)])
+
+        if title is None: title = "log_nb_params"
+        if filename is not None:
+            self._log_bar(series, filename, title, legend, log_scale)
+        return series
+
+    def log_params_size(self, filename, title=None, log_scale=False):
+        namePtrTable = self.get_graph().get_ranked_nodes_name("{0} ({1}#{3})");
+        nodes = self.get_graph().get_ordered_nodes()
+        series = []
+        legend = None
+
+        for node in nodes:
+            if node.type() == "Producer":
+                continue
+
+            name = namePtrTable[node]
+            series.append([name, self.log_params_size(node)])
+
+        if title is None: title = "log_params_size"
+        if filename is not None:
+            self._log_bar(series, filename, title, legend, log_scale)
+        return series
+
+    def log_nb_arithm_ops(self, filename, title=None, log_scale=False):
+        return self._log_callback(aidge_core.OperatorStats.get_nb_arithm_ops, filename, title, log_scale)
+
+    def log_nb_logic_ops(self, filename, title=None, log_scale=False):
+        return self._log_callback(aidge_core.OperatorStats.get_nb_logic_ops, filename, title, log_scale)
+
+    def log_nb_comp_ops(self, filename, title=None, log_scale=False):
+        return self._log_callback(aidge_core.OperatorStats.get_nb_comp_ops, filename, title, log_scale)
+
+    def log_nb_nl_ops(self, filename, title=None, log_scale=False):
+        return self._log_callback(aidge_core.OperatorStats.get_nb_nl_ops, filename, title, log_scale)
+
+    def log_nb_mac_ops(self, filename, title=None, log_scale=False):
+        return self._log_callback(aidge_core.OperatorStats.get_nb_mac_ops, filename, title, log_scale)
+
+    def log_nb_ops(self, filename, title=None, log_scale=False):
+        return self._log_callback(aidge_core.OperatorStats.get_nb_ops, filename, title, log_scale)
+
+    def log_nb_arithm_int_ops(self, filename, title=None, log_scale=False):
+        return self._log_callback(aidge_core.OperatorStats.get_nb_arithm_int_ops, filename, title, log_scale)
+
+    def log_nb_arithm_fp_ops(self, filename, title=None, log_scale=False):
+        return self._log_callback(aidge_core.OperatorStats.get_nb_arithm_fp_ops, filename, title, log_scale)
+
+    def log_nb_ops_by_type(self, filename, title=None, log_scale=False):
+        return self._log_callback([aidge_core.OperatorStats.get_nb_arithm_int_ops,
+                                  aidge_core.OperatorStats.get_nb_arithm_fp_ops,
+                                  aidge_core.OperatorStats.get_nb_logic_ops,
+                                  aidge_core.OperatorStats.get_nb_comp_ops,
+                                  aidge_core.OperatorStats.get_nb_nl_ops], filename, title, log_scale)
+
+    def _log_callback(self, callback, filename, title=None, log_scale=False):
+        """
+        Log a statistic given by an OperatorStats callback member function.
+        Usage:
+
+            stats = StaticAnalysisExt(model)
+            stats._log_callback(aidge_core.OperatorStats.get_nb_params, "stats.png", "Nb params per operator")
+
+        :param callback: OperatorStats member function to call, or a list of such functions.
+        :param filename: Output graph file name.
+        :type filename: str
+        :param title: Title of the graph.
+        :type title: str
+        """
+
+        namePtrTable = self.get_graph().get_ranked_nodes_name("{0} ({1}#{3})");
+        nodes = self.get_graph().get_ordered_nodes()
+        series = []
+        legend = None
+
+        for node in nodes:
+            if node.type() == "Producer":
+                continue
+
+            stats = self.get_op_stats(node)
+            name = namePtrTable[node]
+            attr = {}
+            if type(node.get_operator()) is aidge_core.GenericOperatorOp:
+                # Display Generic Op in orange
+                attr = {'color': 'orange'}
+            elif not node.get_operator().is_atomic():
+                # Display Meta Op in bold
+                attr = {'fontweight': 'bold'}
+            elif node.type() not in aidge_core.get_keys_OperatorStats():
+                # Display unsupported operator in red labels
+                attr = {'color': 'red'}
+            if attr:
+                name = (name, attr)
+            if isinstance(callback, list):
+                series.append([name, [partial(cb, stats)() for cb in callback]])
+                legend = [cb.__name__ for cb in callback]
+                if title is None: title = str(legend)
+            else:
+                series.append([name, partial(callback, stats)()])
+                if title is None: title = callback.__name__
+
+        if title is None: title = str(callback)
+        if filename is not None:
+            self._log_bar(series, filename, title, legend, log_scale)
+        return series
+
+    def _log_bar(self, series, filename, title=None, legend=None, log_scale=False):
+        names, values = zip(*series)
+        names_only = [item[0] if isinstance(item, tuple) else item for item in names]
+        fig, ax = plt.subplots(figsize=(max(5, len(names)/4), 5))
+        plt.xlim(-0.5, len(names) - 0.5)
+        if isinstance(values[0], list):
+            series = [list(i) for i in zip(*values)]
+            bot = np.zeros(len(series[0]))
+            for i, serie in enumerate(series):
+                plt.bar(names_only, serie, bottom=bot)
+                bot += serie
+        else:
+            plt.bar(names_only, values)
+        ax.yaxis.minorticks_on()
+        plt.grid(axis='y', which='major', linestyle='--', color='gray')
+        plt.grid(axis='y', which='minor', linestyle=':', color='lightgray')
+        formatter0 = matplotlib.ticker.EngFormatter(unit='')
+        ax.yaxis.set_major_formatter(formatter0)
+        plt.gca().set_axisbelow(True)
+
+        labels = plt.gca().get_xticks()
+        tick_labels = plt.gca().get_xticklabels()
+        for i, label in enumerate(labels):
+            if isinstance(names[i], tuple):
+                if 'color' in names[i][1]:
+                    tick_labels[i].set_color(names[i][1]['color'])
+                elif 'fontweight' in names[i][1]:
+                    tick_labels[i].set_fontweight(names[i][1]['fontweight'])
+
+        plt.xticks(rotation='vertical')
+        if log_scale: plt.yscale('log')
+        if title is not None: plt.title(title)
+        if legend is not None: plt.legend(legend)
+        plt.savefig(filename, bbox_inches='tight')
+
+    def _log_barh(self, series, filename, title=None, legend=None, log_scale=False):
+        names, values = zip(*series)
+        names_only = [item[0] if isinstance(item, tuple) else item for item in names]
+        fig, ax = plt.subplots(figsize=(10, max(5, len(names)/4)))
+        plt.ylim(-0.5, len(names) - 0.5)
+        if isinstance(values[0], list):
+            series = [list(i) for i in zip(*values)]
+            left = np.zeros(len(series[0]))
+            for i, serie in enumerate(series):
+                plt.barh(names_only, serie, left=left)
+                left += serie
+        else:
+            plt.barh(names_only, values)
+        ax.xaxis.minorticks_on()
+        plt.grid(axis='x', which='major', linestyle='--', color='gray')
+        plt.grid(axis='x', which='minor', linestyle=':', color='lightgray')
+        formatter0 = matplotlib.ticker.EngFormatter(unit='')
+        ax.xaxis.set_major_formatter(formatter0)
+        plt.gca().set_axisbelow(True)
+        plt.gca().xaxis.set_label_position('top')
+        plt.gca().xaxis.tick_top()
+
+        labels = plt.gca().get_yticks()
+        tick_labels = plt.gca().get_yticklabels()
+        for i, label in enumerate(labels):
+            if isinstance(names[i], tuple):
+                if 'color' in names[i][1]:
+                    tick_labels[i].set_color(names[i][1]['color'])
+                elif 'fontweight' in names[i][1]:
+                    tick_labels[i].set_fontweight(names[i][1]['fontweight'])
+
+        if log_scale: plt.xscale('log')
+        if title is not None: plt.title(title)
+        if legend is not None: plt.legend(legend)
+        plt.savefig(filename, bbox_inches='tight')
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index acf5b950edadcf67e29b60cf316dac1b10cded8e..623a520f99e8b6e506f19ef2426cd5789287e320 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -52,6 +52,9 @@ enum class DataType {
     Any
 };
 
+bool isDataTypeFloatingPoint(const DataType& type);
+size_t getDataTypeBitWidth(const DataType& type);
+
 enum class DataFormat {
     Default,
     NCHW,
diff --git a/include/aidge/data/Interpolation.hpp b/include/aidge/data/Interpolation.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2d53ebdd0dd5141acc9a3bce8e906f42f7a557a2
--- /dev/null
+++ b/include/aidge/data/Interpolation.hpp
@@ -0,0 +1,151 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_INTERPOLATION_H_
+#define AIDGE_CORE_UTILS_INTERPOLATION_H_
+
+#include <cstdint>  // std::int64_t
+#include <utility>  // std::pair
+#include <vector>
+
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+/** @brief generic class to hold interpolation */
+class Interpolation {
+  public:
+    /**
+     * @brief simple type alias to describe coordinates
+     * @note the indexes are deliberately chosen to be signed values as some
+     * points retrieved by interpolation are out of bound, hence their coords
+     * can be < 0
+     */
+    using Coords = std::vector<std::int64_t>;
+    /**
+     * @brief type alias to designate a point of any type : hence coordinates &
+     * associated value
+     */
+    template <class T> using Point = std::pair<Coords, T>;
+
+    /**
+     * @brief details how coordinates are transformed from interpolated tensor
+     * to original tensor
+     */
+    enum CoordinateTransformation {
+        HalfPixel,
+        HalfPixelSymmetric,
+        PytorchHalfPixel,
+        AlignCorners,
+        Asymmetric,
+    };
+
+    /**
+     * @brief apply transformation to coords in interpolated Tensor to find
+     * equivalent coordinates in original tensor reference frame.
+     * @warning it is assumed that all parameters have the same
+     * number of dimensions.
+     * @param[in] transformedCoords : coords in interpolated tensor
+     * @param[in] inputDims: input dimensions of tensor
+     * @param[in] outputDims: output dimensions of tensor
+     * @return std::vector containing coords in original tensor reference frame
+     */
+    static std::vector<float> untransformCoordinates(
+        const std::vector<DimSize_t> &transformedCoords,
+        const std::vector<DimSize_t> &inputDims,
+        const std::vector<DimSize_t> &outputDims,
+        const Interpolation::CoordinateTransformation coordTransfoMode);
+
+    /**
+     * @brief retrieves neighbouring value of a given index
+     * @param[in] tensorValues raw pointer of the tensor values
+     * retrieved with
+     * @code
+     * tensor->getImpl()->rawPtr()
+     * @endcode
+     * @param[in] tensorDimensions dimensions of given tensor
+     * retrieved with
+     * @code
+     * tensor->dims()
+     * @endcode
+     * @param[in] coords coordinates in the tensor of the values we want to
+     * find the neighbours of.
+     * @return static std::vector<std::pair<std::vector<DimSize_t>, T>>
+     * containing both indexes of neighbours & their values
+     */
+    template <typename T>
+    static std::set<Point<T>>
+    retrieveNeighbours(const T *tensorValues,
+                       const std::vector<DimSize_t> &tensorDims,
+                       const std::vector<float> &coords,
+                       const PadBorderType paddingMode = PadBorderType::Zero);
+
+    /** @brief interpolation type */
+    enum Mode {
+        Cubic,
+        Linear,
+        RoundPreferFloor,
+        RoundPreferCeil,
+        Floor,
+        Ceil
+    };
+
+    /**
+     * @brief Interpolates values given via input in given mode.
+     *
+     * @warning This function is empty and is meant to be overriden in derived
+     * class in backend libraries.
+     *
+     * Values are contiguously arranged in a "square" shape around the point to
+     * interpolate, depending on the interpolation mode.
+     * The point that will be interpolated is located right in the
+     * middle of all points.
+     * Immediate neighbours :
+     * 1D interp :     2D interp :
+     *                 . . . . . .
+     * . . 1 2 . .     . . . . . .
+     *                 . . 1 2 . .
+     *                 . . 3 4 . .
+     *                 . . . . . .
+     *                 . . . . . .
+     *
+     * 2 neighbours :
+     * 1D interp :         2D interp :
+     *                   .  .  .  .  .  .  . .
+     *                   .  .  .  .  .  .  . .
+     * . . 1 2 3 4 . .   .  .  1  2  3  4  . .
+     *                   .  .  5  6  7  8  . .
+     *                   .  .  9 10 11 12  . .
+     *                   .  . 13 14 15 16  . .
+     *                   .  .  .  .  .  .  . .
+     *                   .  .  .  .  .  .  . .
+     *
+     * @param[in] originalIndex: index of the point in the original picture
+     * Since the coord are being transformed from the interpolatedTensor frame
+     * to originalTensor frame, the result might be in float.
+     * @param[in] points : points to interpolate, arranged in a vector of a
+     * pairs ((point_coord), value) :
+     * [[[X1, X2, ..., XN], Xval], ...., [[A1, A2, ..., AN],Aval]].
+     * With :
+     * - N: the number of dimensions.
+     * - A: the number of points of the grid to interpolate.
+     * - All coordinates expressed in originalTensor frame.
+     * @param[in] interpMode: interpolation mode
+     * @return interpolated value
+     */
+    template <typename T>
+    [[noreturn]] static T interpolate(const std::vector<float> &originalIndex,
+                                      const std::vector<Point<T>> &points,
+                                      const Mode interpMode);
+};
+} // namespace Aidge
+
+#endif
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 58e893ca5d5339d93799415f076dd69d54db69ca..5c84f52e052e67ca27bfc851f510e522d485e4b7 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -12,6 +12,7 @@
 #ifndef AIDGE_CORE_DATA_TENSOR_H_
 #define AIDGE_CORE_DATA_TENSOR_H_
 
+#include <algorithm>
 #include <cstddef>      // std::size_t
 #include <cstring>
 #include <functional>   // std::multiplies
@@ -24,10 +25,10 @@
 
 #include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
-
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/utils/ArrayHelpers.hpp"
 
 namespace Aidge {
 /**
@@ -486,13 +487,14 @@ public:
     /**
      * @brief Return the current capacity of the tensor, i.e. the actual memory
      * currently being allocated. It can be different from the size:
+     * - Capacity conservatively returns 0 if no implementation is provided.
      * - Capacity can be 0 if the tensor memory was not yet initialized (because
      *   of lazy initialization, memory is allocated only when it needs to be
      *   accessed the first time).
      * - Capacity can be > size if the tensor was downsized but memory was not
      *   reallocated.
     */
-    inline std::size_t capacity() const noexcept { return mImpl->capacity(); }
+    inline std::size_t capacity() const noexcept { return mImpl ? mImpl->capacity(): 0; }
 
 
     /**
@@ -561,9 +563,9 @@ public:
 
     template <typename expectedType>
     const expectedType& get(std::size_t idx) const {
-        AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
-        AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "get() can only be used for backends providing a valid host pointer");
-        AIDGE_ASSERT(idx < mSize, "idx out of range");
+        AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "Tensor::get<>({}): wrong data type, expected {}, got {}", idx, mDataType, NativeType<expectedType>::type);
+        AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "Tensor::get<>({}): can only be used for backends providing a valid host pointer.", idx);
+        AIDGE_ASSERT(idx < mSize, "Tensor::get<>({}): idx out of range, tensor size {}", idx, mSize);
         return *reinterpret_cast<expectedType *>(mImpl->hostPtr(mImplOffset + idx));
     }
 
@@ -620,20 +622,41 @@ public:
      * @brief From the the 1D contiguous index, return the coordinate of an element in the tensor.
      * Beware: do not use this function with the storage index!
      *
-     * @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor.
+     * @param index 1D contiguous index of the value considering a flatten, contiguous, tensor.
      * @return std::vector<DimSize_t>
      */
-    std::vector<std::size_t> getCoord(std::size_t flatIdx) const {
-        std::vector<std::size_t> coordIdx(mDims.size());
-        std::size_t i = mDims.size();
+    static std::vector<std::size_t>
+    toCoord(const std::vector<Aidge::DimSize_t> &dimensions, std::size_t index);
+
 
-        while (i-- > 0) {
-            coordIdx[i] = (flatIdx % mDims[i]);
-            flatIdx/=mDims[i];
+    /**
+     * @brief From the 1D contiguous index, return the coordinate of an element in the tensor.
+     * Beware: do not use this function with the storage index!
+     *
+     * @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor.
+     * @return std::vector<DimSize_t>
+     */
+    std::vector<std::size_t> getCoord(std::size_t index) const {
+        if (isInBounds(mDims, index)) {
+            return toCoord(mDims, index);
+        } else {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Out of bound coordinates.");
         }
-        return coordIdx;
     }
 
+    /**
+     * @brief From the coordinate returns the 1D contiguous index of an element in the tensor.
+     * If the number of coordinates is inferior to the number of dimensions,
+     * the remaining coordinates are assumed to be 0.
+     * Beware: the contiguous index will only correspond to the storage index
+     * if the tensor is contiguous!
+     * Note that the coordIdx may be an empty vector.
+     *
+     * @param coords Coordinate to an element in the tensor
+     * @return DimSize_t Contiguous index
+     */
+    static std::size_t toIndex(const std::vector<DimSize_t>& dimensions, const std::vector<std::size_t>& coords);
+
     /**
      * @brief From the coordinate returns the 1D contiguous index of an element in the tensor.
      * If the number of coordinates is inferior to the number of dimensions,
@@ -645,18 +668,27 @@ public:
      * @param coordIdx Coordinate to an element in the tensor
      * @return DimSize_t Contiguous index
      */
-    std::size_t getIdx(const std::vector<std::size_t>& coordIdx) const {
-        AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
-        std::size_t flatIdx = 0;
-        for(std::size_t i = 0; i < mDims.size(); ++i) {
-            auto coord = i < coordIdx.size() ? coordIdx[i]: 0;
-            AIDGE_ASSERT(coord < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
-            auto nextDimSize  = i + 1 < mDims.size() ? mDims[i + 1]: 1;
-            flatIdx = (flatIdx + coord) * nextDimSize;
+    std::size_t getIdx(const std::vector<std::size_t>& coords) const {
+        if (isInBounds<std::size_t>(mDims, coords)) {
+            return toIndex(mDims, coords);
+        } else {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Out of bound coordinates.");
         }
-        return flatIdx;
     }
 
+    /**
+     * @brief check if index is in bound of given tensor dimensions
+     * @warning this function is templated in order to accommodate cases like interpolation where indexes are not integers.
+     * However, the only types accepted are floating, integer & size_t
+     * @param tensorDims : tensor dimensions
+     * @param coords : coords of the tensor element you want the flattened index of
+     * @return true if all coords are in bound. False otherwise
+     */
+    template<typename T>
+    static bool isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<T>& coords);
+
+    static bool isInBounds(const std::vector<DimSize_t>& dimensions, const std::size_t index);
+
     /**
      * @brief From the coordinate returns the 1D storage index of an element in the tensor.
      * If the number of coordinates is inferior to the number of dimensions,
diff --git a/include/aidge/data/half_fmt.hpp b/include/aidge/data/half_fmt.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..5e2072038c10fffd8db5f7fe93381f002f2119b1
--- /dev/null
+++ b/include/aidge/data/half_fmt.hpp
@@ -0,0 +1,18 @@
+#include "aidge/data/half.hpp"
+#include <fmt/core.h>
+
+// Specialize fmt::formatter for half_float::half
+template <>
+struct fmt::formatter<half_float::half> : fmt::formatter<float> {
+    // Parses the format specifications and stores them in the base formatter
+    template <typename ParseContext>
+    constexpr auto parse(ParseContext& ctx) {
+        return fmt::formatter<float>::parse(ctx);
+    }
+
+    // Formats the half type by first converting it to float
+    template <typename FormatContext>
+    auto format(const half_float::half& value, FormatContext& ctx) const {
+        return fmt::formatter<float>::format(static_cast<float>(value), ctx);
+    }
+};
diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp
index 951aa6b29d73d9055cf9f13c8ddc6313cb506879..b846af10b87b4088dab7fee41187ded91bf531d1 100644
--- a/include/aidge/graph/Matching.hpp
+++ b/include/aidge/graph/Matching.hpp
@@ -154,13 +154,13 @@ public:
     */
     std::set<MatchingResult> filterLonguestDisjoint(const std::set<MatchingResult>& matches);
 
-    inline void addNodeLambda(const std::string& name, bool(func)(const NodePtr&)) {
+    inline void addNodeLambda(const std::string& name, std::function<bool(const NodePtr&)> func) {
         mLambda[name] = func;
     }
 
 private:
     std::shared_ptr<GraphView> mGraph;
-    std::map<std::string, bool(*)(const NodePtr&)> mLambda;
+    std::map<std::string, std::function<bool(const NodePtr&)>> mLambda;
 
     /**
      * QUANTIFIER = '?' | '*' | '+' | ('{' [0-9]+ '}')
diff --git a/include/aidge/graph/StaticAnalysis.hpp b/include/aidge/graph/StaticAnalysis.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d92356b72b8f1408c3084f9afa6f467d2043e620
--- /dev/null
+++ b/include/aidge/graph/StaticAnalysis.hpp
@@ -0,0 +1,571 @@
+
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_GRAPH_STATICANALYSIS_H_
+#define AIDGE_CORE_GRAPH_STATICANALYSIS_H_
+
+#include <memory>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+#include "aidge/operator/AvgPooling.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/ReduceMean.hpp"
+#include "aidge/operator/ReduceSum.hpp"
+#include "aidge/operator/Softmax.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+
+namespace Aidge {
+/**
+ * @brief Base class to compute statistics from an Operator.
+ * 
+ */
+class OperatorStats : public Registrable<OperatorStats, std::string, std::function<std::shared_ptr<OperatorStats>(const Operator&)>> {
+public:
+    OperatorStats(const Operator& op);
+    const Operator& getOperator() const noexcept { return mOp; }
+
+    /**
+     * @brief Get the worst case total number of arithmetic operations for the 
+     * operator data flow. This includes base arithmetic operations: +, -, / and *.
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * Example of Operator with only arithmetic operations: Conv.
+     * 
+     * @return size_t Number of arithmetic operations.
+     */
+    virtual size_t getNbArithmOps() const { return 2 * getNbMACOps(); };
+
+    /**
+     * @brief Get the worst case total number of logic operations for the 
+     * operator data flow. This includes operations like logical shift, or, and...
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * Example of Operator with only logic operations: BitShift.
+     * 
+     * @return size_t Number of logic operations.
+     */
+    virtual size_t getNbLogicOps() const { return 0; };
+
+    /**
+     * @brief Get the worst case total number of comparison operations for the 
+     * operator data flow. This includes operations like <, >, =...
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * Example of Operator with only comparison operations: MaxPool.
+     * 
+     * @return size_t Number of comparison operations.
+     */
+    virtual size_t getNbCompOps() const { return 0; };
+
+    /**
+     * @brief Get the worst case total number of non-linear (NL) operations for the
+     * operator data flow. This includes operations like calls to tanh(), erf(), cos()...
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * Example of Operator with only NL operations: Tanh.
+     * Non-linear operations are necessarily of floating-point type.
+     * 
+     * @return size_t Number of non-linear (NL) operations.
+     */
+    virtual size_t getNbNLOps() const { return 0; };
+
+    /**
+     * @brief Get the worst case total number of operations for the operator data flow.
+     * Total number of operations = arithmetic ops + logic ops + comp ops + NL ops.
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * 
+     * @return size_t Number of operations.
+     */
+    size_t getNbOps() const { return getNbArithmOps() + getNbLogicOps() + getNbCompOps() + getNbNLOps(); };
+
+    /**
+     * @brief Get the worst case total number of INT arithmetic operations for
+     * the operator data flow.
+     * Such that getNbArithmOps() = getNbArithmIntOps() + getNbArithmFpOps()
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * 
+     * @return size_t Number of INT arithmetic operations.
+     */
+    virtual size_t getNbArithmIntOps() const;
+
+    /**
+     * @brief Get the worst case total number of FP arithmetic operations for 
+     * the operator data flow.
+     * Such that getNbArithmOps() = getNbArithmIntOps() + getNbArithmFpOps()
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * 
+     * @return size_t Number of FP arithmetic operations.
+     */
+    size_t getNbArithmFpOps() const { return getNbArithmOps() - getNbArithmIntOps(); };
+
+    /**
+     * @brief Get the worst case total number of MAC operations for the operator
+     * data flow. MAC operations are included in getNbArithmOps(), with 1 MAC 
+     * operation counted as 2 arithmetic operations. MAC can be INT or FP.
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * 
+     * @return size_t Number of MAC operations.
+     */
+    virtual size_t getNbMACOps() const { return 0; };
+    virtual ~OperatorStats() = default;
+
+protected:
+    const Operator &mOp;
+};
+
+/**
+ * @brief Base class to compute statistics from a GraphView
+ * 
+ */
+class StaticAnalysis : public std::enable_shared_from_this<StaticAnalysis> {
+public:
+    StaticAnalysis(std::shared_ptr<GraphView> graph);
+    const std::shared_ptr<GraphView> getGraph() const noexcept { return mGraph; }
+
+    /**
+     * @brief Get the Operator Stats object corresponding to the given node.
+     * 
+     * @param node Node
+     * @return std::shared_ptr<OperatorStats> Node's Operator stats
+     */
+    std::shared_ptr<OperatorStats> getOpStats(std::shared_ptr<Node> node) const;
+
+    /**
+     * @brief Get the number of parameters associated to a node. This includes
+     * all Producers directly connected to the node's inputs as well as all
+     * internal Producers (in case of a meta operator).
+     * 
+     * Note: this function does not check if parameters are shared between
+     * several nodes or not. This means that simply adding parameters count from
+     * several nodes may lead to a higher number of parameters than in reality
+     * if some of them are shared.
+     * 
+     * @param node Node
+     * @return size_t Number of parameters
+     */
+    virtual size_t getNbParams(std::shared_ptr<Node> node) const;
+
+    /**
+     * @brief Get the total parameters memory size, in bits, associated to a node.
+     * This includes all Producers directly connected to the node's inputs as 
+     * well as all internal Producers (in case of a meta operator).
+     * 
+     * Note: this function does not check if parameters are shared between
+     * several nodes or not. This means that simply adding parameters size from
+     * several nodes may lead to a higher parameter size than in reality
+     * if some of them are shared.
+     * 
+     * @param node Node
+     * @return size_t Total parameters memory, in bits
+     */
+    virtual size_t getParamsSize(std::shared_ptr<Node> node) const;
+
+    size_t getNbArithmOps() const { return accumulate(&OperatorStats::getNbArithmOps); }
+    size_t getNbLogicOps() const { return accumulate(&OperatorStats::getNbLogicOps); }
+    size_t getNbCompOps() const { return accumulate(&OperatorStats::getNbCompOps); }
+    size_t getNbNLOps() const { return accumulate(&OperatorStats::getNbNLOps); }
+    size_t getNbOps() const { return accumulate(&OperatorStats::getNbOps); }
+    size_t getNbArithmIntOps() const { return accumulate(&OperatorStats::getNbArithmIntOps); }
+    size_t getNbArithmFpOps() const { return accumulate(&OperatorStats::getNbArithmFpOps); }
+    size_t getNbMACOps() const { return accumulate(&OperatorStats::getNbMACOps); }
+    virtual void summary(bool incProducers = false) const;
+    virtual ~StaticAnalysis() = default;
+
+protected:
+    const std::shared_ptr<GraphView> mGraph;
+
+    size_t accumulate(size_t (OperatorStats::*func)() const) const;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+class MetaOpStats : public OperatorStats {
+public:
+    MetaOpStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<MetaOpStats> create(const Operator& op) {
+        return std::make_unique<MetaOpStats>(op);
+    }
+
+    size_t getNbArithmOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmOps(); }
+    size_t getNbLogicOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbLogicOps(); }
+    size_t getNbCompOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbCompOps(); }
+    size_t getNbNLOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbNLOps(); }
+    size_t getNbArithmIntOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmIntOps(); }
+    size_t getNbMACOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbMACOps(); }
+};
+
+template <class OP>
+class ConvStats : public OperatorStats {
+public:
+    ConvStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ConvStats<OP>> create(const Operator& op) {
+        return std::make_unique<ConvStats<OP>>(op);
+    }
+
+    size_t getNbMACOps() const override {
+        const OP& op_ = dynamic_cast<const OP&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+	    const std::size_t weightsSize = op_.getInput(1)->size();
+        const std::size_t outputSize
+            = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
+                              op_.getOutput(0)->dims().cend(),
+                              1,
+                              std::multiplies<size_t>()); // NCHW...
+        const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
+        return batchSize * (weightsSize * outputSize);
+    }
+};
+
+// Beware: cannot use Conv_Op<2>::Type as key because static variable initialization order is undefined!
+REGISTRAR(OperatorStats, "Conv1D", ConvStats<Conv_Op<1>>::create);
+REGISTRAR(OperatorStats, "ConvDepthWise1D", ConvStats<ConvDepthWise_Op<1>>::create);
+REGISTRAR(OperatorStats, "Conv2D", ConvStats<Conv_Op<2>>::create);
+REGISTRAR(OperatorStats, "ConvDepthWise2D", ConvStats<ConvDepthWise_Op<2>>::create);
+
+template <class OP>
+class MaxPoolingStats : public OperatorStats {
+public:
+    MaxPoolingStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<MaxPoolingStats<OP>> create(const Operator& op) {
+        return std::make_unique<MaxPoolingStats<OP>>(op);
+    }
+
+    size_t getNbCompOps() const override {
+        const OP& op_ = dynamic_cast<const OP&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+	    const std::size_t poolSize
+            = std::accumulate(op_.kernelDims().cbegin(),
+                              op_.kernelDims().cend(),
+                              1,
+                              std::multiplies<size_t>());
+        const std::size_t outputSize
+            = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
+                              op_.getOutput(0)->dims().cend(),
+                              1,
+                              std::multiplies<size_t>()); // NCHW...
+        const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
+        return batchSize * ((poolSize - 1) * outputSize);
+    }
+};
+
+REGISTRAR(OperatorStats, "MaxPooling1D", MaxPoolingStats<MaxPooling_Op<1>>::create);
+REGISTRAR(OperatorStats, "MaxPooling2D", MaxPoolingStats<MaxPooling_Op<2>>::create);
+REGISTRAR(OperatorStats, "MaxPooling3D", MaxPoolingStats<MaxPooling_Op<3>>::create);
+
+template <class OP>
+class AvgPoolingStats : public OperatorStats {
+public:
+    AvgPoolingStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<AvgPoolingStats<OP>> create(const Operator& op) {
+        return std::make_unique<AvgPoolingStats<OP>>(op);
+    }
+
+    size_t getNbArithmOps() const override {
+        const OP& op_ = dynamic_cast<const OP&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+	    const std::size_t poolSize
+            = std::accumulate(op_.kernelDims().cbegin(),
+                              op_.kernelDims().cend(),
+                              1,
+                              std::multiplies<size_t>());
+        const std::size_t outputSize
+            = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
+                              op_.getOutput(0)->dims().cend(),
+                              1,
+                              std::multiplies<size_t>()); // NCHW...
+        const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
+        // (poolSize - 1) additions + 1 division for each output
+        return batchSize * (poolSize * outputSize);
+    }
+};
+
+REGISTRAR(OperatorStats, "AvgPooling1D", AvgPoolingStats<AvgPooling_Op<1>>::create);
+REGISTRAR(OperatorStats, "AvgPooling2D", AvgPoolingStats<AvgPooling_Op<2>>::create);
+REGISTRAR(OperatorStats, "AvgPooling3D", AvgPoolingStats<AvgPooling_Op<3>>::create);
+REGISTRAR(OperatorStats, "AvgPooling4D", AvgPoolingStats<AvgPooling_Op<4>>::create);
+
+class FCStats : public OperatorStats {
+public:
+    FCStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<FCStats> create(const Operator& op) {
+        return std::make_unique<FCStats>(op);
+    }
+
+    size_t getNbMACOps() const override {
+        const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+	    const std::size_t weightsSize = op_.getInput(1)->size();
+        const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
+        return batchSize * weightsSize;
+    }
+};
+
+REGISTRAR(OperatorStats, "FC", FCStats::create);
+
+class MatMulStats : public OperatorStats {
+public:
+    MatMulStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<MatMulStats> create(const Operator& op) {
+        return std::make_unique<MatMulStats>(op);
+    }
+
+    size_t getNbMACOps() const override {
+        const MatMul_Op& op_ = dynamic_cast<const MatMul_Op&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        const size_t n = (op_.getInput(0)->dims().size() > 1)
+            ? op_.getInput(0)->dims().end()[-2] : 1;
+        const size_t k = op_.getInput(0)->dims().back();
+        const size_t m = (op_.getInput(1)->dims().size() > 1)
+            ? op_.getInput(1)->dims().back() : 1;
+        const size_t nb = (op_.getInput(0)->dims().size() > 2)
+            ? std::accumulate(op_.getInput(0)->dims().cbegin(),
+                              op_.getInput(0)->dims().cend() - 2,
+                              1,
+                              std::multiplies<size_t>())
+            : 1; 
+
+        return nb * n * m * k;
+    }
+};
+
+REGISTRAR(OperatorStats, "MatMul", MatMulStats::create);
+
+class ReLUStats : public OperatorStats {
+public:
+    ReLUStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ReLUStats> create(const Operator& op) {
+        return std::make_unique<ReLUStats>(op);
+    }
+
+    size_t getNbCompOps() const override {
+        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        return op_.getOutput(0)->size();
+    }
+};
+
+REGISTRAR(OperatorStats, "ReLU", ReLUStats::create);
+
+class AbsStats : public OperatorStats {
+public:
+    AbsStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<AbsStats> create(const Operator& op) {
+        return std::make_unique<AbsStats>(op);
+    }
+
+    size_t getNbCompOps() const override {
+        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        return op_.getOutput(0)->size();
+    }
+
+    // This is in the worst case (all values are negative)
+    size_t getNbArithmOps() const override {
+        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        return op_.getOutput(0)->size();
+    }
+};
+
+REGISTRAR(OperatorStats, "Abs", AbsStats::create);
+
+class ReduceMeanStats : public OperatorStats {
+public:
+    ReduceMeanStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ReduceMeanStats> create(const Operator& op) {
+        return std::make_unique<ReduceMeanStats>(op);
+    }
+
+    size_t getNbArithmOps() const override {
+        const ReduceMean_Op& op_ = dynamic_cast<const ReduceMean_Op&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        const size_t nbIn = op_.getInput(0)->size();
+        const size_t nbOut = op_.getOutput(0)->size();
+        const size_t nbReduce = nbIn / nbOut;
+        // (nbReduce - 1) additions + 1 division for each output
+        return nbOut * nbReduce;
+    }
+};
+
+REGISTRAR(OperatorStats, "ReduceMean", ReduceMeanStats::create);
+
+class ReduceSumStats : public OperatorStats {
+public:
+    ReduceSumStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ReduceSumStats> create(const Operator& op) {
+        return std::make_unique<ReduceSumStats>(op);
+    }
+
+    size_t getNbArithmOps() const override {
+        const ReduceSum_Op& op_ = dynamic_cast<const ReduceSum_Op&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        const size_t nbIn = op_.getInput(0)->size();
+        const size_t nbOut = op_.getOutput(0)->size();
+        const size_t nbReduce = nbIn / nbOut;
+        // (nbReduce - 1) additions for each output
+        return nbOut * (nbReduce - 1);
+    }
+};
+
+REGISTRAR(OperatorStats, "ReduceSum", ReduceSumStats::create);
+
+class SoftmaxStats : public OperatorStats {
+public:
+    SoftmaxStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<SoftmaxStats> create(const Operator& op) {
+        return std::make_unique<SoftmaxStats>(op);
+    }
+
+    size_t getNbArithmOps() const override {
+        const Softmax_Op& op_ = dynamic_cast<const Softmax_Op&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        const size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
+        const size_t nbReduce = op_.getInput(0)->dims()[axis];
+        const size_t nbOut = op_.getOutput(0)->size();
+        // nbOut divisions + (nbReduce - 1) additions
+        return nbOut + (nbReduce - 1);
+    }
+
+    size_t getNbNLOps() const override {
+        const Softmax_Op& op_ = dynamic_cast<const Softmax_Op&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        const size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
+        const size_t nbReduce = op_.getInput(0)->dims()[axis];
+        const size_t nbOut = op_.getOutput(0)->size();
+        // nbOut exp + nbReduce exp
+        return nbOut + nbReduce;
+    }
+};
+
+REGISTRAR(OperatorStats, "Softmax", SoftmaxStats::create);
+
+class MemOpStats : public OperatorStats {
+public:
+    MemOpStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<MemOpStats> create(const Operator& op) {
+        return std::make_unique<MemOpStats>(op);
+    }
+};
+
+REGISTRAR(OperatorStats, "Reshape", MemOpStats::create);
+REGISTRAR(OperatorStats, "Transpose", MemOpStats::create);
+REGISTRAR(OperatorStats, "Concat", MemOpStats::create);
+REGISTRAR(OperatorStats, "Split", MemOpStats::create);
+REGISTRAR(OperatorStats, "Slice", MemOpStats::create);
+REGISTRAR(OperatorStats, "Squeeze", MemOpStats::create);
+REGISTRAR(OperatorStats, "Unsqueeze", MemOpStats::create);
+REGISTRAR(OperatorStats, "Gather", MemOpStats::create);
+REGISTRAR(OperatorStats, "Identity", MemOpStats::create);
+
+class ElemWiseOpStats : public OperatorStats {
+public:
+    ElemWiseOpStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ElemWiseOpStats> create(const Operator& op) {
+        return std::make_unique<ElemWiseOpStats>(op);
+    }
+
+    size_t getNbArithmOps() const override {
+        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        return op_.getOutput(0)->size();
+    }
+};
+
+REGISTRAR(OperatorStats, "Add", ElemWiseOpStats::create);
+REGISTRAR(OperatorStats, "Sub", ElemWiseOpStats::create);
+REGISTRAR(OperatorStats, "Mul", ElemWiseOpStats::create);
+REGISTRAR(OperatorStats, "Div", ElemWiseOpStats::create);
+
+class ElemWiseLogicOpStats : public OperatorStats {
+public:
+    ElemWiseLogicOpStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ElemWiseLogicOpStats> create(const Operator& op) {
+        return std::make_unique<ElemWiseLogicOpStats>(op);
+    }
+
+    size_t getNbArithmOps() const override {
+        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        return op_.getOutput(0)->size();
+    }
+};
+
+REGISTRAR(OperatorStats, "And", ElemWiseLogicOpStats::create);
+
+class ElemWiseNLOpStats : public OperatorStats {
+public:
+    ElemWiseNLOpStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ElemWiseNLOpStats> create(const Operator& op) {
+        return std::make_unique<ElemWiseNLOpStats>(op);
+    }
+
+    size_t getNbNLOps() const override {
+        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        return op_.getOutput(0)->size();
+    }
+};
+
+REGISTRAR(OperatorStats, "Atan", ElemWiseNLOpStats::create);
+REGISTRAR(OperatorStats, "Sqrt", ElemWiseNLOpStats::create);
+REGISTRAR(OperatorStats, "Erf", ElemWiseNLOpStats::create);
+REGISTRAR(OperatorStats, "Ln", ElemWiseNLOpStats::create);
+REGISTRAR(OperatorStats, "Sigmoid", ElemWiseNLOpStats::create);
+REGISTRAR(OperatorStats, "Tanh", ElemWiseNLOpStats::create);
+REGISTRAR(OperatorStats, "Pow", ElemWiseNLOpStats::create);
+}
+
+#endif /* AIDGE_CORE_GRAPH_STATICANALYSIS_H_ */
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 89b2c06a52f180ffb35363cb6ab07d4242e12033..327f4f7c3d43b5194f23cfaed8674ee0b47bd6a2 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 class GenericOperator_Op
     : public OperatorTensor,
-      public Registrable<GenericOperator_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>> {
+      public Registrable<GenericOperator_Op, std::array<std::string, 2>, std::function<std::shared_ptr<OperatorImpl>(const GenericOperator_Op &)>> {
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
diff --git a/include/aidge/operator/LRN.hpp b/include/aidge/operator/LRN.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..669564d4a8fb0da2fac4f67e164fc08653131472
--- /dev/null
+++ b/include/aidge/operator/LRN.hpp
@@ -0,0 +1,84 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_LRN_H_
+#define AIDGE_CORE_OPERATOR_LRN_H_
+
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class LRNAttr { Alpha, Beta, Bias, Size };
+
+class LRN_Op : public OperatorTensor,
+                public Registrable<LRN_Op,
+                                   std::string,
+                                   std::function<std::shared_ptr<OperatorImpl>(const LRN_Op&)>> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<LRNAttr, float, float, float, std::int32_t>;
+    template <LRNAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    LRN_Op() = delete;
+
+    LRN_Op(std::int32_t size);
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    LRN_Op(const LRN_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::LRN_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& alpha() const noexcept { return mAttributes -> getAttr<LRNAttr::Alpha>(); }
+    inline float& beta() const noexcept { return mAttributes -> getAttr<LRNAttr::Beta>(); }
+    inline float& bias() const noexcept { return mAttributes -> getAttr<LRNAttr::Bias>(); }
+    inline std::int32_t& size() const noexcept { return mAttributes -> getAttr<LRNAttr::Size>(); }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> LRN(std::int32_t size, const std::string& name = "");
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_LRN_H_ */
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index bc1852ec0759ffaafa015143f22b0a1c8f6c893e..181a4e88a0fe30e2a86c44adda2195d7e6f5293d 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -26,7 +26,15 @@
 
 namespace Aidge {
 enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
-enum class PadBorderType { Constant, Edge, Reflect, Wrap };
+enum class PadBorderType {
+    /** @brief all out of bound values will be set to a given value.*/
+    Constant,
+    Edge,
+    Reflect,
+    Wrap,
+    /** @brief all out of bound values will be set to 0.*/
+    Zero,
+};
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index a48b95aff2a18750d83f12a62c408ad41b20afee..c3c7838efc16a0d091f5f0422442225cef8a0ab5 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -9,60 +9,226 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CORE_OPERATOR_Resize_H_
-#define AIDGE_CORE_OPERATOR_Resize_H_
+#ifndef AIDGE_CORE_OPERATOR_RESIZE_H_
+#define AIDGE_CORE_OPERATOR_RESIZE_H_
 
 #include <memory>
 #include <string>
 #include <vector>
 
 #include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Interpolation.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Pad.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 
-class Resize_Op : public OperatorTensor,
-                  public Registrable<Resize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Resize_Op&)>>{
+/* @brief attributes for the aidge operator */
+enum class ResizeAttr {
+    //   antialias,
+    // axes,
+    CoordinateTransformationMode,
+    CubicCoeffA,
+    // excludeOutside,
+    //   extrapolation_value,
+    //   keep_aspect_ratio_policy,
+    InterpolationMode,
+    PaddingMode
+};
 
-public:
+/**
+ * @brief Resize operator, will up/downscale a given tensor given the input.
+ * @verbatim
+ * Output size can be computed in 2 ways :
+ * 1. Image can be rescaled proportionally to the input size :
+ *    output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)
+ * 2. Output dimensions are directly given via the size input(#4)
+ *
+ * Hence, either input Scale or Input Sizes can be defined, if both are
+ * connected, the operator will throw an error.
+ *
+ * Resize takes (up to) 4 different inputs :
+ * #1 Input to resize :
+ *   N-D tensor.
+ *
+ * #2 ROI (optional) :
+ *   1-D tensor of coordinates given as [start1, …, startN, end1, …, endN]
+ *   where N is the rank of X or the length of axes, if provided. The RoIs’
+ *   coordinates are normalized in the coordinate system of the input image.
+ *   If not set, the default ROI is the entire image.
+ * #3 scales (optional) - tensor(float):
+ *   The scale array along each dimension.
+ *    The number of elements of ‘scales’ should be the same as the rank of
+ * input ‘X’ or the length of ‘axes’, if provided. Accepted values: (0,inf)
+ *    - (0,1)   : downsampling
+ *    - 1       : identity
+ *    - (1,inf) : upsampling
+ * #4. Sizes - tensor(int64):
+ *   Target size of the output tensor.
+ *   Its interpretation depends on the ‘keep_aspect_ratio_policy’ value.
+ *   The number of elements of ‘sizes’ should be the same as either :
+ *   - The rank of input ‘X’
+ *   - The length of ‘axes’ attribute, if provided.
+ * @endverbatim
+ * @warning : Only one of ‘scales’ and ‘sizes’ can be specified.
+ * @param coordinate_transformation_mode
+ * @param cubic_coeff_a the "a" coefficient of cubic interpolation. Most often
+ * it is set to -0.75
+ * @param InterpolationMode type of interpolation (currently only support cubic
+ * interpolation)
+ */
+class Resize_Op
+    : public OperatorTensor,
+      public Registrable<
+          Resize_Op,
+          std::string,
+          std::function<std::shared_ptr<OperatorImpl>(const Resize_Op &)>> {
+
+  private:
+    using Attributes_ =
+        StaticAttributes<ResizeAttr,
+                         Interpolation::CoordinateTransformation,
+                         float,
+                         Interpolation::Mode,
+                         PadBorderType>;
+    template <ResizeAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+  public:
     static const std::string Type;
-
-    Resize_Op();
+    /**
+     * @brief creates a resize operator
+     * This node can take 4 different inputs, more details in the class
+     * doxygen.
+     * 1. Input to resize :
+     * 2. ROI NOT SUPPORTED (optional) :
+     * 3. scales (optional) - tensor(float):
+     * 4. sizes - tensor(int64):
+     * @param[in] coordinate_transformation_mode
+     * @param[in] cubic_coeff_a the a coefficient of cubic interpolation. Only
+     * used if interpolation_mode = Interpolation::Mode::Cubic
+     * @param[in] interpolationMode : Type of interpolation used for
+     * up/downsampling
+     * @warning Scales & ROI input cannot be set simultaneously. If both are
+     * set, forward will fail.
+     * @return NodePtr
+     */
+    Resize_Op(
+        Interpolation::CoordinateTransformation coordTransfoMode,
+        Interpolation::Mode interpol_mode = Interpolation::Mode::RoundPreferFloor,
+        float cubic_coef_a = -.75f,
+        PadBorderType paddingMode = PadBorderType::Edge)
+        : OperatorTensor(Type,
+                         {InputCategory::Data,
+                          InputCategory::OptionalData,
+                          InputCategory::OptionalData,
+                          InputCategory::OptionalData},
+                         1),
+          mAttributes(std::make_shared<Attributes_>(
+              attr<ResizeAttr::CubicCoeffA>(cubic_coef_a),
+              attr<ResizeAttr::CoordinateTransformationMode>(coordTransfoMode),
+              attr<ResizeAttr::InterpolationMode>(interpol_mode),
+              attr<ResizeAttr::PaddingMode>(paddingMode))) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
-     * but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output
+     * tensor(s), but not its input tensors : The new operator has no input
+     * associated).
      * @param op Operator to copy.
      */
-    Resize_Op(const Resize_Op& op);
+    Resize_Op(const Resize_Op &op)
+        : OperatorTensor(op), mAttributes(op.mAttributes) {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Resize_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Resize_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<Operator> clone() const override final {
+        return std::make_shared<Resize_Op>(*this);
+    }
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
+    void setBackend(const std::string &name,
+                    DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override {
+        return Registrar<Resize_Op>::getKeys();
+    }
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    inline Interpolation::CoordinateTransformation
+    coordinateTransformationMode() const {
+        return mAttributes
+            ->template getAttr<ResizeAttr::CoordinateTransformationMode>();
+    }
+    inline float cubicCoefA() const {
+        return mAttributes->template getAttr<ResizeAttr::CubicCoeffA>();
+    }
+    inline Interpolation::Mode interpolationMode() const {
+        return mAttributes->template getAttr<ResizeAttr::InterpolationMode>();
+    }
+    inline PadBorderType paddingMode() const {
+        return mAttributes->template getAttr<ResizeAttr::PaddingMode>();
+    }
 
-    static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName() {
         //  roi, scales, sizes, even if considered as const parameters/input
         return {"data_input", "roi ", "scales", "sizes"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
-std::shared_ptr<Node> Resize(const std::string &name = "");
-
-}  // namespace Aidge
-
-
-#endif /* AIDGE_CORE_OPERATOR_Resize_H_ */
\ No newline at end of file
+/**
+ * @brief creates a node that contains a resize operator
+ * This node can take 4 different inputs, more details in the class doxygen.
+ * #0 Input to resize
+ * #1 ROI NOT SUPPORTED (optional) - Tensor(double|float|float16)
+ * #2 scales (optional) - tensor(float)
+ * #3 sizes - tensor(int64)
+ * @param[in] coordinate_transformation_mode
+ * @param[in] interpolationMode type of interpolation used in case of
+ * upsampling
+ * @param[in] cubic_coeff_a the "a" coefficient of cubic interpolation. Only
+ * used if interpolation_mode = Interpolation::Mode::Cubic
+ * @warning Scales & ROI input cannot be set simultaneously. If both are set,
+ * forward will fail.
+ * @warning Padding mode will tell how values out of bound are treated.
+ * @return NodePtr
+ */
+std::shared_ptr<Node>
+Resize(std::vector<float> scale = std::vector<float>(),
+        std::vector<std::size_t> size = std::vector<std::size_t>(),
+       Interpolation::CoordinateTransformation coordTransfoMode =
+           Interpolation::CoordinateTransformation::HalfPixel,
+       Interpolation::Mode interpolMode =
+           Interpolation::Mode::RoundPreferFloor,
+       float cubicCoefA = -.75f,
+       const std::string &name = "");
+
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
+    "coordinateTransformationMode",
+    "cubicCoeffA",
+    "InterpolationMode",
+    "PaddingMode"
+};
+}
+#endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9644620d71276c5e35fc9daaf634f4d4cdb28405
--- /dev/null
+++ b/include/aidge/operator/Stack.hpp
@@ -0,0 +1,106 @@
+#ifndef AIDGE_CORE_OPERATOR_STACK_H_
+#define AIDGE_CORE_OPERATOR_STACK_H_
+
+#include <memory>
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+class StackProdConso : public ProdConso {
+  public:
+    StackProdConso(const Operator &op) : ProdConso(op) {}
+    Elts_t getRequiredMemory(
+        const IOIndex_t outputIdx,
+        const std::vector<DimSize_t> &inputsSize) const override final;
+    void resetConsummerProducer() override;
+};
+
+class StackOpImpl : public OperatorImpl {
+  public:
+    StackOpImpl(const Operator &op, const std::string &backend = "")
+        : OperatorImpl(op, backend) {}
+
+    std::shared_ptr<ProdConso> getProdConso() const override {
+        return std::make_shared<StackProdConso>(mOp);
+    };
+    void forward() override;
+};
+
+enum class StackAttr { ForwardStep, MaxElements };
+
+class StackOp
+    : public OperatorTensor,
+      public Registrable<
+          StackOp,
+          std::string,
+          std::function<std::unique_ptr<OperatorImpl>(const StackOp &)>> {
+
+  private:
+    using Attributes_ =
+        StaticAttributes<StackAttr, std::uint32_t, std::uint32_t>;
+    template <StackAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+  public:
+    static const std::string s_type;
+
+    StackOp(std::uint32_t maxElements);
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output
+     * tensor(s), but not its input tensors (the new operator has no input
+     * associated).
+     * @param op Operator to copy.
+     */
+    StackOp(const StackOp &op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::StackOp
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    void setBackend(const std::string &name,
+                    DeviceIdx_t device = 0) override final;
+
+    std::set<std::string> getAvailableBackends() const override;
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+    void forward() override;
+
+    inline std::shared_ptr<Attributes> attributes() const override {
+        return mAttributes;
+    }
+
+    inline std::uint32_t &maxElements() const {
+        return mAttributes->template getAttr<StackAttr::MaxElements>();
+    }
+
+    inline std::uint32_t &forwardStep() const {
+        return mAttributes->template getAttr<StackAttr::ForwardStep>();
+    }
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> stack(std::uint32_t maxElements,
+                            const std::string &name = "");
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::StackAttr>::data[] = {"forward_step", "max_elements"};
+}
+
+#endif
diff --git a/include/aidge/recipes/GraphViewHelper.hpp b/include/aidge/recipes/GraphViewHelper.hpp
index 3b8ba7627362c945a6bfbe587ec952fdda013e98..bfd0a5364f858c7518de4be0c18888d37ab669da 100644
--- a/include/aidge/recipes/GraphViewHelper.hpp
+++ b/include/aidge/recipes/GraphViewHelper.hpp
@@ -22,11 +22,12 @@
 namespace Aidge {
 
 /**
- * @brief Getter for every Producer operator in a GraphView.
+ * @brief Getter for every Tensor held by a Producer operator in a GraphView.
  * @param graphview GraphView instance where Producers should be searched.
+ * @param constant If true, Producer with attribute ``constant=true`` are also included in the returned set, default=false
  * @return std::set<std::shared_ptr<Node>>
  */
-std::set<std::shared_ptr<Tensor>> producers(std::shared_ptr<GraphView> graphview);
+std::set<std::shared_ptr<Tensor>> producers(std::shared_ptr<GraphView> graphview, bool constant=false);
 
 
 // TODO: change for every Tensor of Operator Producer not constant
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index 82ecc7d28b723d2b3e268f4fb6fbf20d595240ff..86c722b158657633d4509c1181b1f18201d0d514 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -17,7 +17,7 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
-#include "aidge/graphRegex/matchFsm/MatchResult.hpp"
+#include "aidge/graph/Matching.hpp"
 
 
 namespace Aidge {
@@ -81,9 +81,6 @@ size_t removeIdentity(std::shared_ptr<GraphView> graph);
  */
 void removeFlatten(std::shared_ptr<Node> flatten);
 
-
-void removeFlatten(std::shared_ptr<MatchSolution> solution);
-
 /**
  * @brief Remove ``Flatten`` before :cpp:function:`Aidge::FC` Node.
  *
@@ -151,6 +148,15 @@ void expandMetaOps(std::shared_ptr<GraphView> graph, bool recursive = false);
  */
 void matMulTiling(NodePtr matMul, const std::vector<DimSize_t>& maxDims);
 
+/**
+ * Fuse each sub-graph matching a query in a Meta Operator.
+ * @param gm SinglePassGraphMatching containing the graph to manipulate
+ * @param query Sub-graph matching query
+ * @param type Type name of the resulting meta operators
+ * @return size_t Number of replacement
+*/
+size_t fuseToMetaOps(SinglePassGraphMatching& gm, const std::string& query, const std::string& type = "");
+
 /**
  * Fuse each sub-graph matching a query in a Meta Operator.
  * @param graph Graph to manipulate
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 52056852bc454f65f7d12cfc0608e5b6b0b1d933..1b55d7afbf8263a77cf70752fc92f72ef5027904 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -166,7 +166,14 @@ public:
                 attributes[elt.first.c_str()] = future_std::any_cast<const DynamicAttributes&>(elt.second).dict();
             }
             else {
-                attributes[elt.first.c_str()] = mAnyUtils.at(elt.second.type())->cast(elt.second);
+                // At this point, not every attribute may be known to mAnyUtils
+                const auto anyUtilsIt = mAnyUtils.find(elt.second.type());
+                if (anyUtilsIt != mAnyUtils.end()) {
+                    attributes[elt.first.c_str()] = anyUtilsIt->second->cast(elt.second);
+                }
+                else {
+                    attributes[elt.first.c_str()] = "???";
+                }
             }
         }
         return attributes;
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index 6b2ace1c6aa013ae81e5144665e2edde830cdc54..794f14124436668cd9ab0895ff602b8d43ad5dcc 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -9,7 +9,6 @@
  *
  ********************************************************************************/
 
-
 #ifndef AIDGE_LOG_H_
 #define AIDGE_LOG_H_
 
@@ -19,44 +18,36 @@
 #include <fmt/format.h>
 #include <fmt/ranges.h>
 
+#include "aidge/data/half_fmt.hpp"
+
 #include "aidge/utils/Attributes.hpp"
 
 namespace Aidge {
 /**
  * Helper to define a context anywhere, hidding the scoped variable name
  * which has no relevance.
-*/
-#define AIDGE_LOG_CONTEXT(...) const Log::Context logContext_##__LINE__(__VA_ARGS__)
-
+ */
+#define AIDGE_LOG_CONTEXT(...)                                                \
+    const Log::Context logContext_##__LINE__(__VA_ARGS__)
 
-template<class U>
-static void discard_args(U parg) {
+template <class U> static void discard_args(U parg) {
     (void)parg;
 }
-template<class U, class... Us>
-static void discard_args(U parg, Us... pargs) {
+template <class U, class... Us> static void discard_args(U parg, Us... pargs) {
     (void)parg;
     discard_args(pargs...);
 }
 
 /**
  * Aidge logging class, for displaying and file logging of events.
-*/
+ */
 class Log {
-public:
-    enum Level {
-        Debug = 0,
-        Info,
-        Notice,
-        Warn,
-        Error,
-        Fatal
-    };
+  public:
+    enum Level { Debug = 0, Info, Notice, Warn, Error, Fatal };
 
     class Context {
-    public:
-        template <typename... Args>
-        Context(Args&&... args) {
+      public:
+        template <typename... Args> Context(Args &&...args) {
             Log::mContext.push_back(fmt::format(std::forward<Args>(args)...));
         }
 
@@ -68,13 +59,12 @@ public:
     /**
      * Detailed messages for debugging purposes, providing information helpful
      * for developers to trace and identify issues.
-     * Detailed insights of what is appening in an operation, not useful for the
-     * end-user. The operation is performed nominally.
+     * Detailed insights of what is happening in an operation, not useful for
+     * the end-user. The operation is performed nominally.
      * @note This level is disabled at compile time for Release, therefore
      * inducing no runtime overhead for Release.
-    */
-    template <typename... Args>
-    static void debug(Args&&... args) {
+     */
+    template <typename... Args> static void debug(Args &&...args) {
 #ifndef NDEBUG
         // only when compiled in Debug
         log(Debug, fmt::format(std::forward<Args>(args)...));
@@ -86,22 +76,19 @@ public:
     /**
      * Messages that provide a record of the normal operation, about
      * the application's state, progress, or important events.
-     * Reports normal start, end and key steps in an operation. The operation is
-     * performed nominally.
-    */
-    template <typename... Args>
-    static void info(Args&&... args) {
+     * Reports normal start, end and key steps in an operation. The operation
+     * is performed nominally.
+     */
+    template <typename... Args> static void info(Args &&...args) {
         log(Info, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
-     * Applies to normal but significant conditions that may require monitoring,
-     * like unusual or normal fallback events.
-     * Reports specific paths in an operation. The operation can still be
-     * performed normally.
-    */
-    template <typename... Args>
-    static void notice(Args&&... args) {
+     * Applies to normal but significant conditions that may require
+     * monitoring, like unusual or normal fallback events. Reports specific
+     * paths in an operation. The operation can still be performed normally.
+     */
+    template <typename... Args> static void notice(Args &&...args) {
         log(Notice, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -110,9 +97,8 @@ public:
      * not necessarily cause immediate problems.
      * Some specific steps of the operation could not be performed, but it can
      * still provide an exploitable result.
-    */
-    template <typename... Args>
-    static void warn(Args&&... args) {
+     */
+    template <typename... Args> static void warn(Args &&...args) {
         log(Warn, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -121,26 +107,24 @@ public:
      * recover from, but attention is needed to prevent further issues.
      * The operation could not be performed, but it does not prevent potential
      * further operations.
-    */
-    template <typename... Args>
-    static void error(Args&&... args) {
+     */
+    template <typename... Args> static void error(Args &&...args) {
         log(Error, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
-     * Represents a critical error or condition that leads to the termination of
-     * the application, indicating a severe and unrecoverable problem.
-     * The operation could not be performed and any further operation is
+     * Represents a critical error or condition that leads to the termination
+     * of the application, indicating a severe and unrecoverable problem. The
+     * operation could not be performed and any further operation is
      * impossible.
-    */
-    template <typename... Args>
-    static void fatal(Args&&... args) {
+     */
+    template <typename... Args> static void fatal(Args &&...args) {
         log(Fatal, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
      * Set the minimum log level displayed in the console.
-    */
+     */
     static void setConsoleLevel(Level level) {
         mConsoleLevel = level;
     }
@@ -148,14 +132,14 @@ public:
     /**
      * Set or disable colors on console.
      * Initial value should be assumed true.
-    */
+     */
     static void setConsoleColor(bool enabled) {
         mConsoleColor = enabled;
     }
 
     /**
      * Set the minimum log level saved in the log file.
-    */
+     */
     constexpr static void setFileLevel(Level level) {
         mFileLevel = level;
     }
@@ -164,8 +148,8 @@ public:
      * Set the log file name.
      * Close the current log file and open the one with the new file name.
      * If empty, stop logging into a file.
-    */
-    static void setFileName(const std::string& fileName) {
+     */
+    static void setFileName(const std::string &fileName) {
         if (fileName != mFileName) {
             mFileName = fileName;
             mFile.release();
@@ -176,6 +160,22 @@ public:
         }
     }
 
+    /**
+     * @struct fcloseDeleter
+     * @brief Custom deleter to prevent compiler warnings when using
+     * std::unique_ptr with FILE*.
+     *
+     * Using function pointers with attributes (e.g., __attribute__((nonnull)))
+     * as deleters can trigger warnings like -Wignored-attributes. By defining a
+     * custom deleter struct, these attributes are preserved, eliminating the
+     * warnings.
+     */
+    struct fcloseDeleter {
+        void operator()(FILE *f) const noexcept {
+            std::fclose(f);
+        }
+    };
+
 private:
     static void log(Level level, const std::string& msg);
     static void initFile(const std::string& fileName);
@@ -184,14 +184,15 @@ private:
     static bool mConsoleColor;
     static Level mFileLevel;
     static std::string mFileName;
-    static std::unique_ptr<FILE, decltype(&std::fclose)> mFile;
+    static std::unique_ptr<FILE, fcloseDeleter> mFile;
     static std::vector<std::string> mContext;
 };
-}
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::Log::Level>::data[] = {"Debug", "Info", "Notice", "Warn", "Error", "Fatal"};
+const char *const EnumStrings<Aidge::Log::Level>::data[] =
+    {"Debug", "Info", "Notice", "Warn", "Error", "Fatal"};
 }
 
-#endif //AIDGE_LOG_H_
+#endif // AIDGE_LOG_H_
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
index 1bfe0929bf67bb0c6d3b893f3dbaf6993dcfd6ff..88312280d572302ecce4157c34db0ba1efd52da9 100644
--- a/include/aidge/utils/TensorUtils.hpp
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -49,6 +49,7 @@ bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float
     }
     return true;
 }
-}
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_TENSOR_UTILS_H_s */
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index b1e879d8387a71a3819ee7e0f8bbcd1e9936c146..cdf8cc23250366c62a4102118a95e68cec28ec3d 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -10,6 +10,7 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
 
 #include "aidge/data/Data.hpp"
 
@@ -65,6 +66,7 @@ void init_Data(py::module& m){
 
     m.def("format_as", (const char* (*)(DataType)) &format_as, py::arg("dt"));
     m.def("format_as", (const char* (*)(DataFormat)) &format_as, py::arg("df"));
+    m.def("get_data_format_transpose", &getDataFormatTranspose, py::arg("src"), py::arg("dst"));
 
 }
 }
diff --git a/python_binding/data/pybind_Interpolation.cpp b/python_binding/data/pybind_Interpolation.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0839d1c04925f46595630191da9291217d40f10f
--- /dev/null
+++ b/python_binding/data/pybind_Interpolation.cpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/data/Interpolation.hpp"
+#include "aidge/utils/Registrar.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Interpolation(py::module &m) {
+    auto pyInterpolation = py::class_<Aidge::Interpolation>(m, "Interpolation");
+
+    py::enum_<Interpolation::Mode>(pyInterpolation, "Mode")
+    .value("CUBIC", Interpolation::Mode::Cubic)
+    .value("LINEAR", Interpolation::Mode::Linear)
+    .value("ROUND_PREFER_FLOOR", Interpolation::Mode::RoundPreferFloor)
+    .value("ROUND_PREFER_CEIL", Interpolation::Mode::RoundPreferCeil)
+    .value("FLOOR", Interpolation::Mode::Floor)
+    .value("CEIL", Interpolation::Mode::Ceil)
+    .export_values();
+
+    py::enum_<Interpolation::CoordinateTransformation>(pyInterpolation, "CoordinateTransformation")
+    .value("HALF_PIXEL", Interpolation::CoordinateTransformation::HalfPixel)
+    .value("HALF_PIXEL_SYMETRIC", Interpolation::CoordinateTransformation::HalfPixelSymmetric)
+    .value("PYTORCH_HALF_PIXEL", Interpolation::CoordinateTransformation::PytorchHalfPixel)
+    .value("ALIGN_CORNERS", Interpolation::CoordinateTransformation::AlignCorners)
+    .value("ASYMMETRIC", Interpolation::CoordinateTransformation::Asymmetric)
+    .export_values();
+}
+
+} // namespace Aidge
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index d94acaa817eb3c9cbeb9f60c359ea1451852973b..570da04f5123d497478784d495bd92501d66146b 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -320,6 +320,7 @@ void init_Tensor(py::module& m){
     .def("clone", &Tensor::clone)
 	.def("sqrt", &Tensor::sqrt)
     .def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true)
+    .def("set_data_format", &Tensor::setDataFormat, py::arg("data_format"), py::arg("copyTrans") = true)
     .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
     .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
     .def("grad", &Tensor::grad)
@@ -330,18 +331,14 @@ void init_Tensor(py::module& m){
     .def("capacity", &Tensor::capacity)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize, py::arg("dims"), py::arg("strides") = std::vector<DimSize_t>())
     .def("has_impl", &Tensor::hasImpl)
-    .def("get_coord", &Tensor::getCoord)
-    .def("get_idx", &Tensor::getIdx)
+    .def("get_coord", (std::vector<std::size_t> (Tensor::*)(const std::size_t)) &Tensor::getCoord, py::arg("flatIdx"))
+    .def("get_idx",(std::size_t (Tensor::*)(const std::vector<std::size_t> &)) &Tensor::getIdx, py::arg("coords"))
     .def_static("get_available_backends", &Tensor::getAvailableBackends)
     .def("undefined", &Tensor::undefined)
     .def("cpy_transpose", (void (Tensor::*)(const Tensor& src, const std::vector<DimSize_t>& transpose)) &Tensor::copyTranspose, py::arg("src"), py::arg("transpose"))
 
     .def("__str__", [](Tensor& b) {
-        if (b.empty() && b.undefined()) {
-                return std::string("{}");
-        } else {
-            return b.toString();
-        }
+        return b.toString();
     })
     .def("__repr__", [](Tensor& b) {
         return fmt::format("Tensor(dims = {}, dtype = {})", b.dims(), std::string(EnumStrings<DataType>::data[static_cast<int>(b.dataType())]));
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index febb6f2ed594174a7aeef60f26b8f9a5ee0e23e3..4b9d2ad545c47971b7c0dff029585bb4c9ae5638 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -149,8 +149,8 @@ void init_GraphView(py::module& m) {
           //           }
           //      })
           .def("get_ranked_nodes", &GraphView::getRankedNodes)
+          .def("get_ranked_nodes_name", &GraphView::getRankedNodesName, py::arg("format"), py::arg("mark_non_unicity") = true)
           .def("set_dataformat", &GraphView::setDataFormat, py::arg("dataformat"))
-          
             ;
 
      m.def("get_connected_graph_view", &getConnectedGraphView);
diff --git a/python_binding/graph/pybind_Matching.cpp b/python_binding/graph/pybind_Matching.cpp
index 94f2471c3f234c1e401484c099a3815dd26d3c30..af385798175afe68d2e89cd65f3645fc7b806eb3 100644
--- a/python_binding/graph/pybind_Matching.cpp
+++ b/python_binding/graph/pybind_Matching.cpp
@@ -10,6 +10,7 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+#include <pybind11/functional.h>
 #include <pybind11/stl.h>
 #include <memory>
 #include <string>
@@ -31,21 +32,20 @@ void init_SinglePassGraphMatching(py::module& m) {
     py::class_<Aidge::SinglePassGraphMatching>(m, "SinglePassGraphMatching") 
         .def(py::init<std::shared_ptr<GraphView>>(), py::arg("graph"))
         .def("match", 
-        [](Aidge::SinglePassGraphMatching& self, const std::string& query, bool disjoint){
-            // Note: Need to convert set to vector has MatchingResult is not hashable and 
-            // set<MatchingResult> cannot be binded
-            std::set<Aidge::SinglePassGraphMatching::MatchingResult> set_res = self.match(query, disjoint);
-            std::vector<Aidge::SinglePassGraphMatching::MatchingResult> vec_res(set_res.begin(), set_res.end());
-            return vec_res;
-        },
-        py::arg("query"), py::arg("disjoint") = false, 
-        R"mydelimiter( Matches a query by direct, single-pass parse and match.
-        :param query: The query string to search.
-        :param disjoint: If true, only keep the longest disjoint matches.
-        :return: A set of MatchingResult instances.
-        )mydelimiter");
-
-
+            [](Aidge::SinglePassGraphMatching& self, const std::string& query, bool disjoint){
+                // Note: Need to convert set to vector as MatchingResult is not hashable and
+                // set<MatchingResult> cannot be binded
+                std::set<Aidge::SinglePassGraphMatching::MatchingResult> set_res = self.match(query, disjoint);
+                std::vector<Aidge::SinglePassGraphMatching::MatchingResult> vec_res(set_res.begin(), set_res.end());
+                return vec_res;
+            },
+            py::arg("query"), py::arg("disjoint") = false, 
+            R"mydelimiter( Matches a query by direct, single-pass parse and match.
+            :param query: The query string to search.
+            :param disjoint: If true, only keep the longest disjoint matches.
+            :return: A set of MatchingResult instances.
+            )mydelimiter")
+        .def("add_node_lambda", &SinglePassGraphMatching::addNodeLambda, py::arg("name"), py::arg("func"));
 
 }
 }  // namespace Aidge
diff --git a/python_binding/graph/pybind_StaticAnalysis.cpp b/python_binding/graph/pybind_StaticAnalysis.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b7c704d722e81b36e8d4988a4503428918e16a5a
--- /dev/null
+++ b/python_binding/graph/pybind_StaticAnalysis.cpp
@@ -0,0 +1,141 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include "aidge/graph/StaticAnalysis.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+/**
+ * @brief Trampoline class for binding
+ *
+ */
+class pyOperatorStats: public OperatorStats {
+public:
+    using OperatorStats::OperatorStats; // Inherit constructors
+
+    size_t getNbArithmOps() const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            OperatorStats,
+            getNbArithmOps
+        );
+    }
+
+    size_t getNbLogicOps() const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            OperatorStats,
+            getNbLogicOps
+        );
+    }
+
+    size_t getNbCompOps() const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            OperatorStats,
+            getNbCompOps
+        );
+    }
+
+    size_t getNbNLOps() const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            OperatorStats,
+            getNbNLOps
+        );
+    }
+
+    size_t getNbArithmIntOps() const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            OperatorStats,
+            getNbArithmIntOps
+        );
+    }
+
+    size_t getNbMACOps() const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            OperatorStats,
+            getNbMACOps
+        );
+    }
+};
+
+class pyStaticAnalysis: public StaticAnalysis {
+public:
+    using StaticAnalysis::StaticAnalysis; // Inherit constructors
+
+    size_t getNbParams(std::shared_ptr<Node> node) const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            StaticAnalysis,
+            getNbParams,
+            node
+        );
+    }
+
+    size_t getParamsSize(std::shared_ptr<Node> node) const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            StaticAnalysis,
+            getParamsSize,
+            node
+        );
+    }
+
+    void summary(bool incProducers) const override {
+        PYBIND11_OVERRIDE(
+            void,
+            StaticAnalysis,
+            summary,
+            incProducers
+        );
+    }
+};
+
+void init_StaticAnalysis(py::module& m){
+    py::class_<OperatorStats, std::shared_ptr<OperatorStats>, pyOperatorStats>(m, "OperatorStats", py::multiple_inheritance(), py::dynamic_attr())
+    .def(py::init<const Operator&>(), py::arg("op"))
+    .def("get_operator", &OperatorStats::getOperator)
+    .def("get_nb_arithm_ops", &OperatorStats::getNbArithmOps)
+    .def("get_nb_logic_ops", &OperatorStats::getNbLogicOps)
+    .def("get_nb_comp_ops", &OperatorStats::getNbCompOps)
+    .def("get_nb_nl_ops", &OperatorStats::getNbNLOps)
+    .def("get_nb_ops", &OperatorStats::getNbOps)
+    .def("get_nb_arithm_int_ops", &OperatorStats::getNbArithmIntOps)
+    .def("get_nb_arithm_fp_ops", &OperatorStats::getNbArithmFpOps)
+    .def("get_nb_mac_ops", &OperatorStats::getNbMACOps)
+    ;
+    declare_registrable<OperatorStats>(m, "OperatorStats");
+
+    py::class_<StaticAnalysis, std::shared_ptr<StaticAnalysis>, pyStaticAnalysis>(m, "StaticAnalysis", py::multiple_inheritance(), py::dynamic_attr())
+    .def(py::init<std::shared_ptr<GraphView>>(), py::arg("graph"))
+    .def("get_graph", &StaticAnalysis::getGraph)
+    .def("get_nb_params", &StaticAnalysis::getNbParams, py::arg("node"))
+    .def("get_params_size", &StaticAnalysis::getParamsSize, py::arg("node"))
+    .def("get_nb_arithm_ops", &StaticAnalysis::getNbArithmOps)
+    .def("get_nb_logic_ops", &StaticAnalysis::getNbLogicOps)
+    .def("get_nb_comp_ops", &StaticAnalysis::getNbCompOps)
+    .def("get_nb_nl_ops", &StaticAnalysis::getNbNLOps)
+    .def("get_nb_ops", &StaticAnalysis::getNbOps)
+    .def("get_nb_arithm_int_ops", &StaticAnalysis::getNbArithmIntOps)
+    .def("get_nb_arithm_fp_ops", &StaticAnalysis::getNbArithmFpOps)
+    .def("get_nb_mac_ops", &StaticAnalysis::getNbMACOps)
+    .def("summary", &StaticAnalysis::summary, py::arg("inc_producers") = false)
+    .def("get_op_stats", &StaticAnalysis::getOpStats, py::arg("node"))
+    ;
+}
+}
diff --git a/python_binding/operator/pybind_Cast.cpp b/python_binding/operator/pybind_Cast.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..960a084ff063e6310a4526dd65e8dabb0e8f905a
--- /dev/null
+++ b/python_binding/operator/pybind_Cast.cpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include <string>
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Cast.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Cast(py::module &m) {
+    // Binding for CastOp class
+    auto pyCastOp = py::class_<Cast_Op, std::shared_ptr<Cast_Op>, OperatorTensor>(m, "CastOp", py::multiple_inheritance(),R"mydelimiter(
+        CastOp is a tensor operator that casts the input tensor to a data type specified by the target_type argument.
+        :param target_type: data type of the output tensor 
+        :type target_type: Datatype
+        :param name: name of the node.
+    )mydelimiter")
+        .def(py::init<DataType>(), py::arg("target_type"))
+        .def("target_type", &Cast_Op::targetType, "Get the targeted type, output tensor data type")
+        .def_static("get_inputs_name", &Cast_Op::getInputsName, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", &Cast_Op::getOutputsName, "Get the names of the output tensors.");
+
+    // Binding for the Cast function
+    m.def("Cast", &Cast, py::arg("target_type"), py::arg("name") = "",
+        R"mydelimiter(
+        CastOp is a tensor operator that casts the input tensor to a data type specified by the target_type argument.
+        :param target_type: data type of the output tensor 
+        :type target_type: Datatype
+        :param name: name of the node.
+    )mydelimiter");
+}
+} // namespace Aidge
\ No newline at end of file
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 6af8fef88e411af0a3ecbe5a771bf7af24de411a..f125291fafb89ec7ae81678a37e2bde2222a1054 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -64,5 +64,7 @@ void init_GenericOperator(py::module& m) {
             }
             return genericNode;
         }, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), py::arg("name") = "");
+
+    declare_registrable<GenericOperator_Op>(m, "GenericOperatorOp");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_LRN.cpp b/python_binding/operator/pybind_LRN.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..178af540b3efb280eaba67572e07cb673e563b22
--- /dev/null
+++ b/python_binding/operator/pybind_LRN.cpp
@@ -0,0 +1,31 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/LRN.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_LRN(py::module& m) {
+    py::class_<LRN_Op, std::shared_ptr<LRN_Op>, OperatorTensor>(m, "LRNOp", py::multiple_inheritance())
+        .def(py::init<std::int32_t>(), py::arg("size"))
+        .def_static("get_inputs_name", &LRN_Op::getInputsName)
+        .def_static("get_outputs_name", &LRN_Op::getOutputsName)
+        .def_readonly_static("Type", &LRN_Op::Type);
+    declare_registrable<LRN_Op>(m, "LRNOp");
+    m.def("LRN", &LRN, py::arg("size"), py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index e22f88687eff6856ce57fab6621781ffc86873b4..a1d1889c9a1881d3aa7b6eb9ccb4c23c5314cc80 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -63,6 +63,7 @@ void init_Operator(py::module& m){
     .def_property_readonly("attr", &Operator::attributes)
     .def("set_back_edges", &Operator::setBackEdges, py::arg("input_indexes"))
     .def("is_back_edge", &Operator::isBackEdge, py::arg("input_index"))
+    .def("is_atomic", &Operator::isAtomic)
     ;
 }
 }
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
index 35321f525e486107af3715ce1c09f48b7c5cd60f..2aa62609835a7042dd0df54f28b453b7e33a3b5b 100644
--- a/python_binding/operator/pybind_Resize.cpp
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -9,22 +9,51 @@
  *
  ********************************************************************************/
 
+#include <cstddef>  // std::size_t
+
 #include <pybind11/pybind11.h>
 
-#include "aidge/operator/Resize.hpp"
+#include "aidge/data/Interpolation.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Resize.hpp"
+#include "aidge/utils/Registrar.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
-void init_Resize(py::module& m) {
-    py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
-        .def_static("get_inputs_name", &Resize_Op::getInputsName)
-        .def_static("get_outputs_name", &Resize_Op::getOutputsName)
-        .def_readonly_static("Type", &Resize_Op::Type);
+void init_Resize(py::module &m) {
+  py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(
+          m, "ResizeOp", py::multiple_inheritance())
+          .def(py::init<Interpolation::CoordinateTransformation, Interpolation::Mode, float, PadBorderType>(), py::arg("coordinate_transformation_mode"), py::arg("interpolation_mode"), py::arg("cubic_coeff_a") = -0.75f, py::arg("padding_mode") = PadBorderType::Edge)
+          .def_static("get_inputs_name", &Resize_Op::getInputsName)
+          .def_static("get_outputs_name", &Resize_Op::getOutputsName)
+          .def_readonly_static("Type", &Resize_Op::Type);
 
-    declare_registrable<Resize_Op>(m, "ResizeOp");
+  declare_registrable<Resize_Op>(m, "ResizeOp");
 
-    m.def("Resize", &Resize, py::arg("name") = "");
+  m.def("Resize", &Resize,
+        py::arg("scale") = std::vector<float>({}),
+        py::arg("size") = std::vector<std::size_t>({}),
+        py::arg("coord_transfo_mode") =
+            Interpolation::CoordinateTransformation::HalfPixel,
+        py::arg("interpolation_mode") =
+            Interpolation::Mode::RoundPreferFloor,
+        py::arg("cubic_interpolation_coefficient_a") = -.75f,
+        py::arg("name") = "", R"mydelimiter(
+    Initialize a node containing a Resize operator.
+    This node can take 4 different inputs.
+    #0 Input to resize
+    #1 ROI NOT SUPPORTED (optional) - Tensor(double|float|float16)
+    #2 scales (optional) - tensor(float)
+    #3 sizes - tensor(int64)
+	:param coord_transfo_mode : Coordinate transformation mode used to map coordinates between the resized and original tensor
+	:param interpolationMode : Type of interpolation used in case of upsampling
+	:type interpolationMode : Interpolation::Mode
+	:param cubic_coeff_a : "A" coefficient of cubic interpolation. Only used if interpolation_mode = Interpolation::Mode::Cubic
+	:type cubic_coeff_a  : float
+    :param name : name of the node.
+    :type name : str
+    )mydelimiter");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Stack.cpp b/python_binding/operator/pybind_Stack.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2328892108d724438a39cc37eaf97b856caa3a8a
--- /dev/null
+++ b/python_binding/operator/pybind_Stack.cpp
@@ -0,0 +1,41 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Stack.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+void init_Stack(py::module &m) {
+    py::class_<StackOp, std::shared_ptr<StackOp>, OperatorTensor>(
+        m,
+        "StackOp",
+        py::multiple_inheritance(),
+        R"mydelimiter(Initialize a Stack operator.)mydelimiter")
+        .def(py::init<const std::uint32_t>(), py::arg("max_elements"))
+        .def_static("get_inputs_name", &StackOp::getInputsName)
+        .def_static("get_outputs_name", &StackOp::getOutputsName)
+        .def_readonly_static("Type", &StackOp::s_type);
+
+    m.def("Stack",
+          &stack,
+          py::arg("max_elements"),
+          py::arg("name") = "",
+          R"mydelimiter(
+        Initialize a node containing a Stack operator.
+            :param max_elements : the maximum number of tensors to be stacked.
+			:param name: name of the node.
+		)mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 02f4b732c39ef5cbc1755eb314d32c25c96d01fd..006eeb289f25570ddf337f048b05816102624028 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -20,6 +20,7 @@ void init_Random(py::module&);
 void init_Data(py::module&);
 void init_Database(py::module&);
 void init_DataProvider(py::module&);
+void init_Interpolation(py::module&);
 void init_Tensor(py::module&);
 void init_TensorImpl(py::module&);
 void init_Attributes(py::module&);
@@ -27,6 +28,7 @@ void init_OperatorImpl(py::module&);
 void init_Log(py::module&);
 void init_Operator(py::module&);
 void init_OperatorTensor(py::module&);
+void init_StaticAnalysis(py::module&);
 
 void init_Add(py::module&);
 void init_And(py::module&);
@@ -35,6 +37,7 @@ void init_Atan(py::module&);
 void init_AvgPooling(py::module&);
 void init_BatchNorm(py::module&);
 void init_BitShift(py::module&);
+void init_Cast(py::module&);
 void init_Clip(py::module&);
 void init_Concat(py::module&);
 void init_ConstantOfShape(py::module&);
@@ -50,6 +53,7 @@ void init_GlobalAveragePooling(py::module&);
 void init_GridSample(py::module&);
 void init_Heaviside(py::module&);
 void init_Identity(py::module&);
+void init_LRN(py::module&);
 void init_LeakyReLU(py::module&);
 void init_MatMul(py::module&);
 void init_MaxPooling(py::module&);
@@ -74,6 +78,7 @@ void init_Softmax(py::module&);
 void init_Split(py::module&);
 void init_Sqrt(py::module&);
 void init_Squeeze(py::module&);
+void init_Stack(py::module&);
 void init_Sub(py::module&);
 void init_Tanh(py::module&);
 void init_Transpose(py::module&);
@@ -103,6 +108,7 @@ void init_Aidge(py::module& m) {
     init_Data(m);
     init_Database(m);
     init_DataProvider(m);
+    init_Interpolation(m);
     init_Tensor(m);
     init_TensorImpl(m);
     init_Attributes(m);
@@ -117,6 +123,7 @@ void init_Aidge(py::module& m) {
     init_Log(m);
     init_Operator(m);
     init_OperatorTensor(m);
+    init_StaticAnalysis(m);
 
     init_Add(m);
     init_And(m);
@@ -125,6 +132,7 @@ void init_Aidge(py::module& m) {
     init_AvgPooling(m);
     init_BatchNorm(m);
     init_BitShift(m);
+    init_Cast(m);
     init_Clip(m);
     init_Concat(m);
     init_Conv(m);
@@ -140,6 +148,7 @@ void init_Aidge(py::module& m) {
     init_GridSample(m);
     init_Heaviside(m);
     init_Identity(m);
+    init_LRN(m);
     init_LeakyReLU(m);
     init_MatMul(m);
     init_MaxPooling(m);
@@ -163,6 +172,7 @@ void init_Aidge(py::module& m) {
     init_Split(m);
     init_Sqrt(m);
     init_Squeeze(m);
+    init_Stack(m);
     init_Sub(m);
     init_Tanh(m);
     init_Transpose(m);
diff --git a/python_binding/recipes/pybind_GraphViewHelper.cpp b/python_binding/recipes/pybind_GraphViewHelper.cpp
index ac56fb4b43eb5b0a737157ec9e64c6771a692816..eee5b6a61771b4b0724699f1c45dad6c8a35f04d 100644
--- a/python_binding/recipes/pybind_GraphViewHelper.cpp
+++ b/python_binding/recipes/pybind_GraphViewHelper.cpp
@@ -23,6 +23,6 @@ namespace py = pybind11;
 
 namespace Aidge {
 void init_GraphViewHelper(py::module &m) {
-    m.def("producers", &producers, py::arg("graphview"));
+    m.def("producers", &producers, py::arg("graphview"), py::arg("constant")=false);
 }
 } // namespace Aidge
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index 6908cbd912b506a7adb7f33a02416d0173174969..77f20b9d655c6d9f6e95b23c4884bd1bc4f9ffd6 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -112,7 +112,20 @@ void init_Recipes(py::module &m)
     :type recursive: bool
     )mydelimiter");
 
-  m.def("fuse_to_metaops", fuseToMetaOps, py::arg("graph_view"), py::arg("query"), py::arg("type") = "", R"mydelimiter(
+  m.def("fuse_to_metaops", py::overload_cast<SinglePassGraphMatching&, const std::string&, const std::string&>(fuseToMetaOps), py::arg("gm"), py::arg("query"), py::arg("type") = "", R"mydelimiter(
+    Fuse each sub-graph matching a query in a Meta Operator.
+
+    :param gm: SinglePassGraphMatching containing the graph to manipulate
+    :type gm: :py:class:`aidge_core.SinglePassGraphMatching`
+    :param query: Sub-graph matching query
+    :type query: str
+    :param type: Type name of the resulting meta operators
+    :type type: str, optional
+    :return: Number of sub-graph actually fused in a Meta Operator.
+    :rtype: int
+    )mydelimiter");
+
+  m.def("fuse_to_metaops", py::overload_cast<std::shared_ptr<GraphView>, const std::string&, const std::string&>(fuseToMetaOps), py::arg("graph_view"), py::arg("query"), py::arg("type") = "", R"mydelimiter(
     Fuse each sub-graph matching a query in a Meta Operator.
 
     :param graph_view: Graph view on which we want to apply the recipe
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index dd5c5c110154427a8af7afbf70b2c76b61e507a8..1708d9e36c174527c648e37b63b080211aa6df05 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -95,6 +95,10 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
     Log::debug("getBestMatch() for requirements: {}", requiredSpecs);
 
     const auto availableSpecsSet = getAvailableImplSpecs();
+    AIDGE_ASSERT(availableSpecsSet.size() > 0,
+                 "OperatorImpl::getBestMatch(): No available specs found by "
+                 "getAvailableImplSpecs(). "
+                 "Cannot find best implementation for required specs, aborting.");
     const std::vector<ImplSpec> availableSpecs(availableSpecsSet.begin(), availableSpecsSet.end());
     std::vector<int> matchingSpecs(availableSpecs.size(), -1);
 
diff --git a/src/data/Data.cpp b/src/data/Data.cpp
index 62a883d08a401e02c86408214a061f893ffbfb4a..865b4ff2dcff68753fb5a4cc9cafccdd129e3c8a 100644
--- a/src/data/Data.cpp
+++ b/src/data/Data.cpp
@@ -11,6 +11,50 @@
 
 #include "aidge/data/Data.hpp"
 
+bool Aidge::isDataTypeFloatingPoint(const DataType& type) {
+    switch (type) {
+    case DataType::Float64:
+    case DataType::Float32:
+    case DataType::Float16:
+    case DataType::BFloat16:  return true;
+    default:                  return false;
+    }
+    return false;
+}
+
+size_t Aidge::getDataTypeBitWidth(const DataType& type) {
+    switch (type) {
+    case DataType::Float64:   return 64;
+    case DataType::Float32:   return 32;
+    case DataType::Float16:   return 16;
+    case DataType::BFloat16:  return 16;
+    case DataType::Binary:    return 1;
+    case DataType::Ternary:   return 2;
+    case DataType::Int2:      return 2;
+    case DataType::Int3:      return 3;
+    case DataType::Int4:      return 4;
+    case DataType::Int5:      return 5;
+    case DataType::Int6:      return 6;
+    case DataType::Int7:      return 7;
+    case DataType::Int8:      return 8;
+    case DataType::Int16:     return 16;
+    case DataType::Int32:     return 32;
+    case DataType::Int64:     return 64;
+    case DataType::UInt2:     return 2;
+    case DataType::UInt3:     return 3;
+    case DataType::UInt4:     return 4;
+    case DataType::UInt5:     return 5;
+    case DataType::UInt6:     return 6;
+    case DataType::UInt7:     return 7;
+    case DataType::UInt8:     return 8;
+    case DataType::UInt16:    return 16;
+    case DataType::UInt32:    return 32;
+    case DataType::UInt64:    return 64;
+    default:                  return 0;
+    }
+    return 0;
+}
+
 Aidge::DataFormatTranspose Aidge::getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
     // Permutation array from default format to src format
     const auto srcDefToFormat = DataFormatTransposeDict[static_cast<int>(src)];
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 2a06aacbee974e07d5aa81313677a140aac853b1..d406f8d0d9f0a9b412465d8524a5d4410f9d48d1 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -14,6 +14,7 @@
 #include <cstddef>
 #include <vector>
 
+#include "aidge/data/half.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/Abs.hpp"
@@ -23,14 +24,14 @@
 #include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/Sub.hpp"
 #include "aidge/operator/Sqrt.hpp"
-#include "aidge/operator/Transpose.hpp"
 #include "aidge/utils/Types.h"
 
+namespace Aidge {
 
-Aidge::Tensor::~Tensor() noexcept = default;
+Tensor::~Tensor() noexcept = default;
 
 
-Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
+Tensor Tensor::operator+(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -47,7 +48,7 @@ Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
 }
 
 
-Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
+Tensor Tensor::operator-(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -64,7 +65,7 @@ Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
 }
 
 
-Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
+Tensor Tensor::operator*(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -81,7 +82,7 @@ Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
 }
 
 
-Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
+Tensor Tensor::operator/(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -97,7 +98,7 @@ Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
     return div_.getOutput(0)->clone();
 }
 
-Aidge::Tensor Aidge::Tensor::sqrt() const {
+Tensor Tensor::sqrt() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     auto sqrt_ = Sqrt_Op();
     sqrt_.associateInput(0, std::make_shared<Tensor>(*this));
@@ -108,7 +109,7 @@ Aidge::Tensor Aidge::Tensor::sqrt() const {
     return sqrt_.getOutput(0)->clone();
 }
 
-Aidge::Tensor Aidge::Tensor::abs() const {
+Tensor Tensor::abs() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     auto abs_ = Abs_Op();
     abs_.associateInput(0, std::make_shared<Tensor>(*this));
@@ -119,7 +120,7 @@ Aidge::Tensor Aidge::Tensor::abs() const {
     return abs_.getOutput(0)->clone();
 }
 
-Aidge::Tensor Aidge::Tensor::mean() const {
+Tensor Tensor::mean() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     // TODO: should be the default behavior of ReduceMean_Op
     // No need to specify the list of all axes!
@@ -134,7 +135,7 @@ Aidge::Tensor Aidge::Tensor::mean() const {
     return mean_.getOutput(0)->clone();
 }
 
-Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
+Tensor& Tensor::operator=(const Tensor& other) {
     if (this == &other) {
         return *this;
     }
@@ -154,7 +155,7 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
 }
 
 
-void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t device, bool copyFrom) {
+void Tensor::setBackend(const std::string &name, DeviceIdx_t device, bool copyFrom) {
     if (mImpl) {
         if (mImpl->device() != std::make_pair(name, device)) {
             // Backend change: create new impl, copy from old to new and replace
@@ -171,8 +172,8 @@ void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t devic
     }
     }
 
-void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
-                           std::vector<Aidge::DimSize_t> strides) {
+void Tensor::resize(const std::vector<DimSize_t>& dims,
+                           std::vector<DimSize_t> strides) {
     if (dims.empty()) {  // scalar
         mDims = std::vector<DimSize_t>(0);
         mStrides = std::vector<DimSize_t>({1});
@@ -234,11 +235,12 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
     }
 }
 
-std::string Aidge::Tensor::toString() const {
-    AIDGE_ASSERT(
-        mImpl && (undefined() || (dims() == std::vector<DimSize_t>({0})) ||
-                  (mImpl->hostPtr() != nullptr)),
-        "tensor should have a valid host pointer");
+std::string Tensor::toString() const {
+
+    if (!hasImpl() || undefined()) {
+        // Return no value on no implementation or undefined size
+        return std::string("{}");
+    }
 
     // TODO: move lambda elsewhere?
     auto ptrToString = [](DataType dt, void* ptr, std::size_t idx) {
@@ -284,8 +286,10 @@ std::string Aidge::Tensor::toString() const {
     };
 
     if (dims().empty()) {
+        // The Tensor is defined with rank 0, hence scalar
         return ptrToString(mDataType, mImpl->hostPtr(), 0);
     }
+
     std::string res;
     std::size_t dim = 0;
     std::size_t counter = 0;
@@ -352,7 +356,7 @@ std::string Aidge::Tensor::toString() const {
     return res;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(
+Tensor Tensor::extract(
     const std::vector<std::size_t>& fixedCoord) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
     AIDGE_ASSERT(fixedCoord.size() <= mDims.size(),
@@ -368,7 +372,7 @@ Aidge::Tensor Aidge::Tensor::extract(
     return subTensor;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(
+Tensor Tensor::extract(
     const std::vector<std::size_t>& startCoord,
     const std::vector<std::size_t>& dims) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
@@ -382,7 +386,7 @@ Aidge::Tensor Aidge::Tensor::extract(
     return subTensor;
 }
 
-void Aidge::Tensor::makeContiguous() {
+void Tensor::makeContiguous() {
     if (!mImpl || isContiguous()) {
         return;
     }
@@ -420,7 +424,7 @@ void Aidge::Tensor::makeContiguous() {
     resize(mDims);
 }
 
-void Aidge::Tensor::copyCast(const Tensor& src) {
+void Tensor::copyCast(const Tensor& src) {
     if (&src == this) {
         return;
     }
@@ -441,7 +445,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) {
                         src.size(), mImplOffset);
 }
 
-void Aidge::Tensor::copyFrom(const Tensor& src) {
+void Tensor::copyFrom(const Tensor& src) {
     if (&src == this) {
         return;
     }
@@ -462,7 +466,7 @@ void Aidge::Tensor::copyFrom(const Tensor& src) {
                         mImplOffset);
 }
 
-void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) {
+void Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) {
     std::vector<DimSize_t> newDims;
     for (std::size_t i = 0; i < src.dims().size(); ++i) {
         newDims.push_back(src.dims()[transpose[i]]);
@@ -504,11 +508,11 @@ void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t
     setImpl(newImpl);
 }
 
-void Aidge::Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) {
+void Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) {
     copyTranspose(src, std::vector<DimSize_t>(transpose.begin(), transpose.end()));
 }
 
-void Aidge::Tensor::copyCastFrom(const Tensor& src,
+void Tensor::copyCastFrom(const Tensor& src,
                                  std::shared_ptr<Tensor>& movedSrcPtr) {
     if (&src == this) {
         return;
@@ -541,13 +545,13 @@ void Aidge::Tensor::copyCastFrom(const Tensor& src,
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) {
+Tensor& Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) {
     // Scott Meyers' solution to avoid code duplication
     return const_cast<Tensor&>(
         static_cast<const Tensor&>(*this).refContiguous(fallback));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refContiguous(
+const Tensor& Tensor::refContiguous(
     std::shared_ptr<Tensor>& fallback) const {
     AIDGE_ASSERT(getImpl(),
                  "no backend was set for tensor, cannot refCast() it");
@@ -566,15 +570,15 @@ const Aidge::Tensor& Aidge::Tensor::refContiguous(
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
-                                      const Aidge::DataType& dt) {
+Tensor& Tensor::refCast(std::shared_ptr<Tensor>& fallback,
+                                      const DataType& dt) {
     // Scott Meyers' solution to avoid code duplication
     return const_cast<Tensor&>(
         static_cast<const Tensor&>(*this).refCast(fallback, dt));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
-                                            const Aidge::DataType& dt) const {
+const Tensor& Tensor::refCast(std::shared_ptr<Tensor>& fallback,
+                                            const DataType& dt) const {
     AIDGE_ASSERT(getImpl(),
                  "no backend was set for tensor, cannot refCast() it");
 
@@ -607,7 +611,7 @@ const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
+Tensor& Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
                                       const std::string& backend,
                                       DeviceIdx_t device) {
     // Scott Meyers' solution to avoid code duplication
@@ -615,7 +619,7 @@ Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
         static_cast<const Tensor&>(*this).refFrom(fallback, backend, device));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
+const Tensor& Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
                                             const std::string& backend,
                                             DeviceIdx_t device) const {
     AIDGE_ASSERT(getImpl(),
@@ -648,8 +652,8 @@ const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
-                                  const Aidge::DataType& dt,
+Tensor& Tensor::ref(std::shared_ptr<Tensor>& fallback,
+                                  const DataType& dt,
                                   const std::string& backend,
                                   DeviceIdx_t device) {
     // Scott Meyers' solution to avoid code duplication
@@ -657,8 +661,8 @@ Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
         static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device));
 }
 
-const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
-                                        const Aidge::DataType& dt,
+const Tensor& Tensor::ref(std::shared_ptr<Tensor>& fallback,
+                                        const DataType& dt,
                                         const std::string& backend,
                                         DeviceIdx_t device) const {
     AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot ref() it");
@@ -682,9 +686,64 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
     }
 }
 
-std::set<std::string> Aidge::Tensor::getAvailableBackends() {
+
+std::vector<std::size_t>
+Tensor::toCoord(const std::vector<DimSize_t>& dimensions, std::size_t index) {
+    std::vector<std::size_t> coord(dimensions.size());
+    std::size_t i = dimensions.size();
+
+    while (i-- > 0) {
+        coord[i] = (index % dimensions[i]);
+        index /= dimensions[i];
+    }
+    return coord;
+}
+
+
+std::size_t Tensor::toIndex(const std::vector<DimSize_t> &dimensions, const std::vector<std::size_t>& coords) {
+    AIDGE_ASSERT(coords.size() == dimensions.size(), "Tensor::toIndex(): Coordinates do not match number of dimensions.\n\tCoords: {}\n\tDimensions: {}", coords, dimensions);
+    std::size_t index = 0;
+    std::size_t dimensions_s = 1; // stride
+    std::size_t i = dimensions.size();
+    while (i-- > 0) {
+        index += coords[i] * dimensions_s;
+        dimensions_s *= dimensions[i];
+    }
+    return index;
+}
+
+template<typename T>
+bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<T>& coords){
+    AIDGE_ASSERT(coords.size() == dimensions.size(),
+                 "Coordinates ({}) do not have the same "
+                 "number of dimensions as the tensor dimensions ({}), aborting.",
+                 coords,
+                 dimensions);
+    bool isInBound {true};
+    for(std::size_t i = 0 ; i < coords.size() && isInBound; ++i ){
+        isInBound = coords[i] >= 0 && coords[i] < static_cast<T>(dimensions[i]) ;
+    }
+    return isInBound;
+}
+
+
+bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::size_t index){
+    return index < std::accumulate(dimensions.cbegin(), dimensions.cend(), std::size_t(1), std::multiplies<std::size_t>());
+}
+
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int16_t>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int32_t>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int64_t>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::size_t>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<float>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<double>& coords);
+
+
+std::set<std::string> Tensor::getAvailableBackends() {
     std::set<std::string> backendsList;
-    for (const auto& tupleKey : Registrar<Tensor>::getKeys())
+    for (const auto& tupleKey : Registrar<Tensor>::getKeys()) {
         backendsList.insert(std::get<0>(tupleKey));
+    }
     return backendsList;
 }
+}  // namespace Aidge
diff --git a/src/data/interpolation.cpp b/src/data/interpolation.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ce5431a2b3bd742f98a169666811bd5d373a5c24
--- /dev/null
+++ b/src/data/interpolation.cpp
@@ -0,0 +1,237 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/data/Interpolation.hpp"
+
+#include <algorithm>  // std::clamp
+#include <bitset>
+#include <cmath>      // std::ceil, std::floor
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int64_t
+#include <stdexcept>  // std::runtime_error
+#include <utility>    // std::make_pair, std::set
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/half.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Log.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+template <typename T>
+[[noreturn]] T
+Interpolation::interpolate(const std::vector<float> & /*originalIndex*/,
+                           const std::vector<Point<T>> & /*points*/,
+                           const Mode /*interpMode*/) {
+    AIDGE_THROW_OR_ABORT(
+        std::runtime_error,
+        "interpolate() is backend dependent and should be "
+        "called from derived classes: Interpolation<Backend>::interpolate(...). "
+        "Meaning that for CPU backend, InterpolationCPU::interpolate() should "
+        "be called.");
+}
+
+std::vector<float> Interpolation::untransformCoordinates(
+    const std::vector<DimSize_t> &transformedCoords,
+    const std::vector<DimSize_t> &inputDims,
+    const std::vector<DimSize_t> &outputDims,
+    const Interpolation::CoordinateTransformation coordTransfoMode) {
+    AIDGE_ASSERT(
+        inputDims.size() == outputDims.size(),
+        "Interpolate::untransformCoordinates: input and output coordinates "
+        "dimension number mismatch, they should be equal."
+        "Got inputDims({}) and outputDims ({}).",
+        inputDims,
+        outputDims);
+    AIDGE_ASSERT(
+        transformedCoords.size() == outputDims.size(),
+        "Interpolate::untransformCoordinates: coordinates dimension mismatch, "
+        "transformed coords number should be equal to output dimension number."
+        "Got coords to transform ({}) and outputDims ({})",
+        transformedCoords,
+        outputDims);
+    std::vector<float> originalCoords(transformedCoords.size());
+
+    for (DimIdx_t i = 0; i < transformedCoords.size(); ++i) {
+        float scale = static_cast<float>(outputDims[i]) /
+                      static_cast<float>(inputDims[i]);
+
+        switch (coordTransfoMode) {
+        case CoordinateTransformation::AlignCorners:
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "Interpolation::untransformCoords: Unsupported Coordinate "
+                "transform : AlignCorners");
+            break;
+        case CoordinateTransformation::Asymmetric:
+            originalCoords[i] = transformedCoords[i] / scale;
+            break;
+        case CoordinateTransformation::HalfPixel:
+            originalCoords[i] = (transformedCoords[i] + 0.5) / scale - 0.5;
+            break;
+        case CoordinateTransformation::HalfPixelSymmetric:
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "Interpolation::untransformCoords: Unsupported Coordinate "
+                "transform : HalfPixelSymmetric");
+            break;
+        case Interpolation::CoordinateTransformation::PytorchHalfPixel:
+
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "Interpolation::untransformCoords: Unsupported Coordinate "
+                "transform : PytorchHalfPixel");
+            break;
+        }
+    }
+    return originalCoords;
+}
+
+/**
+ * @details Generates a list of all neighbours of a given coordinate.
+ * Since the coordinates are floating points as they are the result of
+ * Interpolation::untransformCoords, they are approximation of coordinates in
+ * originalTensor frame from coordinates in interpolatedTensor frame.
+ *
+ * So to retrieve the neighbouring values, we must apply either floor() or
+ * ceil() to each coordinate.
+ *
+ * In order to generate the list of all combinations
+ * available, we simply iterate through the bits of each values from 0 to
+ * tensorDims.
+ * @example: in 2 dimensions, we have the point (1.3, 3.4)
+ * we iterate up to 2^2 - 1 and
+ * 0 = 0b00 -> (floor(x), floor(y)) = (1,3)
+ * 1 = 0b01 -> (floor(x), ceil(y))  = (1,4)
+ * 2 = 0b10 -> (ceil(x) , floor(y)) = (2,3)
+ * 3 = 0b11 -> (ceil(x) , ceil(y))  = (2,4)
+ */
+template <typename T>
+std::set<Interpolation::Point<T>>
+Interpolation::retrieveNeighbours(const T *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode) {
+
+    Log::debug("retrieveNeighbours: TensorDims : {}", tensorDims);
+    Log::debug("retrieveNeighbours: coords to interpolate : {}", coords);
+
+    // Will retrieve out of bound values depending on given padding mode.
+    // auto retrieveOutOfBoundValue =
+    //     [&tensorValues, &tensorDims, &paddingMode](Coords coord) -> T {
+    //     std::vector<DimSize_t> rectifiedCoord;
+    //     rectifiedCoord.reserve(coord.size());
+    //     switch (paddingMode) {
+    //     case Aidge::PadBorderType::Edge: {
+    //         for (DimSize_t i = 0; i < coord.size(); ++i) {
+    //             rectifiedCoord[i] = coord[i] < 0 ? 0 : tensorDims[i] - 1;
+    //         }
+    //         return tensorValues[Tensor::getIdx(tensorDims, rectifiedCoord)];
+    //     }
+    //     case Aidge::PadBorderType::Zero: {
+    //         return static_cast<T>(0);
+    //     }
+    //     default: {
+    //         AIDGE_THROW_OR_ABORT(
+    //             std::runtime_error,
+    //             "Unsupported padding mode as of now for interpolation.");
+    //     }
+    //     }
+    // };
+
+    std::set<Point<T>> neighbours;
+    const std::size_t nbNeighbours = std::size_t(1) << tensorDims.size();
+    Coords neighbourCoords(tensorDims.size());
+
+    for (std::size_t i = 0; i < nbNeighbours; ++i) {
+        const std::bitset<MaxDim> bits = std::bitset<MaxDim>{i};
+        for (size_t j = 0; j < tensorDims.size(); ++j) {
+            neighbourCoords[j] =
+                bits[j] == 0 ? std::ceil(coords[j]) : std::floor(coords[j]);
+        }
+
+        T value;
+        if (Tensor::isInBounds(tensorDims, neighbourCoords)) {
+            // cast from unsigned to signed won't create problem as we ensured
+            // that all neighboursCoords values are > 0 with isInBounds
+            value = tensorValues[Tensor::toIndex(
+                tensorDims,
+                std::vector<DimSize_t>(neighbourCoords.begin(),
+                                       neighbourCoords.end()))];
+        } else {
+            switch (paddingMode) {
+                case PadBorderType::Edge:
+                    for (DimSize_t j = 0; j < tensorDims.size(); ++j) {
+                        neighbourCoords[j] = (neighbourCoords[j] < 0) ? 0 :
+                                                ((neighbourCoords[j] >= static_cast<std::int64_t>(tensorDims[j])) ? (tensorDims[j] - 1) :
+                                                    neighbourCoords[j]);
+                    }
+                    value = tensorValues[Tensor::toIndex(
+                                tensorDims,
+                                std::vector<DimSize_t>(neighbourCoords.begin(),
+                                                    neighbourCoords.end()))];
+                    break;
+                case PadBorderType::Zero:
+                    value = static_cast<T>(0);
+                    break;
+                default:
+                    AIDGE_THROW_OR_ABORT(
+                        std::runtime_error,
+                        "Unsupported padding mode as of now for interpolation.");
+            }
+        }
+        neighbours.insert(std::make_pair(neighbourCoords, value));
+    }
+    Log::debug("Interpolation::retrieveNeighbours(): neighbours: {}",
+               neighbours);
+    return neighbours;
+}
+
+template std::set<Interpolation::Point<int16_t>>
+Interpolation::retrieveNeighbours(const int16_t *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<int32_t>>
+Interpolation::retrieveNeighbours(const int32_t *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<int64_t>>
+Interpolation::retrieveNeighbours(const int64_t *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<half_float::half>>
+Interpolation::retrieveNeighbours(const half_float::half *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<float>>
+Interpolation::retrieveNeighbours(const float *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<double>>
+Interpolation::retrieveNeighbours(const double *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+} // namespace Aidge
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index b2c03e794888a0909ada5db208fc07ad266d4ae2..e1f25e86827e81b26436876dce1b98fe0cda80b8 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -153,18 +153,20 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
               if (parent.first == node_ptr && parent.second == outputIdx) {
                 // Add-on to display the operator's output dimensions
                 std::string dims = "";
+                std::string dtype = "";
                 const auto op = std::dynamic_pointer_cast<OperatorTensor>(node_ptr->getOperator());
                 if (op && !op->getOutput(outputIdx)->undefined()) {
                   dims += " " + fmt::format("{}", op->getOutput(outputIdx)->dims());
+                  dtype += "\n" + fmt::format("{}", op->getOutput(outputIdx)->dataType());
                 }
 
                 if (mNodes.find(child) != mNodes.end()) {
-                  fmt::print(fp.get(), "{}_{}-->|\"{}{}&rarr;{}\"|{}_{}\n", node_ptr->type(), namePtrTable.at(node_ptr),
-                              outputIdx, dims, inputIdx, child->type(), namePtrTable.at(child));
+                  fmt::print(fp.get(), "{}_{}-->|\"{}{}{}&rarr;{}\"|{}_{}\n", node_ptr->type(), namePtrTable.at(node_ptr),
+                              outputIdx, dims, dtype, inputIdx, child->type(), namePtrTable.at(child));
                 }
                 else if (verbose) {
-                  fmt::print(fp.get(), "{}_{}-->|\"{}{}&rarr;{}\"|{}:::externalCls\n", node_ptr->type(), namePtrTable.at(node_ptr),
-                              outputIdx, dims, inputIdx, static_cast<void*>(child.get()));
+                  fmt::print(fp.get(), "{}_{}-->|\"{}{}{}&rarr;{}\"|{}:::externalCls\n", node_ptr->type(), namePtrTable.at(node_ptr),
+                              outputIdx, dims, dtype, inputIdx, static_cast<void*>(child.get()));
                 }
                 // Do no break here because the same child can be connected to several inputs
               }
diff --git a/src/graph/StaticAnalysis.cpp b/src/graph/StaticAnalysis.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..033e51022842983caacba9385248c9f02c1e5568
--- /dev/null
+++ b/src/graph/StaticAnalysis.cpp
@@ -0,0 +1,171 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/graph/StaticAnalysis.hpp"
+
+Aidge::OperatorStats::OperatorStats(const Operator& op)
+  : mOp(op)
+{
+    //ctor
+}
+
+size_t Aidge::OperatorStats::getNbArithmIntOps() const {
+    const auto opTensor = dynamic_cast<const OperatorTensor*>(&mOp);
+    if (opTensor) {
+        if (!isDataTypeFloatingPoint(opTensor->getOutput(0)->dataType())) {
+            return getNbArithmOps();
+        }
+    }
+    return 0;
+}
+
+Aidge::StaticAnalysis::StaticAnalysis(std::shared_ptr<GraphView> graph)
+  : mGraph(graph)
+{
+    //ctor
+}
+
+void Aidge::StaticAnalysis::summary(bool incProducers) const {
+    fmt::println("--------------------------------------------------------------------------------");
+    fmt::println("                        Layer (type)               Output Shape         Param #");
+    fmt::println("================================================================================");
+
+    size_t nbParams = 0;
+    size_t paramsSize = 0;  // Size in bits
+    size_t fwdBwdSize = 0;  // Size in bits
+
+    const auto namePtrTable = mGraph->getRankedNodesName("{0} ({1}#{3})");
+    for (const auto node : mGraph->getOrderedNodes()) {
+        if (node->type() == Producer_Op::Type && !incProducers) {
+            continue;
+        }
+
+        auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator());
+        std::string outputDimsStr = fmt::format("{: >27}", "?");
+        if (opTensor) {
+            const auto outputDims = opTensor->getOutput(0)->dims();
+            outputDimsStr = fmt::format("{: >27}", fmt::format("{}", outputDims));
+  
+            for (size_t out = 0; out < node->nbOutputs(); ++out) {
+                const auto output = opTensor->getOutput(out);
+                if (output && node->type() != Producer_Op::Type) {
+                    fwdBwdSize += output->size()
+                        * getDataTypeBitWidth(output->dataType());
+                }
+            }
+        }
+
+        nbParams += getNbParams(node);
+        paramsSize += getParamsSize(node);
+        fmt::println("{: >36}{}{: >16}",
+          namePtrTable.at(node), outputDimsStr, getNbParams(node));
+    }
+
+    size_t inputSize = 0;  // Size in bits
+    for (const auto input : mGraph->getOrderedInputs()) {
+        if (input.first) {
+            auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator());
+            if (opTensor && opTensor->getInput(input.second)) {
+                inputSize += opTensor->getInput(input.second)->size()
+                    * getDataTypeBitWidth(opTensor->getInput(input.second)->dataType());
+            }
+        }
+    }
+
+    fmt::println("================================================================================");
+    fmt::println("Total params: {}", nbParams);
+    fmt::println("--------------------------------------------------------------------------------");
+    fmt::println("Input size (MB): {}", inputSize / 8.0 / 1024 / 1024);
+    fmt::println("Forward/backward pass size (MB): {}", fwdBwdSize / 8.0 / 1024 / 1024);
+    fmt::println("Params size (MB): {}", paramsSize / 8.0 / 1024 / 1024);
+    fmt::println("Estimated Total Size (MB): {}", (inputSize + fwdBwdSize + paramsSize) / 8.0 / 1024 / 1024);
+    fmt::println("--------------------------------------------------------------------------------");
+}
+
+size_t Aidge::StaticAnalysis::getNbParams(std::shared_ptr<Node> node) const {
+    const auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator());
+
+    size_t nbParams = 0;
+
+    // Look for Producers directly attached to the node's inputs.
+    size_t i = 0;
+    for (auto parent : node->inputs()) {
+        if (parent.first && mGraph->inView(parent.first)) {
+            if (parent.first->type() == Producer_Op::Type && opTensor->getInput(i)) {
+                nbParams += opTensor->getInput(i)->size();
+            }
+        }
+        ++i;
+    }
+
+    // Look for internal Producers, in case of meta-op.
+    if (!node->getOperator()->isAtomic()) {
+        const auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(node->getOperator())->getMicroGraph();
+        for (const auto internalNode : microGraph->getNodes()) {
+            if (internalNode->type() == Producer_Op::Type) {
+                const auto internalOpTensor = std::dynamic_pointer_cast<OperatorTensor>(internalNode->getOperator());
+                nbParams += internalOpTensor->getOutput(0)->size();
+            }
+        }
+    }
+
+    return nbParams;
+}
+
+size_t Aidge::StaticAnalysis::getParamsSize(std::shared_ptr<Node> node) const {
+    const auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator());
+
+    size_t paramsSize = 0;
+
+    // Look for Producers directly attached to the node's inputs.
+    size_t i = 0;
+    for (auto parent : node->inputs()) {
+        if (parent.first && mGraph->inView(parent.first)) {
+            if (parent.first->type() == Producer_Op::Type && opTensor->getInput(i)) {
+                paramsSize += opTensor->getInput(i)->size()
+                    * getDataTypeBitWidth(opTensor->getInput(i)->dataType());
+            }
+        }
+        ++i;
+    }
+
+    // Look for internal Producers, in case of meta-op.
+    if (!node->getOperator()->isAtomic()) {
+        const auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(node->getOperator())->getMicroGraph();
+        for (const auto internalNode : microGraph->getNodes()) {
+            if (internalNode->type() == Producer_Op::Type) {
+                const auto internalOpTensor = std::dynamic_pointer_cast<OperatorTensor>(internalNode->getOperator());
+                paramsSize += internalOpTensor->getOutput(0)->size()
+                    * getDataTypeBitWidth(internalOpTensor->getOutput(0)->dataType());
+            }
+        }
+    }
+
+    return paramsSize;
+}
+
+std::shared_ptr<Aidge::OperatorStats> Aidge::StaticAnalysis::getOpStats(std::shared_ptr<Node> node) const {
+    return (Registrar<OperatorStats>::exists(node->type()))
+        ? Registrar<OperatorStats>::create(node->type())(*(node->getOperator()))
+        : (node->getOperator()->isAtomic())
+            ? std::make_shared<OperatorStats>(*(node->getOperator()))
+            : std::make_shared<MetaOpStats>(*(node->getOperator()));
+}
+
+size_t Aidge::StaticAnalysis::accumulate(size_t (OperatorStats::*func)() const) const {
+    return std::accumulate(
+        mGraph->getNodes().cbegin(),
+        mGraph->getNodes().cend(),
+        std::size_t(0),
+        [this, func](const size_t& lhs, const std::shared_ptr<Node>& rhs) {
+            return lhs + (this->getOpStats(rhs).get()->*func)();
+        });
+}
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index 0f90a5a58dbd8d69b847022c336075ecc3f3d553..c5bca92406e518df593fcc6c3a40525a4ba81dfa 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -86,7 +86,15 @@ bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
 }
 
 void Aidge::GenericOperator_Op::setBackend(const std::string & name, DeviceIdx_t device) {
-    Log::warn("GenericOperator::setBackend(): cannot set backend for a generic operator, as no implementation has been provided!");
+    if (Registrar<GenericOperator_Op>::exists({name, type()})) {
+        // A custom implementation exists for this generic operator
+        mImpl = Registrar<GenericOperator_Op>::create({name, type()})(*this);
+    }else{
+        Log::warn("GenericOperator::setBackend(): cannot set backend for a generic operator, as no implementation has been provided!");
+    }
+
+
+
 
     for (std::size_t i = 0; i < nbOutputs(); ++i) {
         mOutputs[i]->setBackend(name, device);
@@ -108,4 +116,4 @@ std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
                                                 Aidge::IOIndex_t nbOut,
                                                 const std::string& name) {
     return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index bbcfd0d28ca039318647d206af876727793e1bfc..57886ec2faec86bc5d3a515ed685fdcfd0e15e4e 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -44,8 +44,10 @@ bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
         // Global average pooling takes each filter, averages its values and uses
         // it as an output(Much like a fancier flatten). 1st dim is batch 2nd is
         // number of filter
-        mOutputs[0]->resize({getInput(0)->dims().at(0),
-                             getInput(0)->dims().at(1)});
+        std::vector<DimSize_t> outputDims(getInput(0)->nbDims(), 1);
+        outputDims[0] = getInput(0)->dims()[0];
+        outputDims[1] = getInput(0)->dims()[1];
+        mOutputs[0]->resize(outputDims);
         return true;
     }
 
diff --git a/src/operator/LRN.cpp b/src/operator/LRN.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c5ce243bd6a48ae4b1ce8461924b498c804b53e6
--- /dev/null
+++ b/src/operator/LRN.cpp
@@ -0,0 +1,60 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/LRN.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::LRN_Op::Type = "LRN";
+
+Aidge::LRN_Op::LRN_Op(std::int32_t size)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<LRNAttr::Alpha>(0.0001),
+        attr<LRNAttr::Beta>(0.75),
+        attr<LRNAttr::Bias>(1.0),
+        attr<LRNAttr::Size>(size)))
+{}
+
+Aidge::LRN_Op::LRN_Op(const Aidge::LRN_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(LRN_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::LRN_Op::clone() const {
+    return std::make_shared<LRN_Op>(*this);
+}
+
+void Aidge::LRN_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    mImpl = Registrar<LRN_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::LRN_Op::getAvailableBackends() const {
+    return Registrar<LRN_Op>::getKeys();
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::LRN(std::int32_t size, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<LRN_Op>(size), name);
+}
\ No newline at end of file
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 61239071a99a9dfca8613ef78eba17757c4276b7..cd4a4808137a786b04d8d143e50b96b9648e7d9a 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -150,7 +150,7 @@ void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
 }
 
 void Aidge::Memorize_Op::forward() {
-    Operator::forward();
+    OperatorTensor::forward();
     ++mAttributes->template getAttr<MemorizeAttr::ForwardStep>();
     mAttributes->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
 }
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index d93d7d320f0f508b20714943ae3c8ed7fc561ec8..060c725482fedf4d6093e5acb988b2c721c27edc 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -87,12 +87,26 @@ void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceId
     if (Registrar<MetaOperator_Op>::exists({name, type()})) {
         // A custom implementation exists for this meta operator
         mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
+
+        // Set backend for in/out tensor of the MetaOp
+        for(auto i: mGraph->inputNodes()){
+            auto op_i = std::static_pointer_cast<OperatorTensor>(i->getOperator());
+            for(std::size_t in_idx=0; in_idx < op_i->nbInputs(); ++in_idx){
+                op_i->getInput(in_idx)->setBackend(name, device);
+            }
+        }
+        for(auto o: mGraph->outputNodes()){
+            auto op_o = std::static_pointer_cast<OperatorTensor>(o->getOperator());
+            for(std::size_t out_idx=0; out_idx < op_o->nbOutputs(); ++out_idx){
+                op_o->getOutput(out_idx)->setBackend(name, device);
+            }
+        }
+    }else{
+        // Input/output tensor backends are also updated here.
+        mGraph->setBackend(name, device);
     }
 
-    // The micro-graph should always be set to the right backend, since it
-    // shares input/output tensors.
-    // Input/output tensors backend are updated here.
-    mGraph->setBackend(name, device);
+
 }
 
 std::set<std::string> Aidge::MetaOperator_Op::getAvailableBackends() const {
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index ff6fb9ce4b6b8596477dfdd1f43f8927e534459b..586dbc2037d36d26f39dd06404b3b70b99270c1e 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -45,7 +45,8 @@ Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other)
 
 void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs());
-    AIDGE_ASSERT(data->type() == Tensor::Type, "Input data must be of Tensor type");
+    AIDGE_ASSERT(data != nullptr, "Undefined data argument, make sure that the associated tensor holds data before associating the input.")
+    AIDGE_ASSERT(data->type() == Tensor::Type, "OperatorTensor::associateInput(): Input data must be of Tensor type, got {}", data->type());
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
 }
 
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index cd5b18759cdd743f292054bca91ffee5da722ea6..a27e2745b8929e84456ac079d063d94ffa359679 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -24,6 +24,7 @@ Aidge::Elts_t Aidge::Pop_ProdConso::getNbRequiredData(const Aidge::IOIndex_t inp
     assert(mOp.getRawInput(inputIdx) && "requires valid input");
 
     const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+    AIDGE_ASSERT(!op.getInput(inputIdx)->empty(), "Pop operator requires known, non-empty, input dims for scheduling. You might have an unresolved data dependency upstream in the computing graph.");
     return Elts_t::DataElts(op.getInput(inputIdx)->size()
         / op.getInput(inputIdx)->dims()[0]);
 }
@@ -93,7 +94,7 @@ std::set<std::string> Aidge::Pop_Op::getAvailableBackends() const {
 }
 
 void Aidge::Pop_Op::forward() {
-    Operator::forward();
+    OperatorTensor::forward();
     ++mAttributes->template getAttr<PopAttr::ForwardStep>();
 }
 
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index 9e5762452e382a31c1e5da25708507653da2e474..252f55a6abdea13cc43cf21d3e8c7ab33ddbb86e 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -11,56 +11,31 @@
 
 #include "aidge/operator/Resize.hpp"
 
-#include <cstddef>    // std::size_t
-#include <cstdint>    // std::int64_t
-#include <stdexcept>  // std::runtime_error
+#include <algorithm>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::int64_t
+#include <fmt/core.h>
+#include <stdexcept> // std::runtime_error
 #include <string>
 #include <vector>
-#include <fmt/core.h>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Interpolation.hpp"
 #include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Resize_Op::Type = "Resize";
-
-Aidge::Resize_Op::Resize_Op()
-    : OperatorTensor(Type,
-        {InputCategory::Data,
-            InputCategory::OptionalData,
-            InputCategory::OptionalData,
-            InputCategory::OptionalData},
-        1) {}
-
-/**
- * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
- * but not its input tensors (the new operator has no input associated).
- * @param op Operator to copy.
- */
-
-Aidge::Resize_Op::Resize_Op(const Aidge::Resize_Op& op)
-    : OperatorTensor(op)
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Resize_Op, *this, op.backend());
-    }
-    else {
-        mImpl = nullptr;
-    }
-}
+namespace Aidge {
 
-std::shared_ptr<Aidge::Operator> Aidge::Resize_Op::clone() const {
-    return std::make_shared<Resize_Op>(*this);
-}
+const std::string Resize_Op::Type = "Resize";
 
-bool Aidge::Resize_Op::dimsForwarded() const {
+bool Resize_Op::dimsForwarded() const {
     // in case of ROI add getInput(1) condition
-    if ((getInput(1) && !getInput(1)->undefined())
-        || (getInput(2) && !getInput(2)->undefined())
-        || (getInput(3) && !getInput(3)->undefined())
-        )
-    {
+    if ((getInput(1) && !getInput(1)->undefined()) ||
+        (getInput(2) && !getInput(2)->undefined()) ||
+        (getInput(3) && !getInput(3)->undefined())) {
         // output dims are data dependent
         return false;
     }
@@ -68,93 +43,137 @@ bool Aidge::Resize_Op::dimsForwarded() const {
     return OperatorTensor::dimsForwarded();
 }
 
-bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
-    if (inputsAssociated()) {
-        AIDGE_ASSERT(getInput(0)->nbDims() == 4,
-            "input tensor must have dimensions = 4 (batch, channel, height, width).");
-
-        const bool input1ROIPresent           = getInput(1) && !getInput(1)->undefined();
-        const bool input2ScalesPresent        = getInput(2) && !getInput(2)->undefined();
-        const bool input3SizesPresent         = getInput(3) && !getInput(3)->undefined();
+bool Resize_Op::forwardDims(bool allowDataDependency) {
+    if (!allowDataDependency) {
+        Log::warn("{}: cannot execute forwardDims() as the output "
+                    "dimensions are computed from some input data.",
+                    type());
+        return false;
+    }
 
-        AIDGE_ASSERT(input2ScalesPresent != input3SizesPresent, "Only one of scales and  sizes can be specified.")
+    // Some optional input may be linked but undefined because of ONNX import
+    if (!inputsAssociated(false)) {
+        return false;
+    }
 
-        if (input1ROIPresent) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Input #1 (ROI) is given and it is not supported.");
+    /** @brief input #0 */
+    constexpr IOIndex_t inDataIdx = 0;
+    /** @brief input #1 */
+    constexpr IOIndex_t inROIIdx = 1;
+    /** @brief input #2 */
+    constexpr IOIndex_t inScalesIdx = 2;
+    /** @brief input #3 */
+    constexpr IOIndex_t inSizesIdx = 3;
+
+    std::vector<DimSize_t> outDims = getInput(inDataIdx)->dims();
+    /////////////////////////////////////////////////////
+    // Ensuring operator is connected properly
+    const bool inputROIPresent =
+        getInput(inROIIdx) && !getInput(inROIIdx)->undefined();
+    if (inputROIPresent) {
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "{}: input ROI(#{}) is present but it is not supported.",
+                type(),
+                inROIIdx);
         }
-        else if (input2ScalesPresent)  {
-            if (!allowDataDependency) {
-                Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #2");
-                return false;
-            }
-
-            AIDGE_ASSERT(getInput(0)->nbDims() ==  getInput(2)->size(),
-                "input #0 and input #2 (Scales) must have the same dimensions.");
-
-            std::vector<DimSize_t>      outDims = getInput(0)->dims();
-            const std::vector<DimSize_t> inDims = getInput(0)->dims();
 
-            std::shared_ptr<Tensor> fallback;
-            const auto& scales = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-
-            for (std::size_t dim=0; dim < getInput(2)->size(); ++dim) {
-                outDims[dim] = inDims[dim]*static_cast<int64_t*>(scales.getImpl()->hostPtr())[dim];
-            }
-
-            mOutputs[0]->resize(outDims);
-            return true;
+    const bool inputScalesPresent =
+        getInput(inScalesIdx) && !getInput(inScalesIdx)->undefined();
+    const bool inputSizesPresent =
+        getInput(inSizesIdx) && !getInput(inSizesIdx)->undefined();
+
+    AIDGE_ASSERT(inputScalesPresent ^ inputSizesPresent,
+                 "{}: Only one of the two inputs must be defined between input "
+                 "Scales(#2) "
+                 "and Sizes(#3). They cannot be specified at the same time.",
+                 type())
+
+    std::shared_ptr<Tensor> resizeParam = inputScalesPresent ? getInput(inScalesIdx) : getInput(inSizesIdx);
+    AIDGE_ASSERT(getInput(inDataIdx)->nbDims() == resizeParam->size(),
+        "{}: data input #0 and resizing parameter input #{} must have the "
+        "same dimensions.",
+        type(), inputScalesPresent ? inScalesIdx : inSizesIdx);
+
+
+    ////////////////////////////////////////////
+    // Case resize is done using Scales formula
+    if (inputScalesPresent) {
+
+        std::shared_ptr<Tensor> fallback;
+        const auto &scales =
+            resizeParam
+                ->refCastFrom(fallback,
+                              DataType::Float32,
+                              resizeParam->backend());
+
+        const std::vector<DimSize_t> inDims = getInput(inDataIdx)->dims();
+        for (std::size_t dim = 0; dim < getInput(inScalesIdx)->size(); ++dim) {
+            const auto scaleAlongDim = scales.get<cpptype_t<DataType::Float32>>(dim);
+            AIDGE_ASSERT(scaleAlongDim > 0,
+                         "{}: all scale values must be strictly positive, "
+                         "got {}.",
+                         type(),
+                         scaleAlongDim);
+            outDims[dim] =
+                static_cast<DimSize_t>(inDims[dim] * scaleAlongDim);
         }
-        else if (input3SizesPresent) {
-            if (!allowDataDependency) {
-                Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #3");
-                return false;
-            }
-
-            AIDGE_ASSERT(getInput(0)->nbDims() ==  getInput(3)->size(),
-                "input #0 and input #3 (Sizes) must have the same dimensions.");
-
-            std::vector<DimSize_t> outDims = getInput(0)->dims();
 
-            std::shared_ptr<Tensor> fallback;
-            const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-
-            for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
-                outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
-            }
-
-            mOutputs[0]->resize(outDims);
-            return true;
-        }
-        else {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Error: Either Input #2 or Input #3 must be present.");
+        ///////////////////////////////////////////////////////////////
+        // case where resize output dims are given via the Size input
+    } else {
+        std::shared_ptr<Tensor> fallback;
+        const auto &sizes = resizeParam
+                                ->refCastFrom(fallback,
+                                              NativeType<DimSize_t>::type,
+                                              resizeParam->backend());
+
+        for (std::size_t dim = 0; dim < getInput(inSizesIdx)->size(); ++dim) {
+            outDims[dim] = sizes.get<DimSize_t>(dim);
         }
     }
-
-    return false;
+    mOutputs[0]->resize(outDims);
+    return true;
 }
 
-void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+void Resize_Op::setBackend(const std::string &name, DeviceIdx_t device) {
     SET_IMPL_MACRO(Resize_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 
-    // By default, automatically set backend for all inputs: roi, scales and sizes
-    if(getInput(1)) {
+    // By default, automatically set backend for all optional inputs: roi, scales and
+    // sizes
+    if (getInput(1)) {
         getInput(1)->setBackend(name, device);
     }
-    if(getInput(2)) {
+    if (getInput(2)) {
         getInput(2)->setBackend(name, device);
     }
-    if(getInput(3)) {
+    if (getInput(3)) {
         getInput(3)->setBackend(name, device);
     }
 }
 
-std::set<std::string> Aidge::Resize_Op::getAvailableBackends() const {
-    return Registrar<Resize_Op>::getKeys();
-}
-
-/////////////////////////////////////////////
+std::shared_ptr<Node>
+Resize(std::vector<float> scale,
+        std::vector<std::size_t> size,
+       Interpolation::CoordinateTransformation coordTransfoMode,
+       Interpolation::Mode interpolMode,
+       float cubicCoefA,
+       const std::string &name) {
+    std::shared_ptr<Node> node_resize = std::make_shared<Node>(std::make_shared<Resize_Op>(coordTransfoMode,
+                                                              interpolMode,
+                                                              cubicCoefA),
+                                  name);
+    if (scale.size()) {
+        std::shared_ptr<Node> prod_scale = Producer(std::make_shared<Tensor>(Vector<float>(scale)));
+        prod_scale->addChild(node_resize, 0, 2);
+    }
+    if (size.size())
+    {
+        std::shared_ptr<Node> prod_size = Producer(std::make_shared<Tensor>(Vector<std::size_t>(size)));
+        prod_size->addChild(node_resize, 0, 3);
+    }
+    return node_resize;
 
-std::shared_ptr<Aidge::Node> Aidge::Resize(const std::string &name) {
-    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
-}
\ No newline at end of file
+}
+} // namespace Aidge
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..efe6296a351f69ef3a11d4e1bc04bd0b52d46a06
--- /dev/null
+++ b/src/operator/Stack.cpp
@@ -0,0 +1,124 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Stack.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+// TODO: Check why getRequiredMemory is always called with empty vector as
+// inputsSize
+Elts_t StackProdConso::getRequiredMemory(
+    const Aidge::IOIndex_t inputIdx,
+    const std::vector<DimSize_t> &inputsSize) const {
+    assert(mOp.getRawInput(inputIdx) && "requires valid input");
+
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    // The produced data after one forward pass is simply the input size,
+    // we do not produce the whole output tensor every time.
+    if (op.forwardStep() <= op.maxElements()) {
+        return Elts_t::DataElts(op.getInput(inputIdx)->size());
+    } else {
+        return Elts_t::NoneElts();
+    }
+}
+
+void StackProdConso::resetConsummerProducer() {
+    ProdConso::resetConsummerProducer();
+
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    op.forwardStep() = 0;
+}
+
+const std::string StackOp::s_type = "Stack";
+
+void StackOpImpl::forward() {
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    AIDGE_ASSERT(op.getInput(0), "missing input #0");
+    AIDGE_ASSERT((op.forwardStep() < op.maxElements()),
+                 "cannot forward anymore, maximum number of elements to stack "
+                 "exceeded");
+
+    op.getOutput(0)->getImpl()->copy(
+        op.getInput(0)->getImpl()->rawPtr(),
+        op.getInput(0)->size(),
+        op.forwardStep() * op.getInput(0)->size());
+}
+
+StackOp::StackOp(std::uint32_t maxElements)
+    : OperatorTensor(s_type, {InputCategory::Data}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+          attr<StackAttr::MaxElements>(maxElements),
+          attr<StackAttr::ForwardStep>(0))) {
+    if (maxElements == 0) {
+        AIDGE_THROW_OR_ABORT(
+            std::invalid_argument,
+            "StackOp creation failed: maxElements must be greater than 0.");
+    }
+    mImpl = std::make_shared<StackOpImpl>(*this);
+}
+
+StackOp::StackOp(const Aidge::StackOp &op)
+    : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(StackOp, *this, op.backend());
+    } else {
+        mImpl = std::make_shared<StackOpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::StackOp::clone() const {
+    return std::make_shared<StackOp>(*this);
+}
+
+bool Aidge::StackOp::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        auto inputDims = getInput(0)->dims();
+        inputDims.insert(inputDims.begin(), maxElements());
+        getOutput(0)->resize(inputDims);
+        return true;
+    }
+
+    return false;
+}
+
+void StackOp::setBackend(const std::string &name, DeviceIdx_t device) {
+    if (Registrar<StackOp>::exists({name})) {
+        SET_IMPL_MACRO(StackOp, *this, name);
+    } else {
+        mImpl = std::make_shared<StackOpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> StackOp::getAvailableBackends() const {
+    return Registrar<StackOp>::getKeys();
+}
+
+void StackOp::forward() {
+    Operator::forward();
+    ++forwardStep();
+}
+
+std::shared_ptr<Node> stack(std::uint32_t maxElements,
+                            const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<StackOp>(maxElements),
+                                  name);
+}
+} // namespace Aidge
diff --git a/src/recipes/FuseToMetaOps.cpp b/src/recipes/FuseToMetaOps.cpp
index 0ad5e5a1da0e6aef74f7e47751dd2d4e8648980b..ac6536d7e7ec89c5eb1b9efb0c301cfa979739cf 100644
--- a/src/recipes/FuseToMetaOps.cpp
+++ b/src/recipes/FuseToMetaOps.cpp
@@ -17,9 +17,9 @@
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/recipes/Recipes.hpp"
 
-size_t Aidge::fuseToMetaOps(std::shared_ptr<GraphView> graphView, const std::string& query, const std::string& type) {
+size_t Aidge::fuseToMetaOps(SinglePassGraphMatching& gm, const std::string& query, const std::string& type) {
     const auto metaType = (!type.empty()) ? type : query;
-    const auto matches = SinglePassGraphMatching(graphView).match(query);
+    const auto matches = gm.match(query);
 
     size_t nbReplaced = 0;
     for (const auto& match : matches) {
@@ -48,3 +48,8 @@ size_t Aidge::fuseToMetaOps(std::shared_ptr<GraphView> graphView, const std::str
     Log::info("Replaced {} (out of {}) matching sub-graph with meta operators", nbReplaced, matches.size());
     return nbReplaced;
 }
+
+size_t Aidge::fuseToMetaOps(std::shared_ptr<GraphView> graphView, const std::string& query, const std::string& type) {
+    SinglePassGraphMatching gm(graphView);
+    return fuseToMetaOps(gm, query, type);
+}
diff --git a/src/recipes/GraphViewHelper.cpp b/src/recipes/GraphViewHelper.cpp
index 9522c0fe7346e78875a08d3ebf19a04dea2909e1..bbc6524ccbe7158476a6bd846d77da754f186091 100644
--- a/src/recipes/GraphViewHelper.cpp
+++ b/src/recipes/GraphViewHelper.cpp
@@ -16,17 +16,20 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/recipes/GraphViewHelper.hpp"
 
 
-std::set<std::shared_ptr<Aidge::Tensor>> Aidge::producers(std::shared_ptr<Aidge::GraphView> graphview) {
+std::set<std::shared_ptr<Aidge::Tensor>> Aidge::producers(std::shared_ptr<Aidge::GraphView> graphview, bool constant) {
     std::set<std::shared_ptr<Tensor>> res;
     const auto& nodes = graphview->getNodes();
     for (const auto& node : nodes) {
         if (node->type() == "Producer") {
-            const auto& param = std::static_pointer_cast<OperatorTensor>(node->getOperator());
-            res.insert(param->getOutput(0));
+            const auto& producer = std::static_pointer_cast<Producer_Op>(node->getOperator());
+            if (!producer->constant() || constant) {
+                res.insert(producer->getOutput(0));
+            }
         }
     }
     return res;
diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp
index da32a8e0ec6a3c9f27da5c47f9e6166c1fc879bc..136fcc16fe5f83f684f0686fe64105023f8cc688 100644
--- a/src/utils/Log.cpp
+++ b/src/utils/Log.cpp
@@ -56,13 +56,13 @@ std::string Aidge::Log::mFileName = []() {
     }
     return std::string();
 }();
-std::unique_ptr<FILE, decltype(&std::fclose)> Aidge::Log::mFile {nullptr, nullptr};
+std::unique_ptr<FILE, Aidge::Log::fcloseDeleter> Aidge::Log::mFile {nullptr, Aidge::Log::fcloseDeleter()};
 std::vector<std::string> Aidge::Log::mContext;
 
 void Aidge::Log::log(Level level, const std::string& msg) {
     if (level >= mConsoleLevel) {
         // Apply log level style only for console.
-        // Styles that were already applied to msg with fmt are kept also in 
+        // Styles that were already applied to msg with fmt are kept also in
         // the log file.
         const auto modifier
             = !mConsoleColor ? fmt::text_style()
@@ -94,14 +94,17 @@ void Aidge::Log::log(Level level, const std::string& msg) {
 }
 
 void Aidge::Log::initFile(const std::string& fileName) {
-    mFile = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen(fileName.c_str(), "a"), &std::fclose);
+    FILE* rawFile = std::fopen(fileName.c_str(), "a");
 
-    if (!mFile) {
+    if (!rawFile) {
         mFileName.clear(); // prevents AIDGE_THROW_OR_ABORT() to try to log into file
         AIDGE_THROW_OR_ABORT(std::runtime_error,
             "Could not create log file: {}", fileName);
     }
 
+    // Assign the new FILE* with the deleter
+    mFile = std::unique_ptr<FILE, fcloseDeleter>(rawFile, fcloseDeleter());
+
     const std::time_t t = std::time(nullptr);
     fmt::println(mFile.get(), "###### {:%Y-%m-%d %H:%M:%S} ######", fmt::localtime(t));
 }
diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt
index fd96b060630c162e93143e8f51019a0ce3e82cc9..2c0c746a412643c568ff07d8c50b5f5d8cbf72d5 100644
--- a/unit_tests/CMakeLists.txt
+++ b/unit_tests/CMakeLists.txt
@@ -1,12 +1,17 @@
-Include(FetchContent)
+find_package(Catch2 3.7.1)
 
-FetchContent_Declare(
-  Catch2
-  GIT_REPOSITORY https://github.com/catchorg/Catch2.git
-  GIT_TAG        v3.0.1 # or a later release
-)
+if(NOT Catch2_FOUND)
+    message(STATUS "Catch2 not found in system, retrieving from git")
+    Include(FetchContent)
 
-FetchContent_MakeAvailable(Catch2)
+    FetchContent_Declare(
+      Catch2
+      GIT_REPOSITORY https://github.com/catchorg/Catch2.git
+      GIT_TAG        v3.7.1 # or a later release
+    )
+
+    FetchContent_MakeAvailable(Catch2)
+endif()
 
 file(GLOB_RECURSE src_files "*.cpp")
 
diff --git a/unit_tests/data/Test_Interpolation.cpp b/unit_tests/data/Test_Interpolation.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..73a48125a7f6e300cebe483dfc1cf025fdfc7707
--- /dev/null
+++ b/unit_tests/data/Test_Interpolation.cpp
@@ -0,0 +1,76 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstdlib>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Interpolation.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+TEST_CASE("[core/data] Interpolation", "[Interpolation][Data]") {
+    Log::setConsoleLevel(Log::Debug);
+
+    auto tensor = std::make_shared<Tensor>(std::vector<DimSize_t>({10, 10}));
+    tensor->setDataType(DataType::Float32);
+    tensor->setBackend("cpu");
+    Aidge::constantFiller(tensor, 1337.F);
+
+    SECTION("retrieveNeighbours") {
+        std::set<Interpolation::Point<float>> neighbours;
+        std::set<Interpolation::Point<float>> expectedResult;
+
+        std::vector<float> coords;
+        SECTION("Out of bounds") {
+            coords = {-0.5, -0.5};
+            expectedResult = {{{-1, -1}, 0.f},
+                              {{0, -1}, 0.F},
+                              {{-1, 0}, 0.F},
+                              {{0, 0}, 1337.F}};
+            neighbours = Interpolation::retrieveNeighbours<float>(
+                reinterpret_cast<float *>(tensor->getImpl()->rawPtr()),
+                tensor->dims(),
+                coords,
+                PadBorderType::Zero);
+
+            CHECK(neighbours == expectedResult);
+        }
+        SECTION("Some coords are rounds hence duplicates are filtered out") {
+            tensor = std::make_shared<Tensor>(
+                std::vector<DimSize_t>({5, 10, 10, 10}));
+            tensor->setDataType(DataType::Float32);
+            tensor->setBackend("cpu");
+            Aidge::constantFiller(tensor, 1337.F);
+
+            expectedResult = {{{0, 0, -1, -1}, 0.F},
+                              {{0, 0, 0, -1}, 0.F},
+                              {{0, 0, -1, 0}, 0.F},
+                              {{0, 0, 0, 0}, 1337.F}};
+
+            neighbours = Interpolation::retrieveNeighbours(
+                reinterpret_cast<float *>(tensor->getImpl()->rawPtr()),
+                tensor->dims(),
+                std::vector<float>({0, 0, -0.25, -0.25}));
+            CHECK(expectedResult == neighbours);
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
index d5cd8cdcfb88beee9aab2393b0c5591c79a70b80..58003bb4009a484ca63acffdb50fbda156a48787 100644
--- a/unit_tests/data/Test_Tensor.cpp
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -9,9 +9,9 @@
  *
  ********************************************************************************/
 
-#include <array>
 #include <cstddef>     // std::size_t
 #include <cstdint>     // std::uint8_t, std::uint16_t, std::int32_t
+#include <cstdlib>
 #include <numeric>     // std::accumulate, std::inner_product
 #include <functional>  // std::multiplies
 #include <random>      // std::mt19937,
@@ -42,7 +42,8 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T_default.strides() == std::vector<DimSize_t>({1})) &&
             (T_default.getImpl() == nullptr) &&
             (T_default.grad() != nullptr) &&
-            (T_default.isContiguous() == true)
+            (T_default.isContiguous() == true) &&
+            (T_default.capacity() == 0)
         ));
     }
     SECTION("scalar constructor") {
@@ -339,32 +340,63 @@ TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") {
                 }
 
                 // Test get() and set() by coords
-                // We create coords of rank 0 to the number of dimensions
-                for (std::size_t coord_size = 0; coord_size < dims.size(); ++coord_size) {
-                    std::vector<std::size_t> coords(coord_size);
-                    for (std::size_t coord_idx = 0; coord_idx < coord_size; ++coord_idx) {
-                        std::size_t dim_idx = (dimsDist(gen)-1) % dims[coord_idx];
-                        coords[coord_idx] = dim_idx;
-                    }
-                    std::size_t flat_idx, flat_storage_idx;
-                    // As it is continuous we have getIdx() == getStorageIdx()
-                    REQUIRE_NOTHROW(flat_idx = x.getIdx(coords));
-                    REQUIRE_NOTHROW(flat_storage_idx = x.getStorageIdx(coords));
-                    REQUIRE(flat_storage_idx == flat_idx);
-                    float val, val_flat;
-                    // Test get() by index and by coords
-                    REQUIRE_NOTHROW(val_flat = x.get<float>(flat_idx));
-                    REQUIRE_NOTHROW(val = x.get<float>(coords));
-                    REQUIRE(val == val_flat);
-                    REQUIRE(val == values[flat_idx]);
-                    // Test set() by coords, also update the reference array
-                    REQUIRE_NOTHROW(x.set(coords, val + 1));
-                    values[flat_idx] += 1;
+                // We create one coordinate per dimension of the tensor
+                std::vector<std::size_t> coords(nb_dims);
+                for (std::size_t coord_idx = 0; coord_idx < nb_dims; ++coord_idx) {
+                    std::size_t dim_idx = (dimsDist(gen)-1) % dims[coord_idx];
+                    coords[coord_idx] = dim_idx;
                 }
+                std::size_t flat_idx, flat_storage_idx;
+                // As it is continuous we have getIdx() == getStorageIdx()
+                REQUIRE_NOTHROW(flat_idx = x.getIdx(coords));
+                REQUIRE_NOTHROW(flat_storage_idx = x.getStorageIdx(coords));
+                REQUIRE(flat_storage_idx == flat_idx);
+                float val, val_flat;
+                // Test get() by index and by coords
+                REQUIRE_NOTHROW(val_flat = x.get<float>(flat_idx));
+                REQUIRE_NOTHROW(val = x.get<float>(coords));
+                REQUIRE(val == val_flat);
+                REQUIRE(val == values[flat_idx]);
+                // Test set() by coords, also update the reference array
+                REQUIRE_NOTHROW(x.set(coords, val + 1));
+                values[flat_idx] += 1;
             }
         }
     }
-
+    SECTION("Index & coord manipulation"){
+            Tensor tensor;
+            std::vector<DimSize_t> dims {2,2};
+            int nbVal = std::accumulate(dims.begin(),
+                                        dims.end(),
+                                        1,
+                                        std::multiplies<DimSize_t>());
+            float* values = static_cast<float*>(malloc(nbVal * sizeof(float)));
+            values[0] = 0;
+            values[1] = 1;
+            values[2] = 2;
+            values[3] = 3;
+            tensor.setDataType(DataType::Int32);
+            tensor.setBackend("cpu");
+            tensor.resize(dims);
+            tensor.getImpl()->setRawPtr(values, 4);
+            std::vector<std::size_t> coords;
+        SECTION("getIdx"){
+            CHECK(Tensor::toIndex(tensor.dims(), std::vector<std::size_t>({1,1}) ) == 3);
+            CHECK(Tensor::toIndex(tensor.dims(), std::vector<std::size_t>({1,0}) ) == 2);
+            // No check to ensure the value is in bounds
+            CHECK_THROWS(tensor.getIdx(std::vector<std::size_t>({0,2})));
+        }
+        SECTION("getCoord"){
+            CHECK(Tensor::toCoord(tensor.dims(),  3 ) ==std::vector<std::size_t>({1,1}));
+            CHECK(Tensor::toCoord(tensor.dims(),  2 ) ==std::vector<std::size_t>({1,0}));
+        }
+        SECTION("isInBound"){
+            CHECK_THROWS(Tensor::isInBounds(dims, std::vector<DimSize_t>({1,2,4,5})) == true);
+            CHECK(Tensor::isInBounds(dims, std::vector<DimSize_t>({1,2})) == false);
+            CHECK(Tensor::isInBounds(dims, std::vector<int>({-1,1})) == false);
+            CHECK(Tensor::isInBounds(dims, std::vector<DimSize_t>({1,1})) == true);
+        }
+    }
     SECTION("Tensor extract") {
         bool equal;
 
@@ -457,7 +489,7 @@ TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") {
     SECTION("Pretty printing for debug") {
         Tensor x{};
         // Empty Tensor
-        REQUIRE_THROWS(x.print());
+        REQUIRE_NOTHROW(x.print());
         // scalar
         x = Tensor(42);
         REQUIRE_NOTHROW(x.print());
diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp
index d6d98d4701cba900548d127879c9b3940cf1d739..8c5fa222a68a7f2eed329be7c49ca62d0d7ba52f 100644
--- a/unit_tests/graph/Test_Matching.cpp
+++ b/unit_tests/graph/Test_Matching.cpp
@@ -50,9 +50,9 @@ TEST_CASE("[core/graph] Matching") {
         ReLU("relu2"),
         PaddedConv(8, 16, {3, 3}, "conv3", {1, 1}, {2, 2, 2, 2}),
         ReLU("relu3"),
-        PaddedConv(8, 16, {5, 5}, "conv4", {1, 1}, {2, 2, 2, 2}),
+        PaddedConv(16, 16, {5, 5}, "conv4", {1, 1}, {2, 2, 2, 2}),
         Add("add"),
-        PaddedConv(8, 16, {5, 5}, "conv5", {1, 1}, {2, 2, 2, 2}),
+        PaddedConv(16, 16, {5, 5}, "conv5", {1, 1}, {2, 2, 2, 2}),
         ReLU("relu5"),
         Add("add2")
     });
diff --git a/unit_tests/graph/Test_StaticAnalysis.cpp b/unit_tests/graph/Test_StaticAnalysis.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9488cbaf60fffcaee32a573993a46a0a440a4dea
--- /dev/null
+++ b/unit_tests/graph/Test_StaticAnalysis.cpp
@@ -0,0 +1,68 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include <fmt/chrono.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/graph/StaticAnalysis.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/MetaOperatorDefs.hpp"
+#include "aidge/operator/Producer.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[core/graph] StaticAnalysis") {
+    SECTION("Conv") {
+        auto g1 = Sequential({
+            Conv(3, 4, {5, 5}, "conv1"),
+            ReLU("relu1"),
+            PaddedConv(4, 8, {5, 5}, "conv2", {1, 1}, {2, 2, 2, 2}),
+            ReLU("relu2"),
+            PaddedConv(8, 16, {3, 3}, "conv3", {1, 1}, {2, 2, 2, 2}),
+            ReLU("relu3"),
+            PaddedConv(16, 16, {5, 5}, "conv4", {1, 1}, {2, 2, 2, 2}),
+            Add("add"),
+            PaddedConv(16, 16, {5, 5}, "conv5", {1, 1}, {2, 2, 2, 2}),
+            ReLU("relu5"),
+            Add("add2")
+        });
+
+        g1->getNode("relu3")->addChild(g1->getNode("add"), 0, 1);
+        g1->getNode("conv5")->addChild(g1->getNode("add2"), 0, 1);
+        g1->updateInputsOutputs();
+
+        g1->forwardDims({{16, 3, 512, 512}});
+
+        StaticAnalysis stats(g1);
+        REQUIRE(stats.getNbParams(g1->getNode("conv1")) == 3 * 4 * 5 * 5 + 4);
+        REQUIRE(stats.getNbParams(g1->getNode("conv2")) == 4 * 8 * 5 * 5 + 8);
+        REQUIRE(stats.getNbParams(g1->getNode("conv3")) == 8 * 16 * 3 * 3 + 16);
+
+        const auto conv1Stats = stats.getOpStats(g1->getNode("conv1"));
+        REQUIRE(conv1Stats->getNbMACOps() == 1LL * (16 * 508 * 508) * (5 * 5 * 3 * 4));
+        REQUIRE(conv1Stats->getNbArithmOps() == 2LL * (16 * 508 * 508) * (5 * 5 * 3 * 4));
+        REQUIRE(conv1Stats->getNbArithmFpOps() == 2LL * (16 * 508 * 508) * (5 * 5 * 3 * 4));
+        REQUIRE(conv1Stats->getNbArithmIntOps() == 0);
+
+        g1->getNode("conv1")->getOperator()->setDataType(DataType::Int8);
+        REQUIRE(conv1Stats->getNbMACOps() == 1LL * (16 * 508 * 508) * (5 * 5 * 3 * 4));
+        REQUIRE(conv1Stats->getNbArithmOps() == 2LL * (16 * 508 * 508) * (5 * 5 * 3 * 4));
+        REQUIRE(conv1Stats->getNbArithmFpOps() == 0);
+        REQUIRE(conv1Stats->getNbArithmIntOps() == 2LL * (16 * 508 * 508) * (5 * 5 * 3 * 4));
+    }
+}
diff --git a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
index 15c714b63c2b86e156b43cdaec390ddf60eb7353..29e6f0a56d34935ab5c061897a27b902e03db790 100644
--- a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
+++ b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
@@ -70,12 +70,14 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(forwardDims)",
           for (uint16_t i = 0; i < nb_dims; ++i) {
             dims[i] = dimsDist(gen) + 1;
           }
-          std::vector<DimSize_t> dims_out{dims[0], dims[1]};
+          std::vector<DimSize_t> dims_out(nb_dims, 1);
+          dims_out[0] = dims[0];
+          dims_out[1] = dims[1];
           input_T->resize(dims);
           op->setInput(0, input_T);
           REQUIRE_NOTHROW(op->forwardDims());
           REQUIRE(op->getOutput(0)->dims() == dims_out);
-          REQUIRE((op->getOutput(0)->dims().size()) == static_cast<size_t>(2));
+          REQUIRE((op->getOutput(0)->dims().size()) == nb_dims);
         }
       }
     }
diff --git a/unit_tests/operator/Test_PopImpl.cpp b/unit_tests/operator/Test_PopImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f46131ed8c324f38874eb433f97d13977b4253a4
--- /dev/null
+++ b/unit_tests/operator/Test_PopImpl.cpp
@@ -0,0 +1,36 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Pop.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+
+using Aidge::Tensor;
+using Aidge::Pop;
+
+TEST_CASE("[cpu/operator] Pop(forward)", "[Pop][CPU]") {
+    std::shared_ptr<Tensor> pop1 = std::make_shared<Tensor>(Aidge::Array1D<int,3>{{4,5,6}});
+    std::shared_ptr<Tensor> pop2 = std::make_shared<Tensor>(Aidge::Array1D<int,3>{{1,2,3}});
+    std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Aidge::Array2D<int,2,3>{{{1,2,3}, {4,5,6}}});
+
+    auto pop = Aidge::Pop("pop");
+    pop->getOperator()->associateInput(0, input);
+    pop->getOperator()->setBackend("cpu");
+    pop->getOperator()->setDataType(Aidge::DataType::Int32);
+
+    REQUIRE_NOTHROW(pop->forward());
+    REQUIRE(*std::static_pointer_cast<Aidge::OperatorTensor>(pop->getOperator())->getOutput(0) == *pop2);
+    REQUIRE_NOTHROW(pop->forward());
+    REQUIRE(*std::static_pointer_cast<Aidge::OperatorTensor>(pop->getOperator())->getOutput(0) == *pop1);
+}
diff --git a/unit_tests/operator/Test_Resize_Op.cpp b/unit_tests/operator/Test_Resize_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..111e8fb4f62040127f8b5da8125ba9d91c546f23
--- /dev/null
+++ b/unit_tests/operator/Test_Resize_Op.cpp
@@ -0,0 +1,180 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Resize.hpp"
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef> // std::size_t
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Log.hpp"
+
+namespace Aidge {
+
+
+TEST_CASE("[core/operator] Resize_Op(forwardDims)",
+          "[Resize][forwardDimsScales]") {
+  std::vector<Aidge::DimSize_t> input_dims;
+  std::vector<float> scales;
+  std::vector<std::size_t> sizes;
+  std::vector<Aidge::DimSize_t> expected_dims;
+
+  SECTION("Un-connected input leads to failure.") {
+    input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
+    std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+    auto resize_node = Resize();
+    auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+    op->associateInput(0, input_data);
+
+    REQUIRE_THROWS(op->forwardDims(true));
+  }
+
+  SECTION("Connecting both Scales & Sizes leads to failure") {
+    input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
+    std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+    scales = std::vector<float>({.5, 3.0f, 2.0f, 2.0f});
+    sizes = std::vector<std::size_t>({1, 3, 4, 4});
+    expected_dims = std::vector<Aidge::DimSize_t>({2, 3, 4, 4});
+
+    auto resize_node = Resize(scales, sizes);
+    auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+    op->associateInput(0, input_data);
+
+    REQUIRE_THROWS(op->forwardDims(true));
+  }
+
+  SECTION("Input Scales") {
+    SECTION("TEST 1") {
+      input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({1, 1, 2, 2});
+      sizes = std::vector<std::size_t>({});
+      expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 4, 4});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+
+    SECTION("TEST 2") {
+      input_dims = std::vector<Aidge::DimSize_t>({4, 4, 10, 10});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({1, 1, 2, 3});
+      sizes = std::vector<std::size_t>({});
+      expected_dims = std::vector<Aidge::DimSize_t>({4, 4, 20, 30});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 3") {
+      input_dims = std::vector<Aidge::DimSize_t>({4, 2, 10, 10});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({1, 1, 0.5, 0.5});
+      sizes = std::vector<std::size_t>({});
+      expected_dims = std::vector<Aidge::DimSize_t>({4, 2, 5, 5});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 4") {
+      input_dims = std::vector<Aidge::DimSize_t>({11, 11, 4, 4});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+
+      scales = std::vector<float>({1, 1, 0.3, 0.3});
+      sizes = std::vector<std::size_t>({});
+      expected_dims = std::vector<Aidge::DimSize_t>({11, 11, 1, 1});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+  }
+
+  SECTION("Input Sizes") {
+    SECTION("TEST 1") {
+      input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({});
+      sizes = std::vector<std::size_t>({4, 5, 8, 8});
+      expected_dims = std::vector<Aidge::DimSize_t>({4, 5, 8, 8});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 2") {
+      input_dims = std::vector<Aidge::DimSize_t>({60, 60, 30, 30});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({});
+      sizes = std::vector<std::size_t>({1, 1, 75, 75});
+      expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 75, 75});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 3") {
+      input_dims = std::vector<Aidge::DimSize_t>({11, 11, 20, 20});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({});
+      sizes = std::vector<std::size_t>({19, 6, 8, 8});
+      expected_dims = std::vector<Aidge::DimSize_t>({19, 6, 8, 8});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 4") {
+      input_dims = std::vector<Aidge::DimSize_t>({43, 211, 22, 22});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({});
+      sizes = std::vector<std::size_t>({1, 1, 10, 10});
+      expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 10, 10});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+  }
+}
+
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_StackImpl.cpp b/unit_tests/operator/Test_StackImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d853a1ba27fea0a071c1c2373bbd7ef7f4eacd11
--- /dev/null
+++ b/unit_tests/operator/Test_StackImpl.cpp
@@ -0,0 +1,172 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <catch2/matchers/catch_matchers_string.hpp>
+#include <algorithm>   // std::generate
+#include <cstddef>
+#include <cstdint>     // std::uint64_t
+#include <functional>  // std::multiplies
+#include <memory>
+#include <numeric>
+#include <random>
+#include <stdexcept>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Stack.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+
+using Catch::Matchers::Equals;
+
+namespace Aidge {
+
+// Unit tests for the Stack operator: construction, shape inference, and the
+// stateful forward() that accumulates one input slice per call up to
+// maxElements, then throws.
+TEST_CASE("[core/operator] Stack(forward)", "[Stack]") {
+    constexpr auto nbTrials = 10;
+    // NOTE(review): Catch::Generators::Detail is an internal Catch2 namespace;
+    // prefer the public Catch::getSeed() if available — confirm.
+    auto rd = Catch::Generators::Detail::getSeed;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> nbDist(1, 100);
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(2, 5);
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f);
+
+    const std::size_t nbDimsTensor = nbDimsDist(gen);
+    std::vector<std::size_t> dimsIn(nbDimsTensor);
+    std::shared_ptr<Tensor> t1 = std::make_shared<Tensor>();
+
+    SECTION("Constructors") {
+        // Valid arguments: maxElements and forwardStep must be initialized
+        // and preserved by the copy constructor.
+        for (auto i = 0; i < nbTrials; ++i) {
+            auto maxElements = nbDist(gen);
+            REQUIRE_NOTHROW(StackOp(maxElements));
+
+            auto op1 = StackOp(maxElements);
+            REQUIRE(op1.maxElements() == maxElements);
+            REQUIRE(op1.forwardStep() == 0);
+
+            // Copy constructor must duplicate both attributes.
+            auto op2 = op1;
+            REQUIRE(op2.maxElements() == maxElements);
+            REQUIRE(op2.forwardStep() == 0);
+        }
+
+        // Invalid argument: a zero-capacity stack must be rejected.
+        REQUIRE_THROWS_AS(StackOp(0), std::invalid_argument);
+    }
+
+    SECTION("forwardDims") {
+        for (auto i = 0; i < nbTrials; ++i) {
+            auto maxElements = nbDist(gen);
+            auto op = StackOp(maxElements);
+
+            // Random input shape; use a distinct index name so the outer
+            // trial counter `i` is not shadowed.
+            const std::size_t nbDims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nbDims);
+            for (std::size_t d = 0; d < nbDims; ++d) {
+                dims[d] = dimsDist(gen);
+            }
+            t1->resize(dims);
+
+            // Without an associated input tensor, forwardDims must fail.
+            REQUIRE_THROWS_WITH(
+                op.forwardDims(),
+                Equals("Stack: input #0 should be associated with a Tensor"));
+            op.associateInput(0, t1);
+            REQUIRE_NOTHROW(op.forwardDims());
+            // The stacking dimension (dim 0) equals the declared capacity.
+            REQUIRE(op.getOutput(0)->dims()[0] == maxElements);
+        }
+    }
+
+    SECTION("forward") {
+        std::generate(dimsIn.begin(), dimsIn.end(), [&gen, &dimsDist]() {
+            return dimsDist(gen);
+        });
+        const std::size_t nbElems =
+            std::accumulate(dimsIn.cbegin(),
+                            dimsIn.cend(),
+                            std::size_t(1), // size_t seed avoids narrowing
+                            std::multiplies<std::size_t>());
+
+        std::uniform_int_distribution<std::size_t> numTensorsDist(2, 10);
+        const std::size_t numTensors = numTensorsDist(gen);
+
+        // Backing storage owned by vectors so nothing leaks if a REQUIRE
+        // below throws. The tensor impl does not take ownership of the raw
+        // pointer (the previous manual delete[] relied on that), so handing
+        // it vector-owned memory is equivalent.
+        std::vector<std::shared_ptr<Tensor>> tensors(numTensors);
+        std::vector<std::vector<float>> arrays(numTensors,
+                                               std::vector<float>(nbElems));
+
+        for (std::size_t i = 0; i < numTensors; ++i) {
+            for (std::size_t j = 0; j < nbElems; ++j) {
+                arrays[i][j] = valueDist(gen);
+            }
+            tensors[i] = std::make_shared<Tensor>();
+            tensors[i]->resize(dimsIn);
+            tensors[i]->setBackend("cpu");
+            tensors[i]->setDataType(DataType::Float32);
+            tensors[i]->getImpl()->setRawPtr(arrays[i].data(), nbElems);
+        }
+
+        auto myStack = stack(numTensors);
+        myStack->getOperator()->setBackend("cpu");
+        myStack->getOperator()->setDataType(DataType::Float32);
+        auto op =
+            std::static_pointer_cast<OperatorTensor>(myStack->getOperator());
+
+        op->associateInput(0, tensors[0]);
+        op->forwardDims();
+        op->getOutput(0)->zeros();
+
+        // Push each tensor; the stack accumulates one slice per forward().
+        for (std::size_t i = 0; i < numTensors; ++i) {
+            if (i > 0) {
+                op->associateInput(0, tensors[i]);
+            }
+            op->forward();
+        }
+
+        auto output = op->getOutput(0);
+
+        // Output shape is the input shape with the stack count prepended.
+        std::vector<DimSize_t> expectedDims = dimsIn;
+        expectedDims.insert(expectedDims.begin(), numTensors);
+        REQUIRE(output->dims() == expectedDims);
+
+        // Each output slice along dim 0 must match the corresponding input.
+        for (std::size_t i = 0; i < numTensors; ++i) {
+            Tensor outputTensor =
+                output->extract({static_cast<std::uint64_t>(i)});
+            Tensor inputTensor(DataType::Float32);
+            inputTensor.resize(dimsIn);
+            inputTensor.setBackend("cpu");
+            inputTensor.getImpl()->setRawPtr(arrays[i].data(), nbElems);
+
+            REQUIRE(approxEq<float>(outputTensor, inputTensor));
+        }
+
+        // Pushing one tensor beyond maxElements must throw.
+        std::shared_ptr<Tensor> extraTensor = std::make_shared<Tensor>();
+        extraTensor->resize(dimsIn);
+        extraTensor->setBackend("cpu");
+        extraTensor->setDataType(DataType::Float32);
+        std::vector<float> extraArray(nbElems);
+        for (std::size_t j = 0; j < nbElems; ++j) {
+            extraArray[j] = valueDist(gen);
+        }
+        extraTensor->getImpl()->setRawPtr(extraArray.data(), nbElems);
+
+        REQUIRE_THROWS_AS((op->associateInput(0, extraTensor), op->forward()),
+                          std::runtime_error);
+    }
+}
+} // namespace Aidge