diff --git a/CMakeLists.txt b/CMakeLists.txt
index 776c4e3be35b6a2044015774c760d7b5b0d3956c..499c2971cb60f979e72419cf65b9897d0613bf0a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.15)
+cmake_minimum_required(VERSION 3.18)
 set(CXX_STANDARD 14)
 
 file(STRINGS "${CMAKE_SOURCE_DIR}/version.txt" version)
@@ -84,6 +84,8 @@ if( ${ENABLE_ASAN} )
 endif()
 
 # PYTHON BINDING
+set(AIDGE_REQUIRES_PYTHON FALSE) # Set to TRUE if the aidge_core lib depends on the Python interpreter
+set(AIDGE_PYTHON_HAS_EMBED FALSE) # Set to TRUE if the embedded Python interpreter (libpython) is found on the system
 if (PYBIND)
     # Python binding lib is by default installed in <prefix>/python_packages/<package>/
     # When installed from python, setup.py should set it to the python package dir
@@ -92,13 +94,17 @@ if (PYBIND)
     include(PybindModuleCreation)
     generate_python_binding(${pybind_module_name} ${module_name})
 
-    # Handles Python + pybind11 headers dependencies
-    target_link_libraries(${module_name}
-        PUBLIC
-            pybind11::pybind11
-        PRIVATE
-            Python::Module
-        )
+    ##
+    # As of now, when PYBIND is set, the core archive itself depends upon pybind/python,
+    # so we define -DPYBIND and add the dependencies on the pybind/python runtime where necessary.
+
+    # Add -DPYBIND to compilation and interface
+    target_compile_definitions(${module_name} PUBLIC PYBIND)
+
+    # Add dependencies on pybind/python. See details in add_pybind_dependency()
+    include(PybindDependency)
+    add_pybind_dependency(${module_name})
+    ##
 endif()
 
 target_link_libraries(${module_name} PUBLIC Threads::Threads fmt::fmt)
@@ -206,10 +212,10 @@ export(EXPORT ${CMAKE_PROJECT_NAME}-targets
 ##############################################
 ## Add test
 if(TEST)
-    if(PYBIND)
-        message(FATAL_ERROR "PYBIND and TEST are both enabled. But cannot compile with catch_2.\nChoose between pybind and Catch2 for compilation.")
+    if (AIDGE_REQUIRES_PYTHON AND NOT AIDGE_PYTHON_HAS_EMBED)
+        message(WARNING "Skipping compilation of tests: missing Python embedded interpreter")
+    else()
+        enable_testing()
+        add_subdirectory(unit_tests)
     endif()
-    enable_testing()
-    add_subdirectory(unit_tests)
 endif()
-
diff --git a/README.md b/README.md
index 4b7954d410bce0de1fb1f07c5a268cc962445d29..fe8fd5a4252054c730be8e948d0d2e415c009d47 100644
--- a/README.md
+++ b/README.md
@@ -16,6 +16,7 @@ pip install . -v
 > - `AIDGE_INSTALL` : to set the installation folder. Defaults to `<python_prefix>/lib/libAidge`
 > - `AIDGE_PYTHON_BUILD_TYPE` : to set the compilation mode to **Debug** or **Release** or "" (for default flags). Defaults to **Release**.
 > - `AIDGE_BUILD_GEN` : to set the build backend (for development mode) or "" for the cmake default. Default to "".
+> - `AIDGE_BUILD_TEST` : to build the C++ unit tests. Set to "ON" or "OFF". Defaults to "OFF".
 
 
 ## Pip installation for development
@@ -24,9 +25,10 @@ To setup aidge_core using pip in development (or editable mode), use the `--no-b
 
 For instance run the following command in your python environnement for a typical setup :
 ``` bash
+export AIDGE_BUILD_TEST=ON              # enable C++ unit tests
 export AIDGE_PYTHON_BUILD_TYPE=         # default flags (no debug info but fastest build time)
 export AIDGE_PYTHON_BUILD_TYPE=Debug    # or if one really need to debug the C++ code
-pip install setuptools setuptools_scm[toml] cmake   # Pre-install build requirements (refer to the pyproject.toml [build-system] section)
+pip install -U pip setuptools setuptools_scm[toml] cmake   # Pre-install build requirements (refer to the pyproject.toml [build-system] section)
 pip install -v --no-build-isolation -e .
 ```
 
@@ -41,7 +43,7 @@ cmake --build build -j $(nproc) && cmake --install build
 
 One can also use an alternate cmake build backend such as ninja which can be installed easily though pip, for instance :
 ``` bash
-pip install ninja
+pip install -U ninja
 export AIDGE_BUILD_GEN=Ninja
 pip install -v --no-build-isolation -e .
 ```
@@ -85,9 +87,12 @@ make all install
 | *-DCMAKE_INSTALL_PREFIX:PATH* | ``str``  | Path to the install folder |
 | *-DCMAKE_BUILD_TYPE*          | ``str``  | If ``Debug``, compile in debug mode, ``Release`` compile with highest optimisations or "" (empty) , default= ``Release`` |
 | *-DWERROR*                    | ``bool`` | If ``ON`` show warning as error during compilation phase, default=``OFF`` |
-| *-DPYBIND*                    | ``bool`` | If ``ON`` activate python binding, default=``ON`` |
+| *-DTEST*                      | ``bool`` | If ``ON`` build C++ unit tests, default=``ON`` |
+| *-DPYBIND*                    | ``bool`` | If ``ON`` activate python binding, default=``OFF`` |
+| *-DPYBIND_INSTALL_PREFIX:PATH* | ``str``  | Path to the python module install folder when ``-DPYBIND=ON``, defaults to ``$CMAKE_INSTALL_PREFIX/python_packages/<module>`` |
 
-If you have compiled with PyBind you can find at the root of the ``build`` file the python lib ``aidge_core.cpython*.so``
+If one compiles with ``-DPYBIND=ON``, ``-DPYBIND_INSTALL_PREFIX:PATH`` can be used to install the python module directly in the
+Python source tree (for instance ``$PWD/aidge_core``). ``setup.py`` takes care of this and installs the module in the right place.
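+
+A minimal sketch of such a configuration (paths are illustrative, adjust them to your tree):
+``` bash
+cmake -B build -DPYBIND=ON -DPYBIND_INSTALL_PREFIX:PATH=$PWD/aidge_core .
+cmake --build build -j $(nproc) && cmake --install build
+```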
 
 ## Run tests
 ### CPP
diff --git a/aidge_core-config.cmake.in b/aidge_core-config.cmake.in
index d97afe8a2a1ca98eb862d66c388081bca7b72edc..abe55b6faef64aa61d4df4076c035ac0c5f998b4 100644
--- a/aidge_core-config.cmake.in
+++ b/aidge_core-config.cmake.in
@@ -3,6 +3,11 @@
 include(CMakeFindDependencyMacro)
 find_dependency(fmt)
 find_dependency(Threads)
+set(AIDGE_REQUIRES_PYTHON @AIDGE_REQUIRES_PYTHON@)
+set(AIDGE_PYTHON_HAS_EMBED @AIDGE_PYTHON_HAS_EMBED@)
+if (AIDGE_REQUIRES_PYTHON AND AIDGE_PYTHON_HAS_EMBED)
+    find_dependency(Python COMPONENTS Interpreter Development)
+endif()
 
 include(${CMAKE_CURRENT_LIST_DIR}/aidge_core-config-version.cmake)
 
diff --git a/aidge_core/aidge_export_aidge/static/CMakeLists.txt b/aidge_core/aidge_export_aidge/static/CMakeLists.txt
index 4220bb9d502474301cf748252930ff8bdd5c97e3..d7fe26d9c286f72d898a21d07baae2c91d08b71a 100644
--- a/aidge_core/aidge_export_aidge/static/CMakeLists.txt
+++ b/aidge_core/aidge_export_aidge/static/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.15)
+cmake_minimum_required(VERSION 3.18)
 set(CXX_STANDARD 14)
 
 file(STRINGS "${CMAKE_SOURCE_DIR}/project_name.txt" project_name)
@@ -18,6 +18,7 @@ set(module_name _${CMAKE_PROJECT_NAME}) # target name
 ##############################################
 # Define options
 option(PYBIND "python binding" ON)
+option(STANDALONE "Build standalone executable" ON)
 option(WERROR "Warning as error" OFF)
 option(TEST "Enable tests" OFF)
 option(COVERAGE "Enable coverage" OFF)
@@ -61,16 +62,8 @@ set_property(TARGET ${module_name} PROPERTY POSITION_INDEPENDENT_CODE ON)
 
 # PYTHON BINDING
 if (PYBIND)
-    # Handles Python + pybind11 headers dependencies
     include(PybindModuleCreation)
     generate_python_binding(${CMAKE_PROJECT_NAME} ${module_name})
-
-    target_link_libraries(${module_name}
-        PUBLIC
-            pybind11::pybind11
-        PRIVATE
-            Python::Python
-        )
 endif()
 
 if( ${ENABLE_ASAN} )
@@ -94,7 +87,6 @@ target_include_directories(${module_name}
         ${CMAKE_CURRENT_SOURCE_DIR}/src
 )
 
-target_link_libraries(${module_name} PUBLIC fmt::fmt)
 target_compile_features(${module_name} PRIVATE cxx_std_14)
 
 target_compile_options(${module_name} PRIVATE
@@ -151,8 +143,13 @@ install(FILES
 ## Exporting from the build tree
 message(STATUS "Exporting created targets to use them in another build")
 export(EXPORT ${CMAKE_PROJECT_NAME}-targets
-    FILE "${CMAKE_CURRENT_BINARY_DIR}/${project}-targets.cmake")
-
-# Compile executable
-add_executable(main main.cpp)
-target_link_libraries(main PUBLIC _aidge_core ${module_name})
+    FILE "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}-targets.cmake")
+
+if(STANDALONE)
+    if(AIDGE_REQUIRES_PYTHON AND NOT AIDGE_PYTHON_HAS_EMBED)
+        message(WARNING "Skipping compilation of standalone executable: missing Python embedded interpreter")
+    else()
+        add_executable(main main.cpp)
+        target_link_libraries(main PRIVATE ${module_name})
+    endif()
+endif()
diff --git a/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake b/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
index 193f3332231ac384daab2e5bf75c1a5de0d2bf1d..217a48351def531cf7da39c9e78e0627fdba87f4 100644
--- a/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
+++ b/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
@@ -1,8 +1,7 @@
 function(generate_python_binding name target_to_bind)
 
-    find_package(Python COMPONENTS Interpreter Development)
+    find_package(Python COMPONENTS Interpreter Development.Module)
 
-    add_definitions(-DPYBIND)
     Include(FetchContent)
     FetchContent_Declare(
     PyBind11
@@ -15,11 +14,9 @@ function(generate_python_binding name target_to_bind)
     file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
 
     pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO EXTRA recquired for pip install
-    target_include_directories(${name} PUBLIC "python_binding")
+    target_include_directories(${name} PRIVATE "python_binding")
+
+    # Link target library to bind
+    target_link_libraries(${name} PRIVATE ${target_to_bind})
 
-    # Handles Python + pybind11 headers dependencies
-    target_link_libraries(${name}
-        PUBLIC
-            ${target_to_bind}
-    )
 endfunction()
diff --git a/aidge_core/aidge_export_aidge/static/export-config.cmake.in b/aidge_core/aidge_export_aidge/static/export-config.cmake.in
index f3604be11c27d86caf1ad8a48b333b9bd8f30625..f0be5e076dbdfef359fc00fd41c25c0bba815839 100644
--- a/aidge_core/aidge_export_aidge/static/export-config.cmake.in
+++ b/aidge_core/aidge_export_aidge/static/export-config.cmake.in
@@ -1,3 +1,8 @@
+@PACKAGE_INIT@
+
+include(CMakeFindDependencyMacro)
+find_dependency(aidge_core)
+
 include(${CMAKE_CURRENT_LIST_DIR}/aidge_backend_cpu-config-version.cmake)
 
 include(${CMAKE_CURRENT_LIST_DIR}/aidge_backend_cpu-targets.cmake)
diff --git a/aidge_core/aidge_export_aidge/static/main.cpp b/aidge_core/aidge_export_aidge/static/main.cpp
index ab8bac1851b6d2dae4bf97bd3af10e19e0b71c1e..61bc3ebeb915be12570c6300965e3b64ac2870dd 100644
--- a/aidge_core/aidge_export_aidge/static/main.cpp
+++ b/aidge_core/aidge_export_aidge/static/main.cpp
@@ -1,6 +1,10 @@
 #include <iostream>
 #include <aidge/backend/cpu.hpp>
 
+/* Register default cpu Tensor implementation */
+#include <aidge/backend/cpu/data/TensorImpl.hpp>
+
+/* Include model generator */
 #include "include/dnn.hpp"
 
 int main()
diff --git a/aidge_core/show_graphview.py b/aidge_core/show_graphview.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddf0fc4b4659a727c7879738ef5e3eb40186cac1
--- /dev/null
+++ b/aidge_core/show_graphview.py
@@ -0,0 +1,225 @@
+import os
+import json
+import builtins
+import aidge_core
+import numpy as np
+from pathlib import Path
+ 
+def _retrieve_operator_attrs(node : aidge_core.Node) -> dict[str, int, float, bool, None]:
+    """
+    Returns the dictionary containing the attributes of a given Node.
+
+    :param node: A Node in the list of ordered nodes.
+    :type node: aidge_core.Node
+
+    :return: A dictionary with the Node's attributes.
+    :rtype: dict[str, int, float, bool, None]
+    """       
+
+    if node.get_operator().attr is not None:
+        node_attr_dict =  node.get_operator().attr.dict()
+        for key,value in node_attr_dict.items():
+            if not type(value).__name__ in dir(builtins):
+                node_attr_dict[key] = value.name
+    
+    else:
+        node_attr_dict = {}
+
+    return node_attr_dict
+
+def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_embed : bool, write_trainable_params_ext : bool, path_trainable_params : Path, params_file_format : str) -> dict[str, int, float, bool, None]:
+    """
+    Creates a dictionary to store the information of a given ordered GraphView. 
+
+    :param ordered_nodes: A list with the GraphView's ordered nodes.
+    :type ordered_nodes: list
+    :param write_trainable_params_embed: Whether or not to write the Nodes' trainable parameters (if any) in the same file as the dict (embedded).
+    :type write_trainable_params_embed: bool
+    :param write_trainable_params_ext: Whether or not to write the Nodes' trainable parameters (if any) in an external file.
+    :type write_trainable_params_ext: bool
+    :param path_trainable_params: Path of the external file used to store the Nodes' trainable parameters.
+    :type path_trainable_params: Path
+    :param params_file_format: Format of the external file used to store the Nodes' trainable parameters. Options: ``npz`` or ``json``. Default: ``json``. Requires ``write_trainable_params_ext``.
+    :type params_file_format: str
+    
+    :return: A dictionary with the GraphView description.
+    :rtype: dict[str, int, float, bool, None]
+    """            
+
+    graphview_dict = {'graph': []}
+
+    for node in ordered_nodes:
+        
+        if node is not None:
+            node_dict = {'name' : node.name(), 
+                         'optype' : node.get_operator().type(),
+                         'nb_inputs' : node.get_operator().nb_inputs(),
+                         'nb_outputs' : node.get_operator().nb_outputs()}
+            
+            inputs = []
+            for input_idx in range(node.get_operator().nb_inputs()):
+                input_dict = {'dims' : node.get_operator().get_input(input_idx).dims(),
+                              'data_type' : str(node.get_operator().get_input(input_idx).dtype()),
+                              'data_format' : str(node.get_operator().get_input(input_idx).dformat())}              
+                inputs.append(input_dict)    
+            
+            node_dict['inputs'] = inputs
+
+            outputs = []
+            for output_idx in range(node.get_operator().nb_outputs()):
+                output_dict = {'dims' : node.get_operator().get_output(output_idx).dims(),
+                               'data_type' : str(node.get_operator().get_output(output_idx).dtype()),
+                              'data_format' : str(node.get_operator().get_output(output_idx).dformat())}              
+                outputs.append(output_dict)    
+            
+            node_dict['outputs'] = outputs
+
+            parents = node.get_parents()
+            # If the first parent is None (unconnected input), move it to the end of the list
+            if None in parents and parents[0] is None:
+                parents.append(parents.pop(0))
+    
+            parents_inputs = [] 
+            for parent in parents:
+                if parent is not None:
+                    for output_idx in range(parent.get_operator().nb_outputs()):
+                        for input_idx in range(node.get_operator().nb_inputs()):
+                            if parent.get_operator().get_output(output_idx).dims() == node.get_operator().get_input(input_idx).dims():
+                                parents_inputs.append((parent.name(), input_idx))
+
+                elif parent is None:
+                    for input_idx in list(range(node.get_operator().nb_inputs())):
+                        if input_idx not in [item[1] for item in parents_inputs]:
+                                parents_inputs.append((None, input_idx))  
+
+            parents_inputs.sort(key=lambda x: x[1])
+            node_dict['parents'] = parents_inputs
+
+            children_outputs = []
+            for child in node.get_children():
+                for input_idx in range(child.get_operator().nb_inputs()):
+                    for output_idx in range(node.get_operator().nb_outputs()):
+                        if child.get_operator().get_input(input_idx).dims() == node.get_operator().get_output(output_idx).dims():
+                            children_outputs.append((child.name(), output_idx))
+            node_dict['children'] = children_outputs
+        
+            # Check if the node is a MetaOperator
+            attributes_dict = {}
+            if isinstance(node.get_operator(), aidge_core.MetaOperator_Op):
+                attributes_dict['micro_graph'] = []
+                for micro_node in node.get_operator().get_micro_graph().get_nodes():
+                    micro_node_dict = {'name' : micro_node.name(), 
+                                        'optype' : micro_node.type()}
+                    
+                    micro_node_attr_dict =  _retrieve_operator_attrs(micro_node)
+                    micro_node_dict['attributes'] = micro_node_attr_dict
+                    attributes_dict['micro_graph'].append(micro_node_dict)
+
+            else:
+                node_attr_dict = _retrieve_operator_attrs(node)
+                attributes_dict.update(node_attr_dict)
+
+            node_dict['attributes'] = attributes_dict
+
+            if node.type() == 'Producer':
+                if write_trainable_params_ext:
+                    
+                    params_file_format = params_file_format.casefold()
+
+                    if params_file_format=='npz':
+                        np.savez_compressed(Path(path_trainable_params, node.name()), **{node.name() : node.get_operator().get_output(0)})
+                        node_dict['tensor_data'] = Path(path_trainable_params, node.name() + '.npz')
+
+                    elif params_file_format=='json':
+                        tensor = np.array(node.get_operator().get_output(0))
+                        tensor_dict = {
+                            node.name() : 
+                            {
+                                'dims' : tensor.shape,
+                                'data_type' : str(tensor.dtype),
+                                'tensor_data' : tensor.tolist()
+                            }   
+                        }
+                                   
+                        with open(Path(path_trainable_params, node.name() + '.json'), 'w') as fp:
+                            json.dump(tensor_dict, fp, indent=4)
+
+                        node_dict['tensor_data'] = Path(path_trainable_params, node.name() + '.json')
+
+                    else:
+                        raise Exception("File format to write trainable parameters not recognized.")
+
+                
+                elif write_trainable_params_embed:
+                    node_dict['tensor_data'] = np.array(node.get_operator().get_output(0)).tolist()
+                
+                else:
+                    pass
+
+            graphview_dict['graph'].append(node_dict)
+
+        else: # node is None
+            pass
+    
+    return graphview_dict
+
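+# Illustrative shape of the dictionary produced by _create_dict (abbreviated; key names follow the code above):
+#
+# {
+#     "graph": [
+#         {"name": "conv1", "optype": "Conv",
+#          "nb_inputs": 3, "nb_outputs": 1,
+#          "inputs": [...], "outputs": [...],
+#          "parents": [...], "children": [...],
+#          "attributes": {...}},
+#         ...
+#     ]
+# }
+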
+def _write_dict_json(graphview_dict : dict[str, int, float, bool, None], json_path : str) -> None:
+    """
+    Writes dictionary containing GraphView description to a JSON file.
+    
+    :param graphview_dict: A dictionary with the GraphView description.
+    :type graphview_dict: dict[str, int, float, bool, None]
+    :param json_path: Path to write JSON file.
+    :type json_path: str
+    """
+
+    with open(json_path, 'w') as fp:
+        json.dump(graphview_dict, fp, indent=4)
+
+    return None
+    
+def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainable_params_embed : bool = False, write_trainable_params_ext : bool = False, params_file_format : str = 'json') -> None:   
+    """
+    Generates the description for a GraphView in the JSON format.
+    
+    :param gview: A GraphView of Aidge.
+    :type gview: aidge_core.GraphView
+    :param json_path: Path to write JSON file.
+    :type json_path: Path
+    :param write_trainable_params_embed: Whether or not to write the Nodes' trainable parameters (if any) in the same file as the dict (embedded).
+    :type write_trainable_params_embed: bool, optional
+    :param write_trainable_params_ext: Whether or not to write the Nodes' trainable parameters (if any) in an external file.
+    :type write_trainable_params_ext: bool, optional
+    :param params_file_format: Format of the external file used to store the Nodes' trainable parameters. Options: ``npz`` or ``json``. Default: ``json``. Requires ``write_trainable_params_ext``.
+    :type params_file_format: str, optional
+    """
+
+    if json_path.is_dir():
+        json_path = (json_path.parent).joinpath('model.json')
+
+    elif json_path.suffix != '.json':
+        raise Exception('If ``json_path`` contains a filename it must be of JSON format.')
+
+    if write_trainable_params_ext:
+        path_trainable_params = (json_path.parent).joinpath(json_path.stem +  '_trainable_params/')
+    else:
+        path_trainable_params = Path()
+
+    if isinstance(gview, aidge_core.GraphView):
+        # Sort GraphView in topological order
+        ordered_nodes = gview.get_ordered_nodes()
+    
+        # Create dict from GraphView 
+        graphview_dict = _create_dict(ordered_nodes, write_trainable_params_embed, write_trainable_params_ext, path_trainable_params, params_file_format)
+        
+        # Write dict to JSON
+        _write_dict_json(graphview_dict, json_path)
+
+    else:
+        raise Exception("Graph must be an instance of aidge_core.GraphView.")
+        
+    return None
\ No newline at end of file
diff --git a/aidge_core/unit_tests/static/main.cpp b/aidge_core/unit_tests/static/main.cpp
index 06171e2a036a18b0dea3dca40de34c296d99222d..640fc1fe60b55070de41ca4ce35ccd08084498b9 100644
--- a/aidge_core/unit_tests/static/main.cpp
+++ b/aidge_core/unit_tests/static/main.cpp
@@ -4,6 +4,10 @@ This file is copied in the test export.
 */
 #include <iostream>
 
+/* Register default cpu Tensor implementation */
+#include <aidge/backend/cpu/data/TensorImpl.hpp>
+
+/* Include model generator */
 #include "include/dnn.hpp"
 
 int main()
diff --git a/aidge_core/unit_tests/test_export.py b/aidge_core/unit_tests/test_export.py
index 9fb16128eebed9102cdf0e46e359a832bf6ac140..5d2e700a86925d1455cdee83e7d40cd891e72ba6 100644
--- a/aidge_core/unit_tests/test_export.py
+++ b/aidge_core/unit_tests/test_export.py
@@ -65,6 +65,7 @@ class test_export(unittest.TestCase):
     def setUp(self):
         self.EXPORT_PATH: pathlib.Path = pathlib.Path("dummy_export")
         self.BUILD_DIR: pathlib.Path = self.EXPORT_PATH / "build"
+        self.INSTALL_DIR: pathlib.Path = (self.EXPORT_PATH / "install").absolute()
 
     def tearDown(self):
         pass
@@ -96,9 +97,10 @@ class test_export(unittest.TestCase):
         )
         os.makedirs(self.BUILD_DIR, exist_ok=True)
         clean_dir(self.BUILD_DIR)  # if build dir existed already ensure its emptyness
+        clean_dir(self.INSTALL_DIR)
 
         # Test compilation of export
-        install_path = (
+        search_path = (
             os.path.join(sys.prefix, "lib", "libAidge")
             if "AIDGE_INSTALL" not in os.environ
             else os.environ["AIDGE_INSTALL"]
@@ -116,14 +118,16 @@ class test_export(unittest.TestCase):
                 [
                     "cmake",
                     str(self.EXPORT_PATH.absolute()),
-                    "-DPYBIND=1",
-                    f"-DCMAKE_INSTALL_PREFIX:PATH={install_path}",
+                    "-DPYBIND=ON",
+                    f"-DCMAKE_PREFIX_PATH={search_path}", # search dependencies
+                    f"-DCMAKE_INSTALL_PREFIX:PATH={self.INSTALL_DIR}", # local install
                 ],
                 cwd=str(self.BUILD_DIR),
             ):
                 print(std_line, end="")
         except subprocess.CalledProcessError as e:
             print(f"An error occurred: {e}\nFailed to configure export.")
+            raise SystemExit(1)
 
         ##########################
         # BUILD EXPORT
@@ -135,6 +139,7 @@ class test_export(unittest.TestCase):
                 print(std_line, end="")
         except subprocess.CalledProcessError as e:
             print(f"An error occurred: {e}\nFailed to build export.")
+            raise SystemExit(1)
 
         ##########################
         # INSTALL EXPORT
@@ -146,6 +151,7 @@ class test_export(unittest.TestCase):
                 print(std_line, end="")
         except subprocess.CalledProcessError as e:
             print(f"An error occurred: {e}\nFailed to install export.")
+            raise SystemExit(1)
 
 
 if __name__ == "__main__":
diff --git a/aidge_core/unit_tests/test_operator_squeeze.py b/aidge_core/unit_tests/test_operator_squeeze.py
new file mode 100644
index 0000000000000000000000000000000000000000..b43605893f32f17e7b544b2fea09b16bdd982050
--- /dev/null
+++ b/aidge_core/unit_tests/test_operator_squeeze.py
@@ -0,0 +1,194 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+from aidge_core import Log
+import numpy as np
+from numpy import testing as npt
+
+
+class TestSqueeze(unittest.TestCase):
+    """
+    Test squeeze operator
+    """
+
+    def setUp(self):
+        ############DEFINING INPUT AND OUTPUTS FOR TESTS
+        axes_to_squeeze_0 = [0]
+        axes_to_squeeze_many = [0, 1, 4]
+        axes_to_squeeze_all = []
+        axes_to_squeeze_error = [1, 2, 4, 5, 10, 3, 42, 127, 12, 3, 4, 1, 4, 50]
+
+        squeeze_dim_0 = aidge_core.Squeeze(axes_to_squeeze_0, name="squeeze_dim_0")
+        squeeze_many = aidge_core.Squeeze(axes_to_squeeze_many, name="squeeze_many")
+        squeeze_all = aidge_core.Squeeze(axes_to_squeeze_all, name="squeeze_all")
+        squeeze_error = aidge_core.Squeeze(axes_to_squeeze_error, name="squeeze_error")
+
+        input_1_data_shape = np.array([1, 2, 3])
+        input_2_data_shape = np.array([1, 1, 3, 3, 1, 9])
+        input_3_data_shape = np.array([1])
+        input_4_data_shape = np.array([1, 1, 4])
+
+        input_axes_0 = axes_to_squeeze_0
+        input_axes_many = axes_to_squeeze_many
+        input_axes_all = axes_to_squeeze_all
+        # input_axes_error = aidge_core.Tensor(axes_to_squeeze_error)
+
+        ####################### DEFINING TEST RUNS
+        self.tests_axes_defined_by_attribute = [
+            (input_1_data_shape, squeeze_dim_0, np.array([2, 3])),
+            (input_1_data_shape, squeeze_all, np.array([2, 3])),
+            (input_2_data_shape, squeeze_dim_0, np.array([1, 3, 3, 1, 9])),
+            (input_2_data_shape, squeeze_many, np.array([3, 3, 9])),
+            (input_2_data_shape, squeeze_all, np.array([3, 3, 9])),
+            (input_3_data_shape, squeeze_dim_0, np.array([])),
+            (input_3_data_shape, squeeze_all, np.array([])),
+            (input_4_data_shape, squeeze_dim_0, np.array([1, 4])),
+            (input_4_data_shape, squeeze_all, np.array([4])),
+        ]
+
+        # operators are purposefully chosen with predefined attributes different from the input_axes tensor
+        self.tests_axes_defined_by_input = [
+            (input_1_data_shape, input_axes_0, squeeze_error, np.array([2, 3])),
+            (input_1_data_shape, input_axes_all, squeeze_error, np.array([2, 3])),
+            (input_2_data_shape, input_axes_0, squeeze_error, np.array([1, 3, 3, 1, 9])),
+            (input_2_data_shape, input_axes_many, squeeze_error, np.array([3, 3, 9])),
+            (input_2_data_shape, input_axes_all, squeeze_error, np.array([3, 3, 9])),
+            (input_3_data_shape, input_axes_0, squeeze_error, np.array([])),
+            (input_3_data_shape, input_axes_all, squeeze_error, np.array([])),
+            (input_4_data_shape, input_axes_0, squeeze_error, np.array([1, 4])),
+            (input_4_data_shape, input_axes_all, squeeze_error, np.array([4])),
+        ]
+        self.test_error = [
+            (input_1_data_shape, squeeze_error),
+            (input_1_data_shape, squeeze_many),
+            (input_3_data_shape, squeeze_many),
+            (input_4_data_shape, squeeze_many),
+        ]
+        return
+
+    def tearDown(self):
+        pass
+
+    def test_axes_defined_via_tensor_input(self):
+        Log.notice("\ntest_axes_defined_via_tensor_input")
+        for index, (
+            input_shape,
+            input_axes_to_squeeze,
+            squeeze_node_template,
+            output_shape,
+        ) in enumerate(self.tests_axes_defined_by_input):
+            test_squeeze_node = squeeze_node_template
+            test_squeeze_op = test_squeeze_node.get_operator()
+
+            print(f"\nTest {index}")
+            print(f"input shape : {input_shape}")
+            print(f"input axes: {np.array(input_axes_to_squeeze)}")
+            print(f"operator : {test_squeeze_node}")
+            print(f"expected output_shape : {output_shape}")
+
+            test_squeeze_op.set_backend("cpu")
+            test_squeeze_op.set_datatype(aidge_core.dtype.float32)
+
+            input_values = np.ones(shape=input_shape, dtype=np.float32)
+            output_values = np.ones(shape=output_shape, dtype=np.float32)
+
+            input_data = aidge_core.Tensor(input_values)
+            input_data.set_datatype(aidge_core.dtype.float32)
+            input_data.set_backend("cpu")
+
+            input_axes = aidge_core.Tensor(
+                np.array(input_axes_to_squeeze, dtype=np.float32)
+            )
+            input_axes.set_datatype(aidge_core.dtype.int8)
+            input_axes.set_backend("cpu")
+
+            test_squeeze_op.set_input(0, input_data)
+            test_squeeze_op.set_input(1, input_axes)
+
+            self.assertEqual(test_squeeze_op.forward_dims(True), True)
+            test_squeeze_op.forward()
+
+            squeeze_output = test_squeeze_op.get_output(0)
+
+            npt.assert_array_equal(
+                squeeze_output.dims(),
+                output_shape,
+                err_msg=f"SQUEEZE FAILURE : expected result differs from output size\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+            npt.assert_array_almost_equal(
+                np.array(squeeze_output, dtype=np.float32),
+                output_values,
+                7,
+                err_msg=f"SQUEEZE FAILURE : output tensor values differs from expected values\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+            # self.assertEqual(test_squeeze_op.dims_forwarded(), True, "SQUEEZE_FAILURE : dims_forwarded failed.")
+        return
+
+    def test_axes_defined_via_attribute(self):
+        Log.notice("\ntest_axes_defined_via_attribute")
+        for index, (input_shape, squeeze_node_template, output_shape) in enumerate(
+            self.tests_axes_defined_by_attribute
+        ):
+            test_squeeze_node = squeeze_node_template
+            test_squeeze_op = test_squeeze_node.get_operator()
+
+            print(f"\nTest {index}")
+            print(f"input size : {input_shape.shape}")
+            print(f"operator : {test_squeeze_node}")
+            print(f"expected output_shape : {output_shape}")
+
+            test_squeeze_node.get_operator().set_backend("cpu")
+
+            input_values = np.ones(shape=input_shape, dtype=np.float32)
+            output_values = np.ones(shape=output_shape, dtype=np.float32)
+            input_data = aidge_core.Tensor(input_values)
+            input_data.set_datatype(aidge_core.dtype.float32)
+            input_data.set_backend("cpu")
+            test_squeeze_op.set_input(0, input_data)
+
+            test_squeeze_op.forward_dims()
+            test_squeeze_op.forward()
+
+            squeeze_output = test_squeeze_op.get_output(0)
+
+            npt.assert_array_equal(
+                squeeze_output.dims(),
+                output_shape,
+                err_msg=f"SQUEEZE FAILURE : expected result differs from output size\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+            npt.assert_array_almost_equal(
+                np.array(squeeze_output, dtype=np.float32),
+                output_values,
+                7,
+                err_msg=f"SQUEEZE FAILURE : output tensor values differs from expected values\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+        return
+
+    def test_error(self):
+        for input_shape, squeeze_node_template in self.test_error:
+            test_squeeze_node = squeeze_node_template
+            test_squeeze_op = test_squeeze_node.get_operator()
+
+            input_values = np.ones(shape=input_shape)
+            input_data = aidge_core.Tensor(input_values)
+            input_data.set_datatype(aidge_core.dtype.float32)
+            input_data.set_backend("cpu")
+            test_squeeze_op.set_input(0, input_data)
+
+            with self.assertRaises((RuntimeError, AssertionError)):
+                test_squeeze_op.forward_dims()
+                test_squeeze_op.forward()
+        return
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/aidge_core/unit_tests/test_operator_unsqueeze.py b/aidge_core/unit_tests/test_operator_unsqueeze.py
new file mode 100644
index 0000000000000000000000000000000000000000..12f55fa30bc027fa5a3cea6ccb6a8d2970cad018
--- /dev/null
+++ b/aidge_core/unit_tests/test_operator_unsqueeze.py
@@ -0,0 +1,211 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+import numpy as np
+from numpy import testing as npt
+
+
+class TestUnsqueeze(unittest.TestCase):
+    """
+    Test unsqueeze operator
+    """
+
+    def setUp(self):
+        axis_to_unsqueeze_dim_0 = [0]
+        axis_to_unsqueeze_many = [1, 4, 5]
+        axis_to_unsqueeze_error_identical_index = [0, 0, 0]
+        axis_to_unsqueeze_error_too_high_index = [50]
+        axis_to_unsqueeze_onnx_test = [0, 4]
+        unsqueeze_dim_0 = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_dim_0, name="unsqueeze_dim_0"
+        )
+        unsqueeze_many = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_many, name="unsqueeze_many"
+        )
+        unsqueeze_error_identical_index = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_error_identical_index,
+            name="unsqueeze_error_identical_index",
+        )
+        unsqueeze_error_node = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_error_too_high_index,
+            name="unsqueeze_error_index_too_high",
+        )
+        unsqueeze_onnx_test = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_onnx_test, name="unsqueeze taken from onnx documentation"
+        )
+
+        input_1_data_shape = np.array([1, 2, 3])
+        input_2_data_shape = np.array([2, 1, 3, 3])
+        input_3_data_shape = np.array([1, 1, 4])
+        input_onnx_data_shape = np.array([3, 4, 5])
+
+        input_axes_dim_0 = axis_to_unsqueeze_dim_0
+        input_axes_many = axis_to_unsqueeze_many
+        input_axes_onnx_test = axis_to_unsqueeze_onnx_test
+
+        self.tests_axes_defined_by_attribute = [
+            (input_1_data_shape, unsqueeze_dim_0, np.array([1, 1, 2, 3])),
+            (input_2_data_shape, unsqueeze_dim_0, np.array([1, 2, 1, 3, 3])),
+            (input_2_data_shape, unsqueeze_many, np.array([2, 1, 1, 3, 1, 1, 3])),
+            (input_3_data_shape, unsqueeze_dim_0, np.array([1, 1, 1, 4])),
+            (input_3_data_shape, unsqueeze_many, np.array([1, 1, 1, 4, 1, 1])),
+            (input_onnx_data_shape, unsqueeze_onnx_test, np.array([1, 3, 4, 5, 1])),
+        ]
+
+        self.tests_axes_defined_by_tensor = [
+            (
+                input_1_data_shape,
+                input_axes_dim_0,
+                unsqueeze_error_node,
+                np.array([1, 1, 2, 3]),
+            ),
+            (
+                input_2_data_shape,
+                input_axes_dim_0,
+                unsqueeze_error_node,
+                np.array([1, 2, 1, 3, 3]),
+            ),
+            (
+                input_2_data_shape,
+                input_axes_many,
+                unsqueeze_error_node,
+                np.array([2, 1, 1, 3, 1, 1, 3]),
+            ),
+            (
+                input_3_data_shape,
+                input_axes_dim_0,
+                unsqueeze_error_node,
+                np.array([1, 1, 1, 4]),
+            ),
+            (
+                input_3_data_shape,
+                input_axes_many,
+                unsqueeze_error_node,
+                np.array([1, 1, 1, 4, 1, 1]),
+            ),
+            (
+                input_onnx_data_shape,
+                input_axes_onnx_test,
+                unsqueeze_error_node,
+                np.array([1, 3, 4, 5, 1]),
+            ),
+        ]
+
+        self.test_error = [
+            (input_1_data_shape, unsqueeze_error_identical_index),
+            (input_1_data_shape, unsqueeze_error_node),
+            (input_1_data_shape, unsqueeze_many),  # dims too high
+        ]
+        return
+
+    def tearDown(self):
+        pass
+
+    def test_axes_defined_by_attribute(self):
+        for index, (
+            input_shape,
+            unsqueeze_template,
+            expected_output_shape,
+        ) in enumerate(self.tests_axes_defined_by_attribute):
+            test_unsqueeze = unsqueeze_template
+            test_unsqueeze_op = test_unsqueeze.get_operator()
+
+            print(f"\nTest {index}")
+            print(f"input size : {input_shape}")
+            print(f"operator : {test_unsqueeze}")
+            print(f"expected output_shape : {expected_output_shape}")
+
+            test_unsqueeze_op.set_backend("cpu")
+
+            input_values = np.ones(shape=input_shape, dtype=np.float32)
+            expected_output_values = np.ones(
+                shape=expected_output_shape, dtype=np.float32
+            )
+            input_tensor = aidge_core.Tensor(input_values)
+            test_unsqueeze_op.set_input(0, input_tensor)
+
+            test_unsqueeze_op.forward_dims()
+            test_unsqueeze_op.forward()
+
+            unsqueeze_output = test_unsqueeze_op.get_output(0)
+
+            npt.assert_array_equal(
+                unsqueeze_output.dims(),
+                expected_output_shape,
+                err_msg=f"UNSQUEEZE FAILURE : expected result dimensions differs from output's\n\toperator : {test_unsqueeze}\n\tinput.shape : {input_shape.shape}",
+            )
+            npt.assert_array_almost_equal(
+                np.array(unsqueeze_output),
+                expected_output_values,
+                7,
+                err_msg=f"UNSQUEEZE FAILURE : output tensor values differs from expected values\n\toperator : {test_unsqueeze}\n\tinput.shape : {input_shape.shape}",
+            )
+        return
+
+    def test_axes_defined_via_tensor_input(self):
+        for index, (
+            input_shape,
+            input_axes_to_squeeze,
+            squeeze_node_template,
+            output_shape,
+        ) in enumerate(self.tests_axes_defined_by_tensor):
+            test_squeeze_node = squeeze_node_template
+            test_squeeze_op = test_squeeze_node.get_operator()
+
+            print(f"\nTest {index}")
+            print(f"input shape : {input_shape}")
+            print(f"input axes: {np.array(input_axes_to_squeeze)}")
+            print(f"operator : {test_squeeze_node}")
+            print(f"expected output_shape : {output_shape}")
+
+            test_squeeze_op.set_backend("cpu")
+            test_squeeze_op.set_datatype(aidge_core.dtype.float32)
+
+            input_values = np.ones(shape=input_shape, dtype=np.float32)
+            output_values = np.ones(shape=output_shape, dtype=np.float32)
+
+            input_data = aidge_core.Tensor(input_values)
+            input_data.set_datatype(aidge_core.dtype.float32)
+            input_data.set_backend("cpu")
+
+            input_axes = aidge_core.Tensor(
+                np.array(input_axes_to_squeeze, dtype=np.float32)
+            )
+            input_axes.set_datatype(aidge_core.dtype.int8)
+            input_axes.set_backend("cpu")
+
+            test_squeeze_op.set_input(0, input_data)
+            test_squeeze_op.set_input(1, input_axes)
+
+            self.assertEqual(test_squeeze_op.forward_dims(True), True)
+            test_squeeze_op.forward()
+
+            squeeze_output = test_squeeze_op.get_output(0)
+
+            npt.assert_array_equal(
+                squeeze_output.dims(),
+                output_shape,
+                err_msg=f"SQUEEZE FAILURE : expected result differs from output size\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+            npt.assert_array_almost_equal(
+                np.array(squeeze_output, dtype=np.float32),
+                output_values,
+                7,
+                err_msg=f"SQUEEZE FAILURE : output tensor values differs from expected values\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+            # self.assertEqual(test_squeeze_op.dims_forwarded(), True, "SQUEEZE_FAILURE : dims_forwarded failed.")
+        return
+
+
+if __name__ == "__main__":
+    unittest.main()
+
diff --git a/aidge_core/unit_tests/test_show_graphview.py b/aidge_core/unit_tests/test_show_graphview.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c68e93e39a543e96c2b664dbe554660bf37cc91
--- /dev/null
+++ b/aidge_core/unit_tests/test_show_graphview.py
@@ -0,0 +1,129 @@
+import json
+import tempfile
+import unittest
+import builtins
+import aidge_core
+from pathlib import Path
+from aidge_core.show_graphview import gview_to_json
+
+def create_gview():
+    # Create a LeNet-like model
+    gview = aidge_core.sequential([aidge_core.PaddedConv2D(in_channels=1, out_channels=6, kernel_dims=[5,5], name='feature_feature_0_Conv', stride_dims=[1,1], padding_dims = [2,2,2,2]),
+                               aidge_core.ReLU(name='feature_feature_1_Relu'),
+                               aidge_core.MaxPooling2D(kernel_dims=[2,2], stride_dims=[2,2], ceil_mode=0, name='feature_feature_2_MaxPool'),
+                               aidge_core.Conv2D(in_channels=6, out_channels=16, kernel_dims=[5,5], name='feature_feature_3_Conv', stride_dims=[1,1], dilation_dims = [1,1]),
+                               aidge_core.ReLU(name='feature_feature_4_Relu'),
+                               aidge_core.MaxPooling2D(kernel_dims=[2,2], stride_dims=[2,2], ceil_mode=0, name='feature_feature_5_MaxPool'),
+                               aidge_core.FC(in_channels=400, out_channels=120, name='classifier_classifier_1_Gemm'),
+                               aidge_core.ReLU(name='classifier_classifier_2_Relu'),
+                               aidge_core.FC(in_channels=120, out_channels=84, name='classifier_classifier_3_Gemm'),
+                               aidge_core.ReLU(name='classifier_classifier_4_Relu'),
+                               aidge_core.FC(in_channels=84, out_channels=10, name='classifier_classifier_5_Gemm'),
+                            ])
+
+    # Fill Producers
+    for node in gview.get_nodes():
+        if node.type() == "Producer":
+            prod_op = node.get_operator()
+            value = prod_op.get_output(0)
+            value.set_backend("cpu")
+            tuple_out = node.output(0)[0]
+            
+            if (tuple_out[0].type() == "Conv" or tuple_out[0].type() == "PaddedConv") and tuple_out[1]==1:
+                # Conv weight
+                aidge_core.xavier_uniform_filler(value)
+            elif tuple_out[0].type() == "Conv" and tuple_out[1]==2:
+                # Conv bias
+                aidge_core.constant_filler(value, 0.01)
+            elif tuple_out[0].type() == "FC" and tuple_out[1]==1:
+                # FC weight
+                aidge_core.normal_filler(value)
+            elif tuple_out[0].type() == "FC" and tuple_out[1]==2:
+                # FC bias
+                aidge_core.constant_filler(value, 0.01)
+            else:
+                pass
+
+    # Compile model
+    gview.forward_dims([[1, 1, 28, 28]]) 
+    gview.set_datatype(aidge_core.dtype.float32)
+
+    return gview
+
+class test_show_gview(unittest.TestCase):
+    """Test aidge functionality to show GraphView.    
+    """
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_gview_to_json(self):
+        
+        gview = create_gview()
+
+        # Create temporary file to store JSON model description             
+        model_description_file = tempfile.NamedTemporaryFile(mode="w+", suffix='.json')
+
+        gview_to_json(gview, Path(model_description_file.name))
+
+        # Load JSON
+        with open(model_description_file.name, 'r') as fp:
+            model_json = json.load(fp)
+
+        # Get list of nodes of Aidge graphview
+        gview_ordered_nodes = gview.get_ordered_nodes()
+
+        # Iterate over the list of ordered nodes and the corresponding JSON 
+        self.assertEqual(len(gview_ordered_nodes), len(model_json['graph']))
+
+        for node_gview, node_json in zip(gview_ordered_nodes, model_json['graph']):   
+                    
+            self.assertEqual(node_gview.get_operator().type(), node_json['optype'])
+            self.assertEqual(node_gview.get_operator().nb_inputs(), node_json['nb_inputs'])
+            self.assertEqual(node_gview.get_operator().nb_outputs(), node_json['nb_outputs'])
+            
+            self.assertEqual(node_gview.get_operator().nb_inputs(), len(node_json['inputs']))
+            for input_idx in range(node_gview.get_operator().nb_inputs()):
+                self.assertEqual(node_gview.get_operator().get_input(input_idx).dims(), node_json['inputs'][input_idx]['dims'])
+                self.assertEqual(str(node_gview.get_operator().get_input(input_idx).dtype()), node_json['inputs'][input_idx]['data_type'])
+                self.assertEqual(str(node_gview.get_operator().get_input(input_idx).dformat()), node_json['inputs'][input_idx]['data_format'])
+
+            self.assertEqual(node_gview.get_operator().nb_outputs(), len(node_json['outputs']))
+            for output_idx in range(node_gview.get_operator().nb_outputs()):
+                self.assertEqual(node_gview.get_operator().get_output(output_idx).dims(), node_json['outputs'][output_idx]['dims'])
+                self.assertEqual(str(node_gview.get_operator().get_output(output_idx).dtype()), node_json['outputs'][output_idx]['data_type'])
+                self.assertEqual(str(node_gview.get_operator().get_output(output_idx).dformat()), node_json['outputs'][output_idx]['data_format'])
+
+            self.assertEqual(len(node_gview.get_parents()), len(node_json['parents']))                  
+            self.assertEqual(len(node_gview.get_children()), len(node_json['children']))
+
+            if not hasattr(node_gview.get_operator(), 'get_micro_graph'):
+                try:
+                    self.assertEqual(len(node_gview.get_operator().attr.dict()), len(node_json['attributes']))
+                    self.assertDictEqual(node_gview.get_operator().attr.dict(), node_json['attributes'])
+
+                except AttributeError:
+                    self.assertIsNone(node_gview.get_operator().attr)
+                    self.assertFalse(node_json['attributes'])
+
+            elif hasattr(node_gview.get_operator(), 'get_micro_graph'):
+                
+                self.assertEqual(len(node_gview.get_operator().get_micro_graph().get_nodes()), len(node_json['attributes']['micro_graph']))
+                
+                for micro_node_gview in node_gview.get_operator().get_micro_graph().get_nodes():
+                    for micro_node_json in node_json['attributes']['micro_graph']:
+                        if micro_node_gview.get_operator().type() == micro_node_json['optype']:
+                            
+                            for key, value in micro_node_gview.get_operator().attr.dict().items():
+                                if not type(value).__name__ in dir(builtins):
+                                    # Replace original value by its name (str) because value is of a type that could not be written to the JSON
+                                    # Cannot update this dict inplace : micro_node_gview.get_operator().attr.dict().update({key : value.name}) 
+                                    temp_mnode_dict = micro_node_gview.get_operator().attr.dict()
+                                    temp_mnode_dict.update({key : value.name})
+                                    self.assertDictEqual(temp_mnode_dict, micro_node_json['attributes'])                
+                    
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/aidge_core/unit_tests/test_topological_order.py b/aidge_core/unit_tests/test_topological_order.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e7f2e2d9b9770c2fae1e5c2812ba33113589134
--- /dev/null
+++ b/aidge_core/unit_tests/test_topological_order.py
@@ -0,0 +1,67 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+
+class test_topological_order(unittest.TestCase):
+    """Test python binding for nodes ordering"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_generic_loop_order_0(self):
+        # Defines a Generic recurring loop header operator with
+        # inputs: (init, back) and outputs (loop, last)
+        # Note that one must specify the back edge as otherwise the
+        # generated order may not schedule the loop header before the add
+        loop0 = aidge_core.GenericOperator("Loop", 2, 0, 2, "Loop#0")
+        loop0.get_operator().set_back_edges({1})
+        assert not loop0.get_operator().is_back_edge(0)
+        assert loop0.get_operator().is_back_edge(1)
+        add0 = aidge_core.Add(2, "add0")
+
+        loop0.add_child(add0, 0, 1)
+        add0.add_child(loop0, 0, 1)
+        graph = aidge_core.GraphView()
+        graph.add(loop0)
+        graph.add(add0)
+
+        nodes = graph.get_ordered_nodes()
+        assert len(nodes) == 2
+        assert nodes == [loop0, add0]
+
+    def test_generic_loop_order_1(self):
+        # Defines a Generic recurring loop header operator with
+        # inputs: (back, init) and outputs (loop, last)
+        # Note that one must specify the back edge as otherwise the
+        # generated order may not schedule the loop header before the add
+        loop0 = aidge_core.GenericOperator("Loop", 2, 0, 2, "Loop#0")
+        loop0.get_operator().set_back_edges({0})
+        assert not loop0.get_operator().is_back_edge(1)
+        assert loop0.get_operator().is_back_edge(0)
+        add0 = aidge_core.Add(2, "add0")
+
+        loop0.add_child(add0, 0, 1)
+        add0.add_child(loop0, 0, 0)
+        graph = aidge_core.GraphView()
+        graph.add(loop0)
+        graph.add(add0)
+
+        nodes = graph.get_ordered_nodes()
+        assert len(nodes) == 2
+        assert nodes == [loop0, add0]
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/cmake/PybindDependency.cmake b/cmake/PybindDependency.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..1f4e7d426fa8d78a98d6bcce44d9d7dfab17ec1e
--- /dev/null
+++ b/cmake/PybindDependency.cmake
@@ -0,0 +1,56 @@
+function(add_pybind_dependency target_name)
+
+    # This function adds dependencies on pybind/python in the
+    # case where a target depends on it. This is orthogonal to
+    # the creation of a pybind python module.
+
+    # In this case we need to add additional dependencies and distinguish the two link-time usages of the archive:
+
+    #### 1. link for producing a python binding module, which must not include the python interpreter
+
+    # For the case 1, the archive is bound to a python module which will provide the runtime,
+    # hence we add a dependency only on the pybind and python headers. We also install the pybind headers
+    # for backward compatibility of dependent build systems which may not depend upon pybind.
+
+    #### 2. link for producing an executable (tests for instance) which must include the python interpreter
+
+    # For the case 2, a library or executable must also depend on the embedded python libraries,
+    # hence we add a dependency on Python::Python when the target is not a module. We also account for
+    # the case where the python libraries are not present (such as on cibuildwheel). In this case
+    # only python modules can be built, not standalone executables.
+
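+    # Illustrative sketch of both cases from a consumer's point of view (target names are hypothetical):
+    #   target_link_libraries(my_pybind_module PRIVATE _aidge_core) # case 1: MODULE library, Python::Python is not propagated
+    #   target_link_libraries(my_test_executable PRIVATE _aidge_core) # case 2: executable, Python::Python is propagated when Development.Embed is found
+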
+    # Make the detection of Development.Embed optional: we need to separate the component detections,
+    # otherwise the variables set by the Interpreter component may be left undefined.
+    find_package(Python COMPONENTS Interpreter)
+    find_package(Python COMPONENTS Development)
+    if(NOT Python_Development.Embed_FOUND)
+        message(WARNING "Could not find Python embed libraries, falling back to Python Module only mode. If you are running this from cibuildwheel, this warning is expected.")
+        find_package(Python COMPONENTS Development.Module)
+    endif()
+
+    # Set these variables which are used in the package config (aidge_core-config.cmake.in)
+    # and for conditional build on the presence on the python interpreter library
+    set(AIDGE_REQUIRES_PYTHON TRUE PARENT_SCOPE)
+    set(AIDGE_PYTHON_HAS_EMBED ${Python_Development.Embed_FOUND} PARENT_SCOPE)
+
+    # Add pybind11 headers dependencies, the headers for the package interface are installed below
+    target_include_directories(${target_name} SYSTEM PUBLIC
+        $<INSTALL_INTERFACE:include/_packages_deps/${target_name}>
+        $<BUILD_INTERFACE:${pybind11_INCLUDE_DIR}>)
+
+    # Add include dirs for Python.h
+    target_include_directories(${target_name} SYSTEM PUBLIC ${Python_INCLUDE_DIRS})
+
+    # Add Python embedded interpreter when the target is not a module (tests executables for instance)
+    # Also requires to have Development.Embed installed on the system
+    if (Python_Development.Embed_FOUND)
+         set(target_is_module $<STREQUAL:$<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>)
+         target_link_libraries(${target_name} INTERFACE $<$<NOT:${target_is_module}>:Python::Python>)
+    endif()
+
+    # Install pybind headers such that dependent modules can find them
+    install(DIRECTORY ${pybind11_INCLUDE_DIR}/pybind11
+        DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/_packages_deps/${target_name}
+    )
+
+endfunction()
diff --git a/cmake/PybindModuleCreation.cmake b/cmake/PybindModuleCreation.cmake
index e2bbb2c3fb57867e8add781805033fa5979393a9..853810e24b40eadb0830645a4373c238177ad649 100644
--- a/cmake/PybindModuleCreation.cmake
+++ b/cmake/PybindModuleCreation.cmake
@@ -2,7 +2,6 @@ function(generate_python_binding name target_to_bind)
 
     find_package(Python COMPONENTS Interpreter Development.Module)
 
-    add_definitions(-DPYBIND)
     Include(FetchContent)
     FetchContent_Declare(
     PyBind11
@@ -15,11 +14,8 @@ function(generate_python_binding name target_to_bind)
     file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
 
     pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO EXTRA recquired for pip install
-    target_include_directories(${name} PUBLIC "python_binding")
+    target_include_directories(${name} PRIVATE "python_binding")
 
-    # Handles Python + pybind11 headers dependencies
-    target_link_libraries(${name}
-        PUBLIC
-            ${target_to_bind}
-    )
+    # Link specified target to bind
+    target_link_libraries(${name} PRIVATE ${target_to_bind})
 endfunction()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index a07b97c2f3ad40b062bac1f6a1887722ded269a6..3000b9dc0ff13f1df2698773f217389521ae48f7 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -36,8 +36,11 @@
 #include "aidge/nodeTester/ConditionalInterpreter.hpp"
 
 #include "aidge/operator/Add.hpp"
+#include "aidge/operator/And.hpp"
+#include "aidge/operator/ArgMax.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/BitShift.hpp"
 #include "aidge/operator/Clip.hpp"
 #include "aidge/operator/Concat.hpp"
 #include "aidge/operator/Conv.hpp"
@@ -59,6 +62,7 @@
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/Pow.hpp"
 #include "aidge/operator/ReduceMean.hpp"
+#include "aidge/operator/ReduceSum.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Reshape.hpp"
 #include "aidge/operator/Resize.hpp"
@@ -73,6 +77,10 @@
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/stimuli/Stimulus.hpp"
 
+#include "aidge/operator/ShiftMax.hpp"
+#include "aidge/scheduler/ShiftGELU.hpp"
+#include "aidge/stimuli/ILayerNorm.hpp"
+
 #include "aidge/recipes/Recipes.hpp"
 
 #include "aidge/utils/Attributes.hpp"
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 1fc9168da120ba87c916b1a6a346997be69184b4..4af7da64ebca3c02eb9aabca1f2dad88fd8b9829 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -14,73 +14,172 @@
 
 #include <string>
 #include <vector>
+#include <functional>
 
 #include "aidge/utils/Types.h"
+#include "aidge/utils/DynamicAttributes.hpp"
+#include "aidge/data/Data.hpp"
 #include "aidge/data/Elts.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
 
 namespace Aidge {
+class Node;
 class Operator;
 
+/**
+ * @brief ImplSpec stores the requirements or the specifications of an implementation.
+ * 
+ */
+struct ImplSpec {
+    struct IOSpec {
+        IOSpec(DataType type_, DataFormat format_ = DataFormat::Any, const std::vector<std::pair<int, int>>& dims_ = {}):
+            type(type_),
+            format(format_),
+            dims(dims_)
+        {}
+
+        DataType type;
+        DataFormat format;
+        std::vector<std::pair<int, int>> dims;
+    };
+
+    ImplSpec(const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const IOSpec& io, const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const IOSpec& i, const IOSpec& o, const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const std::vector<IOSpec>& i, const std::vector<IOSpec>& o, const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const Aidge::ImplSpec&);
+    ~ImplSpec() noexcept;
+
+    std::vector<IOSpec> inputs;
+    std::vector<IOSpec> outputs;
+    DynamicAttributes attrs;
+};
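+
+// Illustrative sketch (not part of the API contract): a spec for an
+// implementation with one float32/NCHW input and one float32/NCHW output
+// could be expressed as:
+//   ImplSpec spec(ImplSpec::IOSpec(DataType::Float32, DataFormat::NCHW),
+//                 ImplSpec::IOSpec(DataType::Float32, DataFormat::NCHW));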
+
+inline bool operator==(const ImplSpec::IOSpec& lhs, const ImplSpec::IOSpec& rhs) {
+    return (lhs.type == rhs.type)
+        && (lhs.format == rhs.format)
+        && (lhs.dims == rhs.dims);
+}
+
+inline bool operator<(const ImplSpec::IOSpec& lhs, const ImplSpec::IOSpec& rhs) {
+    return (lhs.type < rhs.type)
+        || (lhs.type == rhs.type && lhs.format < rhs.format)
+        || (lhs.type == rhs.type && lhs.format == rhs.format && lhs.dims < rhs.dims);
+}
+
+inline bool operator<(const ImplSpec& lhs, const ImplSpec& rhs) {
+    return (lhs.inputs < rhs.inputs)
+        || (lhs.inputs == rhs.inputs && lhs.outputs < rhs.outputs)
+        || (lhs.inputs == rhs.inputs && lhs.outputs == rhs.outputs && lhs.attrs < rhs.attrs);
+}
+
+/**
+ * @brief Impl stores the details of a specific implementation.
+ * It is associated with an ImplSpec in a registry.
+ * 
+ */
+template <class FwdFunc, class BwdFunc>
+struct Impl {
+    Impl(std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso_,
+      std::function<FwdFunc> forward_,
+      std::function<BwdFunc> backward_ = nullptr):
+        prodConso(prodConso_), forward(forward_), backward(backward_) {}
+
+    std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso;
+    std::function<FwdFunc> forward;
+    std::function<BwdFunc> backward;
+};
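+
+// Illustrative sketch (hypothetical, not an actual backend registration): an
+// implementation entry could be built from lambdas, assuming ProdConso can be
+// constructed from an Operator reference:
+//   Impl<void(), void()> impl(
+//       [](const Operator& op) { return std::make_unique<ProdConso>(op); },
+//       []() { /* forward kernel */ },
+//       nullptr);  // no backward implementation provided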
+
 class OperatorImpl {
 public:
     OperatorImpl(const Operator& op, const std::string& backend = "");
     virtual void forward();
     virtual void backward();
+    virtual std::shared_ptr<ProdConso> prodConso();
 
     const std::string& backend() const noexcept {
         return mBackend;
     }
-    /**
-     * @brief Minimum amount of data from a specific input required by the
-     * implementation to be run.
-     *
-     * @param inputIdx Index of the input analysed.
-     * @return std::size_t
-     */
-    virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
-    // Amount of input data that cannot be overwritten during the execution.
-    virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
-
-    // Memory required at an output for a given input size.
-    virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
+    const Operator& getOperator() const noexcept {
+        return mOp;
+    }
 
     /**
-     * @brief Total amount of consumed data from a specific input.
-     *
-     * @param inputIdx Index of the input analysed.
-     * @return DimSize_t
+     * @brief Get the implementation specification required by the operator,
+     * according to its current configuration.
+     * 
      */
-    virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
+    ImplSpec getRequiredSpec() const;
 
     /**
-     * @brief Total amount of produced data ready to be used on a specific output.
-     *
-     * @param outputIdx Index of the output analysed.
-     * @return DimSize_t
+     * @brief Get the best implementation that matches \p requiredSpecs.
+     * If no implementation matches \p requiredSpecs, \p requiredSpecs is
+     * returned.
+     * 
      */
-    virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
+    ImplSpec getBestMatch(const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Update the Consummer Producer system by simulating the consumption and production of i/o
-     *
+     * @brief Get an adapted meta operator corresponding to the required 
+     * specifications \p requiredSpecs from the implementation specifications
+     * \p spec.
+     * 
+     * @param spec Implementation specification
+     * @param requiredSpecs Required specifications
+     * @return std::shared_ptr<Node> Adapted meta op or nullptr
      */
-    virtual void updateConsummerProducer();
+    std::shared_ptr<Node> getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Reset the Consummer Producer system.
-     *
+     * @brief Get the best adapted meta operator corresponding to the required 
+     * specifications \p requiredSpecs.
+     * The best adaptation is the one with the lowest overhead cost.
+     * Currently, it is the one requiring the fewest additional
+     * operators to match the available implementations.
+     * 
+     * @param requiredSpecs Required specifications
+     * @return std::shared_ptr<Node> Adapted meta op or nullptr
      */
-    virtual void resetConsummerProducer();
+    std::shared_ptr<Node> getBestAdaptation(const ImplSpec& requiredSpecs) const;
 
     virtual ~OperatorImpl() = default;
 
 protected:
+    virtual std::shared_ptr<ProdConso> getProdConso() const;
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const;
+    bool checkIOSpec(const ImplSpec::IOSpec& required, const ImplSpec::IOSpec& spec) const;
+
     const Operator &mOp;
     const std::string mBackend;
-    std::vector<Elts_t> mNbConsumedData;
-    std::vector<Elts_t> mNbProducedData;
+    std::shared_ptr<ProdConso> mProdConso;
 };
 } // namespace Aidge
 
+template<>
+struct fmt::formatter<Aidge::ImplSpec::IOSpec> {
+    template<typename ParseContext>
+    inline constexpr auto parse(ParseContext& ctx) {
+        return ctx.begin();
+    }
+
+    template<typename FormatContext>
+    inline auto format(Aidge::ImplSpec::IOSpec const& ioSpec, FormatContext& ctx) const {
+        return fmt::format_to(ctx.out(), "{}, {}, {}", ioSpec.type, ioSpec.format, ioSpec.dims);
+    }
+};
+
+template<>
+struct fmt::formatter<Aidge::ImplSpec> {
+    template<typename ParseContext>
+    inline constexpr auto parse(ParseContext& ctx) {
+        return ctx.begin();
+    }
+
+    template<typename FormatContext>
+    inline auto format(Aidge::ImplSpec const& implSpec, FormatContext& ctx) const {
+        return fmt::format_to(ctx.out(), "{}, {}", implSpec.inputs, implSpec.outputs);
+    }
+};
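+
+// With these formatter specializations, an ImplSpec can be logged directly,
+// e.g. (sketch, where 'spec' is any Aidge::ImplSpec instance):
+//   fmt::print("spec: {}\n", spec);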
+
 #endif /* AIDGE_BACKEND_OPERATORIMPL_H_ */
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index eaadc7a7ca5fa85672619fb2d3b5b17590fd3778..23221e653ba725e4463b06cfabb5483a20756701 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -48,7 +48,8 @@ enum class DataType {
     UInt8,
     UInt16,
     UInt32,
-    UInt64
+    UInt64,
+    Any
 };
 
 enum class DataFormat {
@@ -58,7 +59,8 @@ enum class DataFormat {
     CHWN,
     NCDHW,
     NDHWC,
-    CDHWN
+    CDHWN,
+    Any
 };
 
 using DataFormatTranspose = std::array<size_t, 5>;
@@ -82,35 +84,7 @@ constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
  * @return DataFormatTranspose Permutation array to achieve a transposition
  *         from src to dst DataFormat.
 */
-constexpr inline DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
-    // Permutation array from default format to src format
-    const auto srcDefToFormat = DataFormatTransposeDict[static_cast<int>(src)];
-    // Permutation array from default format to dst format
-    const auto dstDefToFormat = DataFormatTransposeDict[static_cast<int>(dst)];
-    // Compute permutation array from src format to default format:
-    DataFormatTranspose srcFormatToDef{};
-    for (size_t i = 0; i < srcDefToFormat.size(); ++i) {
-        if (srcDefToFormat[i] > 0) {
-            srcFormatToDef[srcDefToFormat[i] - 1] = i;
-        }
-        else {
-            srcFormatToDef[i] = i;
-        }
-    }
-
-    // Compute permutation array from src format to dst format:
-    DataFormatTranspose srcToDst{};
-    for (size_t i = 0; i < dstDefToFormat.size(); ++i) {
-        if (dstDefToFormat[srcFormatToDef[i]] > 0) {
-            srcToDst[i] = dstDefToFormat[srcFormatToDef[i]] - 1;
-        }
-        else {
-            srcToDst[i] = i;
-        }
-    }
-
-    return srcToDst;
-}
+DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataFormat& dst);
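+
+// Example (sketch): get the permutation that reorders dimensions from NCHW to
+// NHWC (the concrete values are derived from DataFormatTransposeDict):
+//   const DataFormatTranspose perm = getDataFormatTranspose(DataFormat::NCHW, DataFormat::NHWC);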
 
 class Data {
 public:
@@ -145,11 +119,11 @@ const char* const EnumStrings<Aidge::DataType>::data[]
     = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Ternary",
        "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16",
        "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6",
-       "UInt7", "UInt8", "UInt16", "UInt32", "UInt64"};
+       "UInt7", "UInt8", "UInt16", "UInt32", "UInt64", "Any"};
 
 template <>
 const char* const EnumStrings<Aidge::DataFormat>::data[]
-    = {"Default", "NCHW", "NHWC", "CHWN", "NCDHW", "NDHWC", "CDHWN"};
+    = {"Default", "NCHW", "NHWC", "CHWN", "NCDHW", "NDHWC", "CDHWN", "Any"};
 
 template <Aidge::DataType D> struct cpptype {
     using type = void; // Placeholder
diff --git a/include/aidge/data/DataProvider.hpp b/include/aidge/data/DataProvider.hpp
index 62d10a6983e8cf5fd8e2730d3203bed97284e336..6c19b5355e406454a2e20bc8994d0ab04d53576a 100644
--- a/include/aidge/data/DataProvider.hpp
+++ b/include/aidge/data/DataProvider.hpp
@@ -35,6 +35,9 @@ private:
     // Desired size of the produced batches
     const std::size_t mBatchSize;
 
+    // The backend for data tensors
+    std::string mBackend;
+
     // Enable random shuffling for learning
     const bool mShuffle;
 
@@ -67,7 +70,7 @@ public:
      * @param database database from which to load the data.
      * @param batchSize number of data samples per batch.
      */
-    DataProvider(const Database& database, const std::size_t batchSize, const bool shuffle = false, const bool dropLast = false);
+    DataProvider(const Database& database, const std::size_t batchSize, const std::string& backend = "cpu", const bool shuffle = false, const bool dropLast = false);
 
 public:
     /**
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 89d7a3a7b0c4d164473869a9d6372c3bf48cd308..58e893ca5d5339d93799415f076dd69d54db69ca 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -36,7 +36,7 @@ namespace Aidge {
  * Contains a pointer to an actual contiguous implementation of data.
  */
 class Tensor : public Data,
-               public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
+               public Registrable<Tensor, std::tuple<std::string, DataType>, std::function<std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>> {
    private:
     DataType mDataType = DataType::Float32; /** enum to specify data type. */
     DataFormat mDataFormat = DataFormat::Default; /** enum to specify data format. */
@@ -312,6 +312,18 @@ class Tensor : public Data,
      */
     Tensor sqrt() const;
 
+    /**
+     * @brief Element-wise abs operation for Tensor.
+     * @return Tensor
+     */
+    Tensor abs() const;
+
+    /**
+     * @brief Mean operation for Tensor.
+     * @return Tensor
+     */
+    Tensor mean() const;
+
     ~Tensor() noexcept;
 
 public:
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 17bd3b1e9aeece2c80dab8c1aa1cba6498cc730f..efdb06c4ac6d0e6898d899cc639a88d1da301000 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -150,6 +150,24 @@ public:
     void setOrderedInputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& inputs);
     void setOrderedOutputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& outputs);
 
+    /**
+     * @brief Get a topological node order for an acyclic walk of the graph.
+     * Graph cycles are broken on operator back edges such that resolution on
+     * a single-level lattice can be done in a single pass, as is generally
+     * the case for static resolution of Tensor shapes/datatypes.
+     * When reversed is true, returns a topological order of the reversed graph,
+     * which is equivalent to a post-DFS order of the graph.
+     * The returned order is deterministic given the graph node set and the
+     * graph's ordered output nodes.
+     * The output nodes' connectivity must cover all nodes of the graph,
+     * otherwise a runtime exception is thrown.
+     * The returned order is biased toward left-to-right child order, both
+     * for topological and post-DFS order.
+     * @param reversed if true, returns a topological order of the reversed graph
+     * @return the ordered list of nodes
+     */
+    std::vector<Aidge::NodePtr> getOrderedNodes(bool reversed = false) const;
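+
+    // Example (sketch, where 'graph' is a std::shared_ptr<GraphView>):
+    //   for (const auto& node : graph->getOrderedNodes()) { /* visit in topological order */ }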
+
     /**
      * @brief Get inputs of the current GraphView with their associated id.
      * The rank of the nodes are their rank in the vector.
diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp
index fc8bfb3353352186b23459e1ca82505827c28345..951aa6b29d73d9055cf9f13c8ddc6313cb506879 100644
--- a/include/aidge/graph/Matching.hpp
+++ b/include/aidge/graph/Matching.hpp
@@ -43,6 +43,7 @@ public:
         bool singleOutput = true;
         IOIndex_t edgeLeftIdx = 0;
         IOIndex_t edgeRightIdx = 0;
+        NodePtr startNode;
 
         // For check & debug purpose:
         size_t depth = 0;
@@ -134,10 +135,20 @@ public:
      *
      * @param query The query to search.
      * @param disjoint If true, only keep the longuest disjoint (non-overlapping) matches.
-     * @return Set of matches, each stored in a MatchingResult struct.
+     * @return std::set<MatchingResult> Set of matches, each stored in a MatchingResult struct.
     */
     std::set<MatchingResult> match(const std::string& query, bool disjoint = false);
 
+    /**
+     * @brief Same as match() but with a mandatory start node.
+     * 
+     * @param startNode Mandatory start node for the query.
+     * @param query The query to search.
+     * @return MatchingResult MatchingResult struct, with an empty graph if the
+     * query is not found, or the graph corresponding to the query otherwise.
+     */
+    MatchingResult matchFrom(NodePtr startNode, const std::string& query);
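+
+    // Example (sketch; 'matching', 'node' and 'query' are placeholders, and the
+    // matched graph is assumed to be exposed as MatchingResult::graph):
+    //   auto res = matching.matchFrom(node, query);
+    //   if (!res.graph->getNodes().empty()) { /* 'query' matched starting at 'node' */ }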
+
     /**
      * Filter to keep only the longuest disjoint (non-overlapping) matches.
     */
@@ -158,7 +169,7 @@ private:
     bool matchNodeOrBlock(Context& ctx, std::set<MatchingResult>& matches);
 
     /**
-     * BLOCK = '(' SEQ | PAR | BLOCK | ALT | NODE ')'
+     * BLOCK = '(' SEQ | PAR | ALT | BLOCK | NODE ')'
     */
     bool matchBlock(Context& ctx, std::set<MatchingResult>& matches);
 
@@ -190,7 +201,7 @@ private:
      * TYPE = [A-Za-z0-9_]+
      * ANCHOR = [A-Za-z0-9_]+
      * LAMBDA = [A-Za-z0-9_]+
-     * NODE = (TYPE | '.') ('#' ANCHOR)? ('[' LAMBDA ']')?
+     * NODE = ((TYPE | '.') ('#' ANCHOR)? ('[' LAMBDA ']')?) | '$'
     */
     bool matchNode(Context& ctx, std::set<MatchingResult>& matches);
 
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index f694a1234b6037a0ae75a89380af9747765e290c..32932fa6f598737644f74d4e2ce5da89557b5d3d 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -17,6 +17,7 @@
 #include <set>
 #include <string>
 #include <vector>
+#include <deque>
 #include <utility>
 
 #ifdef PYBIND
@@ -63,6 +64,9 @@ private:
   std::vector<std::vector<IOIndex_t>> mIdInChildren; /** List of input index for each Node linked to each output of the Node. */
   std::vector<IOIndex_t> mIdOutParents; /** index of the output linked to each input of the Node. Default: gk_IODefaultIndex. */
 
+  std::deque<std::function<bool()>> mForward;
+  std::deque<std::function<bool()>> mBackward;
+
 public:
   Node() = delete;
 
@@ -73,12 +77,28 @@ public:
    */
   Node(std::shared_ptr<Operator> op, const std::string& name = "");
 
-  virtual ~Node() = default;
+  virtual ~Node();
 
   friend bool operator==(const Node &lhs, const Node &rhs) {
     return lhs.shared_from_this() == rhs.shared_from_this();
   }
 
+  void addBeforeForward(std::function<bool()> func) {
+    mForward.push_front(func);
+  }
+
+  void addAfterForward(std::function<bool()> func) {
+    mForward.push_back(func);
+  }
+
+  void addBeforeBackward(std::function<bool()> func) {
+    mBackward.push_front(func);
+  }
+
+  void addAfterBackward(std::function<bool()> func) {
+    mBackward.push_back(func);
+  }
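+
+  // Example (illustrative; 'node' is a placeholder for a std::shared_ptr<Node>):
+  // attach a callback executed after each forward pass, returning true on success:
+  //   node->addAfterForward([]() { /* e.g. log or check outputs */ return true; });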
+
 public:
   ///////////////////////////////////////////////////////
   //        FUNCTIONAL DESCRIPTION
@@ -240,6 +260,16 @@ public:
     return getOperator()->inputCategory(idx);
   }
 
+  /**
+   * @brief Returns whether the given node parent index is a back edge.
+   * A back edge is defined by the operator; the node parent index
+   * corresponds to the operator input index.
+   * @return true if the operator defines this input as a back edge
+   */
+  inline bool parentIsBackEdge(IOIndex_t idx) const {
+    return getOperator()->isBackEdge(idx);
+  }
+
   /**
    * @brief Number of inputs linked to a Parent's output.
    * @return IOIndex_t
diff --git a/include/aidge/graph/OpArgs.hpp b/include/aidge/graph/OpArgs.hpp
index bf14d39af34c2e14d98906a663edf335c30c6f12..70a431b5621270a6b6083a436aba145ce9dafbf3 100644
--- a/include/aidge/graph/OpArgs.hpp
+++ b/include/aidge/graph/OpArgs.hpp
@@ -12,8 +12,10 @@
 #ifndef AIDGE_CORE_GRAPH_OPARGS_H_
 #define AIDGE_CORE_GRAPH_OPARGS_H_
 
-#include <memory>
 #include <cassert>
+#include <memory>
+#include <string>
+#include <vector>
 
 namespace Aidge {
 class Node;
@@ -56,20 +58,22 @@ public:
  * one in a sequential way. Nodes linked with the Sequential graph
  * generation instructions must have a single output.
  * Sequential(A, B, C) returns A-->B-->C.
- * @param inputs List of Node and GraphView to link sequentially.
+ * @param[in] inputs List of Node and GraphView to link sequentially.
+ * @param[in] name Name of the GraphView to return
  * @return std::shared_ptr<GraphView> Pointer to the generated view.
  */
-std::shared_ptr<GraphView> Sequential(std::vector<OpArgs> inputs);
+std::shared_ptr<GraphView> Sequential(std::vector<OpArgs> inputs, std::string name = "");
 
 /////////////////////////////
 // Parallel
 
 /**
  * @brief Creates a GraphView with provided Nodes without linking them.
- * @param inputs List of Node and GraphView to link sequentially.
+ * @param[in] inputs List of Node and GraphView to add, without linking them.
+ * @param[in] name Name of the GraphView to return
  * @return std::shared_ptr<GraphView> pointer to the generated view.
  */
-std::shared_ptr<GraphView> Parallel(std::vector<OpArgs> inputs);
+std::shared_ptr<GraphView> Parallel(std::vector<OpArgs> inputs, std::string name = "");
 
 /////////////////////////////
 // Residual
@@ -81,9 +85,10 @@ std::shared_ptr<GraphView> Parallel(std::vector<OpArgs> inputs);
  * generation instructions must have a single output.
  * Recursive(A, B, C) returns A-->B-->C , A-->C.
  * @param inputs List of Node and GraphView to link sequentially.
+ * @param[in] name Name of the GraphView to return
  * @return std::shared_ptr<GraphView> pointer to the generated view.
  */
-std::shared_ptr<GraphView> Residual(std::vector<OpArgs> inputs);
+std::shared_ptr<GraphView> Residual(std::vector<OpArgs> inputs, std::string name = "");
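+
+// Example (sketch, where 'a', 'b' and 'c' stand for previously created Nodes):
+//   auto g = Residual({a, b, c}, "res_block");   // a-->b-->c plus a-->c, named "res_block"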
 
 }
 
diff --git a/include/aidge/graphRegex/GraphRegex.hpp b/include/aidge/graphRegex/GraphRegex.hpp
index b62a42fcfeb258e5c659eaeb6681190482f37aa4..573447cf934b196e8b0c32d7a58e1977f5aa5f9a 100644
--- a/include/aidge/graphRegex/GraphRegex.hpp
+++ b/include/aidge/graphRegex/GraphRegex.hpp
@@ -12,13 +12,12 @@
 namespace Aidge{
 
 /**
- * type for recipes function use in query and resolve  
-*/
+ * @brief Type of the recipe functions used in query and resolve
+ */
 using RecipesFunctionType = std::function<void(std::shared_ptr<MatchSolution>)>;
 
 /**
- * @brief class which is the hight level interface for graph matching, used to simplify match definition  
- * 
+ * @brief High-level interface for graph matching, used to simplify match definition
  */
 class GraphRegex{
 
diff --git a/include/aidge/hook/Hook.hpp b/include/aidge/hook/Hook.hpp
index 5e00db5d68f11aadd4f3b6eb8174ba61b33e4a49..5edf231d51f913f58351b4817e145b5f48953ddd 100644
--- a/include/aidge/hook/Hook.hpp
+++ b/include/aidge/hook/Hook.hpp
@@ -24,8 +24,8 @@
 namespace Aidge {
 
 class Operator;
-class Hook : public Registrable<Hook, std::tuple<std::string>, std::shared_ptr<Hook>(const std::shared_ptr<Operator>)> {
-//class Hook : public Registrable<Hook, std::tuple<std::string>, std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>{
+class Hook : public Registrable<Hook, std::tuple<std::string>, std::function<std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>> {
+//class Hook : public Registrable<Hook, std::tuple<std::string>, std::function<std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>>{
 protected:
     const std::shared_ptr<Operator> mOperator;
 
diff --git a/include/aidge/operator/Abs.hpp b/include/aidge/operator/Abs.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f1dc37003fbff9463d041030818ec0534c5ac1fd
--- /dev/null
+++ b/include/aidge/operator/Abs.hpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ABS_H_
+#define AIDGE_CORE_OPERATOR_ABS_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Abs_Op : public OperatorTensor,
+    public Registrable<Abs_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Abs_Op&)>> {
+public:
+    static const std::string Type;
+
+    Abs_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Abs_Op(const Abs_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Abs_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Abs_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Abs_Op>(*this);
+    }
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Abs(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Abs_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_ABS_H_ */
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 97db476729abc07985b16de62084be5fce603bc9..daf50771703d6608dbbe90364aac8667aefbdd1d 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -24,7 +24,7 @@
 namespace Aidge {
 
 class Add_Op : public OperatorTensor,
-    public Registrable<Add_Op, std::string, std::shared_ptr<OperatorImpl>(const Add_Op&)> {
+    public Registrable<Add_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Add_Op&)>> {
 public:
     static const std::string Type;
 
@@ -55,6 +55,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input_0", "data_input_n"};
diff --git a/include/aidge/operator/And.hpp b/include/aidge/operator/And.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e4f04e2fa3ec2a4a01f023b9ab203e6b2ab36e76
--- /dev/null
+++ b/include/aidge/operator/And.hpp
@@ -0,0 +1,82 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_AND_H_
+#define AIDGE_CORE_OPERATOR_AND_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/**
+ * @brief Tensor element-wise logical and operation.
+ */
+class And_Op : public OperatorTensor,
+    public Registrable<And_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const And_Op&)>> {
+public:
+    static const std::string Type;
+
+    /**
+     * @brief Compute the element-wise logical AND of the two given inputs.
+     * @details Supports broadcasting of both operands.
+     */
+    And_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    And_Op(const And_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(And_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::And_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<And_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input_1", "data_input_2"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> And(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<And_Op>(), name);
+}
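+
+// Example (sketch): a node computing the element-wise logical AND of its two inputs:
+//   auto node = And("and_op");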
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_AND_H_ */
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..13f63ce98c526f0c57a363ada4e7f50ccdbfb83b
--- /dev/null
+++ b/include/aidge/operator/ArgMax.hpp
@@ -0,0 +1,136 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ARGMAX_H_
+#define AIDGE_CORE_OPERATOR_ARGMAX_H_
+
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ArgMaxAttr { Axis, KeepDims, SelectLastIndex };
+
+/**
+ * @brief This operator reduces the given dimension by replacing it with the index of its maximum value.
+*/
+class ArgMax_Op : public OperatorTensor,
+                public Registrable<ArgMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ArgMax_Op &)>> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<ArgMaxAttr,
+                                        std::int32_t,
+                                        bool,
+                                        bool>;
+    template <ArgMaxAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    ArgMax_Op() = delete;
+
+    /**
+     * @brief Constructor for the ArgMax operator.
+     * @param[in] axis axis along which the operation is performed
+     * @param[in] keep_dims if true, a dimension of size 1 is kept in place of the reduced axis;
+     * if false, the dimension is removed completely
+     * @param[in] select_last_index if there are several maxima, the last index is returned if true,
+     * the first index otherwise.
+     */
+    ArgMax_Op(std::int32_t axis, bool keep_dims, bool select_last_index)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ArgMaxAttr::Axis>(axis),
+            attr<ArgMaxAttr::KeepDims>(keep_dims),
+            attr<ArgMaxAttr::SelectLastIndex>(select_last_index)))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ArgMax_Op(const ArgMax_Op& op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ArgMax_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ArgMax_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ArgMax_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::Axis>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::KeepDims>(); }
+    inline bool& selectLastIndex() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::SelectLastIndex>(); }
+
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+/**
+ * @brief Compute the index of the max value of a Tensor over the provided axis.
+ * The reduced dimension may be kept (with size 1) or erased.
+ *
+ * @param axis Dimension over which the max should be computed.
+ * @param keep_dims Whether the reduced dimension is kept (with size 1) or erased.
+ * @param select_last_index Whether to select the last index of the max element in case there are several maxima.
+ * By default, the first max element index is returned.
+ * @param name Name of the Operator.
+ * @return std::shared_ptr<Node> Node containing the Operator.
+ */
+inline std::shared_ptr<Node> ArgMax(std::int32_t axis=0,
+                                    bool keep_dims=true,
+                                    bool select_last_index=false,
+                                    const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<ArgMax_Op>(axis, keep_dims, select_last_index), name);
+
+}
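+
+// Example (sketch): argmax along axis 1 of the input, keeping the reduced dimension:
+//   auto node = ArgMax(/*axis=*/1, /*keep_dims=*/true, /*select_last_index=*/false, "argmax");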
+
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_ARGMAX_H_ */
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index b2f4ce92580afddcc7aa3627ea0ef89d4ac3ffee..54b40907e8b4127b7b96b95b229440d782149c3d 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -28,7 +28,7 @@ enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
-                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)> {
+                public Registrable<AvgPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>> {
 
 public:
     static const std::string Type;
@@ -77,6 +77,7 @@ public:
 
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<AvgPoolingAttr::StrideDims>(); }
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 7f1f63c68a512c4b6a59a515d6130afe9696a8c2..cdac7935f6ded752201c04b2dda6cfb9e06438ec 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -28,7 +28,7 @@ enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
-                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)> {
+                public Registrable<BatchNorm_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>> {
 public:
     static const std::string Type;
 
@@ -79,6 +79,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline float& epsilon() const { return mAttributes->template getAttr<BatchNormAttr::Epsilon>(); }
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..bd14bea76937fbfc42cbafa9636df9b55832fa9d
--- /dev/null
+++ b/include/aidge/operator/BitShift.hpp
@@ -0,0 +1,125 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_BITSHIFT_H_
+#define AIDGE_CORE_OPERATOR_BITSHIFT_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/StaticAttributes.hpp"
+
+
+namespace Aidge {
+    enum class BitShiftAttr { BitShiftdirection };
+
+/**
+ * @brief Tensor BitShift Operator
+ */
+class BitShift_Op : public OperatorTensor,
+    public Registrable<BitShift_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const BitShift_Op&)>> {
+public:
+    enum BitShiftDirection {left,right};
+    static const std::string Type;
+private:     
+
+    using Attributes_ = StaticAttributes<BitShiftAttr,BitShiftDirection>;
+    template <BitShiftAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+public:
+
+    BitShift_Op(BitShiftDirection direction)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<BitShiftAttr::BitShiftdirection>(direction)))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    BitShift_Op(const BitShift_Op& op)
+        : OperatorTensor(op),mAttributes(op.mAttributes)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(BitShift_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::BitShift_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<BitShift_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+   
+    /**
+     * @brief Setter to specify which backend to use
+     *
+     * @param name Name of the backend
+     */
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+    
+    /**
+     * @brief Getter to retrieve Attributes of the bitshift class
+     * 
+     * @return Attributes
+     */
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Retrieve the direction in which the shift should be applied (right or left)
+     * 
+     * @return BitShiftDirection 
+     */
+    inline BitShiftDirection& direction() const noexcept { return mAttributes ->template getAttr<BitShiftAttr::BitShiftdirection>(); }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"InputTensor", "ShiftAmount"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"OutputTensor"};
+    }
+
+
+};
+/**
+ * @brief The bitwise shift operator performs an element-wise operation between the input tensor
+ * and the shift tensor, in the direction specified by "direction".
+ * @param[in] direction Direction of the bitshift (Left or Right)
+ * @param[in] name Name of the node
+ * @return std::shared_ptr<Node> 
+ */
+inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direction, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<BitShift_Op>(direction), name);
+}
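+
+// Example (sketch): left-shift each element of the first input by the amounts
+// given in the second input:
+//   auto node = BitShift(BitShift_Op::BitShiftDirection::left, "shift_left");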
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::BitShiftAttr>::data[] = {"BitShiftdirection"};
+
+}
+
+#endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index fd12f551a2251f3dfe8ea0a0d0528d9dad742e42..3fa1bb22a0dd9def11e0621b67cbd8395b5344fa 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -33,7 +33,7 @@ public:
 enum class CastAttr { TargetType };
 
 class Cast_Op : public OperatorTensor,
-    public Registrable<Cast_Op, std::string, std::unique_ptr<OperatorImpl>(const Cast_Op&)> {
+    public Registrable<Cast_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Cast_Op&)>> {
 public:
     static const std::string Type;
 
@@ -73,6 +73,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline DataType& targetType() const { return mAttributes->template getAttr<CastAttr::TargetType>(); }
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 46cd3a5a328984bde7e537d984b30e0774a3d259..98835dd2a4b02e51b50636ee8606382a50ba7b89 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -37,7 +37,7 @@ public:
 enum class ConcatAttr { Axis };
 
 class Concat_Op : public OperatorTensor,
-    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)> {
+    public Registrable<Concat_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Concat_Op&)>> {
 public:
     static const std::string Type;
 
@@ -67,6 +67,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::int32_t& axis() const { return mAttributes->template getAttr<ConcatAttr::Axis>(); }
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..18e626544606fd150b2843d2367aa8858669c2ba
--- /dev/null
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -0,0 +1,136 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
+#define AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
+
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <limits>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+enum class ConstantOfShapeAttr {
+  /**
+   * @brief value to fill the output tensor with.
+   * It is a scalar tensor holding a value with a fixed datatype.
+   */
+  Value,
+};
+
+/**
+ * @brief This operator generates a tensor whose shape is given via input and
+ * which is filled with a value set via attribute.
+ */
+class ConstantOfShape_Op
+    : public OperatorTensor,
+      public Registrable<ConstantOfShape_Op, std::string,
+                         std::function<std::shared_ptr<OperatorImpl>(
+                             const ConstantOfShape_Op &)>> {
+
+public:
+  // name of the type of the operation
+  static const std::string Type;
+
+private:
+  using Attributes_ = StaticAttributes<ConstantOfShapeAttr, Tensor>;
+  template <ConstantOfShapeAttr e>
+  using attr = typename Attributes_::template attr<e>;
+  const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+  /**
+   * @brief Constructor for the ConstantOfShape operator.
+   * @param[in] value a scalar tensor which holds the value that will
+   * fill the output tensor
+   */
+  ConstantOfShape_Op(const Tensor &value = Tensor(0.f))
+      : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+            attr<ConstantOfShapeAttr::Value>(value))) {}
+
+  /**
+   * @brief Copy-constructor. Copy the operator attributes and its output
+   * tensor(s), but not its input tensors (the new operator has no input
+   * associated).
+   * @param op Operator to copy.
+   */
+  ConstantOfShape_Op(const ConstantOfShape_Op &op)
+      : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (op.mImpl) {
+      SET_IMPL_MACRO(ConstantOfShape_Op, *this, op.backend());
+    } else {
+      mImpl = nullptr;
+    }
+  }
+
+  /**
+   * @brief Clone the operator using its copy-constructor.
+   * @see Operator::ConstantOfShape_Op
+   */
+  std::shared_ptr<Operator> clone() const override final {
+    return std::make_shared<ConstantOfShape_Op>(*this);
+  }
+
+  /**
+   * @brief Compute dimensions for the output Tensor
+   * @param allowDataDependency specify if the output shape of this operator
+   * depends on its inputs.
+   */
+  bool forwardDims(bool allowDataDependency = false) override final;
+
+  void setBackend(const std::string &name,
+                  DeviceIdx_t device = 0) override final;
+  std::set<std::string> getAvailableBackends() const override;
+
+  inline std::shared_ptr<Attributes> attributes() const override {
+    return mAttributes;
+  }
+  inline Tensor &value() const noexcept {
+    return mAttributes->template getAttr<ConstantOfShapeAttr::Value>();
+  }
+
+  static const std::vector<std::string> getInputsName() { return {"input"}; }
+  static const std::vector<std::string> getOutputsName() {
+    return {"constant_of_shape"};
+  }
+};
+
+// Helper to create a ConstantOfShape node with a given fill value.
+inline std::shared_ptr<Node> ConstantOfShape(const Tensor value = Tensor(0.f),
+                                             const std::string &name = "") {
+  return std::make_shared<Node>(std::make_shared<ConstantOfShape_Op>(value),
+                                name);
+}
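+
+// Example (sketch): a node whose output has the shape given by its input tensor
+// and is filled with the value 1.0f:
+//   auto node = ConstantOfShape(Tensor(1.0f), "ones");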
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"Value"};
+}
+
+#endif // AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
+
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 7366472d24b78b58aab589ea2b3ccd045e4a5ea7..cd1a57dd9ac52d2f5cdff3b5ed54c6dd2aeeed34 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -34,7 +34,7 @@ enum class ConvAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
-                public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)> {
+                public Registrable<Conv_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>> {
 
 public:
     static const std::string Type;
@@ -97,6 +97,7 @@ public:
 
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     DimSize_t inChannels() const {
         if (!getInput(1)) {
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 63d8e8419b47279c51783db057b5b1a63c7d0884..f0a55a299094add58bd3938e9cca9bbb48e21da8 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -33,7 +33,7 @@ enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
-                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)> {
+                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>> {
 public:
     static const std::string Type;
 
@@ -83,6 +83,7 @@ public:
                           const IOIndex_t outputIdx = 0) const override;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     DimSize_t nbChannels() const {
         if (!getInput(1)) {
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
index 72ff83834962c1860b135a4187e72199b04361db..856cd0e85d1abb47d3c163115bef6cbfb59bb66f 100644
--- a/include/aidge/operator/DepthToSpace.hpp
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -35,7 +35,7 @@ enum class DepthToSpaceAttr { BlockSize, Mode };
 class DepthToSpace_Op : public OperatorTensor,
                 public Registrable<DepthToSpace_Op,
                     std::string,
-                    std::shared_ptr<OperatorImpl>(const DepthToSpace_Op &)> {
+                    std::function<std::shared_ptr<OperatorImpl>(const DepthToSpace_Op &)>> {
 public:
     static const std::string Type;
     enum class Mode { DCR, CRD };
@@ -68,6 +68,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::uint32_t& blockSize() const { return mAttributes->template getAttr<DepthToSpaceAttr::BlockSize>(); }
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index b16a5e6733e8846b05e3e491cf5bc7f793d97f1c..5ed9e789deab71b107a6071ab11452c3cf73fa9d 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -25,7 +25,7 @@
 namespace Aidge {
 
 class Div_Op : public OperatorTensor,
-    public Registrable<Div_Op, std::string, std::shared_ptr<OperatorImpl>(const Div_Op&)> {
+    public Registrable<Div_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Div_Op&)>> {
 
 public:
     static const std::string Type;
@@ -57,6 +57,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_1", "data_input_2"};
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index b6cc8f30c0fff3366cb1d3fea678e4cad8f9cb10..88a4bfd29e7d27e7eaea00d967e0ba631354d253 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -25,7 +25,7 @@
 namespace Aidge {
 
 class Erf_Op : public OperatorTensor,
-    public Registrable<Erf_Op, std::string, std::shared_ptr<OperatorImpl>(const Erf_Op&)> {
+    public Registrable<Erf_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Erf_Op&)>> {
 public:
     static const std::string Type;
 
@@ -44,6 +44,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index f1996fbae025838e2e6f6c21c70018c7cc9746f5..592ba4e2b796ba1aede24a737e296ddf1e285499 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
-                                 std::shared_ptr<OperatorImpl>(const FC_Op &)> {
+                                 std::function<std::shared_ptr<OperatorImpl>(const FC_Op &)>> {
 public:
     static const std::string Type;
 
@@ -60,6 +60,14 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    DimSize_t inChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Fully Connected (FC) operator has no weight Tensor associated so no specific number of input channel imposed.");
+        }
+        return getInput(1)->template dims<2>()[1];
+    }
 
     DimSize_t outChannels() const {
         if (!getInput(1)) {
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index aebe3879b94fd13c8226fffe42e513715d8e3e5a..517d63adc59ed848c53852697ab9f8511dfc2a2a 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -34,7 +34,7 @@ enum class FoldAttr { OutputDims, StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class Fold_Op : public OperatorTensor,
-                public Registrable<Fold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)> {
+                public Registrable<Fold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)>> {
 
 public:
     static const std::string Type;
@@ -78,6 +78,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, DIM>& outputDims() const { return mAttributes->template getAttr<FoldAttr::OutputDims>(); }
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index f2e3b0fe8c063a5eec5e0c2140c3b7eabf3ee68a..80dcdd67883529c710b142b6b547d4b02e85cd44 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -36,7 +36,7 @@ enum class GatherAttr { Axis, Indices, GatheredShape };
 class Gather_Op : public OperatorTensor,
                 public Registrable<Gather_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Gather_Op&)> {
+                                   std::function<std::shared_ptr<OperatorImpl>(const Gather_Op&)>> {
 public:
     static const std::string Type;
 
@@ -73,6 +73,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::int8_t& axis() const { return mAttributes -> getAttr<GatherAttr::Axis>(); }
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 41516a39723249b5b5c715a66ce3398dff8e65b1..2812da066887d63133ede2d69b5804f0b8a8101e 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 class GenericOperator_Op
     : public OperatorTensor,
-      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)> {
+      public Registrable<GenericOperator_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>> {
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
@@ -57,6 +57,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override { return std::set<std::string>(); };
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
     template <class T>
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 734e12344fed4cd25dd41e91dc8cfb18fea4fd45..ef440e8c697ff221aa8df42e459de7ac697e8a0c 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -32,8 +32,8 @@ namespace Aidge {
 class GlobalAveragePooling_Op
     : public OperatorTensor,
       public Registrable<GlobalAveragePooling_Op, std::string,
-                         std::shared_ptr<OperatorImpl>(
-                             const GlobalAveragePooling_Op &)> {
+                         std::function<std::shared_ptr<OperatorImpl>(
+                             const GlobalAveragePooling_Op &)>> {
 public:
   static const std::string Type;
 
@@ -46,6 +46,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
   void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
   static const std::vector<std::string> getInputsName() {
     return {"data_input"};
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index 81900824ed0d26572e593982fa21ed900eda88ee..dc2b2059e75711572e0f7fa94cc6ccb9f58c970b 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
 enum class GridSampleAttr { Mode, PaddingMode, AlignCorners };
 
 class GridSample_Op : public OperatorTensor,
-	public Registrable<GridSample_Op, std::string, std::shared_ptr<OperatorImpl>(const GridSample_Op&)> {
+	public Registrable<GridSample_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const GridSample_Op&)>> {
 
 public:
 	static const std::string Type;
@@ -58,6 +58,7 @@ public:
 	bool forwardDims(bool /*allowDataDependencies*/ = false) override final;
 
 	void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
 	inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 	inline Mode mode() const { return mAttributes->template getAttr<GridSampleAttr::Mode>(); }
diff --git a/include/aidge/operator/ILayerNorm.hpp b/include/aidge/operator/ILayerNorm.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f660cc64eb65770cc6cf5335d9c070b155d03c0f
--- /dev/null
+++ b/include/aidge/operator/ILayerNorm.hpp
@@ -0,0 +1,81 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 10.09.2024
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ILAYERNORM_H_
+#define AIDGE_CORE_OPERATOR_ILAYERNORM_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class ILayerNorm_Op : public OperatorTensor,
+    public Registrable<ILayerNorm_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ILayerNorm_Op&)>> {
+public:
+    static const std::string Type;
+
+    ILayerNorm_Op()
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::Param}, 1)
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ILayerNorm_Op(const ILayerNorm_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ILayerNorm_Op, *this, op.backend());
+        }else{
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ILayerNorm_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ILayerNorm_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "weight", "bias"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> ILayerNorm(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<ILayerNorm_Op>(), name);
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_ILAYERNORM_H_ */
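For reference, a short usage sketch of the new `ILayerNorm` factory declared above. It relies only on what this header exposes (the node helper and the static input-name list); wiring the weight and bias producers into a graph is left out.

```cpp
#include <memory>

#include "aidge/operator/ILayerNorm.hpp"

int main() {
    // Create an ILayerNorm node with the factory helper declared above.
    std::shared_ptr<Aidge::Node> norm = Aidge::ILayerNorm("ilayernorm0");

    // The operator expects three inputs: data, weight and bias.
    const auto inputNames = Aidge::ILayerNorm_Op::getInputsName();
    return (norm && inputNames.size() == 3) ? 0 : 1;
}
```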
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 622d6290af55ef5a717c6f5763ade5a2750fb9f0..24476f231806bf38ae48b9e2d5ec405e072afdb2 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -26,6 +26,11 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
+class Identity_OpImpl : public OperatorImpl {
+public:
+    Identity_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
 
 /**
  * @brief Indentity_Op is an helper operator made to ease the declaration of MetaNodes.
@@ -35,7 +40,7 @@ namespace Aidge {
  *
  */
 class Identity_Op : public OperatorTensor,
-    public Registrable<Identity_Op, std::string, std::unique_ptr<OperatorImpl>(const Identity_Op&)> {
+    public Registrable<Identity_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Identity_Op&)>> {
 public:
     static const std::string Type;
 
@@ -54,29 +59,8 @@ public:
      */
     std::shared_ptr<Operator> clone() const override;
 
-    // bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
-
-    /**
-     * @brief Check if output dimensions have been computed.
-     * @note Since Indentity has no output Tensor, this function checks if its
-     * only input's dimensions have been computed.
-     *
-     * @return true Input has dimensions.
-     * @return false Input has no dimensions or is a nullptr.
-     */
-    bool dimsForwarded() const override final;
-
-
-    void forward() override final;
-
-    void backward() override final { }
-
-    void setBackend(const std::string& /*name*/, DeviceIdx_t /*device*/ = 0) override final {
-        // setBackend do nothing, Identity node has no backend it just pass the same Tensor
-    }
-    void setDataType(const DataType& /*dataType*/) const override final {
-        // setDatatype do nothing, Identity node has no backend it just pass the same Tensor
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 30d171eab3ee54864aae48f445e4d0f04792dd31..179eb90b39bb5d527781289b9b233d3a29d14494 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -29,7 +29,7 @@ enum class LeakyReLUAttr {
 };
 
 class LeakyReLU_Op : public OperatorTensor,
-    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)> {
+    public Registrable<LeakyReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>> {
 public:
     static const std::string Type;
 
@@ -62,6 +62,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<LeakyReLUAttr::NegativeSlope>(); }
diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Ln.hpp
index c6a9ec4c8d59800cdbcc3f0229acdbbb436cd732..22fc51664b89bcdeb5970b0cc92beafdde52e43f 100755
--- a/include/aidge/operator/Ln.hpp
+++ b/include/aidge/operator/Ln.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class Ln_Op : public OperatorTensor,
-    public Registrable<Ln_Op, std::string, std::unique_ptr<OperatorImpl>(const Ln_Op&)> {
+    public Registrable<Ln_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Ln_Op&)>> {
 public:
     static const std::string Type;
 
@@ -46,6 +46,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index f81fb7bd0a3156fcffccc10fe3d460273f353252..bf6ab84c7373962e71434050427c9b6ecae3b034 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -26,7 +26,7 @@ namespace Aidge {
 class MatMul_Op : public OperatorTensor,
               public Registrable<MatMul_Op,
                                  std::string,
-                                 std::shared_ptr<OperatorImpl>(const MatMul_Op &)> {
+                                 std::function<std::shared_ptr<OperatorImpl>(const MatMul_Op &)>> {
 public:
     static const std::string Type;
 
@@ -59,6 +59,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input1", "data_input2"};
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 3b7473a6a17e8ebf490941068c8245d5847e0299..0cc43a6fbe50849b169a59d048962668d3e4666c 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -33,7 +33,7 @@ enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
-                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)> {
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>> {
 public:
     static const std::string Type;
 
@@ -69,6 +69,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index a1d90f06f098eb7fa2fc199b595991702daf488a..2b05b5fffed98a7df99a450a5f99c88efa2f7288 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -25,19 +25,25 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-class Memorize_OpImpl : public OperatorImpl {
+class Memorize_ProdConso : public ProdConso {
 public:
-    Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Memorize_ProdConso(const Operator& op): ProdConso(op) {}
     Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
     void updateConsummerProducer() override;
+};
+
+class Memorize_OpImpl : public OperatorImpl {
+public:
+    Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Memorize_ProdConso>(mOp); };
     void forward() override;
 };
 
 enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep };
 
 class Memorize_Op : public OperatorTensor,
-    public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)> {
+    public Registrable<Memorize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Memorize_Op&)>> {
 public:
     static const std::string Type;
 
@@ -66,6 +72,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
     bool dimsForwarded() const override;
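The Memorize change illustrates the wider refactor in this patch: the producer/consumer scheduling hooks (`getNbRequiredData`, `getRequiredMemory`, `updateConsummerProducer`) move from the `OperatorImpl` itself into a `ProdConso` object returned by `getProdConso()`. A hedged sketch of that pattern, mirroring `Memorize_OpImpl` above; `MyOpImpl` and `MyProdConso` are illustrative names, not part of aidge_core.

```cpp
#include <memory>
#include <string>

#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/scheduler/ProdConso.hpp"

namespace Aidge {

// Illustrative scheduling model for a hypothetical operator.
class MyProdConso : public ProdConso {
public:
    MyProdConso(const Operator& op): ProdConso(op) {}
    // Override getNbRequiredData() / getRequiredMemory() here, as Memorize_ProdConso does above.
};

// The implementation exposes its scheduling model through getProdConso()
// instead of overriding the scheduling hooks directly.
class MyOpImpl : public OperatorImpl {
public:
    MyOpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
    std::shared_ptr<ProdConso> getProdConso() const override {
        return std::make_shared<MyProdConso>(mOp);
    }
    void forward() override { /* backend-agnostic forward pass */ }
};

} // namespace Aidge
```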
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 69f2120d90beb727bd661628c362410066ae3cff..ccff976cbb7cf8efc59223dfd658ca2a4d03a80b 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -21,13 +21,14 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
 #include "aidge/scheduler/SequentialScheduler.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 class MetaOperator_Op : public OperatorTensor,
-                public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)> {
+                public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::function<std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)>> {
 public:
     // outputs shared with micro-graph output Tensors
     // Micro-graph handling:
@@ -79,8 +80,10 @@ public:
         return false;
     }
 
+    std::string backend() const noexcept override;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     void setDataType(const DataType &datatype) const override {
         // The micro-graph should always be set to the right data type, since it
@@ -89,6 +92,8 @@ public:
         mGraph->setDataType(datatype);
     }
 
+    std::shared_ptr<Attributes> attributes() const override;
+
     Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
     Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override;
     Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override;
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index 9908911419d8ce027cdb18c4abf45a5c71be67b1..49d92cd12f68a0b23530039c1df70ced9b2d2080 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -31,7 +31,7 @@ public:
 };
 
 class Move_Op : public OperatorTensor,
-    public Registrable<Move_Op, std::tuple<std::string, std::string>, std::unique_ptr<OperatorImpl>(const Move_Op&)> {
+    public Registrable<Move_Op, std::tuple<std::string, std::string>, std::function<std::unique_ptr<OperatorImpl>(const Move_Op&)>> {
 public:
     static const std::string Type;
 
@@ -50,6 +50,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 35a4b7e061bba76f1e63343e9230eddddfde11ac..bfe4fcb0de1cb7dda4a0ea8fc7b99638bc813f47 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
  * @brief Tensor element-wise multiplication.
  */
 class Mul_Op : public OperatorTensor,
-    public Registrable<Mul_Op, std::string, std::shared_ptr<OperatorImpl>(const Mul_Op&)> {
+    public Registrable<Mul_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Mul_Op&)>> {
 public:
     static const std::string Type;
 
@@ -50,6 +50,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_1", "data_input_2"};
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index c938fc362aa1f747f5f31bea3fdb08fa851e2333..87aa4080e57d14d0d8a738afed2e976521b42048 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -57,6 +57,7 @@ private:
     const OperatorType mOperatorType;
     const std::vector<InputCategory> mInputsCategory;
     const IOIndex_t mNbOut;
+    std::set<IOIndex_t> mBackEdges;
 
 public:
     Operator() = delete;
@@ -73,7 +74,8 @@ public:
         std::enable_shared_from_this<Operator>(),
         mOperatorType(op.mOperatorType),
         mInputsCategory(op.mInputsCategory),
-        mNbOut(op.mNbOut)
+        mNbOut(op.mNbOut),
+        mBackEdges(op.mBackEdges)
     {
         mType = op.mType;
         mImpl = nullptr;
@@ -124,14 +126,17 @@ public:
 ///////////////////////////////////////////////////////
 //        IMPLEMENTATION
 ///////////////////////////////////////////////////////
-    std::string backend() const noexcept {
+    virtual std::string backend() const noexcept {
         return mImpl ? mImpl->backend() : "";
     }
 
     virtual void setBackend(const std::string& name, DeviceIdx_t device = 0) = 0;
+    void setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends);
     virtual void setDataType(const DataType& dataType) const = 0;
     virtual void setDataFormat(const DataFormat& dataFormat) const = 0;
 
+    virtual std::set<std::string> getAvailableBackends() const = 0;
+
     /**
      * @brief Set a new OperatorImpl to the Operator
      *
@@ -205,6 +210,21 @@ public:
     inline IOIndex_t nbInputs() const noexcept { return mInputsCategory.size(); };
     inline IOIndex_t nbOutputs() const noexcept { return mNbOut; };
 
+    /**
+     * @brief Set the back edge input indices for recurring operators.
+     * Any recurring operator should specify its back edges, otherwise
+     * the interpretation of the data flow graph may not be possible.
+     */
+    inline void setBackEdges(const std::set<IOIndex_t>& backEdges) { mBackEdges = backEdges; }
+
+    /**
+     * @brief Returns whether the given input index is a back edge.
+     * @return true if the input index is in the back edge set
+     */
+    inline bool isBackEdge(IOIndex_t inputIdx) const {
+        return mBackEdges.find(inputIdx) != mBackEdges.end();
+    }
+
     static const std::vector<std::string> getInputsName() {
         return {};
     }
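The back-edge bookkeeping added to `Operator` is plain set membership over input indices: recurrent operators such as Memorize declare which inputs close a cycle so the scheduler can interpret the data-flow graph. A self-contained sketch of the intended behaviour using a standalone `std::set` (since `Operator` itself is abstract); the `std::uint16_t` alias is only a stand-in for `Aidge::IOIndex_t`.

```cpp
#include <cstdint>
#include <iostream>
#include <set>

using IOIndex = std::uint16_t;  // stand-in for Aidge::IOIndex_t

int main() {
    // A recurrent operator such as Memorize would mark input #1 as a back edge.
    std::set<IOIndex> backEdges{1};

    // Same logic as Operator::isBackEdge(): membership test on the stored set.
    auto isBackEdge = [&backEdges](IOIndex inputIdx) {
        return backEdges.find(inputIdx) != backEdges.end();
    };

    std::cout << std::boolalpha
              << isBackEdge(0) << '\n'   // false: regular data input
              << isBackEdge(1) << '\n';  // true: feedback input
    return 0;
}
```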
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 1097454fce62f645eb83c491498031738847e96c..c8cdd93810e18bd3cdd0a2d080e54aae2d787c66 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -40,6 +40,14 @@ protected:
 public:
     OperatorTensor() = delete;
 
+    /**
+     * @brief OperatorTensor constructor. This constructor is not meant to be called directly;
+     * it is invoked by the constructors of derived classes, since every operator class derives from OperatorTensor.
+     *
+     * @param[in] type           Type of the operator (e.g. "Add", "AveragePool", ...).
+     * @param[in] inputsCategory Category of each input (e.g. Data, Param, OptionalData).
+     * @param[in] nbOut          Number of tensors this operator will output.
+     */
     OperatorTensor(const std::string& type, const std::vector<InputCategory>& inputsCategory,
                    const IOIndex_t nbOut);
 
@@ -79,6 +87,15 @@ public:
      * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
      */
     virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
+
+    /**
+     * @brief Computes the dimensions of the operator's output tensor(s) from the input sizes.
+     *        If the output dimensions cannot be computed because they depend on undefined inputs,
+     *        forwardDims() returns false and enters TOKEN mode for subsequent tensors.
+     *        TOKEN mode means that forwardDims() only ensures that all inputs and outputs of the graph the node belongs to are connected.
+     * @param[in] allowDataDependency if set to true, the output dimensions of this operator may depend on the dimensions of optional parameter tensors.
+     * @return true if the dimensions have been properly forwarded, false otherwise (in which case forwardDims() enters TOKEN mode).
+     */
     virtual bool forwardDims(bool allowDataDependency = false);
     virtual bool dimsForwarded() const;
     ///////////////////////////////////////////////////
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index bdb5330a6fd02693f4d75ccba06ce613d9a0dff1..2c670bf23d4703a5a9e8502c8b356fdde32e2561 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -29,7 +29,7 @@ enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
-                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)> {
+                public Registrable<Pad_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>> {
 public:
     static const std::string Type;
 
@@ -74,6 +74,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<PadAttr::BeginEndBorders>(); }
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 41ab3c537eacc88920419cb5e0deecc4720796ba..d5898b3630721b036b3acb916e6dec87455009f7 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -24,17 +24,23 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Pop_ProdConso : public ProdConso {
+public:
+    Pop_ProdConso(const Operator& op): ProdConso(op) {}
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+};
+
 class Pop_OpImpl : public OperatorImpl {
 public:
     Pop_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Pop_ProdConso>(mOp); };
     void forward() override;
 };
 
 enum class PopAttr { ForwardStep };
 
 class Pop_Op : public OperatorTensor,
-    public Registrable<Pop_Op, std::string, std::unique_ptr<OperatorImpl>(const Pop_Op&)> {
+    public Registrable<Pop_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Pop_Op&)>> {
 public:
     static const std::string Type;
 
@@ -59,6 +65,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
     void updateConsummerProducer() override;
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index eaf4297fd8b3751463a20ae219af5c25ecd789ae..f6762dd33088f486184bdfd0a5b8dbdbd0c641da 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -25,7 +25,7 @@
 namespace Aidge {
 
 class Pow_Op : public OperatorTensor,
-    public Registrable<Pow_Op, std::string, std::shared_ptr<OperatorImpl>(const Pow_Op&)> {
+    public Registrable<Pow_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pow_Op&)>> {
 public:
     static const std::string Type;
 
@@ -57,6 +57,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input_1", "data_input_2"};
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 257a6965be4c08735f23ae575ffe104bb706593a..115ddcb5549b1c0daa01b3ab67946655cda7287c 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -30,8 +30,8 @@ enum class ProdAttr { Constant };
 
 class Producer_Op
     : public OperatorTensor,
-      public Registrable<Producer_Op, std::string, std::shared_ptr<OperatorImpl>(
-                                          const Producer_Op &)> {
+      public Registrable<Producer_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(
+                                          const Producer_Op &)>> {
 public:
     static const std::string Type;
 
@@ -89,6 +89,7 @@ public:
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
@@ -103,7 +104,7 @@ public:
     void forward() override final;
 
     void backward() override final {
-        // fmt::print("Basic Producer backward() function.\n");
+        Log::debug("Basic Producer backward() function.");
     }
 
     void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const override;
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index cc714c4619a0f8eee7af03993700fed7489a6c0e..9b264c1d3d7955f71538dd90f105cfd7ee469d0a 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class ReLU_Op : public OperatorTensor,
-    public Registrable<ReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const ReLU_Op&)> {
+    public Registrable<ReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>> {
 public:
     static const std::string Type;
 
@@ -46,6 +46,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 07beb0a39a88254f0aecdda35cd63f5d338af532..5d5895a8fb279f1efa5c6321614199f44402b83a 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -26,10 +26,13 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ReduceMeanAttr { Axes, KeepDims };
+enum class ReduceMeanAttr { Axes, KeepDims, NoopWithEmptyAxes };
 
+/**
+ * @brief Reduces the given axes by replacing them with the mean of the values along those axes.
+ */
 class ReduceMean_Op : public OperatorTensor,
-                public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)> {
+                public Registrable<ReduceMean_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>> {
 
 public:
     static const std::string Type;
@@ -37,7 +40,8 @@ public:
 private:
     using Attributes_ = StaticAttributes<ReduceMeanAttr,
                                             std::vector<std::int32_t>,
-                                            DimSize_t>;
+                                            bool,
+                                            bool>;
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
@@ -45,7 +49,15 @@ private:
 public:
     ReduceMean_Op() = delete;
 
-    ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims);
+    /**
+     * @brief Constructor for the ReduceMean operator.
+     * @param[in] axes axes over which to perform the reduction
+     * @param[in] keep_dims if true, the reduced axes are kept with a dimension of 1;
+     * if false, they are removed entirely
+     * @param[in] noop_with_empty_axes only used when no axes are provided: if true, the operator does nothing;
+     * if false, the reduction is applied over all axes
+     */
+    ReduceMean_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -62,10 +74,12 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::Axes>(); }
-    inline DimSize_t& keepDims() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::KeepDims>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::KeepDims>(); }
+    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::NoopWithEmptyAxes>(); }
 
 
     static const std::vector<std::string> getInputsName() {
@@ -74,6 +88,8 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+    virtual ~ReduceMean_Op() noexcept;
 };
 
 /**
@@ -85,15 +101,30 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> Node containing the Operator.
  */
+
+
+// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+// template <DimSize_t DIM>
+// inline std::shared_ptr<Node> ReduceMean(
+//     std::int32_t const (&axes)[DIM],
+//     DimSize_t keep_dims = 1,
+//     const std::string& name = "") {
+//     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
+//     return ReduceMean(to_array(axes), keep_dims, name);
+// }
+
+// template <DimIdx_t DIM>
+// const std::string ReduceMean_Op::Type = "ReduceMean";
 std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
-                                        DimSize_t keep_dims=1,
+                                        bool keep_dims=true,
+                                        bool noop_with_empty_axes=false,
                                         const std::string& name = "");
 
 }  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"axes", "keep_dims"};
+const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
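A hedged usage sketch of the updated `ReduceMean` helper declared above, showing the two boolean attributes that replace the old `DimSize_t keep_dims`; the dimension comments only restate the documented semantics.

```cpp
#include <memory>

#include "aidge/operator/ReduceMean.hpp"

int main() {
    // Mean over axes 2 and 3, keeping them as size-1 dimensions
    // (e.g. a tensor of dims (N, C, H, W) becomes (N, C, 1, 1)).
    auto keep = Aidge::ReduceMean({2, 3}, /*keep_dims=*/true);

    // Same reduction, but the reduced axes are erased: (N, C, H, W) -> (N, C).
    auto drop = Aidge::ReduceMean({2, 3}, /*keep_dims=*/false);

    // Empty axes list with noop_with_empty_axes set: the node does nothing.
    auto noop = Aidge::ReduceMean({}, true, /*noop_with_empty_axes=*/true, "noop_mean");
    return 0;
}
```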
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..bae03cb7d2e3ac855537eb22e54bf706ec0e0b4a
--- /dev/null
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -0,0 +1,136 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_REDUCESUM_H_
+#define AIDGE_CORE_OPERATOR_REDUCESUM_H_
+
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ReduceSumAttr { Axes, KeepDims, NoopWithEmptyAxes };
+
+
+/**
+ * @brief Reduces the given axes by replacing them with the sum of the values along those axes.
+ */
+class ReduceSum_Op : public OperatorTensor,
+                public Registrable<ReduceSum_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceSum_Op &)>> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<ReduceSumAttr,
+                                            std::vector<std::int32_t>,
+                                            bool,
+                                            bool>;
+    template <ReduceSumAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    ReduceSum_Op() = delete;
+
+    /**
+     * @brief Constructor for the ReduceSum operator.
+     * @param[in] axes axes over which to perform the reduction
+     * @param[in] keep_dims if true, the reduced axes are kept with a dimension of 1;
+     * if false, they are removed entirely
+     * @param[in] noop_with_empty_axes only used when no axes are provided: if true, the operator does nothing;
+     * if false, the reduction is applied over all axes
+     */
+    ReduceSum_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ReduceSumAttr::Axes>(axes),
+            attr<ReduceSumAttr::KeepDims>(keep_dims),
+            attr<ReduceSumAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ReduceSum_Op(const ReduceSum_Op& op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ReduceSum_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ReduceSum_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ReduceSum_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::Axes>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::KeepDims>(); }
+    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::NoopWithEmptyAxes>(); }
+
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+/**
+ * @brief Compute the sum of a Tensor over the provided axes. The reduced
+ * dimensions can either be kept (with size 1) or erased.
+ *
+ * @param axes Dimensions over which the sum should be computed.
+ * @param keep_dims If true, reduced dimensions are kept with size 1; if false, they are erased.
+ * @param noop_with_empty_axes If true and no axes are provided, the operator does nothing; if false, the sum is taken over all axes.
+ * @param name Name of the Operator.
+ * @return std::shared_ptr<Node> Node containing the Operator.
+ */
+inline std::shared_ptr<Node> ReduceSum(const std::vector<std::int32_t> &axes={},
+                                        bool keep_dims=true,
+                                        bool noop_with_empty_axes=false,
+                                        const std::string& name = "") {
+    // FIXME: properly handle default w&b initialization in every cases
+    AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceSum, not supported");
+    return std::make_shared<Node>(std::make_shared<ReduceSum_Op>(axes, keep_dims, noop_with_empty_axes), name);
+
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_REDUCESUM_H_ */
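Likewise, a short sketch of the new `ReduceSum` helper with its defaulted arguments; the dimension comments merely restate the documented semantics.

```cpp
#include <memory>

#include "aidge/operator/ReduceSum.hpp"

int main() {
    // Sum over a single axis, erasing it: dims (N, C, L) -> (N, C).
    auto sumLastAxis = Aidge::ReduceSum({2}, /*keep_dims=*/false);

    // Default arguments: empty axes with noop_with_empty_axes == false,
    // so the sum is taken over all axes.
    auto sumAll = Aidge::ReduceSum();

    // Empty axes with noop_with_empty_axes == true: the node passes data through unchanged.
    auto passThrough = Aidge::ReduceSum({}, true, /*noop_with_empty_axes=*/true, "noop_sum");
    return 0;
}
```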
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 5bd9b3e8d56c106803bf65dc7bf595da85558a1a..721b964d3ff4cd87121d43e8719a8fde1445761b 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -32,7 +32,7 @@ public:
 enum class ReshapeAttr { Shape, AllowZero };
 
 class Reshape_Op : public OperatorTensor,
-                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)> {
+                   public Registrable<Reshape_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Reshape_Op&)>> {
 
 public:
     static const std::string Type;
@@ -47,7 +47,7 @@ private:
 public:
     Reshape_Op() = delete;
 
-    Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero);
+    Reshape_Op(const std::vector<std::int64_t>& shape = {}, bool allowzero = false);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -65,6 +65,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::vector<std::int64_t>& shape() const { return mAttributes->template getAttr<ReshapeAttr::Shape>(); }
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 622a1ff1b191aad9f3f8045380be522d32cf2845..a48b95aff2a18750d83f12a62c408ad41b20afee 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -25,7 +25,7 @@
 namespace Aidge {
 
 class Resize_Op : public OperatorTensor,
-                  public Registrable<Resize_Op, std::string, std::shared_ptr<OperatorImpl>(const Resize_Op&)>{
+                  public Registrable<Resize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Resize_Op&)>>{
 
 public:
     static const std::string Type;
@@ -49,6 +49,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         //  roi, scales, sizes, even if considered as const parameters/input
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 311dc0202d866253bb98285e77e6d6ea8b345e0f..4ef39f63a2f9af34cd3fe28b01cf2fc195bdfc6e 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -30,7 +30,7 @@ enum class ScalingAttr {
 
 class Scaling_Op
     : public OperatorTensor,
-      public Registrable<Scaling_Op, std::string, std::shared_ptr<OperatorImpl>(const Scaling_Op&)> {
+      public Registrable<Scaling_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Scaling_Op&)>> {
 public:
     static const std::string Type;
 
@@ -57,6 +57,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline float& scalingFactor() const noexcept { return mAttributes -> getAttr<ScalingAttr::ScalingFactor>(); }
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index d76a9fd069ebbda81e446e6f3486ff0ff66755bb..cfd43fa0dd5a064ee21eafc2d0f50c12ad6e3272 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -36,7 +36,7 @@ enum class ShapeAttr { Start, End };
 class Shape_Op : public OperatorTensor,
                 public Registrable<Shape_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Shape_Op&)> {
+                                   std::function<std::shared_ptr<OperatorImpl>(const Shape_Op&)>> {
 
 public:
     static const std::string Type;
@@ -66,6 +66,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::int64_t& start() const noexcept { return mAttributes -> getAttr<ShapeAttr::Start>(); }
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
index 4d3000750c2224aaea278beca4c8124e0845042e..30f1d71e0a56d92a70830a5def81040e0c5a186c 100644
--- a/include/aidge/operator/ShiftGELU.hpp
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -7,7 +7,7 @@
  *
  * SPDX-License-Identifier: EPL-2.0
  * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
- * Date: 25.06.2024
+ * Date: 10.09.2024
  *
  ********************************************************************************/
 
@@ -28,7 +28,7 @@
 namespace Aidge {
 
 class ShiftGELU_Op : public OperatorTensor,
-    public Registrable<ShiftGELU_Op, std::string, std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)> {
+    public Registrable<ShiftGELU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)>> {
 public:
     static const std::string Type;
 
@@ -48,6 +48,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
index d75e6559f5f4df9a1010d65ba97529e6165ae42f..9fbd81aedef1eb640a7ce805d745297edb640560 100644
--- a/include/aidge/operator/ShiftMax.hpp
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -7,7 +7,7 @@
  *
  * SPDX-License-Identifier: EPL-2.0
  * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
- * Date: 25.06.2024
+ * Date: 10.09.2024
  *
  ********************************************************************************/
 
@@ -28,7 +28,7 @@
 namespace Aidge {
 
 class ShiftMax_Op : public OperatorTensor,
-    public Registrable<ShiftMax_Op, std::string, std::shared_ptr<OperatorImpl>(const ShiftMax_Op&)> {
+    public Registrable<ShiftMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftMax_Op&)>> {
 public:
     static const std::string Type;
 
@@ -48,6 +48,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index b3204240cd130251fe8abe7d50bdad9b92b7558c..24bc3321673f4dcffd3e3663f7e0a0e584389492 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class Sigmoid_Op : public OperatorTensor,
-    public Registrable<Sigmoid_Op, std::string, std::unique_ptr<OperatorImpl>(const Sigmoid_Op&)> {
+    public Registrable<Sigmoid_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Sigmoid_Op&)>> {
 public:
     static const std::string Type;
 
@@ -37,6 +37,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 241e165a0e441ccb856431225ce1d6fd170a25f8..811402420df170c011e478148cf646e6c585cc84 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -29,7 +29,7 @@ enum class SliceAttr { Starts, Ends, Axes, Steps };
 
 class Slice_Op
     : public OperatorTensor,
-      public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)> {
+      public Registrable<Slice_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Slice_Op &)>> {
 public:
     static const std::string Type;
 
@@ -69,6 +69,7 @@ public:
     bool forwardDims(bool allowDataDependency = true) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::vector<std::int64_t>& starts() const noexcept { return mAttributes -> getAttr<SliceAttr::Starts>(); }
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index c221a67e31fc6de1bcb2c727854c8ebee2986ee4..72ea56dd6293e416ddcca12ac38fd57d76071354 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -29,7 +29,7 @@ enum class SoftmaxAttr { Axis };
 class Softmax_Op : public OperatorTensor,
                 public Registrable<Softmax_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Softmax_Op&)> {
+                                   std::function<std::shared_ptr<OperatorImpl>(const Softmax_Op&)>> {
 
 public:
     static const std::string Type;
@@ -57,6 +57,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 661f9e32d47c7fb7e0c111805a50c6fcc131cffe..8c3a111c42dfeb2b4e27269839e41f3b362bdda3 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -34,7 +34,7 @@ enum class SplitAttr { Axis, Split };
 
 class Split_Op
     : public OperatorTensor,
-      public Registrable<Split_Op, std::string, std::shared_ptr<OperatorImpl>(const Split_Op &)> {
+      public Registrable<Split_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Split_Op &)>> {
 
 public:
     static const std::string Type;
@@ -68,6 +68,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::int8_t& axis() const { return mAttributes->template getAttr<SplitAttr::Axis>(); }
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index ce4aaafc92d1f7d601946c02d4eb025eb735a3f9..4858cdcd164d6be0582ddabe67c780461a9667aa 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -26,7 +26,7 @@ namespace Aidge {
 class Sqrt_Op : public OperatorTensor,
                 public Registrable<Sqrt_Op,
                                 std::string,
-                                std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
+                                std::function<std::shared_ptr<OperatorImpl>(const Sqrt_Op&)>> {
 public:
     static const std::string Type;
 
@@ -45,6 +45,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..64a775eb4209ecad0e29decd8336ebb77bbe652f
--- /dev/null
+++ b/include/aidge/operator/Squeeze.hpp
@@ -0,0 +1,160 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SQUEEZE_H_
+#define AIDGE_CORE_OPERATOR_SQUEEZE_H_
+
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <limits>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+/**
+ * @brief Implementation of the Squeeze operator.
+ * @note Since this implementation is backend-agnostic, it is located here
+ * instead of in aidge_backend_cpu/cuda.
+ */
+class Squeeze_OpImpl : public OperatorImpl {
+public:
+  Squeeze_OpImpl(const Operator &op, const std::string &backend = "")
+      : OperatorImpl(op, backend) {}
+  void forward() override;
+};
+
+enum class SqueezeAttr {
+  /**
+   * @brief axes to squeeze, if left empty all 1 sized
+   * dimensions will be removed.
+   */
+  Axes
+};
+
+/**
+ * @brief Removes size-1 (dummy) dimensions at the given axes.
+ * input#0 : Tensor to squeeze
+ * input#1 (optional) : 1D tensor that lists the axes to squeeze
+ * @note The axes to squeeze can be given either via attribute or via input #1;
+ * for simplicity, the examples below pass them via attribute.
+ * @example Calling squeeze(1) on a tensor of dimensions (2,1,3,4) will result
+ * in a tensor of dims (2,3,4).
+ * @example Calling squeeze(1) on a tensor of dimensions (1,2,3,4) will result
+ * in a tensor of dims (1,2,3,4), since axis 1 is not of size 1.
+ * @example Calling squeeze() with no argument will remove every 1-sized
+ * dimension in the tensor.
+ */
+class Squeeze_Op
+    : public OperatorTensor,
+      public Registrable<Squeeze_Op, std::string,
+                         std::function<std::shared_ptr<OperatorImpl>(const Squeeze_Op &)>> {
+
+public:
+  static const std::string
+      Type; // name of the type of the operation (Here "Squeeze")
+
+private:
+  using Attributes_ = StaticAttributes<SqueezeAttr, std::vector<int8_t>>;
+  template <SqueezeAttr e> using attr = typename Attributes_::template attr<e>;
+  const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+  /**
+   * @brief Constructor for the Squeeze operator.
+   * @param[in] axes axes along which to squeeze (size-1 dimensions to remove)
+   */
+  Squeeze_Op(const std::vector<int8_t> &axes = {})
+      : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData},
+                       1),
+        mAttributes(
+            std::make_shared<Attributes_>(attr<SqueezeAttr::Axes>(axes))) {
+    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
+  }
+
+  /**
+   * @brief Copy-constructor. Copy the operator attributes and its output
+   * tensor(s), but not its input tensors (the new operator has no input
+   * associated).
+   * @param op Operator to copy.
+   */
+  Squeeze_Op(const Squeeze_Op &op)
+      : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (!op.backend().empty()) {
+      SET_IMPL_MACRO(Squeeze_Op, *this, op.backend());
+    } else {
+      mImpl = std::make_shared<Squeeze_OpImpl>(*this);
+    }
+  }
+
+  /**
+   * @brief Clone the operator using its copy-constructor.
+   * @see Operator::Squeeze_Op
+   */
+  std::shared_ptr<Operator> clone() const override final {
+    return std::make_shared<Squeeze_Op>(*this);
+  }
+
+  /**
+   * @brief Compute dimensions for the output Tensor
+   */
+  bool forwardDims(bool allowDataDependency = false) override final;
+  bool dimsForwarded() const override final;
+
+  void setBackend(const std::string &name,
+                  DeviceIdx_t device = 0) override final;
+  std::set<std::string> getAvailableBackends() const override;
+
+  inline std::shared_ptr<Attributes> attributes() const override {
+    return mAttributes;
+  }
+
+  /**
+   * @brief axes to squeeze, if left empty all 1 sized
+   * dimensions will be removed.
+   */
+  inline std::vector<int8_t> &axes() const noexcept {
+    return mAttributes->template getAttr<SqueezeAttr::Axes>();
+  }
+
+  static const std::vector<std::string> getInputsName() {
+    return {"data_input", "axes_to_squeeze"};
+  }
+  static const std::vector<std::string> getOutputsName() {
+    return {"squeezed"};
+  }
+};
+
+// helper to create a Squeeze Node; the axes are forwarded as an attribute
+// to the underlying Squeeze_Op
+inline std::shared_ptr<Node> Squeeze(const std::vector<int8_t> axes = {},
+                                     const std::string &name = "") {
+  return std::make_shared<Node>(std::make_shared<Squeeze_Op>(axes), name);
+}
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"Axes"};
+}
+
+#endif // AIDGE_CORE_OPERATOR_SQUEEZE_H_
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index bb29ba67851bce8eed46ab1d4df3cf7a8ce91a1a..170baf6fd0f38668f64cbd36044c856fae261737 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -25,7 +25,7 @@
 namespace Aidge {
 
 class Sub_Op : public OperatorTensor,
-    public Registrable<Sub_Op, std::string, std::shared_ptr<OperatorImpl>(const Sub_Op&)> {
+    public Registrable<Sub_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Sub_Op&)>> {
 public:
     static const std::string Type;
 
@@ -48,6 +48,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_1", "data_input_2"};
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index fd05bf7c434ec2547995800f47380c53585ca6d7..f1a30e3f08ce3886cc1ca39a55a3b23979a47860 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -24,7 +24,7 @@
 namespace Aidge {
 
 class Tanh_Op : public OperatorTensor,
-    public Registrable<Tanh_Op, std::string, std::unique_ptr<OperatorImpl>(const Tanh_Op&)> {
+    public Registrable<Tanh_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Tanh_Op&)>> {
 public:
     static const std::string Type;
 
@@ -44,6 +44,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 375d6e098324516b750f8054f9214390373737e2..155627f2cfd3173ccfbbe2a1ce8c23784cd06d71 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -36,7 +36,7 @@ public:
 enum class TransposeAttr { OutputDimsOrder };
 
 class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)> {
+                public Registrable<Transpose_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Transpose_Op&)>> {
 
 public:
     static const std::string Type;
@@ -67,6 +67,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes -> getAttr<TransposeAttr::OutputDimsOrder>(); }
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index 3fda7c21405ef023f4324089e60be0330b5f34b6..09a689528a6814eca6bb56ef326e2da527f14843 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -41,7 +41,7 @@ enum class UnfoldAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class Unfold_Op : public OperatorTensor,
-                public Registrable<Unfold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM> &)> {
+                public Registrable<Unfold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM> &)>> {
 
 public:
     static const std::string Type;
@@ -77,6 +77,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<UnfoldAttr::StrideDims>(); }
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c0710540576959b62bbdf235ff6ea15f9d18cacd
--- /dev/null
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -0,0 +1,158 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
+#define AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+/**
+ * @brief implementation of the operator unsqueeze.
+ * @note Since this operator implementation is agnostic to the backend it is
+ * located here instead of in aidge_backend_cpu/cuda.
+ */
+class Unsqueeze_OpImpl : public OperatorImpl {
+public:
+  Unsqueeze_OpImpl(const Operator &op, const std::string &backend = "")
+      : OperatorImpl(op, backend) {}
+  void forward() override;
+};
+
+enum class UnsqueezeAttr {
+  /**
+   * @brief Vector of axes to unsqueeze.
+   * Values must lie within [ -a ; a-1 ],
+   * with a = input_tensor.nbDim() + dims_to_unsqueeze.size()
+   */
+  Axes
+};
+
+/**
+ * @brief This operator inserts a dummy (1-sized) dimension at the given
+ * axis. Unsqueezing the 2nd dim of a tensor of dim (1,2,3,4) will result in a
+ * tensor of dim (1,2,1,3,4).
+ * You can also unsqueeze dimensions whose index is higher than the number of
+ * input dimensions, as long as:
+ * dims_to_unsqueeze[i] < tensor.nbDim() + dims_to_unsqueeze.size()
+ */
+class Unsqueeze_Op
+    : public OperatorTensor,
+      public Registrable<Unsqueeze_Op, std::string,
+                         std::function<std::shared_ptr<OperatorImpl>(const Unsqueeze_Op &)>> {
+
+public:
+  static const std::string
+      Type; // name of the type of the operation (Here "Unsqueeze")
+
+private:
+  using Attributes_ = StaticAttributes<UnsqueezeAttr, std::vector<int8_t>>;
+  template <UnsqueezeAttr e>
+  using attr = typename Attributes_::template attr<e>;
+  const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+  Unsqueeze_Op() =
+      delete; // no default constructor since this class has attributes
+
+  /**
+   * @brief Constructor for the Unsqueeze operator.
+   * @param[in] axes axes at which the new 1-sized dimensions are inserted
+   */
+  Unsqueeze_Op(const std::vector<int8_t> &axes)
+      : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData},
+                       1),
+        mAttributes(
+            std::make_shared<Attributes_>(attr<UnsqueezeAttr::Axes>(axes))) {
+    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
+  }
+
+  /**
+   * @brief Copy-constructor. Copy the operator attributes and its output
+   * tensor(s), but not its input tensors (the new operator has no input
+   * associated).
+   * @param op Operator to copy.
+   */
+  Unsqueeze_Op(const Unsqueeze_Op &op)
+      : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (!op.backend().empty()) {
+      SET_IMPL_MACRO(Unsqueeze_Op, *this, op.backend());
+    } else {
+      mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
+    }
+  }
+
+  /**
+   * @brief Clone the operator using its copy-constructor.
+   * @see Unsqueeze_Op
+   */
+  std::shared_ptr<Operator> clone() const override final {
+    return std::make_shared<Unsqueeze_Op>(*this);
+  }
+
+  /**
+   * @brief Compute dimensions for the output Tensor
+   */
+  bool forwardDims(bool allowDataDependency = false) override final;
+  bool dimsForwarded() const override final;
+
+  void setBackend(const std::string &name,
+                  DeviceIdx_t device = 0) override final;
+  std::set<std::string> getAvailableBackends() const override;
+
+  inline std::shared_ptr<Attributes> attributes() const override {
+    return mAttributes;
+  }
+  /**
+   * @brief Vector of axes to unsqueeze.
+   * Values must lie within [ -a ; a-1 ],
+   * with a = input_tensor.nbDim() + dims_to_unsqueeze.size()
+   */
+  inline std::vector<int8_t> &axes() const noexcept {
+    return mAttributes->template getAttr<UnsqueezeAttr::Axes>();
+  }
+
+  static const std::vector<std::string> getInputsName() {
+    return {"data_input", "axes_to_unsqueeze"};
+  }
+  static const std::vector<std::string> getOutputsName() {
+    return {"unsqueezed"};
+  }
+};
+
+// helper function to create an Unsqueeze node with its operator
+inline std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
+                                       const std::string &name = "") {
+  return std::make_shared<Node>(std::make_shared<Unsqueeze_Op>(axes), name);
+}
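+
+// Illustrative usage sketch (hypothetical node name, not part of this header):
+//   auto unsqueezeDim2 = Unsqueeze({2}, "unsqueeze_dim2"); // (1,2,3,4) -> (1,2,1,3,4)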
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"Axes"};
+}
+
+#endif // AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index c42b285dacb6c59c5fa30388c268f1680152a5e0..a9b9213e914811ccff7d1e6d8efe4fdd8a505b87 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -50,6 +50,13 @@ void matMulToFC(std::shared_ptr<GraphView> graphView);
  */
 size_t removeNode(std::shared_ptr<GraphView> graphView, const std::string& type, bool incProducers = false);
 
+/**
+ * @brief Fuses constant => Generic | constantOfShape and transforms it into a Producer
+ * @param graph_view Graph to manipulate
+ * @return size_t Number of replacements
+ */
+size_t removeConstantOfShape(std::shared_ptr<GraphView> graph_view);
+
 /**
  * @brief Remove ``Dropout`` Node.
  *
@@ -143,6 +150,13 @@ size_t fuseToMetaOps(std::shared_ptr<GraphView> graph, const std::string& query,
 */
 size_t convToMatMul(std::shared_ptr<GraphView> graph);
 
+/**
+ * @brief Adapt a graph to the available kernels of a backend.
+ * 
+ * @param graph Graph to manipulate
+ */
+void adaptToBackend(std::shared_ptr<GraphView> graph);
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_RECIPES_H_ */
diff --git a/include/aidge/scheduler/MemoryManager.hpp b/include/aidge/scheduler/MemoryManager.hpp
index 94add56e8afdebb8e42f7ae49a32da2aeed9e9cb..2e397d1dbaa1cc8d8f586d15363cbd2245963152 100644
--- a/include/aidge/scheduler/MemoryManager.hpp
+++ b/include/aidge/scheduler/MemoryManager.hpp
@@ -19,6 +19,25 @@
 #include "aidge/graph/Node.hpp"
 
 namespace Aidge {
+/**
+ * @brief The MemoryManager can be used to generate an optimized static memory 
+ * layout for a computing graph in a global memory space.
+ * There are some assumptions:
+ * - A MemoryManager represents a single global memory space, filled with 
+ *   contiguous, non-overlapping MemorySpace chunks.
+ * - A MemorySpace contains one or multiple MemoryPlane, each MemoryPlane
+ *   corresponding to the allocation of a specific Tensor. When a Tensor can re-
+ *   use the memory of the preceding one (for in-place or partially in-place
+ *   operators), multiple overlapping MemoryPlane can be created in the same 
+ *   MemorySpace (remember, MemorySpace **cannot** be overlapping!).
+ * - A MemoryPlane is tailored for handling (N)HWC data with two properties:
+ *   - Possibility of wrapping: on the H axis (each W*C block is contiguous).
+ *   - Possibility of concatenation: on the C axis (C1+C2+...+Cn).
+ * - All the sizes and offsets specified in a MemoryManager are expressed in
+ *   number of data elements, or **words**, meaning currently a uniform data 
+ *   precision is expected in a MemoryManager (for instance, if the precision is
+ *   16-bits, each data element will be 2 bytes, which will be the size of a word).
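+ *
+ * For instance (illustrative arithmetic only): with a 16-bit precision, a
+ * MemorySpace of size 100 spans 100 words, i.e. 200 bytes.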
+ */
 class MemoryManager {
 public:
     typedef int Clock_T;
@@ -45,18 +64,45 @@ public:
             allocated(clock_),
             released(-1) {}
 
+        /// Offset of the MemorySpace in the MemoryManager global memory space (in words)
         unsigned int offset;
+        /// Size of the MemorySpace (in words)
         unsigned int size;
         std::set<std::shared_ptr<Node> > dependencies;
         Clock_T allocated;
         Clock_T released;
     };
 
-    // MemoryPlane belongs to a MemorySpace. Any number of potentially
-    // overlapping planes can be associated to a MemorySpace.
-    // MemoryPlane can be non-contiguous (in case of stride, or wrapping, when
-    // offset + size > memSpace.size).
-    // MemoryPlane cannot be re-arranged inside a MemorySpace.
+    /**
+     * @brief MemoryPlane belongs to a MemorySpace. Any number of potentially
+     * overlapping planes can be associated to a MemorySpace.
+     * MemoryPlane can be non-contiguous (in case of stride, or wrapping, when
+     * offset + size > memSpace.size).
+     * MemoryPlane cannot be re-arranged inside a MemorySpace.
+     * 
+     * A MemoryPlane is tailored for handling (N)HWC data with two properties:
+     * - Possibility of wrapping: on the H axis (each W*C block is contiguous).
+     * - Possibility of concatenation: on the C axis (C1+C2+...+Cn).
+     * 
+     * Detail of (N)HWC data handling:
+     * - \p length is the size of contiguous and non-breakable memory line (W in HWC);
+     * - \p count is the number of memory lines of size \p length constituting a memory block (H in HWC);
+     * - \p stride is the number of channels, or memory blocks, *in total*, 
+     *   of \p count lines of size \p length (C in NHWC);
+     * - \p size is the number of channels, or memory blocks, *in this MemoryPlane*,
+     *   of \p count lines of size \p length.
+     *   In the case of concatenation, there can be multiple overlapping MemoryPlane
+     *   with different size, like NHWC = NHW(C1+C2):
+     *   - MemoryPlane#1: \p size = C1 and \p stride = C=C1+C2
+     *   - MemoryPlane#2: \p size = C2 and \p stride = C=C1+C2
+     *                    (with an additional relative offset of +C1)
+     * In this mode, wrapping can only occur on the H (\p count) axis. W*C chunks
+     * are guaranteed to be contiguous (\p length * \p stride).
+     * 
+     * By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning
+     * there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**.
+     * In this case, \p size is the total size of the MemoryPlane (H*W*C, in words).
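+     *
+     * Illustrative numeric example (assumed values): for an HWC plane with
+     * \p count = 2 (H), \p length = 4 (W) and \p stride = \p size = 3 (C),
+     * getSize() = stride * length * count = 24 words and
+     * getUsefulSize() = size * length * count = 24 words (no concatenation).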
+     */
     struct MemoryPlane {
         MemoryPlane(std::shared_ptr<MemorySpace> memSpace_,
                     Clock_T clock_,
@@ -92,36 +138,91 @@ public:
                 <= memSpace->offset + memSpace->size);
         }
 
+        /**
+         * @brief Get the total size of the MemoryPlane, including the stride.
+         * 
+         * @return unsigned int Total size in words
+         */
         inline unsigned int getSize() const {
             return stride * length * count;
         }
 
+        /**
+         * @brief Get the useful size of the MemoryPlane, as if its memory blocks
+         * were contiguous, without stride.
+         * 
+         * @return unsigned int Useful size in words
+         */
         inline unsigned int getUsefulSize() const {
             return size * length * count;
         }
 
+        /**
+         * @brief Get the absolute offset of the beginning of the memory plane.
+         * 
+         * @return unsigned int Contiguous offset in words
+         */
         inline unsigned int getContiguousOffset() const {
             return memSpace->offset + offset;
         }
 
+        /**
+         * @brief Get the size of the contiguous part of the memory plane, from
+         * its beginning to the limit of the MemorySpace size.
+         * If the MemoryPlane fills the MemorySpace without wrapping, the contiguous
+         * size will be the same as the total size of the MemoryPlane.
+         * 
+         * @return unsigned int Contiguous size in words
+         */
         inline unsigned int getContiguousSize() const {
             return std::min(getSize(), getLimit());
         }
 
+        /**
+         * @brief Get the absolute offset of the wrapped part of the memory plane.
+         * Since the wrapped part of the memory plane begins at the beginning of
+         * the MemorySpace, the returned offset is always the same as the MemorySpace
+         * offset.
+         * 
+         * @return unsigned int Wrapped offset in words
+         */
         inline unsigned int getWrappedOffset() const {
             return memSpace->offset;
         }
 
+        /**
+         * @brief Get the size of the wrapped part of the memory plane, from
+         * the beginning of the MemorySpace to the total size of the MemoryPlane,
+         * including the stride.
+         * If the MemoryPlane fills the MemorySpace without wrapping, the wrapped
+         * size will be 0.
+         * 
+         * @return unsigned int Wrapped size in words
+         */
         inline unsigned int getWrappedSize() const {
             return getSize() - getContiguousSize();
         }
 
+        /**
+         * @brief Get the absolute offset after the end of the memory plane (if it
+         * is wrapped, the offset will correspond to the end of the wrapped part).
+         * The word at the final offset is not included in the MemoryPlane.
+         * 
+         * @return unsigned int Final offset in words
+         */
         inline unsigned int getFinalOffset() const {
             return (getWrappedSize() > 0)
                 ? getWrappedOffset() + getWrappedSize()
                 : getContiguousOffset() + getContiguousSize();
         }
 
+        /**
+         * @brief Get the absolute offset after the end of the contiguous part
+         * of the memory plane.
+         * The word at the upper offset is not included in the MemoryPlane.
+         * 
+         * @return unsigned int Upper offset in words
+         */
         inline unsigned int getUpperOffset() const {
             return (getContiguousOffset() + getContiguousSize());
         }
@@ -146,10 +247,29 @@ public:
 
         std::shared_ptr<MemorySpace> memSpace;
         Clock_T allocated;
+        /// Relative offset of the MemoryPlane in the MemorySpace (in words)
         unsigned int offset;
+        /// Number of channels, or memory blocks, *in this MemoryPlane*,
+        /// of \p count lines of size \p length.
+        /// In the case of concatenation, there can be multiple overlapping MemoryPlane
+        /// with different size, like NHWC = NHW(C1+C2):
+        /// - MemoryPlane#1: \p size = C1 and \p stride = C=C1+C2
+        /// - MemoryPlane#2: \p size = C2 and \p stride = C=C1+C2
+        ///                  (with an additional relative offset of +C1)
+        /// By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning
+        /// there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**.
+        /// In this case, \p size is the total size of the MemoryPlane (H*W*C, in words).
         unsigned int size;
+        /// Number of channels, or memory blocks *in total*,
+        /// of \p count lines of size \p length (the C in NHWC).
+        /// There should be C blocks of H*W size.
         unsigned int stride;
+        /// Size of an elementary, contiguous and non-breakable, memory line 
+        /// (the W in NHWC), in words. A MemoryPlane wrapping cannot occur in
+        /// the middle of a memory line.
         unsigned int length;
+        /// Number of memory lines of size \p length constituting a memory block
+        /// (the H in NHWC). The size of a memory block is H*W.
         unsigned int count;
     };
 
diff --git a/include/aidge/scheduler/ProdConso.hpp b/include/aidge/scheduler/ProdConso.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a7c0ed5ae73d1f891744e835f0da5ad14a37f850
--- /dev/null
+++ b/include/aidge/scheduler/ProdConso.hpp
@@ -0,0 +1,89 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_SCHEDULER_PRODCONSO_H_
+#define AIDGE_SCHEDULER_PRODCONSO_H_
+
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Types.h"
+#include "aidge/data/Elts.hpp"
+
+namespace Aidge {
+class Operator;
+
+class ProdConso {
+public:
+    ProdConso(const Operator& op, bool inPlace = false);
+
+    static std::unique_ptr<ProdConso> defaultModel(const Operator& op) {
+        return std::make_unique<ProdConso>(op, false);
+    }
+
+    static std::unique_ptr<ProdConso> inPlaceModel(const Operator& op) {
+        return std::make_unique<ProdConso>(op, true);
+    }
+
+    /**
+     * @brief Minimum amount of data from a specific input required by the
+     * implementation to be run.
+     *
+     * @param inputIdx Index of the input analysed.
+     * @return Elts_t
+     */
+    virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
+
+    // Amount of input data that cannot be overwritten during the execution.
+    virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
+
+    // Memory required at an output for a given input size.
+    virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
+
+    /**
+     * @brief Total amount of consumed data from a specific input.
+     *
+     * @param inputIdx Index of the input analysed.
+     * @return Elts_t
+     */
+    virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
+
+    /**
+     * @brief Total amount of produced data ready to be used on a specific output.
+     *
+     * @param outputIdx Index of the output analysed.
+     * @return Elts_t
+     */
+    virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
+
+    /**
+     * @brief Update the Consumer-Producer system by simulating the consumption and production of I/O.
+     *
+     */
+    virtual void updateConsummerProducer();
+
+    /**
+     * @brief Reset the Consumer-Producer system.
+     *
+     */
+    virtual void resetConsummerProducer();
+
+    virtual ~ProdConso() = default;
+
+protected:
+    const Operator &mOp;
+    const bool mInPlace;
+    std::vector<Elts_t> mNbConsumedData;
+    std::vector<Elts_t> mNbProducedData;
+};
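+
+// Illustrative sketch (hypothetical names): a backend implementation may refine
+// the producer-consumer model by deriving from ProdConso and overriding the
+// virtual getters, e.g.:
+//   class MyProdConso : public ProdConso {
+//   public:
+//       using ProdConso::ProdConso;
+//       Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+//   };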
+} // namespace Aidge
+
+#endif /* AIDGE_SCHEDULER_PRODCONSO_H_ */
diff --git a/include/aidge/stimuli/Stimulus.hpp b/include/aidge/stimuli/Stimulus.hpp
index 80e7c76d4857f577f30b90588f4c3998be80bdb8..3def790b65f441c567e5d43150f465233cb64557 100644
--- a/include/aidge/stimuli/Stimulus.hpp
+++ b/include/aidge/stimuli/Stimulus.hpp
@@ -26,7 +26,7 @@ namespace Aidge {
  * @brief Stimulus. A class wrapping a data sample. Stimulus has two functioning modes. The first mode enables to load data samples from a dataPath and optionnaly store the data in-memory. The second mode enables to store a data sample that was already loaded in memory.
  * @details When Stimulus is used in the first mode, the loading function is determined automaticaly based on the backend and the file extension.
  */
-class Stimulus : public Registrable<Stimulus, std::tuple<std::string, std::string>, std::unique_ptr<StimulusImpl>(const std::string&)> {
+class Stimulus : public Registrable<Stimulus, std::tuple<std::string, std::string>, std::function<std::unique_ptr<StimulusImpl>(const std::string&)>> {
 private:
     /// Stimulus data path
     const std::string mDataPath;
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index 7dce3d327d42de15dc2589788b4643742ed1a463..cf71ed0b5953fa1759e04c66311d3d829a603a01 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -14,6 +14,9 @@
 
 #include <string>
 #include <set>
+#include <map>
+
+#include "aidge/utils/future_std/any.hpp"
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -63,6 +66,8 @@ public:
     */
     virtual std::set<std::string> getAttrsName() const = 0;
 
+    virtual std::map<std::string, future_std::any> getAttrs() const = 0;
+
 #ifdef PYBIND
     virtual bool hasAttrPy(const std::string& name) const = 0;
 
@@ -84,6 +89,7 @@ public:
     virtual py::dict dict() const = 0;
 
 #endif
+
     virtual ~Attributes() {}
 };
 }
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index cf7f048dbe5999f433277c46e4e3cb9798c43674..04ed58f7e636d6a0d528f1946ead110857312576 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -18,6 +18,7 @@
 #include <typeinfo>
 #include <cassert>
 #include <string>
+#include <typeindex>
 
 #include "aidge/utils/future_std/any.hpp"
 #include "aidge/utils/Attributes.hpp"
@@ -38,6 +39,9 @@ namespace Aidge {
 ///\todo managing complex types or excluding non-trivial, non-aggregate types
 class DynamicAttributes : public Attributes {
 public:
+    DynamicAttributes() = default;
+    DynamicAttributes(const std::map<std::string, future_std::any>& attrs): mAttrs(attrs) {}
+
     /**
      * \brief Returning an Attribute identified by its name
      * \tparam T expected Attribute type
@@ -48,6 +52,22 @@ public:
      */
     template<class T> const T& getAttr(const std::string& name) const
     {
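+        // Record a comparison function for type T so that attributes stored as
+        // future_std::any values remain comparable (see the future_std::any
+        // operator< declared at the end of this header).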
+        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
+            [](const future_std::any& lhs, const future_std::any& rhs) {
+#ifdef PYBIND
+                if (lhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
+                }
+                else if (rhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
+                }
+                else
+#endif
+                {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
+                }
+            }));
+
         const auto dot = name.find('.');
         if (dot == name.npos) {
 #ifdef PYBIND
@@ -83,6 +103,22 @@ public:
     ///\param value Attribute value
     template<class T> void addAttr(const std::string& name, const T& value)
     {
+        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
+            [](const future_std::any& lhs, const future_std::any& rhs) {
+#ifdef PYBIND
+                if (lhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
+                }
+                else if (rhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
+                }
+                else
+#endif
+                {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
+                }
+            }));
+
         const auto dot = name.find('.');
         if (dot == name.npos) {
             const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
@@ -111,6 +147,22 @@ public:
     ///\param value Attribute value
     template<class T> void setAttr(const std::string& name, const T& value)
     {
+        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
+            [](const future_std::any& lhs, const future_std::any& rhs) {
+#ifdef PYBIND
+                if (lhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
+                }
+                else if (rhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
+                }
+                else
+#endif
+                {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
+                }
+            }));
+
         const auto dot = name.find('.');
         if (dot == name.npos) {
             auto res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
@@ -328,8 +380,45 @@ public:
     };
 #endif
 
+    future_std::any getAny(const std::string& name) const
+    {
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+#ifdef PYBIND
+            // If attribute does not exist in C++, it might have been created or modified in Python
+            auto it = mAttrs.find(name);
+            if (it == mAttrs.end()) {
+                auto itPy = mAttrsPy.find(name);
+                if (itPy != mAttrsPy.end()) {
+                    // Attribute exists in Python, but its type is not known
+                    // Return a std::any of py::object, which will be comparable
+                    mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(py::object),
+                        [](const future_std::any& lhs, const future_std::any& rhs) {
+                            return (future_std::any_cast<py::object>(lhs) < future_std::any_cast<py::object>(rhs));
+                        }));
+
+                    return future_std::any(itPy->second);
+                }
+            }
+#endif
+
+            return mAttrs.at(name);
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            return future_std::any_cast<const DynamicAttributes&>(mAttrs.at(ns)).getAny(nsName);
+        }
+    }
+
+    std::map<std::string, future_std::any> getAttrs() const override {
+        return mAttrs;
+    }
+
     virtual ~DynamicAttributes() {}
 
+    friend bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs);
+
 private:
 #ifdef PYBIND
     // Stores C++ attributes (copy) and Python-only attributes
@@ -345,8 +434,19 @@ private:
 #else
     std::map<std::string, future_std::any> mAttrs;
 #endif
+
+public:
+    // Stores the comparison function for each attribute type ever used
+    static std::map<std::type_index, bool(*)(const future_std::any&, const future_std::any&)> mAnyCompare;
 };
 
+inline bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs) {
+    return (lhs.mAttrs < rhs.mAttrs);
+}
+}
+
+namespace future_std {
+bool operator<(const future_std::any& lhs, const future_std::any& rhs);
 }
 
 #endif /* AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_ */
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 872c3f6b5a258292c41428852580210ab32decbf..0468ae2616997c306bbd475fe6eb73cc033b0bcc 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -23,7 +23,7 @@
 
 #include <functional>
 #include <map>
-#include <vector>
+#include <set>
 
 namespace Aidge {
 #ifdef PYBIND
@@ -37,21 +37,21 @@ template <class DerivedClass, class Key, class Func> // curiously rucurring temp
 class Registrable {
 public:
     typedef Key registrar_key;
-    typedef std::function<Func> registrar_type;
+    typedef Func registrar_type;
 
-    static std::map<Key, std::function<Func>>& registry()
+    static std::map<Key, Func>& registry()
     {
         #ifdef PYBIND
         #define _CRT_SECURE_NO_WARNINGS
         if (Py_IsInitialized()){
             std::string name = std::string("registrar_")+typeid(Registrable<DerivedClass, Key, Func>).name();
-            static auto shared_data = reinterpret_cast<std::map<Key, std::function<Func>> *>(py::get_shared_data(name));
+            static auto shared_data = reinterpret_cast<std::map<Key, Func> *>(py::get_shared_data(name));
             if (!shared_data)
-                shared_data = static_cast<std::map<Key, std::function<Func>> *>(py::set_shared_data(name, new std::map<Key, std::function<Func>>()));
+                shared_data = static_cast<std::map<Key, Func> *>(py::set_shared_data(name, new std::map<Key, Func>()));
             return *shared_data;
         }
         #endif // PYBIND
-        static std::map<Key, std::function<Func>> rMap;
+        static std::map<Key, Func> rMap;
         return rMap;
     }
 
@@ -77,12 +77,12 @@ struct Registrar {
 
     static auto create(const registrar_key& key) {
         AIDGE_ASSERT(exists(key), "missing or invalid registrar key: {} for registrable object {}\nDid you include/import the corresponding module?\nIf so, it is possible that the object is not yet supported.", key, typeid(C).name());
-        return C::registry()[key];
+        return C::registry().at(key);
     }
-    static std::vector<registrar_key> getKeys(){
-        std::vector<registrar_key> keys;
+    static std::set<registrar_key> getKeys(){
+        std::set<registrar_key> keys;
         for(const auto& keyValue : C::registry())
-            keys.push_back(keyValue.first);
+            keys.insert(keyValue.first);
         return keys;
     }
 };
@@ -101,11 +101,14 @@ template <class C>
 void declare_registrable(py::module& m, const std::string& class_name){
     typedef typename C::registrar_key registrar_key;
     typedef typename C::registrar_type registrar_type;
-    m.def(("register_"+ class_name).c_str(), [](registrar_key& key, registrar_type function){
+    m.def(("register_"+ class_name).c_str(), [](const registrar_key& key, registrar_type function){
         Registrar<C>(key, function);
     })
     .def(("get_keys_"+ class_name).c_str(), [](){
         return Registrar<C>::getKeys();
+    })
+    .def(("get_key_value_"+ class_name).c_str(), [](const registrar_key& key){
+        return Registrar<C>::create(key);
     });
 }
 #endif
@@ -141,4 +144,13 @@ void declare_registrable(py::module& m, const std::string& class_name){
 
 }
 
+#define CONCAT(a, b) CONCAT_INNER(a, b)
+#define CONCAT_INNER(a, b) a ## b
+
+#define REGISTRAR(cls, ...) \
+    namespace { \
+    static Registrar<cls> CONCAT(CONCAT(aidge_register_ , cls), __COUNTER__)(__VA_ARGS__); \
+    } \
+    static_assert(true, "")
+
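+// Illustrative usage sketch (hypothetical names): REGISTRAR declares a
+// uniquely-named static Registrar<cls> instance, forwarding its arguments to
+// the Registrar constructor, e.g.:
+//   REGISTRAR(MyOp, "my_backend", myOpImplCreatorFunction);
+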
 #endif //AIDGE_CORE_UTILS_REGISTRAR_H_
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 3bb41b5bb0d9c2727d95a2656a1a2d5b96ff950b..414381891ce52046ee7c2df5b82a17e1314773cd 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -158,7 +158,11 @@ public:
                 std::enable_if_t<(SIZE > 0), bool> = true>
     constexpr const std::type_info& getAttrType(std::size_t i) const {
         if (i == SIZE-1) {
-            return typeid(typename std::tuple_element<SIZE-1,std::tuple<T...>>::type);
+            // Workaround for NVCC from 12.2.1 to 12.4.1
+            // error: no suitable constructor exists to convert from "const char *" to "std::type_info"
+            typename std::tuple_element<SIZE-1,std::tuple<T...>>::type dummy{};
+            return typeid(dummy);
+            //return typeid(typename std::tuple_element<SIZE-1,std::tuple<T...>>::type);
         }
         else {
             return getAttrType<SIZE-1>(i);
@@ -175,6 +179,12 @@ public:
         return mAttrs;
     }
 
+    virtual std::map<std::string, future_std::any> getAttrs() const override {
+        std::map<std::string, future_std::any> attrs;
+        appendAttr(mAttrs, attrs);
+        return attrs;
+    }
+
     //////////////////////////////////////
     ///     Generic Attributes API
     //////////////////////////////////////
@@ -319,6 +329,15 @@ private:
 
         return false;
     }
+    
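+    // Compile-time recursion over the attribute tuple: copies each attribute
+    // into a name -> future_std::any map (used by getAttrs() above).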
+    template<std::size_t I = 0, typename... Tp>
+    inline typename std::enable_if<I == sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& /*t*/, std::map<std::string, future_std::any>& /*attrs*/) const {}
+
+    template<std::size_t I = 0, typename... Tp>
+    inline typename std::enable_if<I < sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& t, std::map<std::string, future_std::any>& attrs) const {
+        attrs.insert(std::make_pair(EnumStrings<ATTRS_ENUM>::data[I], future_std::any(std::get<I>(t))));
+        appendAttr<I + 1, Tp...>(t, attrs);
+    }
 
     std::tuple<T...> mAttrs;
 };
diff --git a/pyproject.toml b/pyproject.toml
index b820759982252b69790cde89c500e3b11f9a52da..b838aca5ee100d182ba88b79f23f3a2ebff9acf3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -23,7 +23,7 @@ test = [
 requires = [
     "setuptools>=64",
     "setuptools_scm[toml]==7.1.0",
-    "cmake>=3.15.3.post1"
+    "cmake>=3.18.4.post1"
 ]
 build-backend = "setuptools.build_meta"
 
@@ -36,8 +36,10 @@ include = [ # package names should match these glob patterns (["*"] by default)
     "aidge_core*"
 ]
 exclude = [ # exclude packages matching these glob patterns (empty by default)
-    "aidge_core.unit_tests",
-    "aidge_core.unit_tests.static"
+    ".unit_tests",
+    ".unit_tests.static",
+    ".aidge_export_aidge.__pycache__",
+    ".aidge_export_aidge.utils.__pycache__",
 ] 
 
 # SETUPTOOLS_SCM
@@ -49,15 +51,15 @@ write_to = "aidge_core/_version.py"
 [tool.cibuildwheel]
 build-frontend = "build"
 test-requires = "pytest"
-# FIXME: The ignored export test requires a to build the generated export via cmake.
-# However due to a strange bug I haven't been able to properly link Python::Module to the export target
-# Resulting in the need to link Python::Python which is the python interpreter.
-# This suppresses the issue but sadly this target is not available on the cibuilwheel image.
-# Hence the test is ignored. If you want to try and solve this bug go on. 
-# Just take care to increment the counter just below.
-# 
-# Work time spent on this bug : 24h
-test-command = "pytest --ignore={package}/aidge_core/unit_tests/test_export.py {package}/aidge_core/unit_tests"
+# WARNING: in the test suite the `test_export.py` used to be skipped
+# because it did not build when the python embedded interpreter is not available
+# as it is the case for cibuildwheel containers.
+# Now the build system takes care of this and skips the generation of a standalone
+# executable when it is not possible.
+# The root causes for this conditional build are that 1. the python embedded interpreter
+# is not always available, and 2. the aidge_core library depends on it as of now.
+# Hopefully this latter dependency may be removed in the future, simplifying the build.
+test-command = "pytest -v --capture=no {package}/aidge_core/unit_tests"
 # uncomment to run cibuildwheel locally on selected distros
 # build=[
 # "cp38-manylinux_x86_64",
diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp
index 6a83805fc1af2e111dd1c9f49c669e0c2f9422aa..04172c3ff68641a9fe0d14f9a326cd17e7002912 100644
--- a/python_binding/backend/pybind_OperatorImpl.cpp
+++ b/python_binding/backend/pybind_OperatorImpl.cpp
@@ -13,6 +13,7 @@
 #include <pybind11/stl.h>
 #include <string>
 
+#include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 
@@ -31,102 +32,70 @@ public:
         PYBIND11_OVERRIDE(
             void,
             OperatorImpl,
-            forward,
+            forward
 
         );
     }
+
     void backward() override {
         PYBIND11_OVERRIDE(
             void,
             OperatorImpl,
-            backward,
+            backward
 
         );
     }
-    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override {
-        PYBIND11_OVERRIDE_NAME(
-            Elts_t,
-            OperatorImpl,
-            "get_nb_required_data",
-            getNbRequiredData,
-            inputIdx
-        );
-    }
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override {
-        PYBIND11_OVERRIDE_NAME(
-            Elts_t,
-            OperatorImpl,
-            "get_nb_required_protected",
-            getNbRequiredProtected,
-            inputIdx
 
-        );
-    }
-    Elts_t getRequiredMemory(const IOIndex_t outputIdx,
-    const std::vector<DimSize_t> &inputsSize) const override {
+    std::shared_ptr<ProdConso> getProdConso() const override {
         PYBIND11_OVERRIDE_NAME(
-            Elts_t,
+            std::shared_ptr<ProdConso>,
             OperatorImpl,
-            "get_required_memory",
-            getRequiredMemory,
-            outputIdx,
-            inputsSize
-
+            "get_prod_conso",
+            getProdConso
         );
     }
-    Elts_t getNbConsumedData(const IOIndex_t inputIdx) const override {
-        PYBIND11_OVERRIDE_NAME(
-            Elts_t,
-            OperatorImpl,
-            "get_nb_consumed_data",
-            getNbConsumedData,
-            inputIdx
 
-        );
-    }
-    Elts_t getNbProducedData(const IOIndex_t outputIdx) const override {
+    std::set<ImplSpec> getAvailableImplSpecs() const noexcept override {
         PYBIND11_OVERRIDE_NAME(
-            Elts_t,
+            std::set<ImplSpec>,
             OperatorImpl,
-            "get_nb_produced_data",
-            getNbProducedData,
-            outputIdx
-
+            "get_available_impl_specs",
+            getAvailableImplSpecs
         );
     }
-    void updateConsummerProducer() override {
-        PYBIND11_OVERRIDE_NAME(
-            void,
-            OperatorImpl,
-            "update_consummer_producer",
-            updateConsummerProducer,
-
-        );
-    }
-    void resetConsummerProducer() override {
-        PYBIND11_OVERRIDE_NAME(
-            void,
-            OperatorImpl,
-            "reset_consummer_producer",
-            resetConsummerProducer,
+};
 
-        );
-    }
+// See https://pybind11.readthedocs.io/en/stable/advanced/classes.html#binding-protected-member-functions
+class OperatorImpl_Publicist : public OperatorImpl {
+public:
+    using OperatorImpl::getProdConso;
+    using OperatorImpl::getAvailableImplSpecs;
 };
 
 void init_OperatorImpl(py::module& m){
+    py::class_<ImplSpec::IOSpec>(m, "IOSpec")
+    .def(py::init<DataType, DataFormat, const std::vector<std::pair<int, int>>&>(), py::arg("type"), py::arg("format") = DataFormat::Any, py::arg("dims") = std::vector<std::pair<int, int>>{})
+    ;
+
+    py::class_<ImplSpec>(m, "ImplSpec")
+    .def(py::init<const DynamicAttributes&>(), py::arg("attr") = DynamicAttributes())
+    .def(py::init<const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("io"), py::arg("attr") = DynamicAttributes())
+    .def(py::init<const ImplSpec::IOSpec&, const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("i"), py::arg("o"), py::arg("attr") = DynamicAttributes())
+    ;
 
     py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr())
     .def(py::init<const Operator&, const std::string&>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>())
     .def("forward", &OperatorImpl::forward)
     .def("backward", &OperatorImpl::backward)
-    .def("get_nb_required_data", &OperatorImpl::getNbRequiredData)
-    .def("get_nb_required_protected", &OperatorImpl::getNbRequiredProtected)
-    .def("get_required_memory", &OperatorImpl::getRequiredMemory)
-    .def("get_nb_consumed_data", &OperatorImpl::getNbConsumedData)
-    .def("get_nb_produced_data", &OperatorImpl::getNbProducedData)
-    .def("update_consummer_producer", &OperatorImpl::updateConsummerProducer)
-    .def("reset_consummer_producer", &OperatorImpl::resetConsummerProducer)
+    .def("prod_conso", &OperatorImpl::prodConso)
+    .def("backend", &OperatorImpl::backend)
+    .def("get_operator", &OperatorImpl::getOperator)
+    .def("get_required_spec", &OperatorImpl::getRequiredSpec)
+    .def("get_best_match", &OperatorImpl::getBestMatch)
+    .def("get_adaptation", &OperatorImpl::getAdaptation)
+    .def("get_best_adaptation", &OperatorImpl::getBestAdaptation)
+    .def("get_prod_conso", &OperatorImpl_Publicist::getProdConso)
+    .def("get_available_impl_specs", &OperatorImpl_Publicist::getAvailableImplSpecs)
     ;
 }
 }
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index 1d4eae0776b66a16e6472a51661b22fe281e6f6b..e91f345d7974cb06aa7aec9e27300b9cf9230985 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -16,46 +16,51 @@
 namespace py = pybind11;
 namespace Aidge {
 
-void init_Data(py::module& m){
-    // Define enumeration names for python as lowercase dtype name
-    // This defined enum names compatible with basic numpy dtype
+template <class T>
+void bindEnum(py::module& m, const std::string& name) {
+    // Define enumeration names for python as lowercase type name
+    // This defines enum names compatible with basic numpy type
     // name such as: float32, flot64, [u]int32, [u]int64, ...
-    auto python_enum_name = [](const DataType& dtype) {
+    auto python_enum_name = [](const T& type) {
         auto str_lower = [](std::string& str) {
             std::transform(str.begin(), str.end(), str.begin(),
                            [](unsigned char c){
                                return std::tolower(c);
                            });
         };
-        auto dtype_name = std::string(Aidge::format_as(dtype));
-        str_lower(dtype_name);
-        return dtype_name;
+        auto type_name = std::string(Aidge::format_as(type));
+        str_lower(type_name);
+        return type_name;
     };
-    // Auto generate enumeration names from lowercase dtype strings
+    // Auto generate enumeration names from lowercase type strings
     std::vector<std::string> enum_names;
-    for (auto dtype_str : EnumStrings<Aidge::DataType>::data) {
-        auto dtype = static_cast<DataType>(enum_names.size());
-        auto enum_name = python_enum_name(dtype);
+    for (auto type_str : EnumStrings<T>::data) {
+        auto type = static_cast<T>(enum_names.size());
+        auto enum_name = python_enum_name(type);
         enum_names.push_back(enum_name);
     }
 
-    // Define python side enumeration aidge_core.dtype
-    auto e_dtype = py::enum_<DataType>(m, "dtype");
+    // Define python side enumeration aidge_core.type
+    auto e_type = py::enum_<T>(m, name.c_str());
 
     // Add enum value for each enum name
     for (std::size_t idx = 0; idx < enum_names.size(); idx++) {
-        e_dtype.value(enum_names[idx].c_str(), static_cast<DataType>(idx));
+        e_type.value(enum_names[idx].c_str(), static_cast<T>(idx));
     }
 
     // Define str() to return the bare enum name value, it allows
-    // to compare directly for instance str(tensor.dtype())
-    // with str(nparray.dtype)
-    e_dtype.def("__str__", [enum_names](const DataType& dtype) {
-        return enum_names[static_cast<int>(dtype)];
+    // to compare directly for instance str(tensor.type())
+    // with str(nparray.type)
+    e_type.def("__str__", [enum_names](const T& type) {
+        return enum_names[static_cast<int>(type)];
     }, py::prepend());;
+}
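+
+// Illustrative consequence of the binding above (assumed value name): from Python,
+// str(aidge_core.dtype.float32) is expected to return "float32", directly
+// comparable with str(numpy_array.dtype).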
 
-    py::class_<Data, std::shared_ptr<Data>>(m,"Data");
+void init_Data(py::module& m){
+    bindEnum<DataType>(m, "dtype");
+    bindEnum<DataFormat>(m, "dformat");
 
+    py::class_<Data, std::shared_ptr<Data>>(m,"Data");
 
 }
 }
diff --git a/python_binding/data/pybind_DataProvider.cpp b/python_binding/data/pybind_DataProvider.cpp
index 2f652aff5008f8008952ffb1bb6fb1738021b436..77abd1f39bb4d5375d2fc57c5bd5595e79f135fb 100644
--- a/python_binding/data/pybind_DataProvider.cpp
+++ b/python_binding/data/pybind_DataProvider.cpp
@@ -27,7 +27,7 @@ std::vector<std::shared_ptr<Aidge::Tensor>> DataProvider::next() {
 void init_DataProvider(py::module& m){
 
     py::class_<DataProvider, std::shared_ptr<DataProvider>>(m, "DataProvider")
-        .def(py::init<Database&, std::size_t, bool, bool>(), py::arg("database"), py::arg("batch_size"), py::arg("shuffle"), py::arg("drop_last"))
+        .def(py::init<Database&, std::size_t, std::string, bool, bool>(), py::arg("database"), py::arg("batch_size"), py::arg("backend") = "cpu", py::arg("shuffle") = false, py::arg("drop_last") = false)
         .def("__iter__", &DataProvider::iter)
         .def("__next__", &DataProvider::next)
         .def("__len__", &DataProvider::getNbBatch);
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index d4d6edc9ca4d51eabe0665352997f5d5469bff74..2feaa1f8b8ecd50e1f2570107af1e62fc4f1f457 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -25,7 +25,7 @@ namespace Aidge {
 
 using registrableTensor = Registrable<Tensor,
                                       std::tuple<std::string, DataType>,
-                                      std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>;
+                                      std::function<std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>;
 
 using pyTensorClass = py::class_<Tensor,
                                  std::shared_ptr<Tensor>,
@@ -257,7 +257,7 @@ static void addScalarCtor(pyTensorClass& mTensor) {
     // though it is not merged: https://github.com/pybind/pybind11/pull/3544/.
     // Hence we use some helper functions defined above to try matching the different numpy scalar types.
     mTensor.def(py::init([](py::object obj,
-                            const std::string backend="cpu") {
+                            const std::string& backend = "cpu") {
         NativeValue native_val;
         DataType native_dtype;
         bool found = getScalarNativeVal(obj, &native_val, &native_dtype);
@@ -283,7 +283,7 @@ static void addScalarCtor(pyTensorClass& mTensor) {
 template<typename T>
 void addArrayCtor(pyTensorClass& mTensor) {
     mTensor.def(py::init([](const py::array_t<T, py::array::c_style|py::array::forcecast> b,
-                            const std::string backend = "cpu") {
+                            const std::string& backend = "cpu") {
         /* Request a buffer descriptor from Python */
         py::buffer_info info = b.request();
         Tensor* newTensor = new Tensor();
@@ -322,6 +322,7 @@ void init_Tensor(py::module& m){
     .def("grad", &Tensor::grad)
     .def("set_grad", &Tensor::setGrad)
     .def("dtype", &Tensor::dataType)
+    .def("dformat", &Tensor::dataFormat)
     .def("size", &Tensor::size)
     .def("capacity", &Tensor::capacity)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize, py::arg("dims"), py::arg("strides") = std::vector<DimSize_t>())
@@ -584,5 +585,6 @@ void init_Tensor(py::module& m){
     // Handles python scalars and numpy scalars with a single overload
     addScalarCtor(pyClassTensor);
 
+    declare_registrable<Tensor>(m, "Tensor");
 }
 }
diff --git a/python_binding/data/pybind_TensorImpl.cpp b/python_binding/data/pybind_TensorImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4c664274ec2c33174f51dad34ba1591c323b2d87
--- /dev/null
+++ b/python_binding/data/pybind_TensorImpl.cpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <pybind11/operators.h>
+#include <pybind11/numpy.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/TensorImpl.hpp"
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_TensorImpl(py::module& m){
+  py::class_<TensorImpl, std::shared_ptr<TensorImpl>>(m, "TensorImpl");
+
+  py::class_<TensorImpl_cpu<double>, std::shared_ptr<TensorImpl_cpu<double>>, TensorImpl>(m, "TensorImpl_cpu_float64")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+    
+  py::class_<TensorImpl_cpu<float>, std::shared_ptr<TensorImpl_cpu<float>>, TensorImpl>(m, "TensorImpl_cpu_float32")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<half_float::half>, std::shared_ptr<TensorImpl_cpu<half_float::half>>, TensorImpl>(m, "TensorImpl_cpu_float16")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<int64_t>, std::shared_ptr<TensorImpl_cpu<int64_t>>, TensorImpl>(m, "TensorImpl_cpu_int64")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<int32_t>, std::shared_ptr<TensorImpl_cpu<int32_t>>, TensorImpl>(m, "TensorImpl_cpu_int32")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<int16_t>, std::shared_ptr<TensorImpl_cpu<int16_t>>, TensorImpl>(m, "TensorImpl_cpu_int16")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<int8_t>, std::shared_ptr<TensorImpl_cpu<int8_t>>, TensorImpl>(m, "TensorImpl_cpu_int8")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<uint64_t>, std::shared_ptr<TensorImpl_cpu<uint64_t>>, TensorImpl>(m, "TensorImpl_cpu_uint64")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<uint32_t>, std::shared_ptr<TensorImpl_cpu<uint32_t>>, TensorImpl>(m, "TensorImpl_cpu_uint32")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<uint16_t>, std::shared_ptr<TensorImpl_cpu<uint16_t>>, TensorImpl>(m, "TensorImpl_cpu_uint16")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<uint8_t>, std::shared_ptr<TensorImpl_cpu<uint8_t>>, TensorImpl>(m, "TensorImpl_cpu_uint8")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+}
+}
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 2930383817d1555d51b8bddd8eff6402240e905a..cd9b2a16f92a4e7ccd2a0f2f17e605a6b049c752 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -130,6 +130,10 @@ void init_GraphView(py::module& m) {
           .def("__call__", &GraphView::operator(), py::arg("connectors"))
           .def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
           .def("set_backend", &GraphView::setBackend, py::arg("backend"), py::arg("device") = 0)
+          .def("get_ordered_nodes", &GraphView::getOrderedNodes, py::arg("reversed") = false,
+               R"mydelimiter(
+               Get ordered nodes for the graph view
+               )mydelimiter")
           //   .def("__getitem__", [](Tensor& b, size_t idx)-> py::object {
           //      // TODO : Should return error if backend not compatible with get
           //      if (idx >= b.size()) throw py::index_error();
@@ -142,6 +146,9 @@ void init_GraphView(py::module& m) {
           //                return py::none();
           //           }
           //      })
+          .def("get_ranked_nodes", &GraphView::getRankedNodes)
+          .def("set_dataformat", &GraphView::setDataFormat, py::arg("dataformat"))
+          
             ;
 
      m.def("get_connected_graph_view", &getConnectedGraphView);
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 1fa552ce153b2b0f655ca9f38d1d80f62390184b..d8e77bb259cbcbae7940a09dc405bb8f50b5b79b 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -48,6 +48,16 @@ void init_Node(py::module& m) {
     :rtype: str
     )mydelimiter")
 
+    .def("create_unique_name", &Node::createUniqueName, py::arg("base_name"), 
+    R"mydelimiter(
+    Given a base name, generate a new name which is unique in all the GraphViews containing this node.
+
+    :param base_name: proposed name for the node.
+    :type base_name: str
+    :rtype: str
+    )mydelimiter")
+
+
     .def("__repr__", &Node::repr)
 
     .def("add_child",
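
A small illustrative sketch for the new `create_unique_name` binding. It assumes the node already belongs to at least one GraphView (built here with `sequential`, bound elsewhere in this patch); the base name is arbitrary.

```python
import aidge_core

relu = aidge_core.ReLU(name="relu")
graph = aidge_core.sequential([relu, aidge_core.Tanh(name="tanh")])

# Propose "relu" again and get back a name unique in every GraphView containing the node.
print(relu.create_unique_name("relu"))
```
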
diff --git a/python_binding/graph/pybind_OpArgs.cpp b/python_binding/graph/pybind_OpArgs.cpp
index 6ea89f91945ac44f2142c5b9e8440b11ec6a1663..a129ca51c27367ceb1f7518ca85afe134e98cc4a 100644
--- a/python_binding/graph/pybind_OpArgs.cpp
+++ b/python_binding/graph/pybind_OpArgs.cpp
@@ -31,9 +31,9 @@ void init_OpArgs(py::module& m){
     py::implicitly_convertible<Node, OpArgs>();
     py::implicitly_convertible<GraphView, OpArgs>();
 
-    m.def("sequential", &Sequential, py::arg("inputs"));
-    m.def("parallel", &Parallel, py::arg("inputs"));
-    m.def("residual", &Residual, py::arg("inputs"));
+    m.def("sequential", &Sequential, py::arg("inputs"), py::arg("name") =  "");
+    m.def("parallel", &Parallel, py::arg("inputs"), py::arg("name") =  "");
+    m.def("residual", &Residual, py::arg("inputs"), py::arg("name") =  "");
 
 }
 }
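
With the added `name` argument, the topology helpers can label the GraphView they return. A hedged sketch, as shown below; whether a given list of nodes forms a valid sequential/parallel/residual assembly still depends on the operators' input and output counts.

```python
import aidge_core

body = aidge_core.sequential([aidge_core.ReLU(name="relu0"),
                              aidge_core.Sqrt(name="sqrt0")], name="body")
branches = aidge_core.parallel([aidge_core.ReLU(), aidge_core.Tanh()], name="branches")
skip = aidge_core.residual([aidge_core.ReLU(), aidge_core.ReLU()], name="skip")
```
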
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 103e7c1e4db6e197a1dac959a25d266e031d3e55..8a00a1cb4a419f1125411b5b1c823bf91570d62e 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -24,7 +24,8 @@ void declare_Add(py::module &m) {
   py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
     .def(py::init<const IOIndex_t>(), py::arg("nb_inputs"))
     .def_static("get_inputs_name", &Add_Op::getInputsName)
-    .def_static("get_outputs_name", &Add_Op::getOutputsName);
+    .def_static("get_outputs_name", &Add_Op::getOutputsName)
+    .def_readonly_static("Type", &Add_Op::Type);
 
   declare_registrable<Add_Op>(m, "AddOp");
 
diff --git a/python_binding/operator/pybind_And.cpp b/python_binding/operator/pybind_And.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..08dddfc8168bb77086a3dd72aca45b110a4cbce9
--- /dev/null
+++ b/python_binding/operator/pybind_And.cpp
@@ -0,0 +1,34 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/And.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_And(py::module& m) {
+    py::class_<And_Op, std::shared_ptr<And_Op>, OperatorTensor>(m, "AndOp", py::multiple_inheritance(),
+          R"mydelimiter( Initialize an And operator.)mydelimiter")
+    .def(py::init<>())
+    .def_static("get_inputs_name", &And_Op::getInputsName)
+    .def_static("get_outputs_name", &And_Op::getOutputsName);
+    declare_registrable<And_Op>(m, "AndOp");
+    m.def("And", &And, py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing an And operator.
+			:param name: name of the node.
+		)mydelimiter");
+}
+}  // namespace Aidge
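
A short usage sketch for the new `And` binding; only node creation is shown, wiring the two inputs is out of scope here.

```python
import aidge_core

and_node = aidge_core.And(name="and0")
print(aidge_core.AndOp.get_inputs_name())   # static helpers bound above
print(aidge_core.AndOp.get_outputs_name())
```
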
diff --git a/python_binding/operator/pybind_ArgMax.cpp b/python_binding/operator/pybind_ArgMax.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3de54afd7a669347cc2b272cff9b87cf152be09a
--- /dev/null
+++ b/python_binding/operator/pybind_ArgMax.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <array>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/ArgMax.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_ArgMax(py::module &m) {
+  const std::string pyClassName("ArgMaxOp");
+  py::class_<ArgMax_Op, std::shared_ptr<ArgMax_Op>, OperatorTensor>(
+    m, pyClassName.c_str(), py::multiple_inheritance(),
+      R"mydelimiter(
+		Initialize an ArgMax operator.
+			:param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axis: int
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param select_last_index: If True, selects the last index if there are multiple occurrences 
+									of the max value. If False (default), selects the first occurrence.
+			:type select_last_index: bool
+		)mydelimiter")
+    .def(py::init<std::int32_t, bool, bool>(), py::arg("axis"), py::arg("keep_dims"), py::arg("select_last_index"))
+    .def_static("get_inputs_name", &ArgMax_Op::getInputsName)
+    .def_static("get_outputs_name", &ArgMax_Op::getOutputsName)
+    ;
+  declare_registrable<ArgMax_Op>(m, pyClassName);
+
+  m.def("ArgMax", [](std::int32_t axis,
+                    bool keepDims,
+                    bool selectLastIndex,
+                    const std::string& name) {
+        return ArgMax(axis, keepDims, selectLastIndex, name);
+    }, py::arg("axis") = 0,
+       py::arg("keep_dims") = true,
+       py::arg("select_last_index") = false,
+       py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing an ArgMax operator.
+			:param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axis: int
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param select_last_index: If True, selects the last index if there are multiple occurrences
+									of the max value. If False (default), selects the first occurrence.
+			:type select_last_index: bool
+			:param name: name of the node.
+		)mydelimiter");
+}
+} // namespace Aidge
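
A hedged usage sketch for the new `ArgMax` bindings, using the arguments documented above; the node name is arbitrary.

```python
import aidge_core

# Node factory: reduce along axis 1, keep it as a size-1 dimension, take the first occurrence.
argmax = aidge_core.ArgMax(axis=1, keep_dims=True, select_last_index=False, name="argmax0")

# The operator class can also be instantiated directly.
op = aidge_core.ArgMaxOp(axis=1, keep_dims=True, select_last_index=False)
```
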
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 0587554b722c99d009a248ce963f80cb4fd892ec..b98a642111402050fd3cba6dd8a12b11a3bbde8a 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -45,7 +45,8 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
             py::arg("kernel_dims"),
             py::arg("stride_dims") = create_array<DimSize_t,DIM>(1))
     .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
-    .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName);
+    .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
+    .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type);
 
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 42e31de2c7c8ba440cd8e479cf9285b398970b42..9a1bdacd169beebc843448d23bdaf8502de437b4 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -30,7 +30,8 @@ void declare_BatchNormOp(py::module& m) {
             py::arg("epsilon"),
             py::arg("momentum"))
         .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName);
+        .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
+        .def_readonly_static("Type", &BatchNorm_Op<DIM>::Type);
 
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_BitShift.cpp b/python_binding/operator/pybind_BitShift.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b4f6c90e54e781b011459be6e8e6e252e7347b00
--- /dev/null
+++ b/python_binding/operator/pybind_BitShift.cpp
@@ -0,0 +1,58 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include <string>
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/BitShift.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_BitShift(py::module &m) {
+    // Binding for BitShiftOp class
+    auto pyBitShiftOp = py::class_<BitShift_Op, std::shared_ptr<BitShift_Op>, OperatorTensor>(m, "BitShiftOp", py::multiple_inheritance(),R"mydelimiter(
+        BitShiftOp is a tensor operator that performs bitwise shifts on tensor elements.
+        This class allows shifting tensor values either to the left or right based on the 
+        specified direction. The direction can be accessed and controlled using the 
+        BitShiftDirection enum.
+        :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
+        :type direction: BitShiftDirection
+        :param name: name of the node.
+    )mydelimiter")
+        .def(py::init<BitShift_Op::BitShiftDirection>(), py::arg("direction"))
+        .def("direction", &BitShift_Op::direction, "Get the direction of the bit shift (left or right).")
+        .def_static("get_inputs_name", &BitShift_Op::getInputsName, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", &BitShift_Op::getOutputsName, "Get the names of the output tensors.");
+
+    // Enum binding under BitShiftOp class
+    py::enum_<BitShift_Op::BitShiftDirection>(pyBitShiftOp, "BitShiftDirection")
+        .value("Right", BitShift_Op::BitShiftDirection::right)
+        .value("Left", BitShift_Op::BitShiftDirection::left)
+        .export_values();
+
+    // Binding for the BitShift function
+    m.def("BitShift", &BitShift, py::arg("direction") = BitShift_Op::BitShiftDirection::right, py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing a BitShift operator, which performs bitwise shifts on tensor elements.
+        Values can be shifted either to the left or to the right, depending on the
+        specified direction. The direction is given and controlled using the
+        BitShiftDirection enum.
+        :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
+        :type direction: BitShiftDirection
+        :param name: name of the node.
+    )mydelimiter");
+}
+} // namespace Aidge
\ No newline at end of file
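
A minimal sketch of the new `BitShift` bindings, showing the enum nested under `BitShiftOp`:

```python
import aidge_core

direction = aidge_core.BitShiftOp.BitShiftDirection.Left
shift_node = aidge_core.BitShift(direction=direction, name="lshift0")

op = aidge_core.BitShiftOp(direction)
print(op.direction())   # expected to report the configured direction
```
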
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 9f02e04a41b20599a6cfe878f53db04c6d5bbe34..854f3783e9961bb5fd29746b88352438a43dd6e4 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -25,7 +25,8 @@ void init_Concat(py::module& m) {
                 py::arg("nb_inputs"),
                 py::arg("axis"))
         .def_static("get_inputs_name", &Concat_Op::getInputsName)
-        .def_static("get_outputs_name", &Concat_Op::getOutputsName);
+        .def_static("get_outputs_name", &Concat_Op::getOutputsName)
+        .def_readonly_static("Type", &Concat_Op::Type);
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
 
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b0d5ef2ef78380422ca1a137608f5289fa519aed
--- /dev/null
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -0,0 +1,44 @@
+
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/ConstantOfShape.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_ConstantOfShape(py::module &m) {
+  py::class_<ConstantOfShape_Op, std::shared_ptr<ConstantOfShape_Op>, OperatorTensor>(
+      m, "ConstantOfShapeOp", py::multiple_inheritance())
+      // Here we bind the methods of ConstantOfShape_Op that we want to expose.
+      .def("get_inputs_name", &ConstantOfShape_Op::getInputsName)
+      .def("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
+      .def("value", &ConstantOfShape_Op::value);
+  // Here we bind the constructor of the ConstantOfShape Node. We add an argument for
+  // each attribute of the operator (here we only have 'value') and the last
+  // argument is the node's name.
+  m.def("ConstantOfShape", &ConstantOfShape, py::arg("value") = Tensor(0.f),
+        py::arg("name") = "",
+        R"mydelimiter(
+    Initialize a node containing a ConstantOfShape operator.
+	:param value: tensor of a given datatype holding the value used to fill the output tensor.
+	:type value: :py:class:`Tensor`
+    :param name: name of the node.
+)mydelimiter");
+}
+} // namespace Aidge
+
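
A small sketch of the new `ConstantOfShape` binding. Only the default fill value (`Tensor(0.f)`, bound above) is used, since constructing a custom `aidge_core.Tensor` from Python is outside the scope of this file.

```python
import aidge_core

# The output shape is taken from the input tensor at run time; the fill value defaults to 0.
cos = aidge_core.ConstantOfShape(name="const_of_shape0")
```
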
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 61fb37e788021757fa6c3aced9a5f4c30fb60548..bc72825b2161d8733334817e095c251c788e7eba 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -45,6 +45,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
         .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
         .def("in_channels", &Conv_Op<DIM>::inChannels)
         .def("out_channels", &Conv_Op<DIM>::outChannels)
+        .def_readonly_static("Type", &Conv_Op<DIM>::Type)
         ;
 
   declare_registrable<Conv_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 080df1832bf92a9db9d26e1fa18b652dc70c2a42..377d0fca5d78dff20b8df0cc0d5521eb9a3685a2 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -39,7 +39,8 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         py::arg("dilation_dims"))
   .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
-  .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels);
+  .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels)
+  .def_readonly_static("Type", &ConvDepthWise_Op<DIM>::Type);
 
   declare_registrable<ConvDepthWise_Op<DIM>>(m, pyClassName);
   m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index 9dcb98a54596f32525d2880dd6e955d4643f6e7c..d2ad60725533be0b9db269ce5e022ac8560e1d91 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -22,7 +22,8 @@ void init_Div(py::module& m) {
     py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance())
         .def(py::init<>())
         .def_static("get_inputs_name", &Div_Op::getInputsName)
-        .def_static("get_outputs_name", &Div_Op::getOutputsName);
+        .def_static("get_outputs_name", &Div_Op::getOutputsName)
+        .def_readonly_static("Type", &Div_Op::Type);
     declare_registrable<Div_Op>(m, "DivOp");
     m.def("Div", &Div, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index c248753ca8de46293d49ce4dc614ae258c313256..6ca25f9569a53505385f37a02f3ab478a11f82a6 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -22,7 +22,8 @@ void init_Erf(py::module& m) {
     py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance())
         .def(py::init<>())
         .def_static("get_inputs_name", &Erf_Op::getInputsName)
-        .def_static("get_outputs_name", &Erf_Op::getOutputsName);
+        .def_static("get_outputs_name", &Erf_Op::getOutputsName)
+        .def_readonly_static("Type", &Erf_Op::Type);
 
     declare_registrable<Erf_Op>(m, "ErfOp");
 
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 9e0d61bc3a4d957e98db39577e120da5fe97ebea..2e9c41a16292d1e643415182d660b80105369d33 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -28,6 +28,7 @@ void declare_FC(py::module &m) {
     .def(py::init<>())
     .def_static("get_inputs_name", &FC_Op::getInputsName)
     .def_static("get_outputs_name", &FC_Op::getOutputsName)
+    .def_readonly_static("Type", &FC_Op::Type)
     .def("out_channels", &FC_Op::outChannels)
     // .def_property_readonly("a", &FC_Op::get_a)
     // .def_property_readonly("a", [](const FC_Op& self) {
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index aa831d1cfe92fb720df00bb7d8dd3af7f1c1a668..0aac0bbad69abb5faaaea3afd0183573db64b31f 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -29,7 +29,8 @@ void init_Gather(py::module& m) {
                 py::arg("indices"),
                 py::arg("gathered_shape"))
         .def_static("get_inputs_name", &Gather_Op::getInputsName)
-        .def_static("get_outputs_name", &Gather_Op::getOutputsName);
+        .def_static("get_outputs_name", &Gather_Op::getOutputsName)
+        .def_readonly_static("Type", &Gather_Op::Type);
 
     declare_registrable<Gather_Op>(m, "GatherOp");
 
diff --git a/python_binding/operator/pybind_GlobalAveragePooling.cpp b/python_binding/operator/pybind_GlobalAveragePooling.cpp
index d4d2a921addaef676913cee2a16991ad36686767..f37ac11f5c62d0334e34aff59561b2014d1977bd 100644
--- a/python_binding/operator/pybind_GlobalAveragePooling.cpp
+++ b/python_binding/operator/pybind_GlobalAveragePooling.cpp
@@ -25,7 +25,8 @@ void init_GlobalAveragePooling(py::module &m) {
                              py::multiple_inheritance())
       .def(py::init<>())
       .def_static("get_inputs_name", &GlobalAveragePooling_Op::getInputsName)
-      .def_static("get_outputs_name", &GlobalAveragePooling_Op::getOutputsName);
+      .def_static("get_outputs_name", &GlobalAveragePooling_Op::getOutputsName)
+      .def_readonly_static("Type", &GlobalAveragePooling_Op::Type);
 
   declare_registrable<GlobalAveragePooling_Op>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
index 49e74f4cbab90f141af5e76df7fbdef6e3794146..6d6c03b82ad4f905c41bb0cf849fc4e05fda4cb2 100644
--- a/python_binding/operator/pybind_GridSample.cpp
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -58,6 +58,7 @@ void declare_GridSampleOp(py::module &m) {
             py::arg("alogn_corners") = false)
         .def_static("get_inputs_name", &GridSample_Op::getInputsName)
         .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
+        .def_readonly_static("Type", &GridSample_Op::Type)
         ;
 
   declare_registrable<GridSample_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_Identity.cpp b/python_binding/operator/pybind_Identity.cpp
index 560f2889f20233ef928557aa230e6dab7f0a5d2b..7599197226b2f8734c989755c6e7d3581a52974d 100644
--- a/python_binding/operator/pybind_Identity.cpp
+++ b/python_binding/operator/pybind_Identity.cpp
@@ -22,7 +22,8 @@ void init_Identity(py::module& m) {
     py::class_<Identity_Op, std::shared_ptr<Identity_Op>, OperatorTensor>(m, "IdentityOp", py::multiple_inheritance())
         .def(py::init<>())
         .def_static("get_inputs_name", &Identity_Op::getInputsName)
-        .def_static("get_outputs_name", &Identity_Op::getOutputsName);
+        .def_static("get_outputs_name", &Identity_Op::getOutputsName)
+        .def_readonly_static("Type", &Identity_Op::Type);
 
     m.def("Identity", &Identity, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index f46106fb3fb168631c9681d90bda857183c9bc04..e031d3dfb3348c5aec5bd497b40ff261528725ad 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -22,7 +22,8 @@ void init_LeakyReLU(py::module& m) {
     py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
         .def(py::init<float>(), py::arg("negative_slope"))
         .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
-        .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName);
+        .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
+        .def_readonly_static("Type", &LeakyReLU_Op::Type);
     declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Ln.cpp b/python_binding/operator/pybind_Ln.cpp
index 0be710be1dfe1a5a83ceaf085094e8ded3f07ffd..50aa755821c257c174c4603404144dab4da26296 100755
--- a/python_binding/operator/pybind_Ln.cpp
+++ b/python_binding/operator/pybind_Ln.cpp
@@ -22,7 +22,8 @@ void init_Ln(py::module& m) {
     py::class_<Ln_Op, std::shared_ptr<Ln_Op>, OperatorTensor>(m, "LnOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Ln_Op::getInputsName)
-    .def_static("get_outputs_name", &Ln_Op::getOutputsName);
+    .def_static("get_outputs_name", &Ln_Op::getOutputsName)
+    .def_readonly_static("Type", &Ln_Op::Type);
 
     m.def("Ln", &Ln, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index 09e11f89ea579b5a3aa75f177958d981c53f1dce..f4f175afcb35eb1c10dcd1a1d9d2f2b1691dcfc0 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -24,7 +24,8 @@ void init_MatMul(py::module &m) {
   py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &MatMul_Op::getInputsName)
-    .def_static("get_outputs_name", &MatMul_Op::getOutputsName);
+    .def_static("get_outputs_name", &MatMul_Op::getOutputsName)
+    .def_readonly_static("Type", &MatMul_Op::Type);
   declare_registrable<MatMul_Op>(m, "MatMulOp");
   m.def("MatMul", &MatMul, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 2a850cd7bfe5cca21ea1ca54b5e9ad86b880bcc2..b59a4c5574ce5e56af13f9aea13e7514c9402c22 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -37,7 +37,8 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("ceil_mode"))
   .def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
-  .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
+  .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
+  .def_readonly_static("Type", &MaxPooling_Op<DIM>::Type);
   declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
diff --git a/python_binding/operator/pybind_Memorize.cpp b/python_binding/operator/pybind_Memorize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3ac1122111aae1a9b7eb353399e46562ae51b0b1
--- /dev/null
+++ b/python_binding/operator/pybind_Memorize.cpp
@@ -0,0 +1,33 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/Memorize.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Memorize(py::module& m) {
+    py::class_<Memorize_Op, std::shared_ptr<Memorize_Op>, OperatorTensor>(m, "MemorizeOp", py::multiple_inheritance())
+        .def(py::init<const std::uint32_t>(), py::arg("end_step"))
+        .def_static("get_inputs_name", &Memorize_Op::getInputsName)
+        .def_static("get_outputs_name", &Memorize_Op::getOutputsName);
+
+    declare_registrable<Memorize_Op>(m, "MemorizeOp");
+
+    m.def("Memorize", &Memorize, py::arg("end_step"), py::arg("name") = "");
+}
+
+}  // namespace Aidge
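
A minimal sketch of the new `Memorize` binding; `end_step` is the operator's single attribute, exposed as a keyword argument.

```python
import aidge_core

mem = aidge_core.Memorize(end_step=10, name="mem0")
```
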
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 1658b0d959c0882d53e078f6d68b4474b34c739e..23949b5fe3b22edf5b7105abd0de29b727740e35 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -22,7 +22,8 @@ void init_Mul(py::module& m) {
     py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Mul_Op::getInputsName)
-    .def_static("get_outputs_name", &Mul_Op::getOutputsName);
+    .def_static("get_outputs_name", &Mul_Op::getOutputsName)
+    .def_readonly_static("Type", &Mul_Op::Type);
     declare_registrable<Mul_Op>(m, "MulOp");
     m.def("Mul", &Mul, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index dbf71a3cad870d848fbc2f5f67c13d5347b38b89..6ffbdd007b9f929ccac18de12f2319dcd68b1eda 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -53,7 +53,8 @@ void init_Operator(py::module& m){
     )mydelimiter")
     .def("associate_input", &Operator::associateInput, py::arg("inputIdx"), py::arg("data"))
     .def("set_datatype", &Operator::setDataType, py::arg("dataType"))
-    .def("set_backend", &Operator::setBackend, py::arg("name"), py::arg("device") = 0)
+    .def("set_backend", py::overload_cast<const std::string&, DeviceIdx_t>(&Operator::setBackend), py::arg("name"), py::arg("device") = 0)
+    .def("set_backend", py::overload_cast<const std::vector<std::pair<std::string, DeviceIdx_t>>&>(&Operator::setBackend), py::arg("backends"))
     .def("forward", &Operator::forward)
     // py::keep_alive forbide Python to garbage collect the implementation lambda as long as the Operator is not deleted !
     .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>())
@@ -62,6 +63,8 @@ void init_Operator(py::module& m){
     .def("get_hook", &Operator::getHook)
     .def("add_hook", &Operator::addHook)
     .def_property_readonly("attr", &Operator::attributes)
+    .def("set_back_edges", &Operator::setBackEdges, py::arg("input_indexes"))
+    .def("is_back_edge", &Operator::isBackEdge, py::arg("input_index"))
     ;
 }
 }
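
A hedged sketch of the two `set_backend` overloads and the back-edge query added above. `get_operator()` on a node is assumed to be bound elsewhere in aidge_core, and list/tuple conversion for the multi-backend overload relies on the stl casters available in this translation unit.

```python
import aidge_core

node = aidge_core.ReLU(name="relu0")
op = node.get_operator()          # assumed Node binding, not part of this hunk

op.set_backend("cpu")             # existing single-backend form
op.set_backend([("cpu", 0)])      # new form taking (name, device) pairs

print(op.is_back_edge(0))         # False unless the input was marked with set_back_edges
```
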
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 3df203ed52967e3dbc393769276015a7fe0e016f..04882b7f5b86c7c09ed8b8e5a15c4bfabd03bb55 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -37,6 +37,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("borderValue") = 0.0)
     .def_static("get_inputs_name", &Pad_Op<DIM>::getInputsName)
     .def_static("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
+    .def_readonly_static("Type", &Pad_Op<DIM>::Type)
     ;
   declare_registrable<Pad_Op<DIM>>(m, pyClassName);
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index 0c3b3f38803735d2df632496382e86a0c9f2735d..2040f642bbfc0428be48a6f7ec21fa3aed20a371 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -22,7 +22,8 @@ void init_Pop(py::module& m) {
     py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor>(m, "PopOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Pop_Op::getInputsName)
-    .def_static("get_outputs_name", &Pop_Op::getOutputsName);
+    .def_static("get_outputs_name", &Pop_Op::getOutputsName)
+    .def_readonly_static("Type", &Pop_Op::Type);
 
     m.def("Pop", &Pop, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index e5d67542cd1acc5b2982081e4cf3a91948542147..ec29e3faa7c3efbc2b2dbe23372f57c30568b769 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -22,7 +22,8 @@ void init_Pow(py::module& m) {
     py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Pow_Op::getInputsName)
-    .def_static("get_outputs_name", &Pow_Op::getOutputsName);
+    .def_static("get_outputs_name", &Pow_Op::getOutputsName)
+    .def_readonly_static("Type", &Pow_Op::Type);
     declare_registrable<Pow_Op>(m, "PowOp");
 
     m.def("Pow", &Pow, py::arg("name") = "");
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 30279dc477a0badbd5dc361ef7b5d071fa7b8cbc..3467ed970c3f830298b46897717d123a0ab11800 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -38,7 +38,8 @@ void init_Producer(py::module &m) {
         .def(py::init<const std::shared_ptr<Tensor>, bool>(), py::arg("tensor"), py::arg("constant"))
         .def("dims", &Producer_Op::dims)
         .def_static("get_inputs_name", &Producer_Op::getInputsName)
-        .def_static("get_outputs_name", &Producer_Op::getOutputsName);
+        .def_static("get_outputs_name", &Producer_Op::getOutputsName)
+        .def_readonly_static("Type", &Producer_Op::Type);
 
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(
                                         const std::shared_ptr<Tensor>,
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index d611523f15a7007b0e9ab9cce323ed9a57d8ecdf..79720845cf21103d3a9257880e8d2068673e36f0 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -22,7 +22,8 @@ void init_ReLU(py::module& m) {
     py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &ReLU_Op::getInputsName)
-    .def_static("get_outputs_name", &ReLU_Op::getOutputsName);
+    .def_static("get_outputs_name", &ReLU_Op::getOutputsName)
+    .def_readonly_static("Type", &ReLU_Op::Type);
     declare_registrable<ReLU_Op>(m, "ReLUOp");
 
     m.def("ReLU", &ReLU, py::arg("name") = "");
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 3023c077e2f3695902ca76dfa21831749f0ca82e..028e45755fb10bb01602959f721cf003cb1e5136 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -27,22 +27,50 @@ namespace Aidge {
 void declare_ReduceMeanOp(py::module &m) {
   const std::string pyClassName("ReduceMeanOp");
   py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, OperatorTensor>(
-    m, pyClassName.c_str(), py::multiple_inheritance())
-    .def(py::init<std::vector<std::int32_t>, DimSize_t>(), py::arg("axes"), py::arg("keep_dims"))
+    m, pyClassName.c_str(), py::multiple_inheritance(),
+      R"mydelimiter(
+		Initialize a ReduceMean operator.
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axes: List[int]
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param noop_with_empty_axes: If True and ``axes`` is empty, the operator just copies the input;
+							if False (default), the reduction is then done over all the dimensions.
+			:type noop_with_empty_axes: bool
+		)mydelimiter")
+    .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes") = std::vector<std::int32_t>(), py::arg("keep_dims") = true, py::arg("noop_with_empty_axes") = false)
     .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
+    .def_readonly_static("Type", &ReduceMean_Op::Type)
     ;
   declare_registrable<ReduceMean_Op>(m, pyClassName);
 
   m.def("ReduceMean", [](const std::vector<int>& axes,
-                                                                DimSize_t keepDims,
-                                                                const std::string& name) {
+                          bool keepDims,
+                          bool noopWithEmptyAxes,
+                          const std::string& name) {
         // AIDGE_ASSERT(axes.size() == DIM, "axes size [{}] does not match DIM [{}]", axes.size(), DIM);
 
-        return ReduceMean(axes, keepDims, name);
-    }, py::arg("axes"),
-       py::arg("keep_dims") = 1,
-       py::arg("name") = "");
+        return ReduceMean(axes, keepDims, noopWithEmptyAxes, name);
+    }, py::arg("axes") = std::vector<std::int32_t>(),
+       py::arg("keep_dims") = true,
+       py::arg("noop_with_empty_axes") = false,
+       py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing a ReduceMean operator.
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axes: List[int]
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param noop_with_empty_axes: If True and ``axes`` is empty, the operator just copies the input;
+							if False (default), the reduction is then done over all the dimensions.
+			:type noop_with_empty_axes: bool
+			:param name: name of the node.
+		)mydelimiter");
 }
 
 
diff --git a/python_binding/operator/pybind_ReduceSum.cpp b/python_binding/operator/pybind_ReduceSum.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eaa57ef1c663a03cfd59ce02c13c3c7028b69e01
--- /dev/null
+++ b/python_binding/operator/pybind_ReduceSum.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <array>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/ReduceSum.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_ReduceSum(py::module &m) {
+  const std::string pyClassName("ReduceSumOp");
+  py::class_<ReduceSum_Op, std::shared_ptr<ReduceSum_Op>, OperatorTensor>(
+    m, pyClassName.c_str(), py::multiple_inheritance(),
+      R"mydelimiter(
+		Initialize a ReduceSum operator.
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axes: List[int]
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param noop_with_empty_axes: If True and ``axes`` is empty, the operator just copies the input;
+							if False (default), the reduction is then done over all the dimensions.
+			:type noop_with_empty_axes: bool
+		)mydelimiter")
+    .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes"), py::arg("keep_dims"), py::arg("noop_with_empty_axes"))
+    .def_static("get_inputs_name", &ReduceSum_Op::getInputsName)
+    .def_static("get_outputs_name", &ReduceSum_Op::getOutputsName)
+    ;
+  declare_registrable<ReduceSum_Op>(m, pyClassName);
+
+  m.def("ReduceSum", [](const std::vector<int>& axes,
+                        bool keepDims,
+                        bool noopWithEmptyAxes,
+                        const std::string& name) {
+        return ReduceSum(axes, keepDims, noopWithEmptyAxes, name);
+    }, py::arg("axes") = std::vector<std::int32_t>(),
+       py::arg("keep_dims") = true,
+       py::arg("noop_with_empty_axes") = false,
+       py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing a ReduceSum operator.
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axes: List[int]
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param noop_with_empty_axes: If True and ``axes`` is empty, the operator just copies the input;
+							if False (default), the reduction is then done over all the dimensions.
+			:type noop_with_empty_axes: bool
+			:param name: name of the node.
+		)mydelimiter");
+}
+} // namespace Aidge
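
A short sketch of the new `ReduceSum` factory (the updated `ReduceMean` factory above takes the same arguments); defaults follow the bindings above.

```python
import aidge_core

# Sum over axis 1 and keep it as a size-1 dimension.
rsum = aidge_core.ReduceSum(axes=[1], keep_dims=True, noop_with_empty_axes=False, name="rsum0")

# With the defaults (empty axes, noop_with_empty_axes=False) the reduction covers all dimensions.
rsum_all = aidge_core.ReduceSum(name="rsum_all")
```
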
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index 89d93134ac2f590bcb067aa6936081c16fc1e2a3..c0b0e8c30ef127d5cdcaf24ded75b83f06c86588 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -22,7 +22,8 @@ void init_Reshape(py::module& m) {
     py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
         .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
         .def_static("get_inputs_name", &Reshape_Op::getInputsName)
-        .def_static("get_outputs_name", &Reshape_Op::getOutputsName);
+        .def_static("get_outputs_name", &Reshape_Op::getOutputsName)
+        .def_readonly_static("Type", &Reshape_Op::Type);
     declare_registrable<Reshape_Op>(m, "ReshapeOp");
     m.def("Reshape", &Reshape, py::arg("shape") = std::vector<std::int64_t>(), py::arg("allowzero") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
index a925af8cf357dabc09f4e8e3c39af9519b4ed550..35321f525e486107af3715ce1c09f48b7c5cd60f 100644
--- a/python_binding/operator/pybind_Resize.cpp
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -20,7 +20,8 @@ namespace Aidge {
 void init_Resize(py::module& m) {
     py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
         .def_static("get_inputs_name", &Resize_Op::getInputsName)
-        .def_static("get_outputs_name", &Resize_Op::getOutputsName);
+        .def_static("get_outputs_name", &Resize_Op::getOutputsName)
+        .def_readonly_static("Type", &Resize_Op::Type);
 
     declare_registrable<Resize_Op>(m, "ResizeOp");
 
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
index 31e6c0b08194fbb8b6ec2270e8127a2f838ba78f..22e8011a9cd37f80a0678f2629809d4412ba6fd2 100644
--- a/python_binding/operator/pybind_Scaling.cpp
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -24,7 +24,8 @@ void init_Scaling(py::module& m)
     py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance())
         .def(py::init<float, size_t, bool>(), py::arg("scaling_factor"), py::arg("nb_bits"), py::arg("is_output_unsigned"))
         .def_static("get_inputs_name", &Scaling_Op::getInputsName)
-        .def_static("get_outputs_name", &Scaling_Op::getOutputsName);
+        .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
+        .def_readonly_static("Type", &Scaling_Op::Type);
     declare_registrable<Scaling_Op>(m, "ScalingOp");
     m.def("Scaling", &Scaling, py::arg("scaling_factor") = 1.0f, py::arg("nb_bits") = 8, py::arg("is_output_unsigned") = true, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
index 4e1d4203e48f714746587c9f209b4d28bfecb439..b3511f31eeab7d5df679d16c3bfb89f51d75cdbe 100644
--- a/python_binding/operator/pybind_Shape.cpp
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -26,7 +26,8 @@ void init_Shape(py::module& m) {
                 py::arg("start"),
                 py::arg("end"))
         .def_static("get_inputs_name", &Shape_Op::getInputsName)
-        .def_static("get_outputs_name", &Shape_Op::getOutputsName);
+        .def_static("get_outputs_name", &Shape_Op::getOutputsName)
+        .def_readonly_static("Type", &Shape_Op::Type);
 
     declare_registrable<Shape_Op>(m, "ShapeOp");
 
diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp
index 0ba94c73fcd1fb435194f8485567771a147ec616..db7fc7bfb60ff8360933e5f84ab54d4cec8df724 100644
--- a/python_binding/operator/pybind_Sigmoid.cpp
+++ b/python_binding/operator/pybind_Sigmoid.cpp
@@ -22,7 +22,8 @@ void init_Sigmoid(py::module& m) {
     py::class_<Sigmoid_Op, std::shared_ptr<Sigmoid_Op>, OperatorTensor>(m, "SigmoidOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Sigmoid_Op::getInputsName)
-    .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName);
+    .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName)
+    .def_readonly_static("Type", &Sigmoid_Op::Type);
 
     m.def("Sigmoid", &Sigmoid, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index b87cc8da4874c666de21a6e798a66e3c7fad9c10..c8cae2592b966fff7ebfde1e5905ed31d5b22455 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -30,7 +30,8 @@ void init_Slice(py::module& m) {
                   py::arg("axes"),
                   py::arg("steps"))
     .def_static("get_inputs_name", &Slice_Op::getInputsName)
-    .def_static("get_outputs_name", &Slice_Op::getOutputsName);
+    .def_static("get_outputs_name", &Slice_Op::getOutputsName)
+    .def_readonly_static("Type", &Slice_Op::Type);
     declare_registrable<Slice_Op>(m, "SliceOp");
 
     m.def("Slice",
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 8b6e16d93bbee6b0517398a56de44784cd893b97..3b98ab9dfa1590093c567a363f67d32d613651a2 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -23,7 +23,8 @@ void init_Softmax(py::module& m) {
     py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
         .def(py::init<std::int32_t>(), py::arg("axis"))
         .def_static("get_inputs_name", &Softmax_Op::getInputsName)
-        .def_static("get_outputs_name", &Softmax_Op::getOutputsName);
+        .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
+        .def_readonly_static("Type", &Softmax_Op::Type);
     declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
index f63a01f9815aa59cfbad0aea36f148899f44c9ea..9b3feda9f791e65a9c32f2bda3da4da450838b40 100644
--- a/python_binding/operator/pybind_Split.cpp
+++ b/python_binding/operator/pybind_Split.cpp
@@ -27,7 +27,8 @@ void init_Split(py::module& m) {
                 py::arg("axis"),
                 py::arg("split"))
         .def_static("get_inputs_name", &Split_Op::getInputsName)
-        .def_static("get_outputs_name", &Split_Op::getOutputsName);
+        .def_static("get_outputs_name", &Split_Op::getOutputsName)
+        .def_readonly_static("Type", &Split_Op::Type);
 
     declare_registrable<Split_Op>(m, "SplitOp");
 
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index 9fae2cef29748482dfeabe173d946c6446a60a35..ba0c5aab02349df4c50f960bbeb7df2082aa9233 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -21,7 +21,8 @@ void init_Sqrt(py::module& m) {
     py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Sqrt_Op::getInputsName)
-    .def_static("get_outputs_name", &Sqrt_Op::getOutputsName);
+    .def_static("get_outputs_name", &Sqrt_Op::getOutputsName)
+    .def_readonly_static("Type", &Sqrt_Op::Type);
     declare_registrable<Sqrt_Op>(m, "SqrtOp");
     m.def("Sqrt", &Sqrt, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ca90fb46af40189dbe66c320ecdd237470ffa112
--- /dev/null
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -0,0 +1,52 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+#include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Squeeze.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Squeeze(py::module &m) {
+  py::class_<Squeeze_Op, std::shared_ptr<Squeeze_Op>, OperatorTensor>(
+      m, "SqueezeOp", py::multiple_inheritance(),
+		R"mydelimiter(
+		Initialize a Squeeze operator.
+		:param axes: axes to squeeze, each in the range [-r; r-1]
+						with r = input_tensor.nbDims()
+						and r in [-128, 127]
+		:type axes: List[int]
+		)mydelimiter")
+      .def("get_inputs_name", &Squeeze_Op::getInputsName)
+      .def("get_outputs_name", &Squeeze_Op::getOutputsName)
+      .def("axes", &Squeeze_Op::axes);
+  // Here we bind the constructor of the Squeeze Node. We add an argument
+  // for each attribute of the operator (in here we only have 'axes') and
+  // the last argument is the node's name.
+  m.def("Squeeze", &Squeeze, py::arg("axes") = std::vector<int8_t>({}),
+        py::arg("name") = "",
+        R"mydelimiter(
+    Initialize a node containing a Squeeze operator.
+	:param axes: axes to squeeze, each in the range [-r; r-1]
+					with r = input_tensor.nbDims()
+					and r in [-128, 127]
+	:type axes: List[int]
+    :param name: name of the node.
+)mydelimiter");
+}
+} // namespace Aidge
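
A minimal sketch of the new `Squeeze` factory; the axis list is arbitrary.

```python
import aidge_core

# Remove the size-1 dimension at axis 0.
squeeze = aidge_core.Squeeze(axes=[0], name="squeeze0")
```
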
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index 752490a72bc35ec8a0ab08dd8d51a31c887b4dc6..52a622f0fdf6480a375d17c9729017fca32b3092 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -22,7 +22,8 @@ void init_Sub(py::module& m) {
     py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Sub_Op::getInputsName)
-    .def_static("get_outputs_name", &Sub_Op::getOutputsName);
+    .def_static("get_outputs_name", &Sub_Op::getOutputsName)
+    .def_readonly_static("Type", &Sub_Op::Type);
     declare_registrable<Sub_Op>(m, "SubOp");
     m.def("Sub", &Sub, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Tanh.cpp b/python_binding/operator/pybind_Tanh.cpp
index 74cde8dd3831c8d29ca87e2314afc27276ec025f..ded15ee78951d389d614d932e4a9c22bf310b814 100644
--- a/python_binding/operator/pybind_Tanh.cpp
+++ b/python_binding/operator/pybind_Tanh.cpp
@@ -22,7 +22,8 @@ void init_Tanh(py::module& m) {
     py::class_<Tanh_Op, std::shared_ptr<Tanh_Op>, OperatorTensor>(m, "TanhOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Tanh_Op::getInputsName)
-    .def_static("get_outputs_name", &Tanh_Op::getOutputsName);
+    .def_static("get_outputs_name", &Tanh_Op::getOutputsName)
+    .def_readonly_static("Type", &Tanh_Op::Type);
 
     m.def("Tanh", &Tanh, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index c0c3ad617bef3eda3e283667944ac423cd10a622..930dd95f3c3e4b10d2b4f8b496dfbbbcc6822050 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -31,7 +31,8 @@ void declare_Transpose(py::module &m) {
     m, "TransposeOp", py::multiple_inheritance())
     .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order"))
     .def_static("get_inputs_name", &Transpose_Op::getInputsName)
-    .def_static("get_outputs_name", &Transpose_Op::getOutputsName);
+    .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
+    .def_readonly_static("Type", &Transpose_Op::Type);
   declare_registrable<Transpose_Op>(m, pyClassName);
   m.def("Transpose", &Transpose, py::arg("output_dims_order"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..40c179c4064f07896113732a7e3c32db5f19c060
--- /dev/null
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -0,0 +1,48 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Unsqueeze.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Unsqueeze(py::module &m) {
+  py::class_<Unsqueeze_Op, std::shared_ptr<Unsqueeze_Op>, OperatorTensor>(
+      m, "UnsqueezeOp", py::multiple_inheritance(),
+      R"mydelimiter(
+		Initialize an Unsqueeze operator.
+		:param axes: axes to unsqueeze, each in the range [-r; r-1]
+						with r = input_tensor.nbDims() + len(axes)
+		:type axes: List[int]
+		)mydelimiter")
+      // Here we bind the methods of Unsqueeze_Op that we want to expose.
+      .def("get_inputs_name", &Unsqueeze_Op::getInputsName)
+      .def("get_outputs_name", &Unsqueeze_Op::getOutputsName)
+      .def("axes", &Unsqueeze_Op::axes);
+  // Here we bind the constructor of the Unsqueeze Node. We add an argument for
+  // each attribute of the operator (in here we only have 'axes') and the last
+  // argument is the node's name.
+  m.def("Unsqueeze", &Unsqueeze, py::arg("axes") = std::vector<int8_t>({}),
+        py::arg("name") = "",
+        R"mydelimiter(
+    Initialize a node containing an Unsqueeze operator.
+	:param axes: axes to unsqueeze, each in the range [-r; r-1]
+					with r = input_tensor.nbDims() + len(axes)
+	:type axes: List[int]
+    :param name: name of the node.
+)mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index ba9965dca000af9b724dce961fe0cb44d349cc46..bfcd7c16d3b1bce7bd564b4aa8412f89d3c11813 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -21,17 +21,22 @@ void init_Data(py::module&);
 void init_Database(py::module&);
 void init_DataProvider(py::module&);
 void init_Tensor(py::module&);
-void init_OperatorImpl(py::module&);
+void init_TensorImpl(py::module&);
 void init_Attributes(py::module&);
+void init_OperatorImpl(py::module&);
 void init_Log(py::module&);
 void init_Operator(py::module&);
 void init_OperatorTensor(py::module&);
 
 void init_Add(py::module&);
+void init_And(py::module&);
+void init_ArgMax(py::module&);
 void init_AvgPooling(py::module&);
 void init_BatchNorm(py::module&);
+void init_BitShift(py::module&);
 void init_Clip(py::module&);
 void init_Concat(py::module&);
+void init_ConstantOfShape(py::module&);
 void init_Conv(py::module&);
 void init_ConvDepthWise(py::module&);
 void init_Div(py::module&);
@@ -41,17 +46,20 @@ void init_Gather(py::module&);
 void init_GenericOperator(py::module&);
 void init_GlobalAveragePooling(py::module&);
 void init_GridSample(py::module&);
+void init_Identity(py::module&);
 void init_LeakyReLU(py::module&);
 void init_MatMul(py::module&);
 void init_MaxPooling(py::module&);
+void init_Memorize(py::module&);
 void init_MetaOperatorDefs(py::module&);
 void init_Mul(py::module&);
-void init_Producer(py::module&);
 void init_Pad(py::module&);
 void init_Pop(py::module&);
 void init_Pow(py::module&);
-void init_ReduceMean(py::module&);
+void init_Producer(py::module&);
 void init_ReLU(py::module&);
+void init_ReduceMean(py::module&);
+void init_ReduceSum(py::module&);
 void init_Reshape(py::module&);
 void init_Resize(py::module&);
 void init_Scaling(py::module&);
@@ -61,10 +69,11 @@ void init_Slice(py::module&);
 void init_Softmax(py::module&);
 void init_Split(py::module&);
 void init_Sqrt(py::module&);
+void init_Squeeze(py::module&);
 void init_Sub(py::module&);
 void init_Tanh(py::module&);
 void init_Transpose(py::module&);
-void init_Identity(py::module&);
+void init_Unsqueeze(py::module&);
 
 void init_Node(py::module&);
 void init_GraphView(py::module&);
@@ -79,6 +88,7 @@ void init_GraphViewHelper(py::module&);
 
 void init_Scheduler(py::module&);
 void init_MemoryManager(py::module&);
+void init_ProdConso(py::module& m);
 void init_TensorUtils(py::module&);
 void init_Filler(py::module&);
 
@@ -89,6 +99,8 @@ void init_Aidge(py::module& m) {
     init_Database(m);
     init_DataProvider(m);
     init_Tensor(m);
+    init_TensorImpl(m);
+    init_Attributes(m);
 
     init_Node(m);
     init_GraphView(m);
@@ -96,17 +108,21 @@ void init_Aidge(py::module& m) {
     init_Connector(m);
 
     init_OperatorImpl(m);
-    init_Attributes(m);
     init_Log(m);
     init_Operator(m);
     init_OperatorTensor(m);
+
     init_Add(m);
+    init_And(m);
+    init_ArgMax(m);
     init_AvgPooling(m);
     init_BatchNorm(m);
+    init_BitShift(m);
     init_Clip(m);
     init_Concat(m);
     init_Conv(m);
     init_ConvDepthWise(m);
+    init_ConstantOfShape(m);
     init_Div(m);
     init_Erf(m);
     init_FC(m);
@@ -114,17 +130,19 @@ void init_Aidge(py::module& m) {
     init_GenericOperator(m);
     init_GlobalAveragePooling(m);
     init_GridSample(m);
+    init_Identity(m);
     init_LeakyReLU(m);
     init_MatMul(m);
     init_MaxPooling(m);
+    init_Memorize(m);
     init_MetaOperatorDefs(m);
     init_Mul(m);
     init_Pad(m);
-
     init_Pop(m);
     init_Pow(m);
-    init_ReduceMean(m);
     init_ReLU(m);
+    init_ReduceMean(m);
+    init_ReduceSum(m);
     init_Reshape(m);
     init_Resize(m);
     init_Scaling(m);
@@ -134,10 +152,11 @@ void init_Aidge(py::module& m) {
     init_Softmax(m);
     init_Split(m);
     init_Sqrt(m);
+    init_Squeeze(m);
     init_Sub(m);
     init_Tanh(m);
     init_Transpose(m);
-    init_Identity(m);
+    init_Unsqueeze(m);
 
     init_Producer(m);
 
@@ -148,9 +167,11 @@ void init_Aidge(py::module& m) {
     init_GraphViewHelper(m);
     init_Scheduler(m);
     init_MemoryManager(m);
+    init_ProdConso(m);
     init_TensorUtils(m);
     init_Filler(m);
 }
 
-PYBIND11_MODULE(aidge_core, m) { init_Aidge(m); }
 }  // namespace Aidge
+
+PYBIND11_MODULE(aidge_core, m) { Aidge::init_Aidge(m); }
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index 1c04a320d85a833cc3c0b666390edc7a8648214b..6908cbd912b506a7adb7f33a02416d0173174969 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -15,13 +15,14 @@
 #include <cstddef>
 #include <string>
 
+#include "aidge/graph/GraphView.hpp"
 #include "aidge/recipes/Recipes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 
 namespace Aidge {
-void init_Recipes(py::module &m) 
+void init_Recipes(py::module &m)
 {
 
 
@@ -71,18 +72,19 @@ void init_Recipes(py::module &m)
     )mydelimiter");
 
   m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
-    Recipe to remove a flatten operator.
+    Recipe to remove a Flatten operator if it is followed by an FC or a MatMul.
+    The recipe can remove multiple Flatten operators if they are placed one after the other.
 
-    :param graph_view: Graph view on which we want to apply the recipe
+    :param graph_view: Graph view on which we want to apply the recipe.
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
-  // m.def("remove_flatten", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(removeFlatten), py::arg("nodes"), R"mydelimiter(
-  //   Recipe to remove a flatten operator.
+  m.def("remove_constantOfShape", static_cast<size_t(*)(std::shared_ptr<GraphView>)>(removeConstantOfShape), py::arg("graph_view"), R"mydelimiter(
+    Fuses the constant => Generic | constantOfShape pattern and transforms it into a Producer.
 
-  //   :param nodes: The flatten operator to remove.
-  //   :type nodes: list of :py:class:`aidge_core.Node`
-  //   )mydelimiter");
+    :param graph_view: Graph view on which we want to apply the recipe.
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    )mydelimiter");
 
   m.def("fuse_batchnorm", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseBatchNorm), py::arg("graph_view"), R"mydelimiter(
     Recipe to remove a flatten operator.
@@ -122,6 +124,13 @@ void init_Recipes(py::module &m)
     :return: Number of sub-graph actually fused in a Meta Operator.
     :rtype: int
     )mydelimiter");
+
+  m.def("adapt_to_backend", adaptToBackend, py::arg("graph_view"), R"mydelimiter(
+    Adapt the graph to a specific backend.
+
+    :param graph_view: Graph view on which we want to apply the recipe.
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    )mydelimiter");
 }
 
 } // namespace Aidge
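
Usage sketch for the recipes bound above, as they would be called from C++. This is a hedged example: `model` is assumed to be an already-built `GraphView` (with a backend set when `adaptToBackend` is used), and the return value of `adaptToBackend` is not relied upon since it is not shown here.

```cpp
#include <memory>

#include "aidge/graph/GraphView.hpp"
#include "aidge/recipes/Recipes.hpp"

void applyRecipes(std::shared_ptr<Aidge::GraphView> model) {
    // Remove Flatten nodes that precede an FC or a MatMul
    Aidge::removeFlatten(model);

    // Fuse constant => Generic | constantOfShape patterns into Producers
    const std::size_t nbFused = Aidge::removeConstantOfShape(model);
    (void)nbFused;

    // Insert the adaptations (Cast/Transpose) required by the selected backend
    Aidge::adaptToBackend(model);
}
```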
diff --git a/python_binding/scheduler/pybind_ProdConso.cpp b/python_binding/scheduler/pybind_ProdConso.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..abd6d5379178916b5842095d50a1de2155345b6f
--- /dev/null
+++ b/python_binding/scheduler/pybind_ProdConso.cpp
@@ -0,0 +1,116 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+/**
+ * @brief Trampoline class for binding
+ *
+ */
+class pyProdConso: public ProdConso {
+public:
+    using ProdConso::ProdConso; // Inherit constructors
+
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_nb_required_data",
+            getNbRequiredData,
+            inputIdx
+        );
+    }
+    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_nb_required_protected",
+            getNbRequiredProtected,
+            inputIdx
+
+        );
+    }
+    Elts_t getRequiredMemory(const IOIndex_t outputIdx,
+    const std::vector<DimSize_t> &inputsSize) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_required_memory",
+            getRequiredMemory,
+            outputIdx,
+            inputsSize
+
+        );
+    }
+    Elts_t getNbConsumedData(const IOIndex_t inputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_nb_consumed_data",
+            getNbConsumedData,
+            inputIdx
+
+        );
+    }
+    Elts_t getNbProducedData(const IOIndex_t outputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_nb_produced_data",
+            getNbProducedData,
+            outputIdx
+
+        );
+    }
+    void updateConsummerProducer() override {
+        PYBIND11_OVERRIDE_NAME(
+            void,
+            ProdConso,
+            "update_consummer_producer",
+            updateConsummerProducer,
+
+        );
+    }
+    void resetConsummerProducer() override {
+        PYBIND11_OVERRIDE_NAME(
+            void,
+            ProdConso,
+            "reset_consummer_producer",
+            resetConsummerProducer,
+
+        );
+    }
+};
+
+void init_ProdConso(py::module& m){
+
+    py::class_<ProdConso, std::shared_ptr<ProdConso>, pyProdConso>(m, "ProdConso", py::dynamic_attr())
+    .def(py::init<const Operator&, bool>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>())
+    .def_static("default_model", &ProdConso::defaultModel)
+    .def_static("in_place_model", &ProdConso::inPlaceModel)
+    .def("get_nb_required_data", &ProdConso::getNbRequiredData)
+    .def("get_nb_required_protected", &ProdConso::getNbRequiredProtected)
+    .def("get_required_memory", &ProdConso::getRequiredMemory)
+    .def("get_nb_consumed_data", &ProdConso::getNbConsumedData)
+    .def("get_nb_produced_data", &ProdConso::getNbProducedData)
+    .def("update_consummer_producer", &ProdConso::updateConsummerProducer)
+    .def("reset_consummer_producer", &ProdConso::resetConsummerProducer)
+    ;
+}
+}
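
The trampoline above lets Python code override the producer/consumer model; the same can be done directly in C++. A minimal sketch of a custom model that requests a single token per input at each step (the class name is illustrative, not part of Aidge):

```cpp
#include "aidge/scheduler/ProdConso.hpp"

class TokenProdConso : public Aidge::ProdConso {
public:
    using Aidge::ProdConso::ProdConso;  // inherit ProdConso(const Operator&, bool)

    Aidge::Elts_t getNbRequiredData(const Aidge::IOIndex_t /*inputIdx*/) const override {
        // Request a single token instead of the whole input tensor
        return Aidge::Elts_t::TokenElts(1);
    }
};
```

A backend implementation can return such a model from its `getProdConso()` override; the default added at the end of `OperatorImpl.cpp` below simply returns a plain `ProdConso`.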
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index ac35ce0a62408a69637a4160c9a008aba9dceb66..472af2a9465b121593613492f5120ddc9d7fe254 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -25,6 +25,7 @@ void init_Scheduler(py::module& m){
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("graph_view", &Scheduler::graphView)
     .def("save_scheduling_diagram", &Scheduler::saveSchedulingDiagram, py::arg("file_name"))
+    .def("save_static_scheduling_diagram", &Scheduler::saveStaticSchedulingDiagram, py::arg("file_name"))
     .def("resetScheduling", &Scheduler::resetScheduling)
     .def("generate_scheduling", &Scheduler::generateScheduling)
     .def("get_static_scheduling", &Scheduler::getStaticScheduling, py::arg("step") = 0)
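
A hedged sketch of the new `save_static_scheduling_diagram` binding from the C++ side, assuming a concrete scheduler such as `SequentialScheduler` and the header path shown (both are assumptions, only `generateScheduling()` and `saveStaticSchedulingDiagram()` are confirmed by the binding above):

```cpp
#include <memory>

#include "aidge/scheduler/SequentialScheduler.hpp"  // assumed header path

void dumpStaticSchedule(std::shared_ptr<Aidge::GraphView> model) {
    Aidge::SequentialScheduler scheduler(model);
    scheduler.generateScheduling();
    // Dump the static scheduling, the new counterpart of saveSchedulingDiagram()
    scheduler.saveStaticSchedulingDiagram("static_scheduling");
}
```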
diff --git a/python_binding/utils/pybind_Log.cpp b/python_binding/utils/pybind_Log.cpp
index f70a4bfab54ee14194ea04f96efa33a6b8e04201..ca8d1f33086fb5093c76826e5a2f53df873badf5 100644
--- a/python_binding/utils/pybind_Log.cpp
+++ b/python_binding/utils/pybind_Log.cpp
@@ -78,6 +78,13 @@ void init_Log(py::module& m){
     .def_static("set_console_level", &Log::setConsoleLevel, py::arg("level"),
           R"mydelimiter(
           Set the minimum log level displayed in the console.
+          Available `Level`s in ascending order:
+            - Level.Debug
+            - Level.Info
+            - Level.Notice
+            - Level.Warn
+            - Level.Error
+            - Level.Fatal
 
           :param level: Log level.
           :type level: Level
@@ -93,6 +100,13 @@ void init_Log(py::module& m){
     .def_static("set_file_level", &Log::setFileLevel, py::arg("level"),
           R"mydelimiter(
           Set the minimum log level saved in the log file.
+          Available `Level`s in ascending order:
+            - Level.Debug
+            - Level.Info
+            - Level.Notice
+            - Level.Warn
+            - Level.Error
+            - Level.Fatal
 
           :param level: Log level.
           :type level: Level
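
The C++ equivalent of the documented Python calls, as a hedged sketch: the header path and the nesting of the `Level` values inside `Log` are assumptions based on the binding, only `setConsoleLevel()` and `setFileLevel()` are confirmed above.

```cpp
#include "aidge/utils/Log.hpp"  // assumed header path

void configureLogging() {
    // Show only warnings and above on the console, keep everything in the log file.
    // Log::Warn / Log::Debug assume the Level enum is accessible through Log.
    Aidge::Log::setConsoleLevel(Aidge::Log::Warn);
    Aidge::Log::setFileLevel(Aidge::Log::Debug);
}
```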
diff --git a/setup.py b/setup.py
index f0c41626f2fa348ac5d52778d0d865a31b4c344c..4f2e21711f193eb7d5c37ace7b5ad83ac63d3635 100644
--- a/setup.py
+++ b/setup.py
@@ -61,13 +61,14 @@ class CMakeBuild(build_ext):
             if build_gen
             else []
         )
+        test_onoff = os.environ.get("AIDGE_BUILD_TEST", "OFF")
 
         self.spawn(
             [
                 "cmake",
                 *build_gen_opts,
                 str(cwd),
-                "-DTEST=OFF",
+                f"-DTEST={test_onoff}",
                 f"-DCMAKE_INSTALL_PREFIX:PATH={install_path}",
                 f"-DCMAKE_BUILD_TYPE={compile_type}",
                 "-DPYBIND=ON",
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index d992703fedb224e6650ce2ad50317cda3bae650f..0fa2cfdadb3af350a5668444c0a330e023818a41 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -14,106 +14,345 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Transpose.hpp"
+#include "aidge/operator/Cast.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
+Aidge::ImplSpec::ImplSpec(const DynamicAttributes& attrs_):
+    attrs(attrs_) {}
+Aidge::ImplSpec::ImplSpec(const IOSpec& io, const DynamicAttributes& attrs_):
+    inputs(1, io), outputs(1, io), attrs(attrs_) {}
+Aidge::ImplSpec::ImplSpec(const IOSpec& i, const IOSpec& o, const DynamicAttributes& attrs_):
+    inputs(1, i), outputs(1, o), attrs(attrs_) {}
+Aidge::ImplSpec::ImplSpec(const std::vector<IOSpec>& i, const std::vector<IOSpec>& o, const DynamicAttributes& attrs_):
+    inputs(i), outputs(o), attrs(attrs_) {}
+Aidge::ImplSpec::ImplSpec(const Aidge::ImplSpec&) = default;
+Aidge::ImplSpec::~ImplSpec() noexcept = default;
+
 Aidge::OperatorImpl::OperatorImpl(const Operator& op, const std::string& backend):
     mOp(op),
-    mBackend(backend),
-    mNbConsumedData(mOp.nbInputs(), Elts_t::NoneElts()),
-    mNbProducedData(mOp.nbOutputs(), Elts_t::NoneElts())
+    mBackend(backend)
 {
     //ctor
 }
 
-Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    if (mOp.getRawInput(inputIdx)) {
-        const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
-        if (!input->undefined()) {
-            // Known amount of data: requires the whole tensor by default
-            return Elts_t::DataElts(input->size());
+std::shared_ptr<Aidge::ProdConso> Aidge::OperatorImpl::prodConso() {
+    if (!mProdConso) {
+        mProdConso = getProdConso();
+    }
+    return mProdConso;
+}
+
+Aidge::ImplSpec Aidge::OperatorImpl::getRequiredSpec() const {
+    const auto& opTensor = dynamic_cast<const OperatorTensor&>(mOp);
+
+    ImplSpec requiredSpec;
+    // Inputs specs
+    for (size_t i = 0; i < opTensor.nbInputs(); ++i) {
+        if (opTensor.getInput(i)) {
+            std::vector<std::pair<int, int>> dims;
+            for (auto dim : opTensor.getInput(i)->dims()) {
+                dims.push_back(std::make_pair<int, int>(dim, dim));
+            }
+
+            requiredSpec.inputs.push_back({opTensor.getInput(i)->dataType(), opTensor.getInput(i)->dataFormat(), dims});
         }
         else {
-            // Unknown amount of data: require a single token by default
-            return Elts_t::TokenElts(1);
+            requiredSpec.inputs.push_back({DataType::Any});
         }
     }
+    // Outputs specs
+    for (size_t i = 0; i < opTensor.nbOutputs(); ++i) {
+        std::vector<std::pair<int, int>> dims;
+        for (auto dim : opTensor.getOutput(i)->dims()) {
+            dims.push_back(std::make_pair<int, int>(dim, dim));
+        }
 
-    // Input not connected, meaning it is an optional input: do no require anything!
-    return Elts_t::NoneElts();
+        requiredSpec.outputs.push_back({opTensor.getOutput(i)->dataType(), opTensor.getOutput(i)->dataFormat(), dims});
+    }
+    // Attributes
+    if (!mOp.isAtomic()) {
+        requiredSpec.attrs.setAttr("type:!", mOp.type()); // :! mandatory qualifier
+    }
+    else {
+        requiredSpec.attrs.setAttr("type", mOp.type());
+    }
+    return requiredSpec;
 }
 
-Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
-    if (mOp.getRawInput(inputIdx)) {
-        const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
-        if (!input->undefined()) {
-            // Known amount of data: protect the whole tensor by default
-            return Elts_t::DataElts(input->size());
+Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs) const {
+    Log::debug("getBestMatch() for requirements: {}", requiredSpecs);
+
+    const auto availableSpecsSet = getAvailableImplSpecs();
+    const std::vector<ImplSpec> availableSpecs(availableSpecsSet.begin(), availableSpecsSet.end());
+    std::vector<int> matchingSpecs(availableSpecs.size(), -1);
+
+    for (size_t s = 0; s < availableSpecs.size(); ++s) {
+        auto spec = availableSpecs[s];
+        bool match = true;
+        int priority = 0;
+
+        // Check inputs
+        for (size_t i = 0; i < requiredSpecs.inputs.size(); ++i) {
+            const auto inputSpec = (i < spec.inputs.size()) ? spec.inputs[i] : spec.inputs.back();
+            if (!checkIOSpec(requiredSpecs.inputs[i], inputSpec)) {
+                match = false;
+                break;
+            }
         }
-        else {
-            // Unknown amount of data: protect a single token by default
-            // (this does not really make sense for now, as getNbRequiredProtected()
-            // is supposed to give a precise amount of data to protect for
-            // memory management purpose...)
-            return Elts_t::TokenElts(1);
+
+        // Check outputs
+        for (size_t i = 0; i < requiredSpecs.outputs.size(); ++i) {
+            const auto outputSpec = (i < spec.outputs.size()) ? spec.outputs[i] : spec.outputs.back();
+            if (!checkIOSpec(requiredSpecs.outputs[i], outputSpec)) {
+                match = false;
+                break;
+            }
+        }
+
+        // Check attributes
+        for (const auto& attrName : requiredSpecs.attrs.getAttrsName()) {
+            std::string name = attrName;
+            std::string qualifier;
+            const auto qualifierPos = std::find_if(attrName.begin(), attrName.end(),
+                [](char c) { return c == ':'; });
+            if (qualifierPos != attrName.end()) {
+                name = attrName.substr(0, qualifierPos - attrName.begin());
+                qualifier = attrName.substr(qualifierPos - attrName.begin() + 1); // strip ':' so the qualifier is e.g. "!" or a priority digit
+            }
+
+            const bool mandatory = (qualifier == "!");
+            if (mandatory) {
+                // Required attribute:
+                if (!spec.attrs.hasAttr(name)) {
+                    // Missing attribute
+                    match = false;
+                    break;
+                }
+                else if (requiredSpecs.attrs.getAny(attrName) < spec.attrs.getAny(name)
+                    || spec.attrs.getAny(name) < requiredSpecs.attrs.getAny(attrName))
+                {
+                    // Attribute value mismatch
+                    match = false;
+                    break;
+                }
+            }
+            else {
+                const int attrPriority = (!qualifier.empty()) ? std::stoi(qualifier) : 0;
+
+                if (spec.attrs.hasAttr(name)
+                    && !(requiredSpecs.attrs.getAny(attrName) < spec.attrs.getAny(name))
+                    && !(spec.attrs.getAny(name) < requiredSpecs.attrs.getAny(attrName)))
+                {
+                    // Attribute value match
+                    priority = std::max(priority, attrPriority);
+                }
+            }
         }
+
+        if (match) {
+            matchingSpecs[s] = priority;
+        }
+
+        Log::debug("  {}:{} - {}", (match) ? "MATCH" : "MISMATCH", priority, spec);
     }
 
-    // Input not connected, meaning it is an optional input: do no require anything!
-    return Elts_t::NoneElts();
+    // Return best match
+    const auto bestMatch = std::max_element(matchingSpecs.begin(), matchingSpecs.end());
+    if (bestMatch != matchingSpecs.end() && *bestMatch >= 0) {
+        const auto bestSpecIdx = bestMatch - matchingSpecs.begin();
+        return availableSpecs[bestSpecIdx];
+    }
+
+    // If there is no match, return the required specs for the registrar, which
+    // will throw a "missing or invalid registrar key"
+    return requiredSpecs;
 }
 
-Aidge::Elts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    if (mOp.getRawOutput(outputIdx)) {
-        const auto output = std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx));
-        if (!output->undefined()) {
-            // Known amount of data: requires the whole tensor by default,
-            // regardless of available data on inputs
-            return Elts_t::DataElts(output->size());
+bool Aidge::OperatorImpl::checkIOSpec(const ImplSpec::IOSpec& required, const ImplSpec::IOSpec& spec) const {
+    // Check type
+    if (required.type != DataType::Any
+        && spec.type != DataType::Any
+        && required.type != spec.type)
+    {
+        return false;
+    }
+
+    // Check format
+    if (required.format != DataFormat::Any
+        && spec.format != DataFormat::Any
+        && required.format != spec.format)
+    {
+        const auto transpose = getDataFormatTranspose(required.format, spec.format);
+        std::vector<size_t> identity(transpose.size());
+        std::iota(std::begin(identity), std::end(identity), 0);
+
+        if (!std::equal(transpose.begin(), transpose.end(), identity.begin())) {
+            return false;
         }
-        else {
-            // Unknown amount of data: require a single token by default
-            // (this does not really make sense for now, as getRequiredMemory()
-            // is supposed to give a precise amount of data to allocate for
-            // memory management purpose...)
-            return Elts_t::TokenElts(1);
+    }
+
+    // Check dims
+    if (!required.dims.empty() && !spec.dims.empty()) {
+        if (required.dims.size() != spec.dims.size()) {
+            return false;
+        }
+
+        for (size_t dim = 0; dim < required.dims.size(); ++dim) {
+            const auto requiredDim = required.dims[dim];
+            const auto specDim = spec.dims[dim];
+
+            if (requiredDim.first != -1
+                && specDim.first != -1
+                && !(specDim.first <= requiredDim.first && specDim.second >= requiredDim.second))
+            {
+                return false;
+            }
         }
     }
 
-    // Output not set, meaning it is an optional output: do no require anything!
-    return Elts_t::NoneElts();
+    return true;
 }
 
-Aidge::Elts_t Aidge::OperatorImpl::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(),
-        "input index ({}) is out of bound ({}) for operator type {}",
-        inputIdx, mNbConsumedData.size(), mOp.type());
-    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
-}
+std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const {
+    auto op = std::static_pointer_cast<OperatorTensor>(mOp.clone());
+    auto node = std::make_shared<Node>(op);
 
-Aidge::Elts_t Aidge::OperatorImpl::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
-    AIDGE_ASSERT(static_cast<std::size_t>(outputIdx) < mNbProducedData.size(),
-        "output index ({}) is out of bound ({}) for operator type {}",
-        outputIdx, mNbProducedData.size(), mOp.type());
-    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
-}
+    // Adapt inputs
+    for (size_t i = 0; i < requiredSpecs.inputs.size(); ++i) {
+        const auto IOSpec = (i < spec.inputs.size()) ? spec.inputs[i] : spec.inputs.back();
+        const ImplSpec::IOSpec& requiredIOSpec = requiredSpecs.inputs[i];
+        std::shared_ptr<Node> parent = node;
+
+        // Input type
+        if (requiredIOSpec.type != DataType::Any
+            && IOSpec.type != DataType::Any
+            && requiredIOSpec.type != IOSpec.type)
+        {
+            const auto cast = Cast(IOSpec.type);
+            cast->addChild(parent, 0, i);
+
+            op->getInput(i)->setDataType(IOSpec.type);
+        }
+
+        // Input format
+        if (requiredIOSpec.format != DataFormat::Any
+            && IOSpec.format != DataFormat::Any
+            && requiredIOSpec.format != IOSpec.format)
+        {
+            const auto transpose = getDataFormatTranspose(requiredIOSpec.format, IOSpec.format);
+            auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
+            transposeOp->getOperator()->setDataFormat(IOSpec.format);
+            transposeOp->getOperator()->setDataType(IOSpec.type);
+            transposeOp->addChild(parent, 0, i);
+
+            op->getInput(i)->setDataFormat(IOSpec.format);
+        }
+
+        // Input dims
+        if (!requiredIOSpec.dims.empty() && !IOSpec.dims.empty()) {
+            if (requiredIOSpec.dims.size() != IOSpec.dims.size()) {
+                return nullptr;
+            }
+
+            for (size_t dim = 0; dim < requiredIOSpec.dims.size(); ++dim) {
+                const auto requiredDim = requiredIOSpec.dims[dim];
+                const auto specDim = IOSpec.dims[dim];
 
-void Aidge::OperatorImpl::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx) {
-        // each input is consumed by the minimum amount for a forward pass
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));
+                if (requiredDim.first != -1
+                    && specDim.first != -1
+                    && !(specDim.first <= requiredDim.first && specDim.second >= requiredDim.second))
+                {
+                    return nullptr;
+                }
+            }
+        }
     }
 
-    for (std::size_t outputIdx = 0; outputIdx < mNbProducedData.size(); ++outputIdx) {
-        mNbProducedData[outputIdx] += getRequiredMemory(outputIdx, {});
+    // Adapt outputs
+    for (size_t i = 0; i < requiredSpecs.outputs.size(); ++i) {
+        const auto IOSpec = (i < spec.outputs.size()) ? spec.outputs[i] : spec.outputs.back();
+        const ImplSpec::IOSpec& requiredIOSpec = requiredSpecs.outputs[i];
+        std::shared_ptr<Node> parent = node;
+
+        // Output type
+        if (requiredIOSpec.type != DataType::Any
+            && IOSpec.type != DataType::Any
+            && requiredIOSpec.type != IOSpec.type)
+        {
+            const auto cast = Cast(requiredIOSpec.type);
+            parent->addChild(cast, i, 0);
+
+            op->getOutput(i)->setDataType(IOSpec.type);
+        }
+
+        // Output format
+        if (requiredIOSpec.format != DataFormat::Any
+            && IOSpec.format != DataFormat::Any
+            && requiredIOSpec.format != IOSpec.format)
+        {
+            const auto transpose = getDataFormatTranspose(IOSpec.format, requiredIOSpec.format);
+            auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
+            transposeOp->getOperator()->setDataFormat(requiredIOSpec.format);
+            transposeOp->getOperator()->setDataType(requiredIOSpec.type);
+            parent->addChild(transposeOp, i, 0);
+
+            op->getOutput(i)->setDataFormat(IOSpec.format);
+        }
+
+        // Output dims
+        if (!requiredIOSpec.dims.empty() && !IOSpec.dims.empty()) {
+            if (requiredIOSpec.dims.size() != IOSpec.dims.size()) {
+                return nullptr;
+            }
+
+            for (size_t dim = 0; dim < requiredIOSpec.dims.size(); ++dim) {
+                const auto requiredDim = requiredIOSpec.dims[dim];
+                const auto specDim = IOSpec.dims[dim];
+
+                if (requiredDim.first != -1
+                    && specDim.first != -1
+                    && !(specDim.first <= requiredDim.first && specDim.second >= requiredDim.second))
+                {
+                    return nullptr;
+                }
+            }
+        }
     }
+
+    return MetaOperator(std::string("Adapted_" + op->type()).c_str(), getConnectedGraphView(node));
 }
 
-void Aidge::OperatorImpl::resetConsummerProducer(){
-    std::fill(mNbConsumedData.begin(), mNbConsumedData.end(), Elts_t::NoneElts());
-    std::fill(mNbProducedData.begin(), mNbProducedData.end(), Elts_t::NoneElts());
+std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getBestAdaptation(const ImplSpec& requiredSpecs) const {
+    const auto availableSpecs = getAvailableImplSpecs();
+    Log::debug("Adapt operator type {}: {} impl. available", mOp.type(), availableSpecs.size());
+
+    using AdaptationCost = int;
+    std::map<std::shared_ptr<Node>, AdaptationCost> adaptations;
+
+    for (const auto& availableSpec : availableSpecs) {
+        auto adaptation = getAdaptation(availableSpec, requiredSpecs);
+
+        if (adaptation) {
+            auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(adaptation->getOperator())->getMicroGraph();
+            adaptations.insert(std::make_pair(adaptation, microGraph->getNodes().size()));
+        }
+    }
+
+    Log::debug("Adapt operator type {}: found {} possible adaptations", mOp.type(), adaptations.size());
+
+    if (!adaptations.empty()) {
+        // Return best adaptation (with min. AdaptationCost)
+        const auto bestAdaptation = std::min_element(adaptations.begin(), adaptations.end(),
+            [](const auto& lhs, const auto& rhs) { return lhs.second < rhs.second; });
+        return bestAdaptation->first;
+    }
+
+    return nullptr;
 }
 
 void Aidge::OperatorImpl::forward() {
@@ -123,3 +362,11 @@ void Aidge::OperatorImpl::forward() {
 void Aidge::OperatorImpl::backward() {
     AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented yet for operator of type {}", mOp.type());
 }
+
+std::shared_ptr<Aidge::ProdConso> Aidge::OperatorImpl::getProdConso() const {
+    return std::make_shared<ProdConso>(mOp);
+}
+
+std::set<Aidge::ImplSpec> Aidge::OperatorImpl::getAvailableImplSpecs() const {
+    return std::set<ImplSpec>();
+}
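
The new `ImplSpec`/`getBestMatch()` machinery matches the spec built by `getRequiredSpec()` against the specs advertised by `getAvailableImplSpecs()`. A minimal sketch of what an advertised spec could look like in a backend override, it is hedged: the constructors and the single-`DataType` `IOSpec` initialization are taken from the code above, but whether `DynamicAttributes` and the data types/headers resolve exactly as written is an assumption.

```cpp
#include <set>

#include "aidge/backend/OperatorImpl.hpp"  // assumed to pull in ImplSpec, DataType, DynamicAttributes

// Hypothetical backend implementation advertising a single spec:
// any input type/format, Float32 output, dims left unconstrained.
std::set<Aidge::ImplSpec> getAvailableImplSpecsExample() {
    return {
        Aidge::ImplSpec(
            Aidge::ImplSpec::IOSpec{Aidge::DataType::Any},
            Aidge::ImplSpec::IOSpec{Aidge::DataType::Float32},
            Aidge::DynamicAttributes())
    };
}
```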
diff --git a/src/data/Data.cpp b/src/data/Data.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..62a883d08a401e02c86408214a061f893ffbfb4a
--- /dev/null
+++ b/src/data/Data.cpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/data/Data.hpp"
+
+Aidge::DataFormatTranspose Aidge::getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
+    // Permutation array from default format to src format
+    const auto srcDefToFormat = DataFormatTransposeDict[static_cast<int>(src)];
+    // Permutation array from default format to dst format
+    const auto dstDefToFormat = DataFormatTransposeDict[static_cast<int>(dst)];
+    // Compute permutation array from src format to default format:
+    DataFormatTranspose srcFormatToDef{};
+    for (size_t i = 0; i < srcDefToFormat.size(); ++i) {
+        if (srcDefToFormat[i] > 0) {
+            srcFormatToDef[srcDefToFormat[i] - 1] = i;
+        }
+        else {
+            srcFormatToDef[i] = i;
+        }
+    }
+
+    // Compute permutation array from src format to dst format:
+    DataFormatTranspose srcToDst{};
+    for (size_t i = 0; i < dstDefToFormat.size(); ++i) {
+        if (dstDefToFormat[srcFormatToDef[i]] > 0) {
+            srcToDst[i] = dstDefToFormat[srcFormatToDef[i]] - 1;
+        }
+        else {
+            srcToDst[i] = i;
+        }
+    }
+
+    return srcToDst;
+}
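
A hedged usage sketch of the new helper: `NCHW` and `NHWC` are assumed to be among the `DataFormat` values, and the comment follows the `Transpose` convention used by `getAdaptation()` in `OperatorImpl.cpp` above.

```cpp
#include "aidge/data/Data.hpp"

// Permutation to feed a Transpose op converting a tensor from NCHW to NHWC:
// perm[i] is the source axis that ends up at destination axis i.
const auto perm = Aidge::getDataFormatTranspose(Aidge::DataFormat::NCHW,
                                                Aidge::DataFormat::NHWC);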
diff --git a/src/data/DataProvider.cpp b/src/data/DataProvider.cpp
index fc6b842edef17c80a4ef80667fc814bf85df25a4..7f4eb71aa1f1e05c42aef8090988d0ea05aa6cb2 100644
--- a/src/data/DataProvider.cpp
+++ b/src/data/DataProvider.cpp
@@ -23,9 +23,10 @@
 #include "aidge/utils/Random.hpp"
 
 
-Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize, const bool shuffle, const bool dropLast)
+Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize, const std::string& backend, const bool shuffle, const bool dropLast)
     : mDatabase(database),
       mBatchSize(batchSize),
+      mBackend(backend),
       mShuffle(shuffle),
       mDropLast(dropLast),
       mNumberModality(database.getItem(0).size()),
@@ -63,7 +64,7 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() con
         dataBatchDims[i].insert(dataBatchDims[i].begin(), current_batch_size);
         auto batchData = std::make_shared<Tensor>();
         batchData->resize(dataBatchDims[i]);
-        batchData->setBackend("cpu");
+        batchData->setBackend(mBackend);
         batchData->setDataType(mDataTypes[i]);
         batchTensors.push_back(batchData);
     }
@@ -78,6 +79,8 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() con
 
         // Browse each modularity in the database item
         for (std::size_t j = 0; j < mNumberModality; ++j) {
+
+            dataItem[j]->setBackend(mBackend);
             auto dataSample = dataItem[j];
 
             // Assert tensor sizes
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 20bf3fb78d0f14ca1496ef92425ec4cd155f86d5..abfc91c6cdf9fd4f6eb46100074b22083514d82e 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -16,9 +16,11 @@
 
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Abs.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/operator/Div.hpp"
 #include "aidge/operator/Mul.hpp"
+#include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/Sub.hpp"
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/operator/Transpose.hpp"
@@ -106,6 +108,32 @@ Aidge::Tensor Aidge::Tensor::sqrt() const {
     return sqrt_.getOutput(0)->clone();
 }
 
+Aidge::Tensor Aidge::Tensor::abs() const {
+    AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
+    auto abs_ = Abs_Op();
+    abs_.associateInput(0, std::make_shared<Tensor>(*this));
+    abs_.setDataType(dataType());
+    abs_.setDataFormat(dataFormat());
+    abs_.setBackend(mImpl->backend());
+    abs_.forward();
+    return abs_.getOutput(0)->clone();
+}
+
+Aidge::Tensor Aidge::Tensor::mean() const {
+    AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
+    // TODO: should be the default behavior of ReduceMean_Op
+    // No need to specify the list of all axes!
+    std::vector<std::int32_t> axes(nbDims());
+    std::iota(std::begin(axes), std::end(axes), 0);
+    auto mean_ = ReduceMean_Op(axes, false, false);
+    mean_.associateInput(0, std::make_shared<Tensor>(*this));
+    mean_.setDataType(dataType());
+    mean_.setDataFormat(dataFormat());
+    mean_.setBackend(mImpl->backend());
+    mean_.forward();
+    return mean_.getOutput(0)->clone();
+}
+
 Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
     if (this == &other) {
         return *this;
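
A minimal sketch of the two new tensor helpers. Both forward a temporary operator on the tensor's own backend, so `t` must already have an implementation (e.g. the "cpu" backend plugin loaded), otherwise the `hasImpl()` assertion fires.

```cpp
#include "aidge/data/Tensor.hpp"

void tensorStats(const Aidge::Tensor& t) {
    const Aidge::Tensor absolute = t.abs();  // element-wise |t|
    const Aidge::Tensor average  = t.mean(); // mean over every axis (ReduceMean on all dims)
    (void)absolute;
    (void)average;
}
```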
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index d7a6e27fb1a739bd8b27411cf21b30bf58e2a3ad..16edfab64cfa9b37b350e052a5b23ebbc3de9d31 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -103,7 +103,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
         std::string givenName =
             (node_ptr->name().empty())
                 ? "<em>" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + "</em>"
-                : "\"" + node_ptr->name() + "\\n<sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
+                : "\"" + node_ptr->name() + "<br/><sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
 
         std::string nodeCls = "";
         if (node_ptr->type() == "Producer") {
@@ -229,9 +229,7 @@ void Aidge::GraphView::setNodesName() const {
     std::map<std::string, std::int32_t> typeIds;
     for (const auto& nodePtr: getNodes()) {
         const std::string& t = nodePtr->getOperator()->type();
-        if (typeIds.find(t) == typeIds.cend()) {
-            typeIds.emplace(t, 0);
-        }
+        typeIds.emplace(t, 0);
         const std::string nodeName = name() + std::string("_") + t + std::string("#") + std::to_string(typeIds[t]++);
         nodePtr->setName(nodeName);
     }
@@ -318,6 +316,7 @@ void Aidge::GraphView::setOrderedInputs(const std::vector<std::pair<NodePtr, IOI
 }
 
 void Aidge::GraphView::setOrderedOutputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& outputs) {
+  // Note: one can specify any node as graph output!
   size_t nbOutputs = 0;
   std::vector<std::pair<NodePtr, IOIndex_t>> ignoredOutputs(mOutputNodes);
   for (auto output : outputs) {
@@ -326,14 +325,13 @@ void Aidge::GraphView::setOrderedOutputs(const std::vector<std::pair<NodePtr, IO
     // it into account.
     if (output.first != nullptr) {
       auto it = std::find(ignoredOutputs.begin(), ignoredOutputs.end(), output);
-      AIDGE_ASSERT(it != ignoredOutputs.end(), "unknown or duplicate output: {} (of type {})", output.first->name(), output.first->type());
-      ignoredOutputs.erase(it);
+      if (it != ignoredOutputs.end()) {
+        ignoredOutputs.erase(it);
+      }
       ++nbOutputs;
     }
   }
 
-  AIDGE_ASSERT(nbOutputs <= mOutputNodes.size(), "too many specified number of outputs: {} specified vs {} available", nbOutputs, mOutputNodes.size());
-
   mOutputNodes = outputs;
   mOutputNodes.insert(mOutputNodes.end(), ignoredOutputs.begin(), ignoredOutputs.end());
 }
@@ -652,9 +650,12 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
 
 std::pair<std::vector<Aidge::NodePtr>, size_t> Aidge::GraphView::getRankedNodes() const {
   std::set<NodePtr> nodesToRank(mNodes);
-  nodesToRank.erase(mRootNode);
   std::vector<NodePtr> rankedNodes;
-  rankedNodes.push_back(mRootNode);
+
+  if (mRootNode) {
+    nodesToRank.erase(mRootNode);
+    rankedNodes.push_back(mRootNode);
+  }
 
   for (size_t curNodeIdx = 0; curNodeIdx < rankedNodes.size(); ++curNodeIdx) {
     NodePtr curNode = rankedNodes[curNodeIdx];
@@ -684,6 +685,61 @@ std::pair<std::vector<Aidge::NodePtr>, size_t> Aidge::GraphView::getRankedNodes(
   return std::make_pair(rankedNodes, orderUnicityLimit);
 }
 
+std::vector<Aidge::NodePtr> Aidge::GraphView::getOrderedNodes(bool reversed) const {
+    // We compute the order from a post-dfs walk on the reverse graph starting from
+    // ordered output nodes.
+    // Also, we walk the graph upward left to right in order
+    // to get a topological left-right order when possible.
+    // For the case where reversed is true, we walk the graph upward right to left
+    // and reverse the final order to get a post-dfs left-right order when possible.
+    std::vector<std::pair<NodePtr,std::pair<size_t, std::vector<NodePtr>>>> stack;
+    std::vector<NodePtr> reversePostDfs;
+    std::set<NodePtr> visited;
+    std::vector<NodePtr> outNodes;
+    auto reverse_if_dfs = [reversed](auto &parents) {
+        if (reversed) std::reverse(parents.begin(), parents.end());
+    };
+    for (const auto& output : mOutputNodes) {
+        outNodes.push_back(output.first);
+    }
+    reverse_if_dfs(outNodes);
+    stack.push_back(std::make_pair(nullptr, std::make_pair(0, std::move(outNodes))));
+    while (!stack.empty()) {
+        auto node = stack.back().first;
+        auto& parentIdx = stack.back().second.first;
+        auto& parents = stack.back().second.second;
+        if (parentIdx == parents.size()) {
+            stack.pop_back();
+            if (node) {
+                reversePostDfs.push_back(node);
+            }
+        } else {
+            auto backEdgeIdx = reversed ? parents.size() - 1 - parentIdx: parentIdx;
+            auto isBackEdge = node != nullptr ? node->parentIsBackEdge(backEdgeIdx): false;
+            auto parent = parents[parentIdx++];
+            if (parent != nullptr && inView(parent) &&
+                visited.find(parent) == visited.end()) {
+                if (isBackEdge) {
+                    stack[0].second.second.push_back(parent);
+                } else {
+                    visited.insert(parent);
+                    auto next_parents = parent->getParents();
+                    reverse_if_dfs(next_parents);
+                    stack.push_back(std::make_pair(parent, std::make_pair(0, std::move(next_parents))));
+                }
+            }
+        }
+    }
+
+    if (reversePostDfs.size() != mNodes.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+                             "Could not enumerate all nodes; set the graph's output nodes such that every node is reachable from an output.");
+    }
+
+    reverse_if_dfs(reversePostDfs);
+    return reversePostDfs;
+}
+
 std::map<Aidge::NodePtr, std::string> Aidge::GraphView::getRankedNodesName(const std::string& format, bool markNonUnicity) const {
   const auto rankedNodes = getRankedNodes();
   std::map<NodePtr, std::string> rankedNodesName;
@@ -1418,7 +1474,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
   }
 
   // For each node, convert old node -> new node connections
-  for (auto &oldToNewNode : oldToNewNodes) {
+  for (const auto &oldToNewNode : oldToNewNodes) {
     if (oldToNewNode.second == nullptr) {
       continue;  // deleted node
     }
@@ -1426,7 +1482,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
     // Connect parent nodes. Nodes that were removed with cloneNode() are set to nullptr
     size_t parentId = 0;
     for (auto parent : oldToNewNode.first->inputs()) {
-      if (parent.first != nullptr) {
+      if (parent.first != nullptr && inView(parent.first)) {
         while (oldToNewNodes[parent.first] == nullptr) {
           // Find next valid parent in line, going backward in the graph
           AIDGE_INTERNAL_ASSERT(parent.first->getChildren().size() == 1);
@@ -1462,7 +1518,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
     newGraph->add(oldToNewNodes[mRootNode], false);
   }
 
-  for (auto &oldToNewNode : oldToNewNodes) {
+  for (const auto &oldToNewNode : oldToNewNodes) {
     if (oldToNewNode.second == nullptr)
       continue;  // deleted node
 
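
A short sketch of the new ordered traversal added above. It assumes the graph's output nodes are set so that every node is reachable, as the runtime check in `getOrderedNodes()` requires; `fmt` is already used elsewhere in the codebase.

```cpp
#include <memory>

#include <fmt/core.h>

#include "aidge/graph/GraphView.hpp"

// Print nodes in topological (left-to-right when possible) order.
void printOrder(std::shared_ptr<Aidge::GraphView> graphView) {
    for (const auto& node : graphView->getOrderedNodes(/*reversed=*/false)) {
        fmt::print("{}\n", node->name());
    }
}
```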
diff --git a/src/graph/Matching.cpp b/src/graph/Matching.cpp
index b93ac16a9384d9b6ec8b62124136cb5085268d58..4a62019a7aa044ebcf2089d91f3ba097d85218e7 100644
--- a/src/graph/Matching.cpp
+++ b/src/graph/Matching.cpp
@@ -56,6 +56,31 @@ std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphM
     return matches;
 }
 
+Aidge::SinglePassGraphMatching::MatchingResult Aidge::SinglePassGraphMatching::matchFrom(NodePtr startNode, const std::string& query) {
+    Context ctx;
+    ctx.query = query;
+    ctx.startNode = startNode;
+    std::set<MatchingResult> matches;
+
+    while (matchSequence(ctx, matches) || matchNodeOrBlock(ctx, matches)) {
+        removeWhiteSpace(ctx.query);
+        if (!ctx.query.empty() && ctx.query[0] == ';') {
+            ctx.query.erase(0, 1);
+        }
+        else {
+            break;
+        }
+    }
+
+    removeWhiteSpace(ctx.query);
+    if (!ctx.query.empty()) {
+        Log::warn("Syntax error, unable to parse remaining query: {}", ctx.query);
+    }
+
+    AIDGE_INTERNAL_ASSERT(matches.size() <= 1);
+    return (!matches.empty()) ? *matches.begin() : MatchingResult();
+}
+
 std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphMatching::filterLonguestDisjoint(const std::set<MatchingResult>& matches) {
     // Sort matches by highest number of nodes first, thanks to the CompareMatchingResultSize function
     std::set<MatchingResult, CompareMatchingResultSize> sortedMatches(matches.begin(), matches.end());
@@ -218,8 +243,8 @@ bool Aidge::SinglePassGraphMatching::matchBlock(Context& ctx, std::set<MatchingR
     // SEQ | PAR | BLOCK | ALT | NODE
     if (!matchSequence(newCtx, newMatches)
         && !matchParallel(newCtx, newMatches)
-        && !matchBlock(newCtx, newMatches)
         && !matchAlternative(newCtx, newMatches)
+        && !matchBlock(newCtx, newMatches)
         && !matchNode(newCtx, newMatches))
     {
         Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
@@ -368,6 +393,9 @@ bool Aidge::SinglePassGraphMatching::matchAlternative(Context& ctx, std::set<Mat
         return false;
     }
     newCtx.query = altCtx.query;
+    newCtx.anchors.insert(altCtx.anchors.begin(), altCtx.anchors.end());
+    bool firstSequence = altCtx.firstSequence;
+    bool firstNode = altCtx.firstNode;
     newMatches.insert(altMatches.begin(), altMatches.end());
 
     bool found = false;
@@ -391,6 +419,11 @@ bool Aidge::SinglePassGraphMatching::matchAlternative(Context& ctx, std::set<Mat
             return false;
         }
         newCtx.query = altCtx.query;
+        newCtx.anchors.insert(altCtx.anchors.begin(), altCtx.anchors.end());
+        AIDGE_ASSERT(firstSequence == altCtx.firstSequence,
+            "Ill-formed query; inconsistency between alternatives regarding first sequence in query at: {}", ctx.query);
+        AIDGE_ASSERT(firstNode == altCtx.firstNode,
+            "Ill-formed query; inconsistency between alternatives regarding first node in query at: {}", ctx.query);
         newMatches.insert(altMatches.begin(), altMatches.end());
     }
 
@@ -399,6 +432,9 @@ bool Aidge::SinglePassGraphMatching::matchAlternative(Context& ctx, std::set<Mat
         return false;
     }
 
+    newCtx.firstSequence = firstSequence;
+    newCtx.firstNode = firstNode;
+
     --newCtx.depth;
     ctx = newCtx;
     matches = newMatches;
@@ -513,7 +549,7 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     Log::debug("{}node", std::string(2*newCtx.depth, ' '));
     auto newMatches = matches;
 
-    // (TYPE | '.')
+    // (TYPE | '.' | '$')
     removeWhiteSpace(newCtx.query);
     if (newCtx.query.empty()) {
         Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
@@ -521,10 +557,16 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     }
 
     std::string type;
+    bool unconnected = false;
     if (newCtx.query[0] == '.') {
         // '.'
         newCtx.query.erase(0, 1); // drop '.'
     }
+    else if (newCtx.query[0] == '$') {
+        // '$'
+        newCtx.query.erase(0, 1); // drop '$'
+        unconnected = true;
+    }
     else {
         // TYPE
         const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(),
@@ -542,6 +584,9 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     // ('#' ANCHOR)?
     std::string anchor = "";
     if (!newCtx.query.empty() && newCtx.query[0] == '#') {
+        AIDGE_ASSERT(!unconnected,
+            "Ill-formed query; an anchor cannot be specified for end of graph ($) in query at: {}", ctx.query);
+
         // '#'
         newCtx.query.erase(0, 1); // drop '#'
 
@@ -555,6 +600,9 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     // ('[' LAMBDA ']')?
     std::string lambda = "";
     if (!newCtx.query.empty() && newCtx.query[0] == '[') {
+        AIDGE_ASSERT(!unconnected,
+            "Ill-formed query; a lambda cannot be specified for end of graph ($) in query at: {}", ctx.query);
+
         // '['
         newCtx.query.erase(0, 1);
 
@@ -581,9 +629,72 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     }
 
     // Parsing is done, try to match the node
-    if (newCtx.firstSequence && newCtx.firstNode) {
+    if (unconnected) {
+        for (auto it = newMatches.begin(); it != newMatches.end(); ) {
+            bool found = false;
+
+            if (newCtx.lookForChild) {
+                const auto outputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
+                    ? ((newCtx.edgeLeftIdx < it->startNode->nbOutputs())
+                        ? std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>(1, std::vector<std::pair<NodePtr, IOIndex_t>>(it->startNode->output(newCtx.edgeLeftIdx)))
+                        : std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>())
+                    : it->startNode->outputs();
+
+                for (const auto& output : outputs) {
+                    for (const auto& node : output) {
+                        if (!node.first) {
+                            continue;
+                        }
+
+                        if (newCtx.edgeRightIdx == gk_IODefaultIndex || node.second == newCtx.edgeRightIdx) {
+                            if (mGraph->inView(node.first) && !it->graph->inView(node.first)) {
+                                found = true;
+                                break;
+                            }
+                        }
+                    }
+
+                    if (found) {
+                        break;
+                    }
+                }
+            }
+            else {
+                const auto inputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
+                    ? ((newCtx.edgeLeftIdx < it->startNode->nbInputs())
+                        ? std::vector<std::pair<NodePtr, IOIndex_t>>(1, it->startNode->input(newCtx.edgeLeftIdx))
+                        : std::vector<std::pair<NodePtr, IOIndex_t>>())
+                    : it->startNode->inputs();
+
+                for (const auto& input : inputs) {
+                    if (!input.first) {
+                        continue;
+                    }
+
+                    if (newCtx.edgeRightIdx == gk_IODefaultIndex || input.second == newCtx.edgeRightIdx) {
+                        if (mGraph->inView(input.first) && !it->graph->inView(input.first)) {
+                            found = true;
+                            break;
+                        }
+                    }
+                }
+            }
+
+            if (found) {
+                it = newMatches.erase(it);
+            }
+            else {
+                ++it;
+            }
+        }
+
+        Log::debug("{}node $, found: {}", std::string(2*newCtx.depth + 2, ' '), newMatches.size());
+    }
+    else if (newCtx.firstSequence && newCtx.firstNode) {
         // First node of first sequence = root node
-        for (auto node : mGraph->getNodes()) {
+        const auto nodes = (newCtx.startNode) ? std::set<NodePtr>{newCtx.startNode} : mGraph->getNodes();
+
+        for (auto node : nodes) {
             if ((type.empty() || node->type() == type)
                 && (lambda.empty() || mLambda.at(lambda)(node)))
             {
@@ -627,7 +738,9 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
 
             if (newCtx.lookForChild) {
                 const auto outputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
-                    ? std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>(1, std::vector<std::pair<NodePtr, IOIndex_t>>(it->startNode->output(newCtx.edgeLeftIdx)))
+                    ? ((newCtx.edgeLeftIdx < it->startNode->nbOutputs())
+                        ? std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>(1, std::vector<std::pair<NodePtr, IOIndex_t>>(it->startNode->output(newCtx.edgeLeftIdx)))
+                        : std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>())
                     : it->startNode->outputs();
 
                 for (const auto& output : outputs) {
@@ -636,6 +749,10 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
                     }
 
                     for (const auto& node : output) {
+                        if (!node.first) {
+                            continue;
+                        }
+
                         if ((type.empty() || node.first->type() == type)
                             && (lambda.empty() || mLambda.at(lambda)(node.first))
                             && (newCtx.edgeRightIdx == gk_IODefaultIndex || node.second == newCtx.edgeRightIdx))
@@ -664,10 +781,16 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
             }
             else {
                 const auto inputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
-                    ? std::vector<std::pair<NodePtr, IOIndex_t>>(1, it->startNode->input(newCtx.edgeLeftIdx))
+                    ? ((newCtx.edgeLeftIdx < it->startNode->nbInputs())
+                        ? std::vector<std::pair<NodePtr, IOIndex_t>>(1, it->startNode->input(newCtx.edgeLeftIdx))
+                        : std::vector<std::pair<NodePtr, IOIndex_t>>())
                     : it->startNode->inputs();
 
                 for (const auto& input : inputs) {
+                    if (!input.first) {
+                        continue;
+                    }
+
                     if ((type.empty() || input.first->type() == type)
                         && (lambda.empty() || mLambda.at(lambda)(input.first))
                         && (newCtx.edgeRightIdx == gk_IODefaultIndex || input.second == newCtx.edgeRightIdx))
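
A hedged sketch of the new matching features: `$` matches "end of graph" (the edge must not lead to another node of the view), and `matchFrom()` restricts the search to a given start node. The header path and the construction of the matcher from a `GraphView` are assumptions; only `matchFrom(NodePtr, query)` and the `$` token are confirmed by the code above.

```cpp
#include <memory>

#include "aidge/graph/Matching.hpp"  // assumed header path

// Match a Conv followed by a ReLU whose output does not lead to another node of the view,
// restricting the search so that `startNode` is the first node of the query.
Aidge::SinglePassGraphMatching::MatchingResult findConvRelu(
        std::shared_ptr<Aidge::GraphView> model, Aidge::NodePtr startNode) {
    Aidge::SinglePassGraphMatching matching(model);
    return matching.matchFrom(startNode, "Conv->ReLU->$");
}
```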
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 7fe155b5a2b9b42a1504dbb592b2326d13b99c1e..b2ceb903d51dbb880979cd2191825a6310f9e5ff 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -29,8 +29,13 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
       mIdInChildren(std::vector<std::vector<IOIndex_t>>(static_cast<std::size_t>(op->nbOutputs()),
                                                         std::vector<IOIndex_t>())),
       mIdOutParents(
-              std::vector<IOIndex_t>(static_cast<std::size_t>(op->nbInputs()), gk_IODefaultIndex)) {
+              std::vector<IOIndex_t>(static_cast<std::size_t>(op->nbInputs()), gk_IODefaultIndex))
+{
     // ctor
+    if (op) {
+        mForward.push_back([this](){ this->mOperator->forward(); return true; });
+        mBackward.push_back([this](){ this->mOperator->backward(); return true; });
+    }
 }
 
 ///////////////////////////////////////////////////////
@@ -68,13 +73,24 @@ void Aidge::Node::setName(const std::string& name) {
     mName = name;
 }
 
-std::string Aidge::Node::createUniqueName(std::string name){
-    for (auto graphView : views()){
-        if (graphView->inView(name)){
-            return createUniqueName(name.append("_"));
+std::string Aidge::Node::createUniqueName(std::string baseName)
+{
+    int index = 0;
+    bool nameAlreadyUsed = true;
+    std::string newName;
+    while (nameAlreadyUsed) {
+        std::string suffix = "_" + std::to_string(index);
+        newName = (index == 0) ? baseName : baseName + suffix;
+        nameAlreadyUsed = false;
+        for (auto graphView : views()) {
+            if (graphView->inView(newName)) {
+                nameAlreadyUsed = true;
+                break;
+            }
         }
+        index++;
     }
-    return name;
+    return newName;
 }
 
 ///////////////////////////////////////////////////////
@@ -82,13 +98,27 @@ std::string Aidge::Node::createUniqueName(std::string name){
 ///////////////////////////////////////////////////////
 
 void Aidge::Node::forward() {
-    assert((mOperator != nullptr) && "No Operator interface provided, can't run forward().\n");
-    mOperator->forward();
+    for (auto it = mForward.begin(); it != mForward.end(); ) {
+        const auto keep = (*it)();
+        if (!keep) {
+            it = mForward.erase(it);
+        }
+        else {
+            ++it;
+        }
+    }
 }
 
 void Aidge::Node::backward() {
-    assert((mOperator != nullptr) && "No Operator interface provided, can't run backward().\n");
-    mOperator->backward();
+    for (auto it = mBackward.begin(); it != mBackward.end(); ) {
+        const auto keep = (*it)();
+        if (!keep) {
+            it = mBackward.erase(it);
+        }
+        else {
+            ++it;
+        }
+    }
 }
 
 ///////////////////////////////////////////////////////
@@ -192,11 +222,11 @@ void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId)
         "Input index ({}) is out of bound ({}) for node {} (of type {})",
         inId, nbInputs(), name(), type());
     if (mIdOutParents[inId] != gk_IODefaultIndex) {
-        Log::notice("Notice: filling a Tensor already attributed");
+        Log::notice("Filling a Tensor already attributed.");
         auto originalParent = input(inId);
         // remove original parent reference to child
         // find the output ID for original Parent
-        // find first occurence of child in the output's children
+        // find first occurrence of child in the output's children
         originalParent.first->removeChild(shared_from_this(), originalParent.second);
     }
     mIdOutParents[inId] = newNodeoutId;
@@ -260,7 +290,7 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t
 
 void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) {
     if (getParent(inId) != nullptr) {
-        Log::notice("Notice: you are replacing an existing parent for node {} (of type {})", name(), type());
+        Log::notice("You are replacing an existing parent for node {} (of type {}).", name(), type());
     }
     AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(),
         "Input index ({}) is out of bound ({}) for node {} (of type {})",
@@ -411,6 +441,9 @@ std::set<Aidge::NodePtr> Aidge::Node::getNodeDelta(int delta, std::set<Aidge::No
     return out;
 }
 
+
+Aidge::Node::~Node() = default;
+
 // namespace Aidge {
 // std::ostream& operator << (std::ostream& os, Aidge::Node& n) {
 //     using namespace std;
diff --git a/src/graph/OpArgs.cpp b/src/graph/OpArgs.cpp
index cffd14c35a0fe11055198236eba6e344c0ff782c..6fe2320ea0ed6a71b6c4fad6a3fab4e1b6472abf 100644
--- a/src/graph/OpArgs.cpp
+++ b/src/graph/OpArgs.cpp
@@ -9,16 +9,20 @@
  *
  ********************************************************************************/
 
-#include "aidge/graph/Node.hpp"
-#include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
 
+#include <memory>
+#include <string>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+
 Aidge::OpArgs::OpArgs(const OpArgs&) = default;
 Aidge::OpArgs& Aidge::OpArgs::operator=(const OpArgs&) = default;
 Aidge::OpArgs::~OpArgs() noexcept = default;
 
-std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs) {
-    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
+std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs, std::string name) {
+    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>(name);
     for (const OpArgs& elt : inputs) {
         if(elt.node() != nullptr) {
             // Connect the first output (ordered) of each output node (ordered)
@@ -61,8 +65,8 @@ std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs)
 }
 
 
-std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::vector<OpArgs> inputs) {
-    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
+std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::vector<OpArgs> inputs, std::string name) {
+    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>(name);
     for(const OpArgs& elt : inputs) {
         if (elt.node()!=nullptr)
             gv->add(elt.node());
@@ -73,8 +77,8 @@ std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::vector<OpArgs> inputs) {
 }
 
 
-std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::vector<OpArgs> inputs) {
-    std::shared_ptr<GraphView> gv = Sequential(inputs);
+std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::vector<OpArgs> inputs, std::string name) {
+    std::shared_ptr<GraphView> gv = Sequential(inputs, name);
     AIDGE_ASSERT(gv->outputNodes().size() == 1U,
         "Residual(): Zero or more than one output Node for the GraphView, don't know which one to choose from for the residual connection");
     std::shared_ptr<Node> lastNode = *gv->outputNodes().begin();
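
Sequential(), Parallel() and Residual() now forward an optional name to the GraphView they build. A hedged usage sketch, reusing factory signatures that appear elsewhere in this patch and assuming, as in existing Sequential calls, that OpArgs implicitly wraps a Node (include paths follow the aidge/operator/<Op>.hpp pattern used below):

```cpp
#include <memory>

#include "aidge/graph/OpArgs.hpp"
#include "aidge/operator/FC.hpp"
#include "aidge/operator/ReLU.hpp"

std::shared_ptr<Aidge::GraphView> buildNamedModel() {
    // The trailing argument names the resulting GraphView instead of leaving it anonymous.
    return Aidge::Sequential({Aidge::FC(32, 64, false, "fc1"), Aidge::ReLU("relu1")}, "my_model");
}
```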
diff --git a/src/operator/Abs.cpp b/src/operator/Abs.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1dd7836ad220d031d60356a5663db84adaa486ec
--- /dev/null
+++ b/src/operator/Abs.cpp
@@ -0,0 +1,29 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Abs.hpp"
+
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Abs_Op::Type = "Abs";
+
+void Aidge::Abs_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Abs_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Abs_Op::getAvailableBackends() const {
+    return Registrar<Abs_Op>::getKeys();
+}
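
This getAvailableBackends() accessor is added to every operator below with the same one-line body: it returns the backend names under which an implementation was registered for that operator type. A hedged usage sketch from the caller's side (the ReLU factory and the two-argument setBackend are the signatures shown later in this patch; the backend name "cpu" is only an example):

```cpp
#include <memory>

#include "aidge/graph/Node.hpp"
#include "aidge/operator/ReLU.hpp"

void pickCpuIfAvailable() {
    auto relu = Aidge::ReLU("relu0");
    auto op = relu->getOperator();
    // Only select "cpu" if a cpu implementation was registered for ReLU_Op.
    if (op->getAvailableBackends().count("cpu") > 0) {
        op->setBackend("cpu", 0);
    }
}
```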
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index f9dc3335a3b62e87edf33d25c5a516a63c4129a0..033c476c8a9e865fdf9d5670e295c3e4fb6101b3 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -85,6 +85,10 @@ void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Add_Op::getAvailableBackends() const {
+    return Registrar<Add_Op>::getKeys();
+}
+
 std::shared_ptr<Aidge::Node> Aidge::Add(const IOIndex_t nbIn, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
 }
\ No newline at end of file
diff --git a/src/operator/And.cpp b/src/operator/And.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..aebd5a71725f0999635f3844d8b2589bfb885138
--- /dev/null
+++ b/src/operator/And.cpp
@@ -0,0 +1,62 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>    // std::size_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/And.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::And_Op::Type = "And";
+
+bool Aidge::And_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for And Operation: {} for input#0 vs {} for input#1",
+                    inputsDims0, inputsDims1);
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
+        return true;
+    }
+
+    return false;
+}
+
+void Aidge::And_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(And_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::And_Op::getAvailableBackends() const {
+    return Registrar<And_Op>::getKeys();
+}
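
And_Op::forwardDims() (and the identical loop in BitShift below) broadcasts the two input shapes from their trailing dimensions: a 1 in the higher-rank shape is widened to the other input's size, and any other mismatch is rejected. A self-contained sketch of the same rule:

```cpp
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Broadcast two shapes the way And_Op::forwardDims() does, aligning from the last axis.
std::vector<std::size_t> broadcast(std::vector<std::size_t> a, std::vector<std::size_t> b) {
    std::vector<std::size_t> out = (a.size() >= b.size()) ? a : b;
    const std::vector<std::size_t>& low = (a.size() < b.size()) ? a : b;
    std::size_t out_id = out.size() - 1;
    std::size_t low_id = low.size() - 1;
    for (std::size_t i = 0; i < low.size(); ++i, --out_id, --low_id) {
        if (out[out_id] == 1) {
            out[out_id] = low[low_id];               // widen the broadcast dimension
        } else if (low[low_id] != 1 && low[low_id] != out[out_id]) {
            throw std::runtime_error("incompatible shapes");
        }
    }
    return out;
}

int main() {
    for (auto d : broadcast({2, 1, 4}, {3, 1})) std::cout << d << ' ';  // prints: 2 3 4
}
```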
diff --git a/src/operator/ArgMax.cpp b/src/operator/ArgMax.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4808b730d2261ba0c1ea6d0d09871b1f322fc8fb
--- /dev/null
+++ b/src/operator/ArgMax.cpp
@@ -0,0 +1,57 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ArgMax.hpp"
+
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ArgMax_Op::Type = "ArgMax";
+
+bool Aidge::ArgMax_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        // make Axis attribute positive
+        std::int32_t axis = mAttributes->template getAttr<ArgMaxAttr::Axis>();
+        axis = axis >= 0 ? axis: axis+static_cast<std::int32_t>(getInput(0)->nbDims());
+
+        // build output dimensions
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        if (mAttributes->template getAttr<ArgMaxAttr::KeepDims>()) {
+            outDims[axis] = 1;
+        }
+        else {
+            outDims.erase(outDims.begin() + static_cast<std::size_t>(axis));
+        }
+
+        // TODO: change {1} for {} when scalar Tensors are better handled.
+        mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
+        return true;
+    }
+    return false;
+}
+
+void Aidge::ArgMax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(ArgMax_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::ArgMax_Op::getAvailableBackends() const {
+    return Registrar<ArgMax_Op>::getKeys();
+}
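
ArgMax_Op::forwardDims() first folds a negative axis into the positive range, then either keeps that axis with size 1 (KeepDims) or erases it, reporting scalar results as {1} for now. The same shape logic in a standalone sketch:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<std::size_t> argmaxOutputDims(std::vector<std::size_t> dims, std::int32_t axis, bool keepDims) {
    // Negative axes count from the end, exactly as in ArgMax_Op::forwardDims().
    if (axis < 0) axis += static_cast<std::int32_t>(dims.size());
    if (keepDims) dims[axis] = 1;
    else dims.erase(dims.begin() + axis);
    return dims.empty() ? std::vector<std::size_t>{1} : dims;  // scalar outputs reported as {1} for now
}

int main() {
    for (auto d : argmaxOutputDims({2, 3, 5}, -1, false)) std::cout << d << ' ';  // prints: 2 3
}
```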
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 296ae789197f88c655c0097d94b370ef91f0189f..f8c8e5e3f32fff8306184dfdf3baa87392479ebf 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -113,6 +113,11 @@ void Aidge::AvgPooling_Op<DIM>::setBackend(const std::string &name, Aidge::Devic
     mOutputs[0]->setBackend(name, device);
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::AvgPooling_Op<DIM>::getAvailableBackends() const {
+    return Registrar<AvgPooling_Op<DIM>>::getKeys();
+}
+
 template class Aidge::AvgPooling_Op<1>;
 template class Aidge::AvgPooling_Op<2>;
 template class Aidge::AvgPooling_Op<3>;
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index a81cfc132773134889a5164762091229759b4f38..bcf3b29c45abe2c40788fd1ec0bad87db8ee227b 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -95,6 +95,11 @@ void Aidge::BatchNorm_Op<DIM>::setBackend(const std::string &name, Aidge::Device
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::BatchNorm_Op<DIM>::getAvailableBackends() const {
+    return Registrar<BatchNorm_Op<DIM>>::getKeys();
+}
+
 template class Aidge::BatchNorm_Op<2>;
 template class Aidge::BatchNorm_Op<3>;
 template class Aidge::BatchNorm_Op<4>;
diff --git a/src/operator/BitShift.cpp b/src/operator/BitShift.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7595590f7811f08eb2b790a259cff6a8ee72ffbf
--- /dev/null
+++ b/src/operator/BitShift.cpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>    // std::size_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/operator/BitShift.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::BitShift_Op::Type = "BitShift";
+
+bool Aidge::BitShift_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (!inputsAssociated()) {
+        return false;
+    }
+
+    const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+    const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+    std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+    const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+    std::size_t out_id = outDims.size() - 1;
+    std::size_t low_id = lowDims.size() - 1;
+    std::size_t i = 0;
+
+    while (i++ < lowDims.size()) {
+        if (outDims[out_id] == 1) {
+            outDims[out_id] = lowDims[low_id];
+        }
+        else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for BitShift Operation: {} for input#0 vs {} for input#1",
+                inputsDims0, inputsDims1);
+        }
+        --out_id;
+        --low_id;
+    }
+    mOutputs[0]->resize(outDims);
+    return true;
+}
+
+
+void Aidge::BitShift_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(BitShift_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::BitShift_Op::getAvailableBackends() const {
+    return Registrar<BitShift_Op>::getKeys();
+}
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index b6164a77cb47e0b9127fa4b02255ed0991805fe7..54eef17b67b320ef244881cee44ed8cabaa9bf47 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -47,6 +47,10 @@ void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Cast_Op::getAvailableBackends() const {
+    return Registrar<Cast_Op>::getKeys();
+}
+
 std::shared_ptr<Aidge::Node> Aidge::Cast(const Aidge::DataType targetType, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
 }
\ No newline at end of file
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index c78afa8665322a9cbca42a3326d527c1ebd949d4..55efdd51d56f7db4f64880b967def661e5354af5 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -134,6 +134,10 @@ void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Concat_Op::getAvailableBackends() const {
+    return Registrar<Concat_Op>::getKeys();
+}
+
 /////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Concat(const Aidge::IOIndex_t nbIn, const std::int32_t axis, const std::string& name) {
diff --git a/src/operator/ConstantOfShape.cpp b/src/operator/ConstantOfShape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7fe9dc1309080f844961a8e8a28c4a05964ae741
--- /dev/null
+++ b/src/operator/ConstantOfShape.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ConstantOfShape.hpp"
+
+#include <cstdint>
+#include <fmt/format.h>
+#include <memory>
+#include <stdexcept> // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/half.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+const std::string ConstantOfShape_Op::Type = "ConstantOfShape";
+
+bool ConstantOfShape_Op::forwardDims(bool allowDataDependency) {
+  if (!inputsAssociated()) {
+    return false;
+  }
+
+  if (!allowDataDependency) {
+    Log::warn("{} : unable to forwardDims() because output dims are data "
+              "dependent on input#0",
+              type());
+    return false;
+  }
+
+  AIDGE_ASSERT(getInput(0)->nbDims() == 1,
+               "{} : Input tensor should have only 1 dimension. {} dimensions "
+               "received : {}",
+               __func__, getInput(0)->nbDims(), getInput(0)->dims());
+  AIDGE_ASSERT(getInput(0)->dataType() == DataType::Int64,
+               "{} : Input tensor data type should be Int64, received : {}",
+               __func__, getInput(0)->dataType());
+  std::vector<DimSize_t> output_dims;
+  output_dims.reserve(getInput(0)->size());
+  for (std::size_t i = 0; i < getInput(0)->size(); ++i) {
+    auto temp = getInput(0)->template get<std::int64_t>(i);
+    output_dims.push_back(temp);
+  }
+  mOutputs[0]->resize(output_dims);
+  return true;
+}
+
+void ConstantOfShape_Op::setBackend(const std::string &name,
+                                       Aidge::DeviceIdx_t device) {
+  SET_IMPL_MACRO(ConstantOfShape_Op, *this, name);
+  mOutputs[0]->setBackend(name, device);
+  value().setBackend(name,device);
+}
+
+std::set<std::string> Aidge::ConstantOfShape_Op::getAvailableBackends() const {
+  return Registrar<ConstantOfShape_Op>::getKeys();
+}
+
+} // namespace Aidge
+
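ConstantOfShape is the only operator in this patch whose output shape comes from the values of its input tensor, which is why forwardDims() refuses to run unless allowDataDependency is true. A small standalone illustration of that mapping:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    // The single input of ConstantOfShape is a 1-D int64 tensor holding the requested output shape.
    std::vector<std::int64_t> shapeInput = {2, 3, 4};

    // forwardDims() copies those values into the output dimensions; this is data-dependent,
    // so it can only happen once the input values are known.
    std::vector<std::size_t> outputDims(shapeInput.begin(), shapeInput.end());

    std::cout << "output rank " << outputDims.size() << ", "
              << outputDims[0] * outputDims[1] * outputDims[2] << " elements\n";  // 24 elements
}
```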
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 92f4ec593a1dcb26a5a16ffb527667e39502e547..e055c7e5ebb9a6cff9f774da444cc582ed7de34c 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -43,16 +43,17 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // first check weight since it defines inChannels and outChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
-                    "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
+                    "Wrong weight Tensor dimension: {} for Conv{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
         // check data
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
                     (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
-                    "Wrong input size for Conv operator.");
+                    "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), inChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
         // check optional bias
         if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == outChannels()),
-                    "Wrong bias size for Conv operator.");
+                    "Wrong bias size ({}) for Conv operator. Expected dims are [{}].", getInput(2)->dims(), outChannels());
+
         std::array<DimSize_t, DIM + 2> outputDims{};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
@@ -157,6 +158,11 @@ void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::Conv_Op<DIM>::getAvailableBackends() const {
+    return Registrar<Conv_Op<DIM>>::getKeys();
+}
+
 template class Aidge::Conv_Op<1>;
 template class Aidge::Conv_Op<2>;
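
The enriched Conv and ConvDepthWise messages build their expected-shape hint with fmt::join over DIM placeholder "x" strings, so a 2-D convolution with 64 input channels reports "[x, 64, x, x]". A minimal fmt sketch (fmt is already a dependency of this project):

```cpp
#include <cstddef>
#include <string>
#include <vector>

#include <fmt/format.h>
#include <fmt/ranges.h>  // fmt::join

int main() {
    constexpr std::size_t DIM = 2;
    const std::size_t inChannels = 64;
    // Same trick as the new Conv assert: one "x" placeholder per spatial dimension.
    fmt::print("Expected dims are [x, {}, {}].\n",
               inChannels, fmt::join(std::vector<std::string>(DIM, "x"), ", "));
    // Prints: Expected dims are [x, 64, x, x].
}
```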
 
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index 9e95e78ea6867c41a332916b352f091ad528894a..f4d524356bd207a7ed101c2887c2fcda53f3bb83 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -44,16 +44,17 @@ bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // first check weight since it defines nbChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
-                    "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
+                    "Wrong weight Tensor dimension: {} for ConvDepthWise{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
         // check data
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
                     (getInput(0)->template dims<DIM+2>()[1] == nbChannels()),
-                    "Wrong input size for Conv operator.");
+                    "Wrong input size ({}) for ConvDepthWise operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), nbChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
         // check optional bias
         if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == nbChannels()),
-                    "Wrong bias size for Conv operator.");
+                    "Wrong bias size ({}) for ConvDepthWise operator. Expected dims are [{}].", getInput(2)->dims(), nbChannels());
+
         std::array<DimSize_t, DIM + 2> outputDims = {};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
@@ -156,6 +157,11 @@ void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::De
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::ConvDepthWise_Op<DIM>::getAvailableBackends() const {
+    return Registrar<ConvDepthWise_Op<DIM>>::getKeys();
+}
+
 template class Aidge::ConvDepthWise_Op<1>;
 template class Aidge::ConvDepthWise_Op<2>;
 
diff --git a/src/operator/DepthToSpace.cpp b/src/operator/DepthToSpace.cpp
index 0c858548ec484c34a651efa4adec1cde7ccb9e54..6b8d05625b99aec05be4f531460a5d25c120a5e0 100644
--- a/src/operator/DepthToSpace.cpp
+++ b/src/operator/DepthToSpace.cpp
@@ -113,6 +113,10 @@ void Aidge::DepthToSpace_Op::setBackend(const std::string& name, Aidge::DeviceId
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::DepthToSpace_Op::getAvailableBackends() const {
+    return Registrar<DepthToSpace_Op>::getKeys();
+}
+
 //////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::DepthToSpace(const std::uint32_t blockSize,
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 2140b17a3abee329effaae63fada187fc522495f..96eea3df966b273445be8a6e9d9a5acf2d6fafb2 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -57,6 +57,10 @@ void Aidge::Div_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Div_Op::getAvailableBackends() const {
+    return Registrar<Div_Op>::getKeys();
+}
+
 ///////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Div(const std::string& name) {
diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp
index ed1f79f79a3011f72da1a1804d84960595f880c0..bd5f76f8aa7c0889311e4f922fec8d20168e24b5 100644
--- a/src/operator/Erf.cpp
+++ b/src/operator/Erf.cpp
@@ -38,6 +38,10 @@ void Aidge::Erf_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Erf_Op::getAvailableBackends() const {
+    return Registrar<Erf_Op>::getKeys();
+}
+
 /////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Erf(const std::string& name) {
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 577a1842d76d3f58763ccd598205935e2c6d6eb4..dd3ed7aba65cf1875d691d9bc2c8c94bb03856c7 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -91,6 +91,10 @@ void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device
     }
 }
 
+std::set<std::string> Aidge::FC_Op::getAvailableBackends() const {
+    return Registrar<FC_Op>::getKeys();
+}
+
 std::shared_ptr<Aidge::Node> Aidge::FC(const Aidge::DimSize_t inChannels,
                                        const Aidge::DimSize_t outChannels,
                                        bool noBias,
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index 1a2ec88bbfb2bfed134e779619a0a3f0604ce155..99ccb7505cd959178e4bd7132e32552ea5a72ecf 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -82,6 +82,11 @@ void Aidge::Fold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::Fold_Op<DIM>::getAvailableBackends() const {
+    return Registrar<Fold_Op<DIM>>::getKeys();
+}
+
 template class Aidge::Fold_Op<2>;
 
 ///////////////////////////////////////
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 00d471f6dc3e1417e4b343002b12a26260030d30..0ebc3e3bc81b15d9414d01f12a2768be6a7ddc42 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -142,6 +142,10 @@ void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Gather_Op::getAvailableBackends() const {
+    return Registrar<Gather_Op>::getKeys();
+}
+
 /////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Gather(std::int8_t axis,
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index e7b2bdffb979fe377de5c7bd1e86147874e7d043..bbcfd0d28ca039318647d206af876727793e1bfc 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -57,6 +57,10 @@ void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::GlobalAveragePooling_Op::getAvailableBackends() const {
+    return Registrar<GlobalAveragePooling_Op>::getKeys();
+}
+
 ////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::GlobalAveragePooling(const std::string &name) {
diff --git a/src/operator/GridSample.cpp b/src/operator/GridSample.cpp
index fa1efc75a4c0a85717343ce4fcdea1a8adcfb4e7..d26679f8337390879c8f4c4d10deb883fb40e6da 100644
--- a/src/operator/GridSample.cpp
+++ b/src/operator/GridSample.cpp
@@ -95,6 +95,10 @@ void Aidge::GridSample_Op::setBackend(const std::string &name, Aidge::DeviceIdx_
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::GridSample_Op::getAvailableBackends() const {
+    return Registrar<GridSample_Op>::getKeys();
+}
+
 
 ////////////////////////////////////////////////
 
diff --git a/src/operator/ILayerNorm.cpp b/src/operator/ILayerNorm.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..daa7ecf86b7ea9a9b10b962d356581f926e92eed
--- /dev/null
+++ b/src/operator/ILayerNorm.cpp
@@ -0,0 +1,56 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 10.09.2024
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ILayerNorm.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ILayerNorm_Op::Type = "ILayerNorm";
+
+void Aidge::ILayerNorm_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
+    AIDGE_ASSERT(inputIdx < 3, "Operator {} supports only {} inputs", type(), nbInputs());
+    AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
+    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    if (inputIdx == 0 && getInput(0)->nbDims() == 1)
+        mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
+}
+
+bool Aidge::ILayerNorm_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        const DimSize_t nbFeatures =  getInput(0)->dims()[1];
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            if(inputCategory(i) == InputCategory::Param && getInput(i)->size() != nbFeatures) {
+                getInput(i)->resize({getInput(0)->dims()[1]});
+            }
+        }
+        mOutputs[0]->resize(getInput(0)->dims());
+        return true;
+    }
+    return false;
+}
+
+
+void Aidge::ILayerNorm_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(ILayerNorm_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+    getInput(1)->setBackend(name, device);
+    getInput(2)->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::ILayerNorm_Op::getAvailableBackends() const {
+    return Registrar<ILayerNorm_Op>::getKeys();
+}
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index 2f60eb2fd9c5d43c60ae7ee3af49c3b2e407a1fe..f0b8720bc1e22d8d6308460eabe436db8a4c9f6d 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -13,35 +13,37 @@
 
 #include "aidge/operator/Identity.hpp"
 
+void Aidge::Identity_OpImpl::forward() {
+    const Identity_Op& op = dynamic_cast<const Identity_Op&>(mOp);
+    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+}
+
+//////////////////////////////////////////////////
+
 const std::string Aidge::Identity_Op::Type = "Identity";
 
 Aidge::Identity_Op::Identity_Op()
     : OperatorTensor(Type, {InputCategory::Data}, 1)
 {
-    mImpl = std::make_shared<OperatorImpl>(*this);
+    mImpl = std::make_shared<Identity_OpImpl>(*this);
 }
 
 Aidge::Identity_Op::Identity_Op(const Aidge::Identity_Op& op)
     : OperatorTensor(op)
 {
-    mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
+    mImpl = std::make_shared<Identity_OpImpl>(*this, op.backend());
 }
 
 std::shared_ptr<Aidge::Operator> Aidge::Identity_Op::clone() const {
     return std::make_shared<Identity_Op>(*this);
 }
 
-bool Aidge::Identity_Op::dimsForwarded() const {
-    const auto& input0 = getInput(0);
-    return input0 ? (input0->undefined() ? false :
-                            input0->dims() == getOutput(0)->dims()) :
-                                false;
+void Aidge::Identity_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    mOutputs[0]->setBackend(name, device);
 }
 
-void Aidge::Identity_Op::forward() {
-    // Perform a shallow copy
-    *(mOutputs[0]) = *(mInputs[0]);
-    runHooks();
+std::set<std::string> Aidge::Identity_Op::getAvailableBackends() const {
+    return Registrar<Identity_Op>::getKeys();
 }
 
 std::shared_ptr<Aidge::Node> Aidge::Identity(const std::string& name) {
diff --git a/src/operator/LeakyReLU.cpp b/src/operator/LeakyReLU.cpp
index 9def23758d5f779f14dec2ee19199fe0f48c4980..dea73f3101887c5213a02b029d344a34f74ba4af 100644
--- a/src/operator/LeakyReLU.cpp
+++ b/src/operator/LeakyReLU.cpp
@@ -38,6 +38,10 @@ void Aidge::LeakyReLU_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::LeakyReLU_Op::getAvailableBackends() const {
+    return Registrar<LeakyReLU_Op>::getKeys();
+}
+
 /////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::LeakyReLU(float negativeSlope, const std::string& name) {
diff --git a/src/operator/Ln.cpp b/src/operator/Ln.cpp
index 31012cbb1eec22f8dc02497f9e46b88ec713eabe..90ae8d8c7dac464665828248c923a1f278dad79b 100755
--- a/src/operator/Ln.cpp
+++ b/src/operator/Ln.cpp
@@ -38,6 +38,10 @@ void Aidge::Ln_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Ln_Op::getAvailableBackends() const {
+    return Registrar<Ln_Op>::getKeys();
+}
+
 /////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Ln(const std::string& name) {
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index c95fe544cbd29f715e8bd7caae58193deaac6331..668ffd04b7acb0e72b4a3313805fa89ca3466f32 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -97,6 +97,10 @@ void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::MatMul_Op::getAvailableBackends() const {
+    return Registrar<MatMul_Op>::getKeys();
+}
+
 ////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::MatMul(const std::string& name) {
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
index 85f2dd930f2d35b9d9e9ea597b588637a56cb952..5ce137fe6b6c0e4b7150bfc0f1182f6f8ee94850 100644
--- a/src/operator/MaxPooling.cpp
+++ b/src/operator/MaxPooling.cpp
@@ -83,6 +83,11 @@ void Aidge::MaxPooling_Op<DIM>::setBackend(const std::string &name, Aidge::Devic
     mOutputs[0]->setBackend(name, device);
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::MaxPooling_Op<DIM>::getAvailableBackends() const {
+    return Registrar<MaxPooling_Op<DIM>>::getKeys();
+}
+
 template class Aidge::MaxPooling_Op<1>;
 template class Aidge::MaxPooling_Op<2>;
 template class Aidge::MaxPooling_Op<3>;
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index f713fdaad793aebebf5047d4ebf1dfd5aca10cd1..61239071a99a9dfca8613ef78eba17757c4276b7 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -20,7 +20,7 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::Memorize_OpImpl::getNbRequiredData(
+Aidge::Elts_t Aidge::Memorize_ProdConso::getNbRequiredData(
     Aidge::IOIndex_t inputIdx) const
 {
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
@@ -35,11 +35,11 @@ Aidge::Elts_t Aidge::Memorize_OpImpl::getNbRequiredData(
         return Elts_t::NoneElts();
     }
     else {
-        return OperatorImpl::getNbRequiredData(inputIdx);
+        return ProdConso::getNbRequiredData(inputIdx);
     }
 }
 
-Aidge::Elts_t Aidge::Memorize_OpImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+Aidge::Elts_t Aidge::Memorize_ProdConso::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                          const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
     assert(mOp.getRawOutput(outputIdx) && "requires valid output");
 
@@ -53,8 +53,8 @@ Aidge::Elts_t Aidge::Memorize_OpImpl::getRequiredMemory(const Aidge::IOIndex_t o
     }
 }
 
-void Aidge::Memorize_OpImpl::updateConsummerProducer() {
-    OperatorImpl::updateConsummerProducer();
+void Aidge::Memorize_ProdConso::updateConsummerProducer() {
+    ProdConso::updateConsummerProducer();
 
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
     AIDGE_ASSERT(op.endStep() == 0 || op.scheduleStep() <= op.endStep(), "cannot update consumer producer anymore, number of cycles exceeded");
@@ -82,6 +82,8 @@ Aidge::Memorize_Op::Memorize_Op(const std::uint32_t endStep)
                     attr<MemorizeAttr::ForwardStep>(0),
                     attr<MemorizeAttr::EndStep>(endStep)))
 {
+    // The input idx 0 is a back edge for Memorize where inputs are (back, init)
+    setBackEdges({0});
     mOutputs[1] = mOutputs[0];
 }
 
@@ -153,8 +155,12 @@ void Aidge::Memorize_Op::forward() {
     mAttributes->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
 }
 
+std::set<std::string> Aidge::Memorize_Op::getAvailableBackends() const {
+    return Registrar<Memorize_Op>::getKeys();
+}
+
 /////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Memorize(const std::uint32_t endStep, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 71e3a4781569820267b7d623da8d73134692c05d..e3acba9b4cccdf525d80f85344ba500cc7ac885f 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -18,12 +18,19 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 
 Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph)
     : OperatorTensor(type, [graph]() {
         std::vector<InputCategory> inputsCategory;
         for (const auto& in : graph->getOrderedInputs()) {
-            inputsCategory.push_back(in.first->getOperator()->inputCategory(in.second));
+            if (in.first) {
+                inputsCategory.push_back(in.first->getOperator()->inputCategory(in.second));
+            }
+            else {
+                // Dummy input, default to OptionalData
+                inputsCategory.push_back(InputCategory::OptionalData);
+            }
         }
         return inputsCategory;
     }(), graph->getOrderedOutputs().size()),
@@ -63,6 +70,12 @@ void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, const std
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
 }
 
+std::string Aidge::MetaOperator_Op::backend() const noexcept {
+    return (mImpl)
+        ? mImpl->backend()
+        : mGraph->rootNode()->getOperator()->backend();
+}
+
 void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
     if (Registrar<MetaOperator_Op>::exists({name, type()})) {
         // A custom implementation exists for this meta operator
@@ -75,9 +88,36 @@ void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceId
     mGraph->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::MetaOperator_Op::getAvailableBackends() const {
+    std::set<std::string> backendsList;
+    for (const auto& tupleKey : Registrar<MetaOperator_Op>::getKeys()) {
+        if (std::get<1>(tupleKey) == type()) {
+            backendsList.insert(std::get<0>(tupleKey));
+        }
+    }
+    return backendsList;
+}
+
+std::shared_ptr<Aidge::Attributes> Aidge::MetaOperator_Op::attributes() const {
+    auto attrs = std::make_shared<DynamicAttributes>();
+
+    for (const auto& node : mGraph->getRankedNodesName("{3}")) {
+        const auto attributes = node.first->getOperator()->attributes();
+        if (attributes) {
+            const auto nodeAttrs = DynamicAttributes(attributes->getAttrs());
+            attrs->addAttr(node.first->type() + "#" + node.second, nodeAttrs);
+            if (node.second == "0") {
+                attrs->addAttr(node.first->type(), nodeAttrs);
+            }
+        }
+    }
+
+    return attrs;
+}
+
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
     if (mImpl) {
-        return mImpl->getNbRequiredData(inputIdx);
+        return mImpl->prodConso()->getNbRequiredData(inputIdx);
     }
     else {
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
@@ -92,7 +132,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx
 
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredProtected(const IOIndex_t inputIdx) const {
     if (mImpl) {
-        return mImpl->getNbRequiredProtected(inputIdx);
+        return mImpl->prodConso()->getNbRequiredProtected(inputIdx);
     }
     else {
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
@@ -107,7 +147,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredProtected(const IOIndex_t inp
 
 Aidge::Elts_t Aidge::MetaOperator_Op::getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const {
     if (mImpl) {
-        return mImpl->getRequiredMemory(outputIdx, inputsSize);
+        return mImpl->prodConso()->getRequiredMemory(outputIdx, inputsSize);
     }
     else {
         const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
@@ -122,7 +162,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getRequiredMemory(const IOIndex_t outputId
 
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbConsumedData(IOIndex_t inputIdx) const {
     if (mImpl) {
-        return mImpl->getNbConsumedData(inputIdx);
+        return mImpl->prodConso()->getNbConsumedData(inputIdx);
     }
     else {
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
@@ -137,7 +177,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbConsumedData(IOIndex_t inputIdx) cons
 
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbProducedData(IOIndex_t outputIdx) const {
     if (mImpl) {
-        return mImpl->getNbProducedData(outputIdx);
+        return mImpl->prodConso()->getNbProducedData(outputIdx);
     }
     else {
         const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
@@ -152,7 +192,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbProducedData(IOIndex_t outputIdx) con
 
 void Aidge::MetaOperator_Op::resetConsummerProducer() {
     if (mImpl) {
-        mImpl->resetConsummerProducer();
+        mImpl->prodConso()->resetConsummerProducer();
     }
     else {
         if (!mScheduler) {
@@ -166,7 +206,7 @@ void Aidge::MetaOperator_Op::resetConsummerProducer() {
 
 void Aidge::MetaOperator_Op::updateConsummerProducer() {
     if (mImpl) {
-        mImpl->updateConsummerProducer();
+        mImpl->prodConso()->updateConsummerProducer();
     }
     else {
         if (!mScheduler) {
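
The new MetaOperator_Op::attributes() flattens the attributes of every inner node into one DynamicAttributes object keyed by "<node type>#<rank>", with a bare "<node type>" alias kept for the rank-0 node. A standalone sketch of that key scheme (a std::map stands in for DynamicAttributes; the node type and rank are hypothetical):

```cpp
#include <iostream>
#include <map>
#include <string>

int main() {
    std::map<std::string, std::string> flat;   // stand-in for the aggregated DynamicAttributes
    const std::string type = "Conv";
    const std::string rank = "0";

    flat[type + "#" + rank] = "attributes of the rank-0 Conv";
    if (rank == "0") {
        flat[type] = flat[type + "#" + rank];  // extra alias only for the first-ranked node
    }

    for (const auto& kv : flat) std::cout << kv.first << '\n';  // prints: Conv, Conv#0
}
```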
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index 4190c10a06458036f2cd8953156b969afa51bebf..adabcd0d359927693965cec1987d2fad083328b9 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -50,6 +50,15 @@ void Aidge::Move_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Move_Op::getAvailableBackends() const {
+    std::set<std::string> backendsList;
+    for (const auto& tupleKey : Registrar<Move_Op>::getKeys()) {
+        backendsList.insert(std::get<0>(tupleKey));
+        backendsList.insert(std::get<1>(tupleKey));
+    }
+    return backendsList;
+}
+
 ////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Move(const std::string& name) {
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index e2e32805f6fde7ab6831fe4756ca60ad42c3925a..3f163c9d6a572cc488c621a0ec6819ea68143304 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -71,6 +71,10 @@ void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Mul_Op::getAvailableBackends() const {
+    return Registrar<Mul_Op>::getKeys();
+}
+
 ///////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Mul(const std::string& name) {
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index 317bbd364572f49a714e328bf33f3cd58c19215f..f15a7dc3899a7bc864e8e76ff0946fb70584bf05 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
@@ -33,35 +34,35 @@ Aidge::Operator::~Operator() noexcept = default;
 
 Aidge::Elts_t Aidge::Operator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredData(): an implementation is required for {}!", type());
-    return mImpl->getNbRequiredData(inputIdx);
+    return mImpl->prodConso()->getNbRequiredData(inputIdx);
 }
 
 Aidge::Elts_t Aidge::Operator::getNbRequiredProtected(const Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredProtected(): an implementation is required for {}!", type());
-    return mImpl->getNbRequiredProtected(inputIdx);
+    return mImpl->prodConso()->getNbRequiredProtected(inputIdx);
 }
 
 Aidge::Elts_t Aidge::Operator::getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const {
     AIDGE_ASSERT(mImpl != nullptr, "getRequiredMemory(): an implementation is required for {}!", type());
-    return mImpl->getRequiredMemory(outputIdx, inputsSize);
+    return mImpl->prodConso()->getRequiredMemory(outputIdx, inputsSize);
 }
 
 Aidge::Elts_t Aidge::Operator::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbConsumedData(): an implementation is required for {}!", type());
-    return mImpl->getNbConsumedData(inputIdx);
+    return mImpl->prodConso()->getNbConsumedData(inputIdx);
 }
 
 Aidge::Elts_t Aidge::Operator::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbProducedData(): an implementation is required for {}!", type());
-    return mImpl->getNbProducedData(outputIdx);
+    return mImpl->prodConso()->getNbProducedData(outputIdx);
 }
 void Aidge::Operator::updateConsummerProducer(){
     AIDGE_ASSERT(mImpl != nullptr, "updateConsummerProducer(): an implementation is required for {}!", type());
-    mImpl->updateConsummerProducer();
+    mImpl->prodConso()->updateConsummerProducer();
 }
 void Aidge::Operator::resetConsummerProducer(){
     AIDGE_ASSERT(mImpl != nullptr, "resetConsummerProducer(): an implementation is required for {}!", type());
-    mImpl->resetConsummerProducer();
+    mImpl->prodConso()->resetConsummerProducer();
 }
 
 void Aidge::Operator::runHooks() const {
@@ -79,3 +80,17 @@ void Aidge::Operator::backward() {
     AIDGE_ASSERT(mImpl != nullptr, "backward(): an implementation is required for {}!", type());
     mImpl->backward(); 
 }
+
+void Aidge::Operator::setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends) {
+    const auto& availableBackends = getAvailableBackends();
+    // By default, try to set the last backend anyway
+    auto selectedBackend = backends.back();
+    for (const auto& backend : backends) {
+        if (availableBackends.find(backend.first) != availableBackends.end()) {
+            selectedBackend = backend;
+            break;
+        }
+    }
+
+    setBackend(selectedBackend.first, selectedBackend.second);
+}
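
The new Operator::setBackend overload takes an ordered preference list and applies the first backend that is actually available, falling back to the last entry otherwise. A hedged usage sketch (backend names are only examples; internally the overload resolves to the single-backend setBackend shown above):

```cpp
#include <memory>

#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"

void preferCuda(const std::shared_ptr<Aidge::Node>& node) {
    // Try "cuda" first; if no cuda implementation is registered for this operator,
    // "cpu" (the last entry) is applied, even when it is not listed as available.
    node->getOperator()->setBackend({{"cuda", 0}, {"cpu", 0}});
}
```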
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
index 5b1428c160f976a043bb5cbe6fc6cb3351bab336..39f61e328bd3f98bc836604462bbfc064fbb93be 100644
--- a/src/operator/Pad.cpp
+++ b/src/operator/Pad.cpp
@@ -53,6 +53,11 @@ void Aidge::Pad_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::Pad_Op<DIM>::getAvailableBackends() const {
+    return Registrar<Pad_Op<DIM>>::getKeys();
+}
+
 template class Aidge::Pad_Op<1>;
 template class Aidge::Pad_Op<2>;
 
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 5d32a06fd01d8674d8e072f14838f3fd80d1f30a..cd5b18759cdd743f292054bca91ffee5da722ea6 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -20,7 +20,7 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::Pop_OpImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+Aidge::Elts_t Aidge::Pop_ProdConso::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getRawInput(inputIdx) && "requires valid input");
 
     const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
@@ -88,6 +88,10 @@ void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Pop_Op::getAvailableBackends() const {
+    return Registrar<Pop_Op>::getKeys();
+}
+
 void Aidge::Pop_Op::forward() {
     Operator::forward();
     ++mAttributes->template getAttr<PopAttr::ForwardStep>();
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 1602c8c2aa28e305b340888cb3a77cb4d2fc4293..ada71d6cc56c6d88ff64bf720595b220b296801d 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -56,6 +56,10 @@ void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Pow_Op::getAvailableBackends() const {
+    return Registrar<Pow_Op>::getKeys();
+}
+
 ////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Pow(const std::string& name) {
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index e5c4a3e9e18af8b3236b612db2b959f5ce4ec30a..fdba4ac2e22d857a31779df2e5ff789c3eb92f5c 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -84,6 +84,10 @@ void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Producer_Op::getAvailableBackends() const {
+    return Registrar<Producer_Op>::getKeys();
+}
+
 void Aidge::Producer_Op::forward() {
     if (!backend().empty()) {
         mImpl->forward();
diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp
index 03f9e0679facc452d5a8bdc71707a824240f15ac..bda26fa3332ee914325820f47d0babcb622905c8 100644
--- a/src/operator/ReLU.cpp
+++ b/src/operator/ReLU.cpp
@@ -38,6 +38,10 @@ void Aidge::ReLU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::ReLU_Op::getAvailableBackends() const {
+    return Registrar<ReLU_Op>::getKeys();
+}
+
 /////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ReLU(const std::string& name) {
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index d80525adc68f9692a042fdca2ce6869ac0600f5a..7935edb050824af92a8f130f975aa09e41ca875f 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -15,6 +15,7 @@
 #include <cstddef>    // std::size_t
 #include <cstdint>    // std::int32_t
 #include <memory>
+#include <numeric> // For std::iota
 #include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
@@ -26,11 +27,12 @@
 
 const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 
-Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, Aidge::DimSize_t keep_dims)
+Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
     : OperatorTensor(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<ReduceMeanAttr::Axes>(axes),
-        attr<ReduceMeanAttr::KeepDims>(keep_dims)))
+        attr<ReduceMeanAttr::KeepDims>(keep_dims),
+        attr<ReduceMeanAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
 {}
 
 Aidge::ReduceMean_Op::ReduceMean_Op(const Aidge::ReduceMean_Op& op)
@@ -60,6 +62,18 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
 
         // build output dimensions
         std::vector<DimSize_t> outDims = getInput(0)->dims();
+
+        if (axes.empty())
+        {
+            if(mAttributes->template getAttr<ReduceMeanAttr::NoopWithEmptyAxes>()) {
+                mOutputs[0]->resize(outDims);
+                return true;
+            }
+            // if no axes are provided and NoopWithEmptyAxes is false, reduce on all axes
+            axes.resize(getInput(0)->nbDims());
+            std::iota(axes.begin(), axes.end(), 0);
+        }
+
         if (mAttributes->template getAttr<ReduceMeanAttr::KeepDims>()) {
             std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
         }
@@ -80,11 +94,18 @@ void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::ReduceMean_Op::getAvailableBackends() const {
+    return Registrar<ReduceMean_Op>::getKeys();
+}
+
+Aidge::ReduceMean_Op::~ReduceMean_Op() noexcept = default;
+
 ////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ReduceMean(const std::vector<std::int32_t> &axes,
-                                        Aidge::DimSize_t keep_dims,
+                                        bool keep_dims,
+                                        bool noop_with_empty_axes,
                                         const std::string& name) {
     AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
-    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims), name);
+    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims, noop_with_empty_axes), name);
 }
\ No newline at end of file
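
With the new NoopWithEmptyAxes flag, an empty axes list either leaves the shape untouched or, when the flag is false, reduces over every axis (the axes vector is filled with std::iota). ReduceSum below implements the identical rule. A standalone sketch of the output-shape logic (axes are assumed already normalized and sorted, as the operator does before this point):

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

std::vector<std::size_t> reduceDims(std::vector<std::size_t> dims,
                                    std::vector<std::int32_t> axes,
                                    bool keepDims, bool noopWithEmptyAxes) {
    if (axes.empty()) {
        if (noopWithEmptyAxes) return dims;      // empty axes + noop flag: shape unchanged
        axes.resize(dims.size());
        std::iota(axes.begin(), axes.end(), 0);  // otherwise reduce over every axis
    }
    if (keepDims) {
        for (auto a : axes) dims[a] = 1;         // reduced axes kept with size 1
    } else {
        for (auto it = axes.rbegin(); it != axes.rend(); ++it)
            dims.erase(dims.begin() + *it);      // erase from the back so indices stay valid
    }
    return dims.empty() ? std::vector<std::size_t>{1} : dims;  // scalar outputs reported as {1}
}

int main() {
    for (auto d : reduceDims({2, 3, 4}, {}, false, false)) std::cout << d << ' ';  // prints: 1
}
```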
diff --git a/src/operator/ReduceSum.cpp b/src/operator/ReduceSum.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0786f53c6b761e5cd9020352a2ecb92469a609d7
--- /dev/null
+++ b/src/operator/ReduceSum.cpp
@@ -0,0 +1,76 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ReduceSum.hpp"
+
+#include <algorithm>  // std::for_each, std::sort
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <numeric> // For std::iota
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ReduceSum_Op::Type = "ReduceSum";
+
+bool Aidge::ReduceSum_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        // make Axes attribute positive
+        std::vector<std::int32_t>& axes = mAttributes->template getAttr<ReduceSumAttr::Axes>();
+        std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
+            if (val < 0)
+                val+=static_cast<std::int32_t>(getInput(0)->nbDims());
+        });
+        std::sort(axes.begin(), axes.end());
+
+        // build output dimensions
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+
+        if (axes.empty())
+        {
+            if(mAttributes->template getAttr<ReduceSumAttr::NoopWithEmptyAxes>()) {
+                mOutputs[0]->resize(outDims);
+                return true;
+            }
+            // if no axes are provided and NoopWithEmptyAxes is false, reduce on all axes
+            axes.resize(getInput(0)->nbDims());
+            std::iota(axes.begin(), axes.end(), 0);
+        }
+
+        if (mAttributes->template getAttr<ReduceSumAttr::KeepDims>()) {
+            std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
+        }
+        else {
+            for (auto it = axes.crbegin(); it != axes.crend(); ++it)
+                outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
+        }
+
+        // TODO: change {1} for {} when scalar Tensors are better handled.
+        mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
+        return true;
+    }
+    return false;
+}
+
+void Aidge::ReduceSum_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(ReduceSum_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::ReduceSum_Op::getAvailableBackends() const {
+    return Registrar<ReduceSum_Op>::getKeys();
+}
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 5139a0b0c98b11a0cbf6770397be56c830d0aa49..0fa9a62816a36ad3afece02052224c966ee121a3 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -136,6 +136,10 @@ void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t d
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Reshape_Op::getAvailableBackends() const {
+    return Registrar<Reshape_Op>::getKeys();
+}
+
 //////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Reshape(const std::vector<std::int64_t>& shape,
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index f3a69848ebd3cb7dbfb43788d16030e21e071b9c..9e5762452e382a31c1e5da25708507653da2e474 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -149,6 +149,10 @@ void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     }
 }
 
+std::set<std::string> Aidge::Resize_Op::getAvailableBackends() const {
+    return Registrar<Resize_Op>::getKeys();
+}
+
 /////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Resize(const std::string &name) {
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
index a53695b58aab9ea8a50e15638b4c50d42cf444dd..5ac08cd2245e0caa3ca7072c70ccc69bcfcf9558 100644
--- a/src/operator/Scaling.cpp
+++ b/src/operator/Scaling.cpp
@@ -48,6 +48,10 @@ void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t d
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Scaling_Op::getAvailableBackends() const {
+    return Registrar<Scaling_Op>::getKeys();
+}
+
 ////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Scaling(float scalingFactor,
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index f2ad1005907b71ee279b9d9bc9853b667108855c..29a9ee6252a0c2baa6e07bc56e60650685db6bdd 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -91,6 +91,10 @@ void Aidge::Shape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Shape_Op::getAvailableBackends() const {
+    return Registrar<Shape_Op>::getKeys();
+}
+
 //////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Shape(const std::int64_t start, const std::int64_t end, const std::string& name) {
diff --git a/src/operator/ShiftGELU.cpp b/src/operator/ShiftGELU.cpp
index 63480ffccaaf78b2dd951c75b3830a8dfede7d99..bd229e6cf58a430922d08cff5301aa16ef636d5e 100644
--- a/src/operator/ShiftGELU.cpp
+++ b/src/operator/ShiftGELU.cpp
@@ -42,6 +42,10 @@ void Aidge::ShiftGELU_Op::setBackend(const std::string& name, DeviceIdx_t device
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::ShiftGELU_Op::getAvailableBackends() const {
+    return Registrar<ShiftGELU_Op>::getKeys();
+}
+
 ///////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ShiftGELU(const std::string& name) {
diff --git a/src/operator/ShiftMax.cpp b/src/operator/ShiftMax.cpp
index 5b0dd7ace0984c2397ef3a7bb4ef7a5526f4f288..58d4bf46100ce116ad4a179e972cbef81bc5b5c1 100644
--- a/src/operator/ShiftMax.cpp
+++ b/src/operator/ShiftMax.cpp
@@ -46,6 +46,10 @@ void Aidge::ShiftMax_Op::setBackend(const std::string& name, DeviceIdx_t device)
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::ShiftMax_Op::getAvailableBackends() const {
+    return Registrar<ShiftMax_Op>::getKeys();
+}
+
 /////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ShiftMax(const std::string& name) {
diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp
index aa112378fde50c7f36c63b8c0a8d00ed0baab12b..d97f8c52341dee4e6e0840afa6e023d8a4e3fd52 100644
--- a/src/operator/Sigmoid.cpp
+++ b/src/operator/Sigmoid.cpp
@@ -42,6 +42,10 @@ void Aidge::Sigmoid_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t d
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Sigmoid_Op::getAvailableBackends() const {
+    return Registrar<Sigmoid_Op>::getKeys();
+}
+
 ///////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Sigmoid(const std::string& name) {
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index bd7a4750dcbb129b56c541b3e75c2ec6faa7d55a..3bdee8c13c1759261140d634940b0a4e81210084 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -212,6 +212,10 @@ void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Slice_Op::getAvailableBackends() const {
+    return Registrar<Slice_Op>::getKeys();
+}
+
 ////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Slice(const std::vector<std::int64_t>& starts,
diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp
index f425d6fffb8934f00b1c503c1d296b8318377cb0..ad894c5e56a674a452d0388f88a7e4ad268dd216 100644
--- a/src/operator/Softmax.cpp
+++ b/src/operator/Softmax.cpp
@@ -46,6 +46,10 @@ void Aidge::Softmax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t d
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Softmax_Op::getAvailableBackends() const {
+    return Registrar<Softmax_Op>::getKeys();
+}
+
 ////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Softmax(std::int32_t axis, const std::string& name) {
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index 9c56c6a2a28c6acb8c3943cd859fdbe78fd2cd1b..e3ed13588d8c2b5ddde91d37fc926d675f0666a3 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -37,15 +37,15 @@ void Aidge::Split_OpImpl::forward() {
     const std::size_t stride_post = std::accumulate(dims.crbegin(), dims.crbegin() + dims.size() -1 - axis, 1, std::multiplies<std::size_t>());
     for (auto i = 0; i < op.nbOutputs(); ++i)
     {
-        DimIdx_t chunkIdxOnAxis = std::accumulate(splits.cbegin(), splits.cbegin() + i, 0) * stride_post;
-        DimIdx_t offset = 0;
+        DimSize_t chunkIdxOnAxis = std::accumulate(splits.cbegin(), splits.cbegin() + i, 0) * stride_post;
+        DimSize_t offset = 0;
         for (std::size_t j = 0; j < stride_pre; ++j)
         {
             // Compute chunk position in input tensor
-            DimIdx_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
+            DimSize_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
             // Copy chunk in output
             op.getOutput(i)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(idx),
-                                             splits[i] * stride_post, offset);
+                                            splits[i] * stride_post, offset);
             offset += splits[i] * stride_post;
         }
 
@@ -167,6 +167,10 @@ void Aidge::Split_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
 
 }
 
+std::set<std::string> Aidge::Split_Op::getAvailableBackends() const {
+    return Registrar<Split_Op>::getKeys();
+}
+
 ////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Split(Aidge::DimSize_t nbOutput,
diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp
index 3af75a6ca19e301f6c14e1b5fd03d693c161dcc5..bd3286f098cd5c6985d7f33f88b723523ef94765 100644
--- a/src/operator/Sqrt.cpp
+++ b/src/operator/Sqrt.cpp
@@ -41,6 +41,10 @@ void Aidge::Sqrt_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Sqrt_Op::getAvailableBackends() const {
+    return Registrar<Sqrt_Op>::getKeys();
+}
+
 ////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Sqrt(const std::string& name) {
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a8b20d21ae1f6c7bfba1a9e52d039f292b6aa62e
--- /dev/null
+++ b/src/operator/Squeeze.cpp
@@ -0,0 +1,168 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Squeeze.hpp"
+
+#include <algorithm>
+#include <bitset>
+#include <cstdint>
+#include <fmt/core.h>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Log.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+const std::string Squeeze_Op::Type = "Squeeze";
+
+bool Squeeze_Op::dimsForwarded() const {
+  if ((getInput(1) && !getInput(1)->undefined())) {
+    // output dims are data dependent
+    return false;
+  }
+
+  return OperatorTensor::dimsForwarded();
+}
+
+bool Squeeze_Op::forwardDims(bool allowDataDependency) {
+  // error checking
+  if (!inputsAssociated(false) || getInput(0)->undefined()) {
+    return false;
+  }
+
+  std::shared_ptr<Tensor> fallback;
+  // Input 1 is axes to squeeze (can also be given via attribute)
+  if (getInput(1)) {
+    if (!this->axes().empty()) {
+      Log::notice("{} : ignoring non-empty axes attribute because input#1 "
+                  "takes precedence",
+                  type());
+    }
+
+    if (!allowDataDependency) {
+      Log::warn("{} : unable to forwardDims() because output dims are data "
+                "dependent on input#1",
+                type());
+      return false;
+    }
+
+    this->axes().clear(); // if both are provided, the input takes precedence over the attribute
+    this->axes().reserve(getInput(1)->size());
+    const auto &axes =
+        getInput(1)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+    if (axes.nbDims() == 0) {
+      this->axes().clear();
+    } else {
+      AIDGE_ASSERT(
+          axes.nbDims() == 1,
+          "Axes input tensor should have exactly 1 dimension. Received {} dimensions: {}",
+          axes.nbDims(), axes.dims());
+      std::copy_n(static_cast<int8_t *>(axes.getImpl()->hostPtr()), axes.size(),
+                  std::back_inserter(this->axes()));
+    }
+  }
+
+  std::vector<DimSize_t> input_dims = getInput(0)->dims();
+  std::vector<DimSize_t> output_dims;
+  output_dims.reserve(input_dims.size());
+  std::vector<DimIdx_t> axes_rectified_idx;
+  axes_rectified_idx.reserve(input_dims.size());
+
+  if (this->axes().size() == 0) { // squeeze() => squeeze all 1 sized dimensions
+    Log::debug("this->axes() is empty, every dimension of size 1 will be "
+               "squeezed. If this is not intended, ensure that the axes are "
+               "properly set via the attribute or data input#1.");
+    std::copy_if(input_dims.begin(), input_dims.end(),
+                 std::back_inserter(output_dims),
+                 [](DimSize_t dim) { return dim != 1; });
+  } else { // squeeze({N,.....}) => squeeze all specified dimensions that are of
+           // size 1.
+    /////// ensure indexes validity and set pythonic negative indexes to their
+    // positive value
+    for (const int8_t &axis : this->axes()) {
+      AIDGE_ASSERT(axis >= static_cast<int8_t>(-input_dims.size()) &&
+                       axis < static_cast<int8_t>(input_dims.size()),
+                   "{} : Axis index OutOfBounds error, expected value "
+                   "within size limits of input tensor : "
+                   "[-{},{}], got {}.",
+                   type(), input_dims.size(), input_dims.size() - 1, axis);
+      auto temp =
+          static_cast<DimIdx_t>(axis >= 0 ? axis : axis + input_dims.size());
+      if (axes_rectified_idx.end() == std::find(axes_rectified_idx.begin(),
+                                                axes_rectified_idx.end(),
+                                                temp)) {
+        axes_rectified_idx.push_back(temp);
+      }
+    }
+
+    // Create output_dims
+    // speeds up binary search
+    std::sort(axes_rectified_idx.begin(), axes_rectified_idx.end());
+    DimSize_t i = 0;
+    std::copy_if(
+        input_dims.begin(), input_dims.end(), std::back_inserter(output_dims),
+        [&axes_rectified_idx, &i, &input_dims](DimSize_t dim) {
+          // if current dim index is found in axes to squeeze
+          // we ensure that this axis is 1 sized, otherwise an error is thrown
+          bool ok = true;
+          if (std::binary_search(axes_rectified_idx.begin(),
+                                 axes_rectified_idx.end(), i)) {
+            AIDGE_ASSERT(dim == 1,
+                         "{} : Tried to squeeze axis nb {} of a tensor of dim "
+                         "{}. Dim to squeeze has to be 1-sized, got size {}. "
+                         "Axes to squeeze : {}",
+                         __func__, i, input_dims, input_dims[i],
+                         axes_rectified_idx);
+            ok = false;
+          }
+          i++; // Incrementing counter since there is no enumerate
+               // fctn (until C++23)
+          return ok;
+        });
+  }
+  mOutputs[0]->resize(output_dims);
+  return true;
+}
+
+void Squeeze_Op::setBackend(const std::string &name,
+                            Aidge::DeviceIdx_t device) {
+  if (Registrar<Squeeze_Op>::exists({name})) {
+    SET_IMPL_MACRO(Squeeze_Op, *this, name);
+  } else {
+    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
+  }
+  mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Squeeze_Op::getAvailableBackends() const {
+  return Registrar<Squeeze_Op>::getKeys();
+}
+
+void Aidge::Squeeze_OpImpl::forward() {
+  const Squeeze_Op &op_ = static_cast<const Squeeze_Op &>(mOp);
+  // Check if input is provided
+  AIDGE_ASSERT(op_.getInput(0), "Squeeze : missing input 0");
+
+  op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
+                                    op_.getInput(0)->size());
+}
+
+} // namespace Aidge
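
`Squeeze_Op::forwardDims()` above resolves the axes (input#1 takes precedence over the attribute), maps pythonic negative indexes to positive ones, squeezes every size-1 dimension when no axis is given, and rejects a requested axis whose size is not 1. The same shape rule as a self-contained sketch (`squeezeDims` is an illustrative name, not part of the Aidge API):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <stdexcept>
#include <vector>

// Sketch of the squeeze output-shape rule described above.
std::vector<std::size_t> squeezeDims(const std::vector<std::size_t>& in,
                                     const std::vector<std::int8_t>& axes) {
    std::vector<std::size_t> out;
    if (axes.empty()) {  // squeeze(): drop every dimension of size 1
        std::copy_if(in.cbegin(), in.cend(), std::back_inserter(out),
                     [](std::size_t d) { return d != 1; });
        return out;
    }
    const std::int64_t rank = static_cast<std::int64_t>(in.size());
    std::vector<std::size_t> idx;  // negative indexes become positive ones
    for (const std::int8_t a : axes) {
        idx.push_back(static_cast<std::size_t>(a >= 0 ? a : a + rank));
    }
    std::sort(idx.begin(), idx.end());
    for (std::size_t i = 0; i < in.size(); ++i) {
        if (std::binary_search(idx.cbegin(), idx.cend(), i)) {
            if (in[i] != 1) { throw std::runtime_error("dim to squeeze must be 1-sized"); }
            continue;  // drop the squeezed axis
        }
        out.push_back(in[i]);
    }
    return out;
}
// e.g. squeezeDims({1, 3, 1, 4}, {})   -> {3, 4}
//      squeezeDims({1, 3, 1, 4}, {-2}) -> {1, 3, 4}
```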
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index ee4fd5b0887c5d9fafa3acd5822334dba4070aa8..ca7348b3b415375c09ac1cfd69ac3d6f6e3488eb 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -72,6 +72,10 @@ void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Sub_Op::getAvailableBackends() const {
+    return Registrar<Sub_Op>::getKeys();
+}
+
 //////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Sub(const std::string& name) {
diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp
index 1f936b6c8c5f61d86e2832c4bee7b943fa8268a1..fe295ab71b67e8e62562066b1464ffba6e8ae404 100644
--- a/src/operator/Tanh.cpp
+++ b/src/operator/Tanh.cpp
@@ -41,6 +41,10 @@ void Aidge::Tanh_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Tanh_Op::getAvailableBackends() const {
+    return Registrar<Tanh_Op>::getKeys();
+}
+
 ////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Tanh(const std::string& name) {
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index bd1acee8a820ad2e3e54b7b0b21f979fc9ce1feb..0cb1717f1c96c393b8845db129eee1429966cd98 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -79,6 +79,10 @@ void Aidge::Transpose_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Transpose_Op::getAvailableBackends() const {
+    return Registrar<Transpose_Op>::getKeys();
+}
+
 //////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Transpose(const std::vector<Aidge::DimSize_t> &outputDimsOrder,
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
index 2b12f33585a7388bd2411a8ae84ef43915516024..53b8bd5442081e601a55853115f44067ae17fc2b 100644
--- a/src/operator/Unfold.cpp
+++ b/src/operator/Unfold.cpp
@@ -138,6 +138,11 @@ void Aidge::Unfold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx
     mOutputs[0]->setBackend(name, device);
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::Unfold_Op<DIM>::getAvailableBackends() const {
+    return Registrar<Unfold_Op<DIM>>::getKeys();
+}
+
 template class Aidge::Unfold_Op<2>;
 
 ///////////////////////////////////////////////////////////
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..43afd160e03395c65c4dcbe5504cb865da4ed8d8
--- /dev/null
+++ b/src/operator/Unsqueeze.cpp
@@ -0,0 +1,131 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Unsqueeze.hpp"
+
+#include <cstdint>
+#include <fmt/core.h>
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Log.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+const std::string Unsqueeze_Op::Type = "Unsqueeze";
+
+bool Aidge::Unsqueeze_Op::dimsForwarded() const {
+  if ((getInput(1) && !getInput(1)->undefined())) {
+    // output dims are data dependent
+    return false;
+  }
+
+  return OperatorTensor::dimsForwarded();
+}
+
+bool Unsqueeze_Op::forwardDims(bool allowDataDependency) {
+  // error checking
+  if (!inputsAssociated(true)) {
+    return false;
+  }
+  std::shared_ptr<Tensor> fallback;
+  // Copy optional input #1, if present, to attribute Axes
+  if (getInput(1)) {
+    if (!this->axes().empty()) {
+      Log::notice("{} : ignoring non-empty \"axes\" attribute because input#1 "
+                  "takes precedence",
+                  type());
+    }
+
+    if (!allowDataDependency) {
+      Log::warn("{} : unable to forwardDims() because output dims are data "
+                "dependent on input#1",
+                type());
+      return false;
+    }
+
+    this->axes().clear(); // if both are provided, the input takes precedence over the attribute
+    this->axes().reserve(getInput(1)->size());
+    const auto &axes =
+        getInput(1)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+    std::copy_n(static_cast<int8_t *>(axes.getImpl()->hostPtr()),
+                axes.size(), std::back_inserter(this->axes()));
+  }
+  AIDGE_ASSERT(!this->axes().empty(),
+               "{} : Axes to unsqueeze can be defined via input#1 or axes "
+               "attribute. None of them were provided.",
+               type());
+
+  std::vector<DimSize_t> input_dims = getInput(0)->dims();
+  std::vector<DimIdx_t> axes_rectified_idx;
+  axes_rectified_idx.reserve(this->axes().size());
+  DimIdx_t output_nb_dims = input_dims.size() + this->axes().size();
+
+  for (const int8_t &axis : this->axes()) {
+    AIDGE_ASSERT(axis >= static_cast<int8_t>(-output_nb_dims) &&
+                     axis < static_cast<int8_t>(output_nb_dims),
+                 "{} : Axis index OutOfBounds error, expected value "
+                 "within size limits of input tensor : "
+                 "[-{},{}], got {}.",
+                 type(), output_nb_dims, output_nb_dims - 1, axis);
+    axes_rectified_idx.push_back(
+        static_cast<DimIdx_t>(axis >= 0 ? axis : axis + output_nb_dims));
+  }
+  // sort in ascending order: each index is an output-frame coordinate, so lower axes must be inserted first
+  std::sort(axes_rectified_idx.begin(), axes_rectified_idx.end());
+  // Raise error if duplicate indexes are found
+  const auto &it = std::adjacent_find(axes_rectified_idx.begin(), axes_rectified_idx.end());
+  AIDGE_ASSERT(
+      it == axes_rectified_idx.end(),
+      "{} : The index {} appears multiple times in list of input dims. "
+      "Check positive and negative indexes.\nRaw indexes :\t{}\nRectified "
+      "indexes :\t{}",
+      type(), *it, this->axes(), axes_rectified_idx);
+
+  // computation
+  std::vector<DimSize_t> output_dims(input_dims);
+  output_dims.reserve(input_dims.size() + this->axes().size());
+  for (const DimIdx_t &axis : axes_rectified_idx) {
+    output_dims.insert(output_dims.begin() + axis, 1);
+  }
+  mOutputs[0]->resize(output_dims);
+  return true;
+}
+
+void Unsqueeze_Op::setBackend(const std::string &name,
+                              Aidge::DeviceIdx_t device) {
+  if (Registrar<Unsqueeze_Op>::exists({name})) {
+    SET_IMPL_MACRO(Unsqueeze_Op, *this, name);
+  } else {
+    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
+  }
+  mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Unsqueeze_Op::getAvailableBackends() const {
+  return Registrar<Unsqueeze_Op>::getKeys();
+}
+
+void Aidge::Unsqueeze_OpImpl::forward() {
+  const Unsqueeze_Op &op_ = static_cast<const Unsqueeze_Op &>(mOp);
+  // Check if input is provided
+  AIDGE_ASSERT(op_.getInput(0), "Unsqueeze : missing input 0");
+  op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
+                                    op_.getInput(0)->size());
+}
+
+} // namespace Aidge
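
`Unsqueeze_Op::forwardDims()` interprets each axis relative to the output rank (input rank plus number of axes), rejects duplicates, then inserts a 1 at each rectified index in ascending order. A standalone sketch of that rule (`unsqueezeDims` is an illustrative name, not an Aidge function):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch of the unsqueeze output-shape rule described above.
std::vector<std::size_t> unsqueezeDims(std::vector<std::size_t> dims,
                                       const std::vector<std::int8_t>& axes) {
    const std::int64_t outRank = static_cast<std::int64_t>(dims.size() + axes.size());
    std::vector<std::size_t> idx;  // negative axes are relative to the output rank
    for (const std::int8_t a : axes) {
        idx.push_back(static_cast<std::size_t>(a >= 0 ? a : a + outRank));
    }
    // ascending order: every index is an output coordinate, so the lower ones
    // must already be in place before a higher one is inserted
    std::sort(idx.begin(), idx.end());
    for (const std::size_t i : idx) {
        dims.insert(dims.begin() + static_cast<std::ptrdiff_t>(i), 1);
    }
    return dims;
}
// e.g. unsqueezeDims({3, 4}, {0, 3}) -> {1, 3, 4, 1}
//      unsqueezeDims({3, 4}, {-1})   -> {3, 4, 1}
```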
diff --git a/src/recipes/AdaptToBackend.cpp b/src/recipes/AdaptToBackend.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e625a52f6545c3b2b34f85745fd88087a1b9883b
--- /dev/null
+++ b/src/recipes/AdaptToBackend.cpp
@@ -0,0 +1,38 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Matching.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+void Aidge::adaptToBackend(std::shared_ptr<GraphView> graphView) {
+    const auto nodes = graphView->getNodes();
+    for (auto node : nodes) {
+        auto impl = node->getOperator()->getImpl();
+        AIDGE_ASSERT(impl, "Missing implementation for node {} (of type {})",
+            node->name(), node->type());
+        auto adaptedNode = impl->getBestAdaptation(impl->getRequiredSpec());
+
+        if (adaptedNode == nullptr) {
+            Log::notice("Unable to adapt node {} (of type {}) to backend {}",
+                node->name(), node->type(), impl->backend());
+        }
+        else if (!adaptedNode->getOperator()->isAtomic()) {
+            Log::info("Adapted node {} (of type {}) to backend {}",
+                node->name(), node->type(), impl->backend());
+            AIDGE_ASSERT(GraphView::replace({node}, {adaptedNode}), "Unable to replace adapted node!");
+        }
+    }
+}
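
`adaptToBackend()` expects every node to already carry an implementation for its backend; it then asks each implementation for its best adaptation and swaps the node in place when that adaptation is a non-atomic meta operator. A minimal usage sketch, assuming the graph has already been built and that `GraphView::setBackend()` is available to attach implementations beforehand (`adaptModel` is an illustrative name):

```cpp
#include <memory>

#include "aidge/graph/GraphView.hpp"
#include "aidge/recipes/Recipes.hpp"

// Sketch only: the model GraphView is assumed to be already populated.
void adaptModel(std::shared_ptr<Aidge::GraphView> model) {
    model->setBackend("cpu");      // every node needs an implementation to query
    Aidge::adaptToBackend(model);  // replace nodes by their best backend adaptation
}
```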
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index e1553fda551795a0b6f0334ccf1dbd3d2b760085..34722c19f8c0fddaffa7357136f1512a027e1617 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -90,13 +90,13 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
             meanVariance += b_var.get<float>(outChId);
             ++count;
         } else {
-            fmt::print("Zero-variance: {} [{}]\n", convNode->name(), outChId);
+            Log::notice("Zero-variance: {} [{}]", convNode->name(), outChId);
         }
     }
     if (count > 0)
         meanVariance /= count;
     else {
-        fmt::print("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
+        Log::notice("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?");
     }
 
     // Add bias if it is non existant, as there will be a bias after the fuse
diff --git a/src/recipes/FuseToMetaOps.cpp b/src/recipes/FuseToMetaOps.cpp
index e7748936c00a20ec235ea7853f4d17e2c10261fb..0ad5e5a1da0e6aef74f7e47751dd2d4e8648980b 100644
--- a/src/recipes/FuseToMetaOps.cpp
+++ b/src/recipes/FuseToMetaOps.cpp
@@ -24,6 +24,15 @@ size_t Aidge::fuseToMetaOps(std::shared_ptr<GraphView> graphView, const std::str
     size_t nbReplaced = 0;
     for (const auto& match : matches) {
         auto metaOp = MetaOperator(metaType.c_str(), match.graph->clone());
+        // Clone does not clone implementation, which is therefore empty.
+        // Use the root node backend for the meta op backend, even though some
+        // matching nodes might be on a different backend, as nodes in the meta
+        // op are required to share the same backend!
+        const auto backend = match.graph->rootNode()->getOperator()->backend();
+        if (!backend.empty()) {
+            metaOp->getOperator()->setBackend(backend);
+        }
+
         auto metaOpGraph = std::make_shared<GraphView>();
         metaOpGraph->add(metaOp, false);
         const auto success = GraphView::replace(match.graph, metaOpGraph);
diff --git a/src/recipes/RemoveFlatten.cpp b/src/recipes/RemoveFlatten.cpp
index 8c1bf1bcf0bf79fda275867ff6430d5a937da172..bf80ab51749953a5b72d0e01f186265fdbb72e81 100644
--- a/src/recipes/RemoveFlatten.cpp
+++ b/src/recipes/RemoveFlatten.cpp
@@ -17,38 +17,20 @@
 
 
 //Graph Regex
-#include "aidge/graphRegex/GraphRegex.hpp"
+// #include "aidge/graphRegex/GraphRegex.hpp"
+#include "aidge/graph/Matching.hpp"
 
 
 namespace Aidge {
-    void removeFlatten(std::shared_ptr<Node> flatten) {
-        GraphView::replace({flatten}, {});
-    }
-
-    void removeFlatten(std::shared_ptr<MatchSolution> solution){
-
-        assert(solution->at("FC").size() == 1 && "Wrong number of nodes FC to replace\n");
-        assert(solution->at("Flatten").size() == 1 && "Wrong number of nodes Flatten to replace\n");
-
-        for (const auto& flatten : solution->at("Flatten")) {
-            removeFlatten(flatten);
-        }
-    }
-
-
-
     void removeFlatten(std::shared_ptr<GraphView> graphView){
-      
-
-        std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
-        regex->setNodeKey("Flatten","getType($) =='Flatten'");
-        regex->setNodeKey("FC","getType($) =='FC'");
-        regex->addQuery("Flatten->FC");
-
-        for (const auto& solution : regex->match(graphView)) {
-            removeFlatten(solution);
+        const auto matches = SinglePassGraphMatching(graphView).match(
+            "(FC|MatMul)<-(Flatten)+"
+        );
+
+        for (const auto& solution : matches) {
+            auto flattenNodes(solution.graph->getNodes());
+            flattenNodes.erase(solution.graph->rootNode());
+            GraphView::replace(flattenNodes, {});
         }
-
-
     }
 }
diff --git a/src/recipes/removeConstantOfShape.cpp b/src/recipes/removeConstantOfShape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5e84f7b494815ecb5a8937bb6f76ba1de80ad3f9
--- /dev/null
+++ b/src/recipes/removeConstantOfShape.cpp
@@ -0,0 +1,128 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include "aidge/recipes/Recipes.hpp"
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <set>
+#include <stdexcept>
+#include <string>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Matching.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/ConstantOfShape.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+// Graph Regex
+#include "aidge/graphRegex/GraphRegex.hpp"
+
+namespace Aidge {
+
+size_t removeConstantOfShape(std::shared_ptr<GraphView> graph_view) {
+  const auto matches =
+      SinglePassGraphMatching(graph_view).match("Producer->ConstantOfShape");
+
+  size_t nbReplaced = 0;
+  for (const auto &match : matches) {
+    const auto prod_node = match.graph->rootNode();
+    const auto prod_op =
+        std::static_pointer_cast<Producer_Op>(prod_node->getOperator());
+
+    const NodePtr constantofshape_node =
+        prod_node->getOrderedChildren().at(0).at(0);
+
+    const auto constantofshape_op =
+        std::static_pointer_cast<ConstantOfShape_Op>(
+            constantofshape_node->getOperator());
+
+    if (prod_op->getOutput(0)->nbDims() != 1) {
+      Log::debug("{} : Producer output dimension number is {} != 1 and {} "
+                 "input has to have 1 dim, skipping match.",
+                 __func__, prod_op->getOutput(0)->nbDims(),
+                 ConstantOfShape_Op::Type);
+      continue;
+    }
+    if (!prod_op->constant()) {
+      Log::debug("{} : Producer is not constant, skipping match.", __func__);
+      continue;
+    }
+    if (prod_op->getOutput(0)->dataType() != DataType::Int64) {
+      AIDGE_THROW_OR_ABORT(
+          std::runtime_error,
+          "{} : Producer output dtype is {} != int64 and {} "
+          "input type is restricted to int64_t, this is an error. "
+          "Fix your network.",
+          __func__, prod_op->getOutput(0)->dataType(),
+          ConstantOfShape_Op::Type);
+      continue;
+    }
+
+    auto graph_to_replace = std::make_shared<GraphView>();
+    auto new_graph = std::make_shared<GraphView>();
+    graph_to_replace->add(constantofshape_node);
+    if (prod_node->getChildren().size() == 1) {
+      graph_to_replace->add(prod_node);
+    } else {
+      Log::debug("{} : Producer node has multiple children, only "
+                 "replacing the {} node.",
+                 __func__, ConstantOfShape_Op::Type);
+    }
+
+    prod_node->forward();
+    std::shared_ptr<Tensor> prod_output = prod_op->getOutput(0);
+    std::vector<DimSize_t> new_input_dims;
+    new_input_dims.reserve(prod_output->dims()[0]);
+    for (DimSize_t i = 0; i < prod_output->size(); ++i) {
+      new_input_dims.push_back(prod_output->get<int64_t>(i));
+    }
+
+    auto new_input = std::make_shared<Tensor>(new_input_dims);
+    new_input->setBackend(prod_op->backend() == "" ? "cpu"
+                                                   : prod_op->backend());
+    new_input->setDataType(constantofshape_op->value().dataType());
+    for (std::size_t i = 0; i < new_input->size(); ++i) {
+      new_input->getImpl()->copy(
+          constantofshape_op->value().getImpl()->rawPtr(), 1, i);
+    }
+    auto new_prod =
+        Producer(new_input, prod_node->name() + "_constant_of_shape", true);
+    new_graph->add(new_prod);
+
+    const auto success = GraphView::replace(graph_to_replace, new_graph);
+    if (!success) {
+      Log::warn("Could not replace Producer({})->ConstantOfShape({}) with "
+                "Producer",
+                prod_node->name(), constantofshape_node->name());
+    } else {
+      ++nbReplaced;
+    }
+  }
+
+  Log::info("Replaced {} (out of {}) matching Producer->ConstantOfShape with "
+            "Producers",
+            nbReplaced, matches.size());
+  return nbReplaced;
+}
+} // namespace Aidge
+
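
`removeConstantOfShape()` folds a constant int64 `Producer` feeding a `ConstantOfShape` into a single `Producer` that already holds the filled tensor: the producer's values become the new dimensions and the `ConstantOfShape` value attribute fills the payload. The fold itself, shown standalone with a float fill value (`foldConstantOfShape` is an illustrative name; the real recipe keeps the value attribute's data type):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch of what the replacement Producer ends up holding.
std::vector<float> foldConstantOfShape(const std::vector<std::int64_t>& shape,
                                       float value,
                                       std::vector<std::size_t>& outDims) {
    std::size_t size = 1;
    for (const auto d : shape) {
        outDims.push_back(static_cast<std::size_t>(d));
        size *= static_cast<std::size_t>(d);
    }
    return std::vector<float>(size, value);  // the new Producer's constant payload
}
// e.g. shape = {2, 3}, value = 0.5f -> outDims = {2, 3}, payload = six 0.5f values
```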
diff --git a/src/scheduler/ParallelScheduler.cpp b/src/scheduler/ParallelScheduler.cpp
index 4e515099006b9e0588eafc7e981c5f5e80bbe97d..1d70646b70091e2e3ff6f03b8ee82ae62aeb1e43 100644
--- a/src/scheduler/ParallelScheduler.cpp
+++ b/src/scheduler/ParallelScheduler.cpp
@@ -127,7 +127,12 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std::
         // in the next step
         for (size_t i = 0; i < staticSchedule.size(); ) {
             auto runnable = staticSchedule[i];
-            if (!pool.busy() && runnable->early <= latest) {
+            if (runnable->early > latest) {
+                // No more nodes can be run at this step (latest)
+                break;
+            }
+
+            if (!pool.busy()) {
                 // Check that potential preceding non-critical nodes are finished
                 bool ready = true;
                 for (auto elt : runnable->laterThan) {
@@ -168,9 +173,17 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std::
                 }
             }
             else {
-                // Thread pool is already full or no more node can be run at
-                // this step (latest)
-                break;
+                // Thread pool is already full
+                bool ready = true;
+                for (auto elt : mustFinish) {
+                    ready = ready && finished.at(elt);
+                }
+                if (!ready) {
+                    std::this_thread::yield();
+                }
+                else {
+                    break;
+                }
             }
         }
 
diff --git a/src/scheduler/ProdConso.cpp b/src/scheduler/ProdConso.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a3bff53c3643a5da361dec5944f47a27f148a995
--- /dev/null
+++ b/src/scheduler/ProdConso.cpp
@@ -0,0 +1,117 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <string>
+
+#include "aidge/scheduler/ProdConso.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+Aidge::ProdConso::ProdConso(const Operator& op, bool inPlace):
+    mOp(op),
+    mInPlace(inPlace),
+    mNbConsumedData(mOp.nbInputs(), Elts_t::NoneElts()),
+    mNbProducedData(mOp.nbOutputs(), Elts_t::NoneElts())
+{
+    //ctor
+}
+
+Aidge::Elts_t Aidge::ProdConso::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+    if (mOp.getRawInput(inputIdx)) {
+        const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
+        if (!input->undefined()) {
+            // Known amount of data: requires the whole tensor by default
+            return Elts_t::DataElts(input->size());
+        }
+        else {
+            // Unknown amount of data: require a single token by default
+            return Elts_t::TokenElts(1);
+        }
+    }
+
+    // Input not connected, meaning it is an optional input: do not require anything!
+    return Elts_t::NoneElts();
+}
+
+Aidge::Elts_t Aidge::ProdConso::getNbRequiredProtected(IOIndex_t inputIdx) const {
+    if (mOp.getRawInput(inputIdx)) {
+        const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
+        if (!input->undefined()) {
+            // Known amount of data: protect the whole tensor by default
+            return Elts_t::DataElts((mInPlace) ? 0 : input->size());
+        }
+        else {
+            // Unknown amount of data: protect a single token by default
+            // (this does not really make sense for now, as getNbRequiredProtected()
+            // is supposed to give a precise amount of data to protect for
+            // memory management purpose...)
+            return Elts_t::TokenElts((mInPlace) ? 0 : 1);
+        }
+    }
+
+    // Input not connected, meaning it is an optional input: do not require anything!
+    return Elts_t::NoneElts();
+}
+
+Aidge::Elts_t Aidge::ProdConso::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
+    if (mOp.getRawOutput(outputIdx)) {
+        const auto output = std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx));
+        if (!output->undefined()) {
+            // Known amount of data: requires the whole tensor by default,
+            // regardless of available data on inputs
+            return Elts_t::DataElts(output->size());
+        }
+        else {
+            // Unknown amount of data: require a single token by default
+            // (this does not really make sense for now, as getRequiredMemory()
+            // is supposed to give a precise amount of data to allocate for
+            // memory management purpose...)
+            return Elts_t::TokenElts(1);
+        }
+    }
+
+    // Output not set, meaning it is an optional output: do not require anything!
+    return Elts_t::NoneElts();
+}
+
+Aidge::Elts_t Aidge::ProdConso::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
+    AIDGE_ASSERT(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(),
+        "input index ({}) is out of bound ({}) for operator type {}",
+        inputIdx, mNbConsumedData.size(), mOp.type());
+    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
+}
+
+Aidge::Elts_t Aidge::ProdConso::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
+    AIDGE_ASSERT(static_cast<std::size_t>(outputIdx) < mNbProducedData.size(),
+        "output index ({}) is out of bound ({}) for operator type {}",
+        outputIdx, mNbProducedData.size(), mOp.type());
+    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
+}
+
+void Aidge::ProdConso::updateConsummerProducer(){
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx) {
+        // each input is consumed by the minimum amount for a forward pass
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));
+    }
+
+    for (std::size_t outputIdx = 0; outputIdx < mNbProducedData.size(); ++outputIdx) {
+        mNbProducedData[outputIdx] += getRequiredMemory(outputIdx, {});
+    }
+}
+
+void Aidge::ProdConso::resetConsummerProducer(){
+    std::fill(mNbConsumedData.begin(), mNbConsumedData.end(), Elts_t::NoneElts());
+    std::fill(mNbProducedData.begin(), mNbProducedData.end(), Elts_t::NoneElts());
+}
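
The new `ProdConso` class centralizes the default producer–consumer contract: a defined input tensor is required in full, an undefined one costs a single token, and an unconnected optional input costs nothing. The decision, stripped of the Aidge types (illustrative names only):

```cpp
// Sketch of the branch taken by ProdConso::getNbRequiredData() above.
enum class Need { AllData, OneToken, Nothing };

Need defaultRequirement(bool connected, bool defined) {
    if (!connected) {
        return Need::Nothing;        // optional input: nothing required
    }
    return defined ? Need::AllData   // known shape: the whole tensor
                   : Need::OneToken; // unknown shape: a single token
}
```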
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 4585e08d5ca3a2c37e9d8911cea9b1f25c3720b6..1613450508ea84a230f36ba6526a1322c6a70559 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -33,6 +33,7 @@
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 
 
@@ -219,7 +220,7 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S
             bool isProducer = false;
             for (IOIndex_t outId = 0; outId < consumer->nbOutputs(); ++outId) {
                 for (const auto& child : consumer->getChildren(outId)) {
-                    if (child) {
+                    if (child && mGraphView->inView(child)) {
                         IOIndex_t inputIdx = 0;
                         for (const auto& childParent : child->getParents()) {
                             if (childParent == consumer) {
@@ -611,6 +612,9 @@ void Aidge::Scheduler::saveStaticSchedulingDiagram(const std::string& fileName)
 }
 
 std::vector<std::shared_ptr<Aidge::Node>> Aidge::Scheduler::getStaticScheduling(std::size_t step) const {
+    AIDGE_ASSERT(!mStaticSchedule.empty(), "Scheduler::getStaticScheduling(): static scheduling is empty, did you generate scheduling first?");
+    AIDGE_ASSERT(step < mStaticSchedule.size(), "Scheduler::getStaticScheduling(): no static scheduling at step {} (available steps: {})", step, mStaticSchedule.size());
+
     const auto& staticSchedule = mStaticSchedule.at(step);
     std::vector<std::shared_ptr<Node>> schedule;
     std::transform(staticSchedule.begin(), staticSchedule.end(), std::back_inserter(schedule), [](const auto& v) { return v->node; });
@@ -665,7 +669,7 @@ Aidge::Elts_t Aidge::Scheduler::getNbAvailableData(const std::shared_ptr<Node>&
         // => This means data was fed manually to the input, without a Producer
         // In this case, we assume a single-use data (unlike a Producer, which
         // keep producing the data each time it is needed).
-        fmt::print("No producer node attached to input#{} for node {} ({})\n", inputIdx, node->name(), node->type());
+        Log::warn("No producer node attached to input#{} for node {} ({})", inputIdx, node->name(), node->type());
         return Elts_t::DataElts(std::static_pointer_cast<Tensor>(node->getOperator()->getRawInput(inputIdx))->size());
     }
 
diff --git a/src/utils/DynamicAttributes.cpp b/src/utils/DynamicAttributes.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..909d3bb2f5fda977ac497a19e1a1088eb52cfc88
--- /dev/null
+++ b/src/utils/DynamicAttributes.cpp
@@ -0,0 +1,31 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/utils/DynamicAttributes.hpp"
+
+std::map<std::type_index, bool(*)(const future_std::any&, const future_std::any&)> Aidge::DynamicAttributes::mAnyCompare;
+
+bool future_std::operator<(const future_std::any& lhs, const future_std::any& rhs) {
+    if (lhs.type() == rhs.type()) {
+        return Aidge::DynamicAttributes::mAnyCompare.at(lhs.type())(lhs, rhs);
+    }
+#ifdef PYBIND
+    else if (lhs.type() == typeid(py::object)) {
+        return Aidge::DynamicAttributes::mAnyCompare.at(rhs.type())(lhs, rhs);
+    }
+    else if (rhs.type() == typeid(py::object)) {
+        return Aidge::DynamicAttributes::mAnyCompare.at(lhs.type())(lhs, rhs);
+    }
+#endif
+    else {
+        return (lhs.type().before(rhs.type()));
+    }
+}
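
`DynamicAttributes` stores attribute values as type-erased `any` objects, so ordering them needs a per-type comparator registry: values of the same type go through the comparator registered in `mAnyCompare`, while values of different types fall back to an ordering on the types themselves (with a special case for `py::object` when PYBIND is set). The same dispatch idea, sketched with C++17 `std::any` (the patch itself uses `future_std::any` to stay on C++14):

```cpp
#include <any>
#include <map>
#include <typeindex>

// Sketch of per-type comparator dispatch, as in DynamicAttributes::mAnyCompare.
using AnyLess = bool (*)(const std::any&, const std::any&);

static std::map<std::type_index, AnyLess> comparators = {
    {typeid(int), [](const std::any& a, const std::any& b) {
         return std::any_cast<int>(a) < std::any_cast<int>(b);
     }},
};

bool anyLess(const std::any& a, const std::any& b) {
    if (a.type() == b.type()) {
        return comparators.at(a.type())(a, b);  // same type: registered comparator
    }
    return a.type().before(b.type());  // different types: stable ordering on the type
}
```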
diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp
index ae8816e78b6fc7b8f2288b6873642f0729e195b6..da32a8e0ec6a3c9f27da5c47f9e6166c1fc879bc 100644
--- a/src/utils/Log.cpp
+++ b/src/utils/Log.cpp
@@ -89,7 +89,7 @@ void Aidge::Log::log(Level level, const std::string& msg) {
             fmt::println("Context: {}", context);
         }
 
-        fmt::println(mFile.get(), msg);
+        fmt::println(mFile.get(), "{}", msg);
     }
 }
 
diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt
index 9280d5fbdfd0a6a35724e5afd5caf672fefd8bf8..fd96b060630c162e93143e8f51019a0ce3e82cc9 100644
--- a/unit_tests/CMakeLists.txt
+++ b/unit_tests/CMakeLists.txt
@@ -55,7 +55,7 @@ target_link_options(tests${module_name} PUBLIC $<$<OR:$<CXX_COMPILER_ID:Clang>,$
 
 endif()
 
-target_link_libraries(tests${module_name} PUBLIC ${module_name})
+target_link_libraries(tests${module_name} PRIVATE ${module_name})
 
 target_link_libraries(tests${module_name} PRIVATE Catch2::Catch2WithMain)
 
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index d9289c4aa3f4b44ce72d772c9a39dd8e66ab09e7..a08808ee5e6c2657a76213dcff80cec53b23e7ee 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -23,6 +23,9 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Testing.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Split.hpp"
+#include "aidge/operator/Memorize.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/graph/OpArgs.hpp"
@@ -108,13 +111,6 @@ TEST_CASE("clone_with_delete", "[GraphView][cloneDelete]") {
     const size_t nbTests = 100;
     size_t nbClonedWithDelete = 0;
 
-    // Note: initial seed is chosen such that for nbTests=100, the generated
-    // graphs keep the same inputs/outputs despites the deleted nodes
-    // (meaning the deleted nodes are not input/output of the graph).
-    // Otherwise, the last two REQUIRE are not garanteed to be true!
-    // Warning: distributions are not required to behave the same way by the standard,
-    // therefore the seed has to work for both GCC and MSVC...
-    // See https://stackoverflow.com/questions/38532927/why-gcc-and-msvc-stdnormal-distribution-are-different
     std::mt19937::result_type seed(243);
 
     for (int test = 0; test < nbTests; ++test) {
@@ -124,7 +120,21 @@ TEST_CASE("clone_with_delete", "[GraphView][cloneDelete]") {
         const auto g1 = std::make_shared<GraphView>("g1");
         const bool unicity1 = g1->add(randGraph.gen(seed, 10));
 
-        if (unicity1) {
+        bool stableInOut = true;
+        for (auto node : g1->inputNodes()) {
+            if (node->type() == "DelFictive") {
+                stableInOut = false;
+                break;
+            }
+        }
+        for (auto node : g1->outputNodes()) {
+            if (node->type() == "DelFictive") {
+                stableInOut = false;
+                break;
+            }
+        }
+
+        if (unicity1 && stableInOut) {
             randGraph.omitType = "DelFictive";
             const auto g2 = std::make_shared<GraphView>("g2");
             const bool unicity2 = g2->add(randGraph.gen(seed, 10));
@@ -433,6 +443,107 @@ TEST_CASE("[core/graph] GraphView(resetConnections)") {
     }
 }
 
+TEST_CASE("[core/graph] GraphView(getOrderedNodes)", "[GraphView][getOrderedNodes]") {
+    auto data1 = Producer({2}, "data1");
+    auto data2 = Producer({2}, "data2");
+    auto data3 = Producer({2}, "data3");
+    auto add1 = Add(2, "add1");
+    auto add2 = Add(2, "add2");
+    auto split1 = Split(2, 0, {1, 1}, "split1");
+    auto add3 = Add(3, "add3");
+    auto g = std::make_shared<GraphView>("TestGraph");
+    data1->addChild(add1);
+    data2->addChild(add1);
+    add1->addChild(add2);
+    data3->addChild(add2);
+    add1->addChild(add3);
+    add2->addChild(split1);
+    split1->addChild(add3);
+    g->add(data1);
+    g->add(data2);
+    g->add(data3);
+    g->add(add1);
+    g->add(add2);
+    g->add(split1);
+    g->add(add3);
+    REQUIRE(g->getNodes().size() == 7);
+
+    auto topo = g->getOrderedNodes();
+    SECTION("Topological order") {
+        REQUIRE(topo[0] == data1);
+        REQUIRE(topo[1] == data2);
+        REQUIRE(topo[2] == add1);
+        REQUIRE(topo[3] == data3);
+        REQUIRE(topo[4] == add2);
+        REQUIRE(topo[5] == split1);
+        REQUIRE(topo[6] == add3);
+    }
+
+    auto pdfs = g->getOrderedNodes(true);
+    SECTION("Post DFS order") {
+        REQUIRE(pdfs[0] == add3);
+        REQUIRE(pdfs[1] == split1);
+        REQUIRE(pdfs[2] == add2);
+        REQUIRE(pdfs[3] == add1);
+        REQUIRE(pdfs[4] == data1);
+        REQUIRE(pdfs[5] == data2);
+        REQUIRE(pdfs[6] == data3);
+    }
+
+    // Invert output order
+    g->setOrderedOutputs({{split1, 1}, {add3, 0}});
+    SECTION("Topological order output reversed") {
+        // As add3 depends upon split1, the order should not be changed
+        auto topo2 = g->getOrderedNodes();
+        REQUIRE(topo2 == topo);
+    }
+
+    SECTION("Post DFS order output reversed") {
+        // As add3 depends upon split1, the order should not be changed
+        auto pdfs2 = g->getOrderedNodes(true);
+        REQUIRE(pdfs2 == pdfs);
+    }
+}
+
+TEST_CASE("[core/graph] GraphView(getOrderedNodes) cyclic", "[GraphView][getOrderedNodes]") {
+    auto data1 = Producer({2}, "data1");
+    auto data2 = Producer({2}, "data2");
+    auto add1 = Add(2, "add1");
+    auto mem1 = Memorize(1, "mem1");
+    auto add2 = Add(2, "add2");
+    auto g = std::make_shared<GraphView>("TestGraph");
+    data1->addChild(add1);
+    data2->addChild(add1);
+    add1->addChild(mem1, 0, 1); // init
+    data1->addChild(add2);
+    mem1->addChild(add2);
+    add2->addChild(mem1); // back edge
+    g->add(data1);
+    g->add(data2);
+    g->add(add1);
+    g->add(mem1);
+    g->add(add2);
+    REQUIRE(g->getNodes().size() == 5);
+
+    auto topo = g->getOrderedNodes();
+    SECTION("Topological order") {
+        REQUIRE(topo[0] == data1);
+        REQUIRE(topo[1] == data2);
+        REQUIRE(topo[2] == add1);
+        REQUIRE(topo[3] == mem1);
+        REQUIRE(topo[4] == add2);
+    }
+
+    auto pdfs = g->getOrderedNodes(true);
+    SECTION("post DFS order") {
+        REQUIRE(pdfs[0] == add2);
+        REQUIRE(pdfs[1] == mem1);
+        REQUIRE(pdfs[2] == add1);
+        REQUIRE(pdfs[3] == data1);
+        REQUIRE(pdfs[4] == data2);
+    }
+}
+
 TEST_CASE("[core/graph] GraphView(forwardDims)", "[GraphView][forwardDims]") {
     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
     auto conv1 = Conv(3, 32, {3, 3}, "conv1");
diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp
index 6abb4d37114d0952feb13c6cfbee66bd65dc5748..2fdcd611d378ceb6c3dbdc853920eecf92c31141 100644
--- a/unit_tests/graph/Test_Matching.cpp
+++ b/unit_tests/graph/Test_Matching.cpp
@@ -18,6 +18,8 @@
 #include "aidge/graph/Testing.hpp"
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/operator/Add.hpp"
+#include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/FC.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/MetaOperatorDefs.hpp"
 #include "aidge/operator/Producer.hpp"
@@ -27,7 +29,7 @@
 using namespace Aidge;
 
 void checkMatches(const std::set<SinglePassGraphMatching::MatchingResult>& results, const std::map<std::string, std::set<std::string>>& expected) {
-    REQUIRE(results.size() == expected.size());
+    CHECK(results.size() == expected.size());
 
     for (const auto& result : results) {
         const auto found = nodePtrTo(result.graph->getNodes(), nodePtrToName);
@@ -347,6 +349,94 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
+    auto g2 = Sequential({
+        Producer({16, 3, 512, 512}, "dataProvider"),
+        Conv(3, 4, {5, 5}, "conv1"),
+        BatchNorm<2>(4, 1.0e-5, 0.1, "bn1"),
+        Conv(4, 4, {5, 5}, "conv2"),
+        ReLU("relu2"),
+        Conv(4, 4, {5, 5}, "conv3"),
+        BatchNorm<2>(4, 1.0e-5, 0.1, "bn3"),
+        FC(4, 4, false, "fc1"),
+        FC(4, 4, false, "fc2"),
+        FC(4, 4, false, "fc3"),
+        ReLU("relu3"),
+        Conv(1, 4, {5, 5}, "conv4")
+    });
+
+    SECTION("((Conv#->(.[exBN]|$))|(FC#->(.[exFC])*->$))") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exBN", [](const NodePtr& node) {
+            return (node->type() != "BatchNorm");
+        });
+        gm.addNodeLambda("exFC", [](const NodePtr& node) {
+            return (node->type() != "FC");
+        });
+
+        const auto results = gm.match("((Conv#->(.[exBN]|$))|(FC#->(.[exFC])*->$))");
+
+        checkMatches(results, {
+            {"conv2", {"conv2", "relu2"}},
+            {"conv4", {"conv4"}},
+            {"fc3", {"fc3", "relu3", "conv4"}}
+        });
+    }
+
+    // Find last node of a type
+    SECTION("FC#->(.[exFC])*->$") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exFC", [](const NodePtr& node) {
+            return (node->type() != "FC");
+        });
+
+        const auto results = gm.match("FC#->(.[exFC])*->$");
+
+        checkMatches(results, {
+            {"fc3", {"fc3", "relu3", "conv4"}}
+        });
+    }
+
+    SECTION("Conv#->(.[exConv])*->$") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exConv", [](const NodePtr& node) {
+            return (node->type() != "Conv");
+        });
+
+        const auto results = gm.match("Conv#->(.[exConv])*->$");
+
+        checkMatches(results, {
+            {"conv4", {"conv4"}}
+        });
+    }
+
+    // Find first node of a type
+    SECTION("FC#<-(.[exFC])*<-$") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exFC", [](const NodePtr& node) {
+            return (node->type() != "FC");
+        });
+
+        const auto results = gm.match("FC#<-(.[exFC])*<-$");
+
+        checkMatches(results, {
+            {"fc1", {"fc1", "bn3", "conv3", "relu2", "conv2", "bn1", "conv1", "dataProvider"}}
+        });
+    }
+
+    SECTION("(((FC#|Conv#)<-(.[exParam])*<-$)|((FC#|Conv#)->(.[exParam])*->$));(FC#|Conv#)<1-Producer#") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exParam", [](const NodePtr& node) {
+            return (node->type() != "FC" && node->type() != "Conv");
+        });
+
+        const auto results = gm.match("(((FC#|Conv#)<-(.[exParam])*<-$)|((FC#|Conv#)->(.[exParam])*->$));(FC#|Conv#)<1-Producer#");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "conv1_w", "dataProvider"}},
+            {"conv4", {"conv4", "conv4_w"}}
+        });
+    }
+
     SECTION("Conv->ReLU [perf]") {
         const size_t nbTests = 3;
         std::mt19937::result_type seed(1);
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
index e05e105d34a981e33cc1a0baaffa2702f1f6bbbb..68ac509e79e347106a9a132249f125ebe6e39f6a 100644
--- a/unit_tests/graphRegex/Test_GraphRegex.cpp
+++ b/unit_tests/graphRegex/Test_GraphRegex.cpp
@@ -175,27 +175,17 @@ TEST_CASE("GraphRegexUser") {
         matmul1->addChild(add1, 0, 0);
         b1->addChild(add1, 0, 1);
 
-        auto fc = GenericOperator("FC", 1, 0, 1, "c");
-        auto fl = GenericOperator("Flatten", 1, 0, 1, "c");
-
-
+        auto fc = GenericOperator("FC", 1, 0, 1, "fc1");
+        auto fl = GenericOperator("Flatten", 1, 0, 1, "flatten0");
+        add1->addChild(fl, 0, 0);
+        fl->addChild(fc, 0, 0);
         auto g = std::make_shared<GraphView>();
-        g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1,fl,fc});
-
-        std::shared_ptr<GraphRegex> kitchenBook = std::make_shared<GraphRegex>();
-
-        kitchenBook->setNodeKey("Add","getType($) =='Add'");
-        kitchenBook->setNodeKey("MatMul","getType($) =='MatMul'");
-        kitchenBook->setNodeKey("Flatten","getType($) =='Flatten'");
-        kitchenBook->setNodeKey("FC","getType($) =='FC'");
-
-        //kitchenBook->addQuery("MatMul->Add",static_cast<void(*)(std::shared_ptr<MatchSolution>)>(fuseMulAdd));
-        kitchenBook->addQuery("Flatten->FC",static_cast<void(*)(std::shared_ptr<MatchSolution>)>(removeFlatten));
-
-        kitchenBook->appliedRecipes(g);
+        g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1, fl, fc});
 
+        matMulToFC(g);
+        removeFlatten(g);
         std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
-        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1,fc}));
+        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1,fl,fc}));
 	    //REQUIRE(newNodes.size() == 6);
 
 
diff --git a/unit_tests/operator/Test_BitShift_Op.cpp b/unit_tests/operator/Test_BitShift_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..39916e4e75779ecc63680b43ece8ccd2bdc667c9
--- /dev/null
+++ b/unit_tests/operator/Test_BitShift_Op.cpp
@@ -0,0 +1,133 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/BitShift.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] BitShift_Op(forwardDims)", "[BitShift][forwardDims]") 
+{
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Shift Operator
+    std::shared_ptr<Node> myShift = BitShift(BitShift_Op::BitShiftDirection::right);
+    auto op = std::static_pointer_cast<OperatorTensor>(myShift-> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+    SECTION("BitShift_Op Test dimensions [Scalar]") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+
+    SECTION("BitShift_Op Test dimensions [Same Size]") {
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+    }
+    SECTION("BitShift_Op Test dimensions [Broadcast]") {
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+        }
+    }
+    SECTION("BitShifOP Test dimensions [Wrong Dimensions]") {
+        
+       for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
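+            // corrupt every dimension of input_1 so that each entry is neither
+            // equal to the matching input_0 dimension nor 1: broadcasting must
+            // fail and forwardDims() is expected to throw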
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->forwardDims());
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_ConstantOfShape.cpp b/unit_tests/operator/Test_ConstantOfShape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c10d97ce5fb774e051e75f051772e1cbcd41dbea
--- /dev/null
+++ b/unit_tests/operator/Test_ConstantOfShape.cpp
@@ -0,0 +1,85 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <algorithm>
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <cstddef> // std::size_t
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <random> // std::mt19937, std::uniform_int_distribution
+#include <system_error>
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+#include "aidge/operator/ConstantOfShape.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+TEST_CASE("[core/operator] ConstantOfShape_Op(forwardDims)",
+          "[ConstantOfShape][forwardDims]") {
+  constexpr std::uint16_t NBTRIALS = 10;
+
+  // Create a random number generator
+  auto random_seed = Catch::Generators::Detail::getSeed;
+  std::mt19937 gen(random_seed());
+  std::uniform_int_distribution<std::size_t> input_tensor_dims_dist(1, 10);
+  std::uniform_int_distribution<std::size_t> input_tensor_value_dist(1, 9);
+  std::uniform_real_distribution<float> op_value_attr_value_dist(1, 10000);
+
+  std::uniform_int_distribution<std::size_t> op_value_attr_type_dist(
+      0, static_cast<int>(Aidge::DataType::UInt64));
+  // TENSORS
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+  input_T->setDataType(Aidge::DataType::Int64);
+  input_T->setBackend("cpu");
+
+  SECTION("operator test") {
+    // Create Operator
+    for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+      std::shared_ptr<Node> node =
+          ConstantOfShape(Tensor(op_value_attr_value_dist(gen)));
+      auto op =
+          std::static_pointer_cast<ConstantOfShape_Op>(node->getOperator());
+      op->associateInput(0, input_T);
+
+      std::vector<DimSize_t> input_dims;
+      input_dims.push_back(input_tensor_dims_dist(gen));
+
+      Log::setConsoleLevel(Log::Debug);
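+      // the 1-D input tensor holds the requested output shape: each of its
+      // values becomes one output dimension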
+      const std::size_t input_nb_elems = input_dims.at(0);
+      std::size_t output_nb_elems = 1;
+      int64_t *array_in = new int64_t[input_nb_elems];
+      for (std::size_t i = 0; i < input_nb_elems; ++i) {
+        std::int64_t val = input_tensor_value_dist(gen);
+        array_in[i] = val;
+        output_nb_elems *= val;
+      }
+
+      input_T->resize(input_dims);
+      op->setInput(0, input_T);
+      input_T->getImpl()->setRawPtr(array_in, input_nb_elems);
+
+      REQUIRE(op->forwardDims(true));
+      REQUIRE(input_T->size() == op->getOutput(0)->nbDims());
+      for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) {
+        CHECK(array_in[i] == op->getOutput(0)->dims().at(i));
+      }
+      delete[] array_in;
+    }
+  }
+}
+} // namespace Aidge
+
diff --git a/unit_tests/operator/Test_Squeeze_Op.cpp b/unit_tests/operator/Test_Squeeze_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..471a1dcd1e45384b2c65da75ddee9d3ec039dc34
--- /dev/null
+++ b/unit_tests/operator/Test_Squeeze_Op.cpp
@@ -0,0 +1,457 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Squeeze.hpp"
+
+#include <aidge/utils/Types.h>
+#include <algorithm>
+#include <array>
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <chrono>
+#include <cmath>
+#include <cstddef> // std::size_t
+#include <cstdint> // std::uint16_t
+#include <fmt/core.h>
+#include <iostream>
+#include <iterator>
+#include <memory>
+#include <numeric> // std::accumulate
+#include <ostream>
+#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
+  Log::setConsoleLevel(Log::Notice);
+  constexpr std::uint16_t NB_TRIALS = 10;
+  // Create a random number generator
+  auto random_seed = Catch::Generators::Detail::getSeed;
+  std::mt19937 gen(random_seed());
+
+  // Random float distribution between 0 and 1
+  constexpr int8_t max_nb_dims = 7;
+  std::uniform_real_distribution<float> tensor_value_dist(0.1f, 1.1f);
+  std::uniform_int_distribution<std::size_t> tensor_nb_dims_dist(
+      std::size_t(1), std::size_t(max_nb_dims));
+  std::uniform_int_distribution<std::size_t> tensor_dims_size_dist(
+      std::size_t(1), std::size_t(5));
+  std::uniform_int_distribution<std::size_t> nb_dims_to_squeeze_dist(
+      std::size_t(1), std::size_t(2));
+  std::uniform_int_distribution<short> idx_dims_to_squeeze_dist(-9, 8);
+
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+
+  SECTION("ERROR : Inputs not ready") {
+    SECTION("unconnected input") {
+      std::shared_ptr<Node> squeeze_node = Squeeze();
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+      REQUIRE_THROWS(op->forwardDims());
+    }
+
+    SECTION("empty tensor") {
+      // Create the Squeeze Operator
+      std::shared_ptr<Node> squeeze_node = Squeeze(std::vector<int8_t>({0}));
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+      op->associateInput(0, input_T);
+
+      CHECK(op->forwardDims() == false);
+    }
+  }
+  SECTION("ERROR : nb_dims_to_squeeze>input.size()") {
+    constexpr size_t nb_dims_to_squeeze = 100;
+
+    std::vector<int8_t> dims_to_squeeze(nb_dims_to_squeeze);
+    std::generate(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                  [&gen, &idx_dims_to_squeeze_dist]() {
+                    return idx_dims_to_squeeze_dist(gen);
+                  });
+    Log::error("dims_to_sqeeze = {}", dims_to_squeeze);
+
+    std::shared_ptr<Node> squeeze_node = Squeeze(dims_to_squeeze);
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+
+    // input tensor
+    const std::size_t nb_dims = tensor_nb_dims_dist(gen);
+    std::vector<std::size_t> dims_in(nb_dims);
+    std::generate(dims_in.begin(), dims_in.end(),
+                  [&tensor_dims_size_dist, &gen]() {
+                    return tensor_dims_size_dist(gen);
+                  });
+
+    // Test
+    input_T->resize(dims_in);
+    op->setInput(0, input_T);
+    REQUIRE_THROWS(op->forwardDims());
+  }
+  SECTION("Compare with reference output") {
+    SECTION("axes is given via attribute") {
+      SECTION("Squeeze a 1-sized-axis") {
+        int8_t nb_dims = 4;
+        std::shared_ptr<Node> squeeze_node = Squeeze(std::vector<int8_t>({0}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{1, 2, 3, 4};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims());
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({2, 3, 4}));
+        CHECK((op->getOutput(0)->dims().size()) == 3);
+      }
+      SECTION("Squeeze multiple 1-sized axes") {
+        // test should be successful
+        std::shared_ptr<Node> squeeze_node =
+            Squeeze(std::vector<int8_t>({1, -4}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{1, 1, 13, 200};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims());
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>{13, 200});
+        CHECK((op->getOutput(0)->dims().size()) == 2);
+      }
+      SECTION("Squeeze a non-1-Sized axis") {
+        int8_t nb_dims = 4;
+        std::shared_ptr<Node> squeeze_node = Squeeze(std::vector<int8_t>({3}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{1, 2, 3, 4};
+        input_T->resize(dims_in);
+
+        REQUIRE_THROWS(op->forwardDims());
+      }
+      SECTION("Squeeze multiple non-sized-axes") {
+        std::shared_ptr<Node> squeeze_node =
+            Squeeze(std::vector<int8_t>({1, -2}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        std::array<DimSize_t, 3> dims_in{2, 3, 4};
+        input_T->resize(dims_in);
+
+        REQUIRE_THROWS((op->forwardDims()));
+      }
+    }
+    SECTION("axes is given via tensor") {
+      SECTION("tensor is empty") {
+        // the attribute axes {0, 4} should be overridden by the axes_T values:
+        // an empty axes tensor squeezes every 1-sized dimension
+        std::shared_ptr<Node> squeeze_node =
+            Squeeze(std::vector<std::int8_t>({0, 4}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        auto axes_T =
+            std::make_shared<Aidge::Tensor>(std::vector<DimSize_t>({}));
+        axes_T->setDataType(Aidge::DataType::Int8);
+        axes_T->setBackend("cpu");
+
+        std::vector<DimSize_t> dims_in{3, 1, 4, 1, 1, 5};
+        input_T->resize(dims_in);
+        op->associateInput(0, input_T);
+        op->associateInput(1, axes_T);
+
+        CHECK(op->forwardDims(true));
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({3, 4, 5}));
+      }
+      SECTION("tensor not empty") {
+        // the attribute axes {3, 1} should be overridden by the axes_T values {0, 3}
+        std::shared_ptr<Node> squeeze_node =
+            Squeeze(std::vector<std::int8_t>({3, 1}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        auto axes_T =
+            std::make_shared<Aidge::Tensor>(Aidge::Array1D<int8_t, 2>({0, 3}));
+        axes_T->setDataType(Aidge::DataType::Int8);
+        axes_T->setBackend("cpu");
+
+        std::vector<DimSize_t> dims_in{1, 3, 4, 1, 5};
+        input_T->resize(dims_in);
+        op->associateInput(0, input_T);
+        op->associateInput(1, axes_T);
+
+        CHECK(op->forwardDims(true) == true);
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({3, 4, 5}));
+      }
+    }
+  }
+  SECTION("Squeeze()") {
+    // Create the Operator
+    std::shared_ptr<Node> squeeze_node = Squeeze();
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+    op->associateInput(0, input_T);
+
+    for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+      // input tensor
+      const std::size_t nb_dims = tensor_nb_dims_dist(gen);
+      std::vector<std::size_t> dims_in(nb_dims);
+
+      std::generate(dims_in.begin(), dims_in.end(),
+                    [&gen, &tensor_dims_size_dist]() {
+                      return tensor_dims_size_dist(gen);
+                    });
+
+      // output tensor
+      std::vector<DimSize_t> dims_out;
+      dims_out.reserve(dims_in.size());
+      std::copy_if(dims_in.begin(), dims_in.end(), std::back_inserter(dims_out),
+                   [](DimSize_t dim) { return dim != 1; });
+      // Test
+      input_T->resize(dims_in);
+      op->setInput(0, input_T);
+      CHECK(op->forwardDims() == true);
+      CHECK(op->getOutput(0)->dims() == dims_out);
+
+      const std::size_t nb_ones =
+          std::count_if(dims_in.begin(), dims_in.end(),
+                        [](std::size_t dim) { return dim == 1; });
+      CHECK((op->getInput(0)->dims().size() -
+             op->getOutput(0)->dims().size()) == nb_ones);
+    }
+  }
+  SECTION("Squeeze({N,...})") {
+    int number_of_operation{0};
+    for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+      // Create the Operator
+      size_t nb_dims_to_squeeze = nb_dims_to_squeeze_dist(gen);
+      std::vector<int8_t> dims_to_squeeze(nb_dims_to_squeeze);
+      std::generate(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                    [&gen, &idx_dims_to_squeeze_dist]() {
+                      return idx_dims_to_squeeze_dist(gen);
+                    });
+      std::shared_ptr<Node> squeeze_node = Squeeze({dims_to_squeeze});
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+      op->associateInput(0, input_T);
+
+      // input tensor
+      const std::size_t nb_dims_tensor = tensor_nb_dims_dist(gen);
+      std::vector<std::size_t> dims_in(nb_dims_tensor);
+      std::generate(dims_in.begin(), dims_in.end(),
+                    [&gen, &tensor_dims_size_dist]() {
+                      return tensor_dims_size_dist(gen);
+                    });
+      input_T->resize(dims_in);
+      op->setInput(0, input_T);
+
+      // rectifying indexes
+      std::transform(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                     dims_to_squeeze.begin(),
+                     [&nb_dims_tensor](int8_t dim_to_squeeze) {
+                       return dim_to_squeeze < 0
+                                  ? dim_to_squeeze + nb_dims_tensor
+                                  : dim_to_squeeze;
+                     });
+      std::sort(dims_to_squeeze.begin(), dims_to_squeeze.end());
+      auto it = std::unique(dims_to_squeeze.begin(), dims_to_squeeze.end());
+      dims_to_squeeze.erase(it, dims_to_squeeze.end());
+
+      // ensuring arguments given to Squeeze are good
+      bool not_in_bounds = false;
+      bool dim_to_squeeze_not_1_sized = false;
+      for (const auto dim_to_squeeze : dims_to_squeeze) {
+        not_in_bounds = dim_to_squeeze >= nb_dims_tensor;
+        if (not_in_bounds) {
+          break;
+        }
+        dim_to_squeeze_not_1_sized = dims_in.at(dim_to_squeeze) != 1;
+        if (dim_to_squeeze_not_1_sized) {
+          break;
+        }
+      }
+
+      if (nb_dims_tensor > max_nb_dims || not_in_bounds ||
+          dim_to_squeeze_not_1_sized) {
+        REQUIRE_THROWS(op->forwardDims());
+      } else {
+        // output tensor
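+        // a dimension is kept unless it is 1-sized and its index appears in
+        // dims_to_squeeze (checked with binary_search on the sorted list)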
+        int i = 0;
+        std::vector<DimSize_t> dims_out;
+        dims_out.reserve(dims_in.size());
+        std::copy_if(dims_in.begin(), dims_in.end(),
+                     std::back_inserter(dims_out),
+                     [&dims_to_squeeze, &i](DimSize_t dim) {
+                       bool ok = dim != 1 ||
+                                 !std::binary_search(dims_to_squeeze.begin(),
+                                                     dims_to_squeeze.end(), i);
+                       i++; // manual index counter (std::views::enumerate
+                            // only arrives in C++23)
+                       return ok;
+                     });
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() == dims_out);
+      }
+    }
+  }
+}
+
+TEST_CASE("[core/operator] Squeeze(forward)", "[Squeeze][forward]") {
+  Log::setConsoleLevel(Log::Notice);
+  constexpr std::uint16_t NB_TRIALS = 10;
+  // Create a random number generator
+  auto random_seed = Catch::Generators::Detail::getSeed;
+  std::mt19937 gen(random_seed());
+
+  constexpr int8_t max_nb_dims = 7;
+  std::uniform_real_distribution<float> tensor_value_dist(0.1f, 1.1f);
+  std::uniform_int_distribution<std::size_t> tensor_nb_dims_dist(
+      std::size_t(1), std::size_t(max_nb_dims));
+  std::uniform_int_distribution<std::size_t> tensor_dims_size_dist(
+      std::size_t(1), std::size_t(5));
+  std::uniform_int_distribution<std::size_t> nb_dims_to_squeeze_dist(
+      std::size_t(1), std::size_t(2));
+  std::uniform_int_distribution<short> idx_dims_to_squeeze_dist(-9, 8);
+
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+
+  // BENCHMARKING
+  std::chrono::time_point<std::chrono::system_clock> start;
+  std::chrono::time_point<std::chrono::system_clock> end;
+  std::chrono::duration<double, std::micro> duration{};
+
+  Log::setConsoleLevel(Log::Notice);
+  int number_of_operation{0};
+  for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+    // Create the Operator
+    size_t nb_dims_to_squeeze = nb_dims_to_squeeze_dist(gen);
+    std::vector<int8_t> dims_to_squeeze(nb_dims_to_squeeze);
+    std::generate(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                  [&gen, &idx_dims_to_squeeze_dist]() {
+                    return idx_dims_to_squeeze_dist(gen);
+                  });
+    std::shared_ptr<Node> squeeze_node = Squeeze({dims_to_squeeze});
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // input tensor
+    const std::size_t nb_dims_tensor = tensor_nb_dims_dist(gen);
+    std::vector<std::size_t> dims_in(nb_dims_tensor);
+    std::generate(dims_in.begin(), dims_in.end(),
+                  [&gen, &tensor_dims_size_dist]() {
+                    return tensor_dims_size_dist(gen);
+                  });
+    input_T->resize(dims_in);
+    op->setInput(0, input_T);
+
+    // rectifying indexes
+    std::transform(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                   dims_to_squeeze.begin(),
+                   [&nb_dims_tensor](int8_t dim_to_squeeze) {
+                     return dim_to_squeeze < 0 ? dim_to_squeeze + nb_dims_tensor
+                                               : dim_to_squeeze;
+                   });
+
+    // ensuring arguments given to Squeeze are good
+    bool not_in_bounds = false;
+    bool dim_to_squeeze_not_1_sized = false;
+    for (const auto dim_to_squeeze : dims_to_squeeze) {
+      not_in_bounds = dim_to_squeeze >= nb_dims_tensor;
+      if (not_in_bounds) {
+        break;
+      }
+      dim_to_squeeze_not_1_sized = dims_in.at(dim_to_squeeze) != 1;
+      if (dim_to_squeeze_not_1_sized) {
+        break;
+      }
+    }
+    if (nb_dims_tensor > max_nb_dims || not_in_bounds ||
+        dim_to_squeeze_not_1_sized) {
+      REQUIRE_THROWS(op->forwardDims());
+    } else {
+      // output tensor: keep every dimension unless it is 1-sized and its index
+      // was requested in dims_to_squeeze
+      std::vector<DimSize_t> dims_out;
+      dims_out.reserve(dims_in.size());
+      for (DimIdx_t i = 0; i < dims_in.size(); ++i) {
+        if (dims_in[i] == 1 &&
+            std::find(dims_to_squeeze.begin(), dims_to_squeeze.end(), i) !=
+                dims_to_squeeze.end()) {
+          continue;
+        }
+        dims_out.push_back(dims_in[i]);
+      }
+      CHECK(op->forwardDims());
+      CHECK(op->getOutput(0)->dims() == dims_out);
+
+      SECTION("forward") {
+        // Create the input Tensor
+        std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+        input_T->setDataType(DataType::Float32);
+        input_T->setBackend("cpu");
+        op->associateInput(0, input_T);
+
+        // Create results Tensor
+        std::shared_ptr<Tensor> result_T = std::make_shared<Tensor>();
+        result_T->setDataType(DataType::Float32);
+        result_T->setBackend("cpu");
+
+        const std::size_t nb_elems =
+            std::accumulate(dims_in.cbegin(), dims_in.cend(), std::size_t(1),
+                            std::multiplies<std::size_t>());
+        float *array_in = new float[nb_elems];
+        for (std::size_t i = 0; i < nb_elems; ++i) {
+          float val = tensor_value_dist(gen);
+          array_in[i] = val;
+        }
+        number_of_operation += nb_elems; // Copying all values : 1
+                                         // assignation / item in the tensor
+        // input0
+        input_T->resize(dims_in);
+        input_T->getImpl()->setRawPtr(array_in, nb_elems);
+
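+        // Squeeze only reshapes the data: the expected result reuses the same
+        // raw buffer as the input, viewed with the squeezed dimensions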
+        result_T->resize(dims_out);
+        result_T->getImpl()->setRawPtr(array_in, nb_elems);
+
+        CHECK(op->forwardDims() == true);
+        start = std::chrono::system_clock::now();
+        REQUIRE_NOTHROW(squeeze_node->forward());
+        end = std::chrono::system_clock::now();
+        duration +=
+            std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+        CHECK(result_T->nbDims() == op->getOutput(0)->nbDims());
+        for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) {
+          CHECK(result_T->dims().at(i) == op->getOutput(0)->dims().at(i));
+        }
+        CHECK(approxEq<float>(*result_T, *(op->getOutput(0))));
+
+        delete[] array_in;
+      }
+      std::cout << "Squeeze total execution time : " << duration.count() << "µs"
+                << std::endl;
+      std::cout << "Number of operations : " << number_of_operation
+                << std::endl;
+      std::cout << "Operation / µs = " << number_of_operation / duration.count()
+                << std::endl;
+    }
+  }
+}
+
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Unsqueeze_Op.cpp b/unit_tests/operator/Test_Unsqueeze_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..79f5b89b1c08f409b214a9439431c2d2a51ddbd2
--- /dev/null
+++ b/unit_tests/operator/Test_Unsqueeze_Op.cpp
@@ -0,0 +1,382 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <algorithm>
+#include <chrono>
+#include <cmath>
+#include <cstddef> // std::size_t
+#include <cstdint> // std::uint16_t
+#include <fmt/core.h>
+#include <iostream>
+#include <memory>
+#include <numeric> // std::accumulate
+#include <ostream>
+#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Unsqueeze.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
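+// Helper for the tests below: an axes list is considered valid if every index
+// fits below the output rank (input rank + number of inserted axes) and no
+// index appears twice.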
+bool ensure_axes_validity(std::vector<int8_t> dims_to_unsqueeze,
+                          DimIdx_t nb_dims_input_tensor) {
+
+  bool in_bounds =
+      std::all_of(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end(),
+                  [&nb_dims_input_tensor,
+                   &dims_to_unsqueeze](const int8_t &dim_to_unsqueeze) {
+                    return (dim_to_unsqueeze <
+                            nb_dims_input_tensor + dims_to_unsqueeze.size());
+                  });
+
+  std::sort(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end());
+  bool index_appear_twice =
+      dims_to_unsqueeze.end() !=
+      std::adjacent_find(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end());
+
+  return in_bounds && !index_appear_twice;
+}
+
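+// Helper for the tests below: builds the expected output shape by inserting a
+// 1-sized dimension at each (sorted) unsqueeze index of the input shape.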
+std::vector<DimSize_t>
+generate_unsqueeze_output_dims(std::vector<size_t> dims_in,
+                               std::vector<int8_t> dims_to_unsqueeze) {
+
+  std::sort(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end());
+  std::vector<DimSize_t> dims_out(dims_in);
+  dims_out.reserve(dims_in.size() + dims_to_unsqueeze.size());
+  for (const DimIdx_t &dim : dims_to_unsqueeze) {
+    dims_out.insert(dims_out.begin() + dim, 1);
+  }
+  return dims_out;
+}
+
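+// Helper for the tests below: maps negative axes to their positive counterpart
+// by adding `offset`; non-negative axes are returned unchanged.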
+std::vector<int8_t> rectify_indexes(const std::vector<int8_t> & dims_to_unsqueeze,
+                                    const int8_t offset) {
+  std::vector<int8_t> output;
+  output.reserve(dims_to_unsqueeze.size());
+  for (int8_t dim : dims_to_unsqueeze) {
+    output.push_back(dim >= 0 ? dim : dim + offset);
+  }
+  return output;
+}
+
+TEST_CASE("[core/operator] Unsqueeze(forwardDims)",
+          "[Unsqueeze][forwardDims]") {
+  constexpr std::uint16_t NB_TRIALS = 10;
+  // Create a random number generator
+  auto random_seed = Catch::Generators::Detail::getSeed;
+  std::mt19937 gen(random_seed());
+
+  std::uniform_real_distribution<float> valueDist(0.1f, 1.1f);
+  std::uniform_int_distribution<std::size_t> tensor_dims_size_dist(
+      std::size_t(1), std::size_t(10));
+  std::uniform_int_distribution<std::size_t> tensor_nb_dims_dist(
+      std::size_t(1), std::size_t(7));
+  std::uniform_int_distribution<std::size_t> nb_dims_to_unsqueeze_dist(
+      std::size_t(1), std::size_t(8));
+
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+  std::shared_ptr<Tensor> axes_T = std::make_shared<Tensor>();
+
+  SECTION("ERROR : Inputs not ready") {
+    SECTION("unconnected input") {
+      std::shared_ptr<Node> myUnsqueeze =
+          Unsqueeze(std::vector<std::int8_t>({0}));
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(myUnsqueeze->getOperator());
+      REQUIRE_THROWS(op->forwardDims());
+    }
+
+    std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+
+    SECTION("empty tensor") {
+      // Create the Unsqueeze Operator
+      std::shared_ptr<Node> myUnsqueeze =
+          Unsqueeze(std::vector<std::int8_t>({0}));
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(myUnsqueeze->getOperator());
+      op->associateInput(0, input_T);
+
+      CHECK(op->forwardDims() == false);
+    }
+  }
+  SECTION("Compare with reference output") {
+    int8_t nb_dims = 3;
+    SECTION("axes is given via attribute") {
+      SECTION("unsqueez(0)") {
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({0}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{2, 3, 4};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({1, 2, 3, 4}));
+        CHECK((op->getOutput(0)->dims().size()) == nb_dims + 1);
+      }
+      SECTION("Unsqueeze(1)") {
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({1}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        std::array<DimSize_t, 3> dims_in{2, 3, 4};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({2, 1, 3, 4}));
+        CHECK((op->getOutput(0)->dims().size()) == nb_dims + 1);
+      }
+      SECTION("Unsqueeze(2)") {
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({2}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{2, 3, 4};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({2, 3, 1, 4}));
+        CHECK((op->getOutput(0)->dims().size()) == nb_dims + 1);
+      }
+      SECTION("Unsqueeze({0,4})") {
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({0, 4}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{3, 4, 5};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() ==
+              std::vector<DimSize_t>({1, 3, 4, 5, 1}));
+      }
+    }
+    SECTION("axes is given via tensor") {
+        // the attribute axes {0, 4} should be overridden by the axes_T values {1, 3, 4}
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({0, 4}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        auto axes_T = std::make_shared<Aidge::Tensor>(
+            Aidge::Array1D<int8_t, 3>({1, 3, 4}));
+        axes_T->setDataType(Aidge::DataType::Int8);
+        axes_T->setBackend("cpu");
+
+        std::vector<DimSize_t> dims_in{3, 4, 5};
+        input_T->resize(dims_in);
+        op->associateInput(0, input_T);
+        op->associateInput(1, axes_T);
+
+        CHECK(op->forwardDims(true) == true);
+        CHECK(op->getOutput(0)->dims() ==
+              std::vector<DimSize_t>({3, 1, 4, 1, 1, 5}));
+    }
+  }
+  SECTION("Random testing") {
+    SECTION("Unsqueeze({N,...})") {
+      int number_of_operation{0};
+      for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+        const size_t nb_dims_to_unsqueeze = nb_dims_to_unsqueeze_dist(gen);
+        const size_t nb_dims_tensor = tensor_nb_dims_dist(gen);
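+        // draw axis indexes slightly outside the valid range (variance_error)
+        // so that the invalid-axis error path is exercised as well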
+        const int idx_dims_to_unsqueeze_max =
+            static_cast<int>(nb_dims_to_unsqueeze + nb_dims_tensor);
+        const int variance_error = 2;
+        std::uniform_int_distribution<short> idx_dims_to_unsqueeze_dist(
+            -idx_dims_to_unsqueeze_max - variance_error,
+            idx_dims_to_unsqueeze_max - 1 + variance_error);
+        // Create the Operator
+        std::vector<int8_t> dims_to_unsqueeze(nb_dims_to_unsqueeze);
+        std::generate(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end(),
+                      [&gen, &idx_dims_to_unsqueeze_dist]() {
+                        return idx_dims_to_unsqueeze_dist(gen);
+                      });
+        std::shared_ptr<Node> unsqueeze_node = Unsqueeze(dims_to_unsqueeze);
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            unsqueeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        // input tensor
+        std::vector<std::size_t> dims_in(nb_dims_tensor);
+        std::generate(dims_in.begin(), dims_in.end(),
+                      [&gen, &tensor_dims_size_dist]() {
+                        return tensor_dims_size_dist(gen);
+                      });
+        input_T->resize(dims_in);
+        op->setInput(0, input_T);
+
+        dims_to_unsqueeze = rectify_indexes(
+            dims_to_unsqueeze, input_T->nbDims() + dims_to_unsqueeze.size());
+        bool dims_to_unsqueeze_valid =
+            ensure_axes_validity(dims_to_unsqueeze, input_T->nbDims());
+        Log::warn("raw dims_to_unsqueeze : {}", dims_to_unsqueeze);
+        Log::warn("dims_to_unsqueeze : {}", dims_to_unsqueeze);
+        Log::warn("tensor dims : {}", input_T->dims());
+
+        if (!dims_to_unsqueeze_valid) {
+          REQUIRE_THROWS(op->forwardDims(true));
+        } else {
+          // output tensor
+          std::vector<DimSize_t> dims_out =
+              generate_unsqueeze_output_dims(dims_in, dims_to_unsqueeze);
+          Log::warn("dims_out : {}", dims_out);
+          CHECK(op->forwardDims(true) == true);
+          CHECK(op->getOutput(0)->dims() == dims_out);
+        }
+      }
+    }
+  }
+}
+
+TEST_CASE("[core/operator] Unsqueeze(forward)", "[Unsqueeze][forward]") {
+  constexpr std::uint16_t NB_TRIALS = 10;
+  // Create a random number generator
+  std::random_device rd;
+  auto random_seed = rd();
+  std::cout << "True random seed : " << random_seed << std::endl;
+  std::mt19937 gen(random_seed);
+  // Random float distribution between 0 and 1
+  std::uniform_real_distribution<float> valueDist(0.1f, 1.1f);
+  std::uniform_int_distribution<std::size_t> tensor_dims_size_dist(
+      std::size_t(1), std::size_t(10));
+  std::size_t min_tensor_nb_dims{1};
+  std::size_t max_tensor_nb_dims{7};
+  std::uniform_int_distribution<std::size_t> tensor_nb_dims_dist(
+      min_tensor_nb_dims, max_tensor_nb_dims);
+  std::uniform_int_distribution<std::size_t> nb_dims_to_unsqueeze_dist(
+      std::size_t(1), std::size_t(8));
+  std::uniform_int_distribution<short> idx_dims_to_unsqueeze_dist(-9, 8);
+
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+  input_T->setDataType(DataType::Float32);
+  input_T->setBackend("cpu");
+  std::shared_ptr<Tensor> result_T = std::make_shared<Tensor>();
+  result_T->setDataType(DataType::Float32);
+  result_T->setBackend("cpu");
+
+  // BENCHMARKING
+  std::chrono::time_point<std::chrono::system_clock> start;
+  std::chrono::time_point<std::chrono::system_clock> end;
+  std::chrono::duration<double, std::micro> duration{};
+
+  int number_of_operation{0};
+  for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+    // Create the Operator
+    size_t nb_dims_to_unsqueeze = nb_dims_to_unsqueeze_dist(gen);
+    std::vector<int8_t> dims_to_unsqueeze(nb_dims_to_unsqueeze);
+    std::generate(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end(),
+                  [&gen, &idx_dims_to_unsqueeze_dist]() {
+                    return idx_dims_to_unsqueeze_dist(gen);
+                  });
+    std::shared_ptr<Node> unsqueeze_node = Unsqueeze(dims_to_unsqueeze);
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(unsqueeze_node->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+    op->associateInput(0, input_T);
+
+    // input tensor
+    const std::size_t nb_dims_tensor = tensor_nb_dims_dist(gen);
+    std::vector<std::size_t> dims_in(nb_dims_tensor);
+    std::generate(dims_in.begin(), dims_in.end(),
+                  [&gen, &tensor_dims_size_dist]() {
+                    return tensor_dims_size_dist(gen);
+                  });
+    input_T->resize(dims_in);
+    op->setInput(0, input_T);
+
+    // rectifying indexes
+    std::transform(
+        dims_to_unsqueeze.begin(), dims_to_unsqueeze.end(),
+        dims_to_unsqueeze.begin(),
+        [&nb_dims_tensor, &nb_dims_to_unsqueeze](int8_t dim_to_unsqueeze) {
+          return dim_to_unsqueeze < 0
+                     ? dim_to_unsqueeze +
+                           (nb_dims_tensor + nb_dims_to_unsqueeze)
+                     : dim_to_unsqueeze;
+        });
+
+    // ensuring arguments given to Unsqueeze are good
+    bool axes_to_unsqueeze_valid =
+        ensure_axes_validity(dims_to_unsqueeze, input_T->nbDims());
+    if (!axes_to_unsqueeze_valid) {
+      REQUIRE_THROWS(op->forwardDims(true));
+    } else {
+      // output tensor
+      std::vector<DimSize_t> dims_out =
+          generate_unsqueeze_output_dims(dims_in, dims_to_unsqueeze);
+      CHECK(op->forwardDims(true) == true);
+      CHECK(op->getOutput(0)->dims() == dims_out);
+
+      SECTION("forward") {
+        const std::size_t nb_elems =
+            std::accumulate(dims_in.cbegin(), dims_in.cend(), std::size_t(1),
+                            std::multiplies<std::size_t>());
+        float *array_in = new float[nb_elems];
+        for (std::size_t i = 0; i < nb_elems; ++i) {
+          array_in[i] = valueDist(gen);
+        }
+        number_of_operation += nb_elems; // Copying all values : 1
+                                         // assignation / item in the tensor
+
+        // input0
+        input_T->resize(dims_in);
+        input_T->getImpl()->setRawPtr(array_in, nb_elems);
+
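+        // Unsqueeze only reshapes the data: the expected result reuses the
+        // same raw buffer as the input, viewed with extra 1-sized dimensions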
+        // results
+        result_T->resize(dims_out);
+        result_T->getImpl()->setRawPtr(array_in, nb_elems);
+
+        CHECK(op->forwardDims(true) == true);
+        start = std::chrono::system_clock::now();
+        REQUIRE_NOTHROW(unsqueeze_node->forward());
+        end = std::chrono::system_clock::now();
+        duration +=
+            std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+        CHECK(result_T->nbDims() == op->getOutput(0)->nbDims());
+        for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) {
+          CHECK(result_T->dims().at(i) == op->getOutput(0)->dims().at(i));
+        }
+        CHECK(approxEq<float>(*result_T, *(op->getOutput(0))));
+
+        delete[] array_in;
+      }
+    }
+    std::cout << "Unsqueeze total execution time : " << duration.count() << "µs"
+              << std::endl;
+    std::cout << "Number of operations : " << number_of_operation << std::endl;
+    std::cout << "Operation / µs = " << number_of_operation / duration.count()
+              << std::endl;
+  }
+}
+
+} // namespace Aidge
diff --git a/unit_tests/recipes/Test_removeConstantOfShape.cpp b/unit_tests/recipes/Test_removeConstantOfShape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..247149a0fdb1087f14ac17d125659d677ccfb506
--- /dev/null
+++ b/unit_tests/recipes/Test_removeConstantOfShape.cpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/Identity.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/ConstantOfShape.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/recipies] removeConstantOfShape",
+          "[ConstantOfShape][removeConstantOfShape][recipies]") {
+  auto input_T = std::make_shared<Tensor>(Array1D<int64_t, 4>({1, 1, 3, 3}));
+
+  auto model = std::make_shared<GraphView>();
+  SECTION("Sequential model") {
+    model = Sequential({Producer(input_T, "prod_0", true),
+                        ConstantOfShape(3, "constantOfShape_0"),
+                        Conv(1, 1, {3, 3}, "Conv_0"), ReLU("ReLU_1")});
+    model->save("test_removeConstantOfShape_model_before_1");
+    CHECK(removeConstantOfShape(model) == 1);
+    CHECK(model->forwardDims());
+    model->save("test_removeConstantOfShape_model_after_1");
+  }
+}
+
diff --git a/unit_tests/recipes/Test_removeFlatten.cpp b/unit_tests/recipes/Test_removeFlatten.cpp
index 24f5aa2e231b5204add1c8f87cdeb7a71175ea05..c3b4c08d98115c9f081bbbf8cb677114b66c545a 100644
--- a/unit_tests/recipes/Test_removeFlatten.cpp
+++ b/unit_tests/recipes/Test_removeFlatten.cpp
@@ -42,7 +42,7 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
 
     CHECK(g->getOrderedInputs().size() == 1);
     CHECK(g->getOrderedInputs()[0].first == fc0);
-    
+
     CHECK(fc0->getParent(0) == nullptr);
     CHECK(fc0->getChildren(0).size() == 1);
     CHECK(g->rootNode() == fc0);
@@ -54,10 +54,10 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
 
     CHECK(g->getOrderedInputs().size() == 1);
     CHECK(g->getOrderedInputs()[0].first == fc0);
-    
+
     CHECK(g->getOrderedOutputs().size() == 1);
     CHECK(g->getOrderedOutputs()[0].first == fc0);
-    
+
     CHECK(fc0->getParent(0) == nullptr);
     CHECK(fc0->getChildren(0).size() == 0);
     CHECK(g->rootNode() == fc0);
@@ -73,7 +73,7 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
 
     CHECK(g->getOrderedOutputs().size() == 1);
     CHECK(g->getOrderedOutputs()[0].first == fc1);
-    
+
     CHECK(fc1->getParent(0) == fc0);
     CHECK(fc0->getChildren(0)[0] == fc1);
     CHECK(g->rootNode() == fc0);
@@ -87,10 +87,10 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
     removeFlatten(g);
 
     CHECK(g->getOrderedInputs().size() == 0);
-    
+
     CHECK(g->getOrderedOutputs().size() == 1);
     CHECK(g->getOrderedOutputs()[0].first == fc0);
-    
+
     CHECK(fc0->getParent(0) == prod);
     CHECK(fc0->getChildren(0).size() == 0);
 
diff --git a/version.txt b/version.txt
index ee1372d33a29e27945406f0527f8af8e6ee119c9..0d91a54c7d439e84e3dd17d3594f1b2b6737f430 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.2.2
+0.3.0