diff --git a/CMakeLists.txt b/CMakeLists.txt
index 776c4e3be35b6a2044015774c760d7b5b0d3956c..499c2971cb60f979e72419cf65b9897d0613bf0a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.15)
+cmake_minimum_required(VERSION 3.18)
 set(CXX_STANDARD 14)
 
 file(STRINGS "${CMAKE_SOURCE_DIR}/version.txt" version)
@@ -84,6 +84,8 @@ if( ${ENABLE_ASAN} )
 endif()
 
 # PYTHON BINDING
+set(AIDGE_REQUIRES_PYTHON FALSE) # Set to TRUE if the aidge_core lib depends upon the python interpreter
+set(AIDGE_PYTHON_HAS_EMBED FALSE)  # Set to TRUE if the python embedded interpreter is found on the system
 if (PYBIND)
     # Python binding lib is by default installed in <prefix>/python_packages/<package>/
     # When installed from python, setup.py should set it to the python package dir
@@ -92,13 +94,17 @@ if (PYBIND)
     include(PybindModuleCreation)
     generate_python_binding(${pybind_module_name} ${module_name})
 
-    # Handles Python + pybind11 headers dependencies
-    target_link_libraries(${module_name}
-        PUBLIC
-            pybind11::pybind11
-        PRIVATE
-            Python::Module
-        )
+    ##
+    # As of now, when PYBIND is set, the core archive itself depends upon pybind/python:
+    # we define -DPYBIND and add the pybind/python runtime dependencies where necessary.
+
+    # Add -DPYBIND to compilation and interface
+    target_compile_definitions(${module_name} PUBLIC PYBIND)
+
+    # Add dependencies on pybind/python. See details in add_pybind_dependency()
+    include(PybindDependency)
+    add_pybind_dependency(${module_name})
+    ##
 endif()
 
 target_link_libraries(${module_name} PUBLIC Threads::Threads fmt::fmt)
@@ -206,10 +212,10 @@ export(EXPORT ${CMAKE_PROJECT_NAME}-targets
 ##############################################
 ## Add test
 if(TEST)
-    if(PYBIND)
-        message(FATAL_ERROR "PYBIND and TEST are both enabled. But cannot compile with catch_2.\nChoose between pybind and Catch2 for compilation.")
+    if (AIDGE_REQUIRES_PYTHON AND NOT AIDGE_PYTHON_HAS_EMBED)
+        message(WARNING "Skipping compilation of tests: missing Python embedded interpreter")
+    else()
+        enable_testing()
+        add_subdirectory(unit_tests)
     endif()
-    enable_testing()
-    add_subdirectory(unit_tests)
 endif()
-
diff --git a/README.md b/README.md
index 4b7954d410bce0de1fb1f07c5a268cc962445d29..fe8fd5a4252054c730be8e948d0d2e415c009d47 100644
--- a/README.md
+++ b/README.md
@@ -16,6 +16,7 @@ pip install . -v
 > - `AIDGE_INSTALL` : to set the installation folder. Defaults to `<python_prefix>/lib/libAidge`
 > - `AIDGE_PYTHON_BUILD_TYPE` : to set the compilation mode to **Debug** or **Release** or "" (for default flags). Defaults to **Release**.
 > - `AIDGE_BUILD_GEN` : to set the build backend (for development mode) or "" for the cmake default. Defaults to "".
+> - `AIDGE_BUILD_TEST` : to build the C++ unit tests. Set to "ON" or "OFF". Defaults to "OFF".
 
 
 ## Pip installation for development
@@ -24,9 +25,10 @@ To setup aidge_core using pip in development (or editable mode), use the `--no-b
 
 For instance, run the following command in your python environment for a typical setup:
 ``` bash
+export AIDGE_BUILD_TEST=ON              # enable C++ unit tests
 export AIDGE_PYTHON_BUILD_TYPE=         # default flags (no debug info but fastest build time)
 export AIDGE_PYTHON_BUILD_TYPE=Debug    # or if one really needs to debug the C++ code
-pip install setuptools setuptools_scm[toml] cmake   # Pre-install build requirements (refer to the pyproject.toml [build-system] section)
+pip install -U pip setuptools setuptools_scm[toml] cmake   # Pre-install build requirements (refer to the pyproject.toml [build-system] section)
 pip install -v --no-build-isolation -e .
 ```
 
@@ -41,7 +43,7 @@ cmake --build build -j $(nproc) && cmake --install build
 
 One can also use an alternate cmake build backend such as ninja, which can be installed easily through pip, for instance:
 ``` bash
-pip install ninja
+pip install -U ninja
 export AIDGE_BUILD_GEN=Ninja
 pip install -v --no-build-isolation -e .
 ```
@@ -85,9 +87,12 @@ make all install
 | *-DCMAKE_INSTALL_PREFIX:PATH* | ``str``  | Path to the install folder |
 | *-DCMAKE_BUILD_TYPE*          | ``str``  | If ``Debug``, compile in debug mode; if ``Release``, compile with the highest optimisations; "" (empty) uses the default flags, default=``Release`` |
 | *-DWERROR*                    | ``bool`` | If ``ON``, treat warnings as errors during compilation, default=``OFF`` |
-| *-DPYBIND*                    | ``bool`` | If ``ON`` activate python binding, default=``ON`` |
+| *-DTEST*                      | ``bool`` | If ``ON`` build C++ unit tests, default=``ON`` |
+| *-DPYBIND*                    | ``bool`` | If ``ON`` activate python binding, default=``OFF`` |
+| *-DPYBIND_INSTALL_PREFIX:PATH* | ``str``  | Path to the python module install folder when ``-DPYBIND=ON``, defaults to ``$CMAKE_INSTALL_PREFIX/python_packages/<module>`` |
 
-If you have compiled with PyBind you can find at the root of the ``build`` file the python lib ``aidge_core.cpython*.so``
+If one compiles with ``-DPYBIND=ON``, ``-DPYBIND_INSTALL_PREFIX:PATH`` can be used to install the python module directly in the
+python source tree (for instance ``$PWD/aidge_core``). ``setup.py`` takes care of this and installs the module in the right place.
 
 ## Run tests
 ### CPP
diff --git a/aidge_core-config.cmake.in b/aidge_core-config.cmake.in
index d97afe8a2a1ca98eb862d66c388081bca7b72edc..abe55b6faef64aa61d4df4076c035ac0c5f998b4 100644
--- a/aidge_core-config.cmake.in
+++ b/aidge_core-config.cmake.in
@@ -3,6 +3,11 @@
 include(CMakeFindDependencyMacro)
 find_dependency(fmt)
 find_dependency(Threads)
+set(AIDGE_REQUIRES_PYTHON @AIDGE_REQUIRES_PYTHON@)
+set(AIDGE_PYTHON_HAS_EMBED @AIDGE_PYTHON_HAS_EMBED@)
+if (AIDGE_REQUIRES_PYTHON AND AIDGE_PYTHON_HAS_EMBED)
+    find_dependency(Python COMPONENTS Interpreter Development)
+endif()
 
 include(${CMAKE_CURRENT_LIST_DIR}/aidge_core-config-version.cmake)
 
diff --git a/aidge_core/aidge_export_aidge/static/CMakeLists.txt b/aidge_core/aidge_export_aidge/static/CMakeLists.txt
index 4220bb9d502474301cf748252930ff8bdd5c97e3..d7fe26d9c286f72d898a21d07baae2c91d08b71a 100644
--- a/aidge_core/aidge_export_aidge/static/CMakeLists.txt
+++ b/aidge_core/aidge_export_aidge/static/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.15)
+cmake_minimum_required(VERSION 3.18)
 set(CXX_STANDARD 14)
 
 file(STRINGS "${CMAKE_SOURCE_DIR}/project_name.txt" project_name)
@@ -18,6 +18,7 @@ set(module_name _${CMAKE_PROJECT_NAME}) # target name
 ##############################################
 # Define options
 option(PYBIND "python binding" ON)
+option(STANDALONE "Build standalone executable" ON)
 option(WERROR "Warning as error" OFF)
 option(TEST "Enable tests" OFF)
 option(COVERAGE "Enable coverage" OFF)
@@ -61,16 +62,8 @@ set_property(TARGET ${module_name} PROPERTY POSITION_INDEPENDENT_CODE ON)
 
 # PYTHON BINDING
 if (PYBIND)
-    # Handles Python + pybind11 headers dependencies
     include(PybindModuleCreation)
     generate_python_binding(${CMAKE_PROJECT_NAME} ${module_name})
-
-    target_link_libraries(${module_name}
-        PUBLIC
-            pybind11::pybind11
-        PRIVATE
-            Python::Python
-        )
 endif()
 
 if( ${ENABLE_ASAN} )
@@ -94,7 +87,6 @@ target_include_directories(${module_name}
         ${CMAKE_CURRENT_SOURCE_DIR}/src
 )
 
-target_link_libraries(${module_name} PUBLIC fmt::fmt)
 target_compile_features(${module_name} PRIVATE cxx_std_14)
 
 target_compile_options(${module_name} PRIVATE
@@ -151,8 +143,13 @@ install(FILES
 ## Exporting from the build tree
 message(STATUS "Exporting created targets to use them in another build")
 export(EXPORT ${CMAKE_PROJECT_NAME}-targets
-    FILE "${CMAKE_CURRENT_BINARY_DIR}/${project}-targets.cmake")
-
-# Compile executable
-add_executable(main main.cpp)
-target_link_libraries(main PUBLIC _aidge_core ${module_name})
+    FILE "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}-targets.cmake")
+
+if(STANDALONE)
+    if(AIDGE_REQUIRES_PYTHON AND NOT AIDGE_PYTHON_HAS_EMBED)
+        message(WARNING "Skipping compilation of standalone executable: missing Python embedded interpreter")
+    else()
+        add_executable(main main.cpp)
+        target_link_libraries(main PRIVATE ${module_name})
+    endif()
+endif()
diff --git a/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake b/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
index 193f3332231ac384daab2e5bf75c1a5de0d2bf1d..217a48351def531cf7da39c9e78e0627fdba87f4 100644
--- a/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
+++ b/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
@@ -1,8 +1,7 @@
 function(generate_python_binding name target_to_bind)
 
-    find_package(Python COMPONENTS Interpreter Development)
+    find_package(Python COMPONENTS Interpreter Development.Module)
 
-    add_definitions(-DPYBIND)
     Include(FetchContent)
     FetchContent_Declare(
     PyBind11
@@ -15,11 +14,9 @@ function(generate_python_binding name target_to_bind)
     file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
 
     pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO_EXTRAS required for pip install
-    target_include_directories(${name} PUBLIC "python_binding")
+    target_include_directories(${name} PRIVATE "python_binding")
+
+    # Link target library to bind
+    target_link_libraries(${name} PRIVATE ${target_to_bind})
 
-    # Handles Python + pybind11 headers dependencies
-    target_link_libraries(${name}
-        PUBLIC
-            ${target_to_bind}
-    )
 endfunction()
diff --git a/aidge_core/aidge_export_aidge/static/export-config.cmake.in b/aidge_core/aidge_export_aidge/static/export-config.cmake.in
index f3604be11c27d86caf1ad8a48b333b9bd8f30625..f0be5e076dbdfef359fc00fd41c25c0bba815839 100644
--- a/aidge_core/aidge_export_aidge/static/export-config.cmake.in
+++ b/aidge_core/aidge_export_aidge/static/export-config.cmake.in
@@ -1,3 +1,8 @@
+@PACKAGE_INIT@
+
+include(CMakeFindDependencyMacro)
+find_dependency(aidge_core)
+
 include(${CMAKE_CURRENT_LIST_DIR}/aidge_backend_cpu-config-version.cmake)
 
 include(${CMAKE_CURRENT_LIST_DIR}/aidge_backend_cpu-targets.cmake)
diff --git a/aidge_core/aidge_export_aidge/static/main.cpp b/aidge_core/aidge_export_aidge/static/main.cpp
index ab8bac1851b6d2dae4bf97bd3af10e19e0b71c1e..61bc3ebeb915be12570c6300965e3b64ac2870dd 100644
--- a/aidge_core/aidge_export_aidge/static/main.cpp
+++ b/aidge_core/aidge_export_aidge/static/main.cpp
@@ -1,6 +1,10 @@
 #include <iostream>
 #include <aidge/backend/cpu.hpp>
 
+/* Register default cpu Tensor implementation */
+#include <aidge/backend/cpu/data/TensorImpl.hpp>
+
+/* Include model generator */
 #include "include/dnn.hpp"
 
 int main()
diff --git a/aidge_core/unit_tests/static/main.cpp b/aidge_core/unit_tests/static/main.cpp
index 06171e2a036a18b0dea3dca40de34c296d99222d..640fc1fe60b55070de41ca4ce35ccd08084498b9 100644
--- a/aidge_core/unit_tests/static/main.cpp
+++ b/aidge_core/unit_tests/static/main.cpp
@@ -4,6 +4,10 @@ This file is copied in the test export.
 */
 #include <iostream>
 
+/* Register default cpu Tensor implementation */
+#include <aidge/backend/cpu/data/TensorImpl.hpp>
+
+/* Include model generator */
 #include "include/dnn.hpp"
 
 int main()
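
Note on the two `main.cpp` fixes above: including `TensorImpl.hpp` works because the header's static objects register the cpu implementation at program start-up. Below is a generic, self-contained sketch of this self-registration pattern (illustrative only; the `Registrar` type here is hypothetical, not Aidge's actual registrar API):

``` cpp
#include <cstdio>
#include <functional>
#include <map>
#include <string>

// Registry of named factories, built before main() runs.
std::map<std::string, std::function<void()>>& registry() {
    static std::map<std::string, std::function<void()>> r;
    return r;
}

// Constructing a Registrar at namespace scope performs the insertion as a
// side effect of static initialization: merely including the header that
// defines such an object is enough to make the implementation available.
struct Registrar {
    Registrar(const std::string& key, std::function<void()> fn) {
        registry()[key] = std::move(fn);
    }
};

static Registrar cpuImpl("cpu", [] { std::puts("cpu impl selected"); });

int main() {
    registry().at("cpu")(); // prints "cpu impl selected"
    return 0;
}
```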
diff --git a/aidge_core/unit_tests/test_export.py b/aidge_core/unit_tests/test_export.py
index 9fb16128eebed9102cdf0e46e359a832bf6ac140..5d2e700a86925d1455cdee83e7d40cd891e72ba6 100644
--- a/aidge_core/unit_tests/test_export.py
+++ b/aidge_core/unit_tests/test_export.py
@@ -65,6 +65,7 @@ class test_export(unittest.TestCase):
     def setUp(self):
         self.EXPORT_PATH: pathlib.Path = pathlib.Path("dummy_export")
         self.BUILD_DIR: pathlib.Path = self.EXPORT_PATH / "build"
+        self.INSTALL_DIR: pathlib.Path = (self.EXPORT_PATH / "install").absolute()
 
     def tearDown(self):
         pass
@@ -96,9 +97,10 @@ class test_export(unittest.TestCase):
         )
         os.makedirs(self.BUILD_DIR, exist_ok=True)
         clean_dir(self.BUILD_DIR)  # if the build dir already existed, ensure it is empty
+        clean_dir(self.INSTALL_DIR)
 
         # Test compilation of export
-        install_path = (
+        search_path = (
             os.path.join(sys.prefix, "lib", "libAidge")
             if "AIDGE_INSTALL" not in os.environ
             else os.environ["AIDGE_INSTALL"]
@@ -116,14 +118,16 @@ class test_export(unittest.TestCase):
                 [
                     "cmake",
                     str(self.EXPORT_PATH.absolute()),
-                    "-DPYBIND=1",
-                    f"-DCMAKE_INSTALL_PREFIX:PATH={install_path}",
+                    "-DPYBIND=ON",
+                    f"-DCMAKE_PREFIX_PATH={search_path}", # search dependencies
+                    f"-DCMAKE_INSTALL_PREFIX:PATH={self.INSTALL_DIR}", # local install
                 ],
                 cwd=str(self.BUILD_DIR),
             ):
                 print(std_line, end="")
         except subprocess.CalledProcessError as e:
             print(f"An error occurred: {e}\nFailed to configure export.")
+            raise SystemExit(1)
 
         ##########################
         # BUILD EXPORT
@@ -135,6 +139,7 @@ class test_export(unittest.TestCase):
                 print(std_line, end="")
         except subprocess.CalledProcessError as e:
             print(f"An error occurred: {e}\nFailed to build export.")
+            raise SystemExit(1)
 
         ##########################
         # INSTALL EXPORT
@@ -146,6 +151,7 @@ class test_export(unittest.TestCase):
                 print(std_line, end="")
         except subprocess.CalledProcessError as e:
             print(f"An error occurred: {e}\nFailed to install export.")
+            raise SystemExit(1)
 
 
 if __name__ == "__main__":
diff --git a/cmake/PybindDependency.cmake b/cmake/PybindDependency.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..1f4e7d426fa8d78a98d6bcce44d9d7dfab17ec1e
--- /dev/null
+++ b/cmake/PybindDependency.cmake
@@ -0,0 +1,56 @@
+function(add_pybind_dependency target_name)
+
+    # This function adds dependencies on pybind/python for targets
+    # that depend on them. This is orthogonal to the creation of a
+    # pybind python module.
+
+    # In this case we need to add additional dependencies and distinguish the two link-time usages of the archive:
+
+    #### 1. link for producing a python binding module, which must not include the python interpreter
+
+    # In case 1, the archive is bound to a python module which will provide the runtime,
+    # hence we add a dependency only on the pybind and python headers. We also install the pybind
+    # headers for backward compatibility with dependent build systems which may not depend upon pybind.
+
+    #### 2. link for producing an executable (tests for instance) which must include the python interpreter
+
+    # In case 2, a library or executable must also depend on the embedded python libraries,
+    # hence we add a dependency on Python::Python when the target is not a module. We also account
+    # for the case where the python libraries are not present (such as on cibuildwheel). In that case
+    # only python modules can be built, not standalone executables.
+
+    # Make detection of Development.Embed optional; we need to separate the component detections,
+    # otherwise the variables set by the Interpreter component may be undefined.
+    find_package(Python COMPONENTS Interpreter)
+    find_package(Python COMPONENTS Development)
+    if(NOT Python_Development.Embed_FOUND)
+        message(WARNING "Could not find Python embed libraries, fall back to Python Module only mode. If you are running this from `cibuildwheel, this warning is nominal.")
+        find_package(Python COMPONENTS Development.Module)
+    endif()
+
+    # Set these variables, which are used in the package config (aidge_core-config.cmake.in)
+    # and for conditionally building on the presence of the python interpreter library
+    set(AIDGE_REQUIRES_PYTHON TRUE PARENT_SCOPE)
+    set(AIDGE_PYTHON_HAS_EMBED ${Python_Development.Embed_FOUND} PARENT_SCOPE)
+
+    # Add pybind11 header dependencies; the headers for the package interface are installed below
+    target_include_directories(${target_name} SYSTEM PUBLIC
+        $<INSTALL_INTERFACE:include/_packages_deps/${target_name}>
+        $<BUILD_INTERFACE:${pybind11_INCLUDE_DIR}>)
+
+    # Add include dirs for Python.h
+    target_include_directories(${target_name} SYSTEM PUBLIC ${Python_INCLUDE_DIRS})
+
+    # Add the Python embedded interpreter when the target is not a module (test executables for instance).
+    # This also requires Development.Embed to be available on the system.
+    if (Python_Development.Embed_FOUND)
+        set(target_is_module $<STREQUAL:$<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>)
+        target_link_libraries(${target_name} INTERFACE $<$<NOT:${target_is_module}>:Python::Python>)
+    endif()
+
+    # Install pybind headers such that dependent modules can find them
+    install(DIRECTORY ${pybind11_INCLUDE_DIR}/pybind11
+        DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/_packages_deps/${target_name}
+    )
+
+endfunction()
diff --git a/cmake/PybindModuleCreation.cmake b/cmake/PybindModuleCreation.cmake
index e2bbb2c3fb57867e8add781805033fa5979393a9..853810e24b40eadb0830645a4373c238177ad649 100644
--- a/cmake/PybindModuleCreation.cmake
+++ b/cmake/PybindModuleCreation.cmake
@@ -2,7 +2,6 @@ function(generate_python_binding name target_to_bind)
 
     find_package(Python COMPONENTS Interpreter Development.Module)
 
-    add_definitions(-DPYBIND)
     Include(FetchContent)
     FetchContent_Declare(
     PyBind11
@@ -15,11 +14,8 @@ function(generate_python_binding name target_to_bind)
     file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
 
     pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO_EXTRAS required for pip install
-    target_include_directories(${name} PUBLIC "python_binding")
+    target_include_directories(${name} PRIVATE "python_binding")
 
-    # Handles Python + pybind11 headers dependencies
-    target_link_libraries(${name}
-        PUBLIC
-            ${target_to_bind}
-    )
+    # Link specified target to bind
+    target_link_libraries(${name} PRIVATE ${target_to_bind})
 endfunction()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index d77e6693b27c08da5c60f5410406a08e4863f1c4..cadd8c85ca541862cc6f298fa055713a6f65e3ed 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -40,6 +40,7 @@
 #include "aidge/operator/ArgMax.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/BitShift.hpp"
 #include "aidge/operator/Concat.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
@@ -75,6 +76,10 @@
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/stimuli/Stimulus.hpp"
 
+#include "aidge/operator/ShiftMax.hpp"
+#include "aidge/scheduler/ShiftGELU.hpp"
+#include "aidge/stimuli/ILayerNorm.hpp"
+
 #include "aidge/recipes/Recipes.hpp"
 
 #include "aidge/utils/Attributes.hpp"
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 1fc9168da120ba87c916b1a6a346997be69184b4..4af7da64ebca3c02eb9aabca1f2dad88fd8b9829 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -14,73 +14,174 @@
 
 #include <string>
 #include <vector>
+#include <functional>
+#include <memory>
+#include <set>
 
 #include "aidge/utils/Types.h"
+#include "aidge/utils/DynamicAttributes.hpp"
+#include "aidge/data/Data.hpp"
 #include "aidge/data/Elts.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
 
 namespace Aidge {
+class Node;
 class Operator;
 
+/**
+ * @brief ImplSpec stores the requirements or specifications of an implementation.
+ *
+ */
+struct ImplSpec {
+    struct IOSpec {
+        IOSpec(DataType type_, DataFormat format_ = DataFormat::Any, const std::vector<std::pair<int, int>>& dims_ = {}):
+            type(type_),
+            format(format_),
+            dims(dims_)
+        {}
+
+        DataType type;
+        DataFormat format;
+        std::vector<std::pair<int, int>> dims;
+    };
+
+    ImplSpec(const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const IOSpec& io, const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const IOSpec& i, const IOSpec& o, const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const std::vector<IOSpec>& i, const std::vector<IOSpec>& o, const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const Aidge::ImplSpec&);
+    ~ImplSpec() noexcept;
+
+    std::vector<IOSpec> inputs;
+    std::vector<IOSpec> outputs;
+    DynamicAttributes attrs;
+};
+
+inline bool operator==(const ImplSpec::IOSpec& lhs, const ImplSpec::IOSpec& rhs) {
+    return (lhs.type == rhs.type)
+        && (lhs.format == rhs.format)
+        && (lhs.dims == rhs.dims);
+}
+
+inline bool operator<(const ImplSpec::IOSpec& lhs, const ImplSpec::IOSpec& rhs) {
+    return (lhs.type < rhs.type)
+        || (lhs.type == rhs.type && lhs.format < rhs.format)
+        || (lhs.type == rhs.type && lhs.format == rhs.format && lhs.dims < rhs.dims);
+}
+
+inline bool operator<(const ImplSpec& lhs, const ImplSpec& rhs) {
+    return (lhs.inputs < rhs.inputs)
+        || (lhs.inputs == rhs.inputs && lhs.outputs < rhs.outputs)
+        || (lhs.inputs == rhs.inputs && lhs.outputs == rhs.outputs && lhs.attrs < rhs.attrs);
+}
+
+/**
+ * @brief Impl stores the details of a specific implementation.
+ * It is associated with an ImplSpec in a registry.
+ *
+ */
+template <class FwdFunc, class BwdFunc>
+struct Impl {
+    Impl(std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso_,
+      std::function<FwdFunc> forward_,
+      std::function<BwdFunc> backward_ = nullptr):
+        prodConso(prodConso_), forward(forward_), backward(backward_) {}
+
+    std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso;
+    std::function<FwdFunc> forward;
+    std::function<BwdFunc> backward;
+};
+
 class OperatorImpl {
 public:
     OperatorImpl(const Operator& op, const std::string& backend = "");
     virtual void forward();
     virtual void backward();
+    virtual std::shared_ptr<ProdConso> prodConso();
 
     const std::string& backend() const noexcept {
         return mBackend;
     }
-    /**
-     * @brief Minimum amount of data from a specific input required by the
-     * implementation to be run.
-     *
-     * @param inputIdx Index of the input analysed.
-     * @return std::size_t
-     */
-    virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
-    // Amount of input data that cannot be overwritten during the execution.
-    virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
-
-    // Memory required at an output for a given input size.
-    virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
+    const Operator& getOperator() const noexcept {
+        return mOp;
+    }
 
     /**
-     * @brief Total amount of consumed data from a specific input.
-     *
-     * @param inputIdx Index of the input analysed.
-     * @return DimSize_t
+     * @brief Get the implementation specification required by the operator,
+     * according to its current configuration.
+     *
      */
-    virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
+    ImplSpec getRequiredSpec() const;
 
     /**
-     * @brief Total amount of produced data ready to be used on a specific output.
-     *
-     * @param outputIdx Index of the output analysed.
-     * @return DimSize_t
+     * @brief Get the best implementation that matches \p requiredSpecs.
+     * If no implementation matches \p requiredSpecs, \p requiredSpecs is
+     * returned.
+     * 
      */
-    virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
+    ImplSpec getBestMatch(const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Update the Consummer Producer system by simulating the consumption and production of i/o
-     *
+     * @brief Get an adapted meta operator corresponding to the required 
+     * specifications \p requiredSpecs from the implementation specifications
+     * \p spec.
+     * 
+     * @param spec Implementation specification
+     * @param requiredSpecs Required specifications
+     * @return std::shared_ptr<Node> Adapted meta op or nullptr
      */
-    virtual void updateConsummerProducer();
+    std::shared_ptr<Node> getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Reset the Consummer Producer system.
-     *
+     * @brief Get the best adapted meta operator corresponding to the required 
+     * specifications \p requiredSpecs.
+     * The best adaptation is the one with the lowest overhead cost.
+     * Currently, it is the one requiring the fewest additional
+     * operators to match the available implementations.
+     * 
+     * @param requiredSpecs Required specifications
+     * @return std::shared_ptr<Node> Adapted meta op or nullptr
      */
-    virtual void resetConsummerProducer();
+    std::shared_ptr<Node> getBestAdaptation(const ImplSpec& requiredSpecs) const;
 
     virtual ~OperatorImpl() = default;
 
 protected:
+    virtual std::shared_ptr<ProdConso> getProdConso() const;
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const;
+    bool checkIOSpec(const ImplSpec::IOSpec& required, const ImplSpec::IOSpec& spec) const;
+
     const Operator &mOp;
     const std::string mBackend;
-    std::vector<Elts_t> mNbConsumedData;
-    std::vector<Elts_t> mNbProducedData;
+    std::shared_ptr<ProdConso> mProdConso;
 };
 } // namespace Aidge
 
+template<>
+struct fmt::formatter<Aidge::ImplSpec::IOSpec> {
+    template<typename ParseContext>
+    inline constexpr auto parse(ParseContext& ctx) {
+        return ctx.begin();
+    }
+
+    template<typename FormatContext>
+    inline auto format(Aidge::ImplSpec::IOSpec const& ioSpec, FormatContext& ctx) const {
+        return fmt::format_to(ctx.out(), "{}, {}, {}", ioSpec.type, ioSpec.format, ioSpec.dims);
+    }
+};
+
+template<>
+struct fmt::formatter<Aidge::ImplSpec> {
+    template<typename ParseContext>
+    inline constexpr auto parse(ParseContext& ctx) {
+        return ctx.begin();
+    }
+
+    template<typename FormatContext>
+    inline auto format(Aidge::ImplSpec const& implSpec, FormatContext& ctx) const {
+        return fmt::format_to(ctx.out(), "{}, {}", implSpec.inputs, implSpec.outputs);
+    }
+};
+
 #endif /* AIDGE_BACKEND_OPERATORIMPL_H_ */
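
Not part of the patch, but to make the intent of `ImplSpec` concrete: the `operator<` overloads above order specifications by inputs, then outputs, then attributes, so an `ImplSpec` can serve as a key in an ordered registry that `getBestMatch()` searches. A minimal sketch, assuming linkage against aidge_core:

``` cpp
#include <set>

#include "aidge/backend/OperatorImpl.hpp"

int main() {
    using namespace Aidge;

    // One Float32 input in any data format, one Float32 NCHW output.
    ImplSpec f32Spec(ImplSpec::IOSpec(DataType::Float32),
                     ImplSpec::IOSpec(DataType::Float32, DataFormat::NCHW));
    // The same layout with 16-bit floats.
    ImplSpec f16Spec(ImplSpec::IOSpec(DataType::Float16),
                     ImplSpec::IOSpec(DataType::Float16, DataFormat::NCHW));

    // operator< makes ImplSpec usable as an ordered key, as a backend's
    // implementation registry might require.
    std::set<ImplSpec> specs;
    specs.insert(f32Spec);
    specs.insert(f16Spec);
    return specs.size() == 2 ? 0 : 1;
}
```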
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index eaadc7a7ca5fa85672619fb2d3b5b17590fd3778..23221e653ba725e4463b06cfabb5483a20756701 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -48,7 +48,8 @@ enum class DataType {
     UInt8,
     UInt16,
     UInt32,
-    UInt64
+    UInt64,
+    Any
 };
 
 enum class DataFormat {
@@ -58,7 +59,8 @@ enum class DataFormat {
     CHWN,
     NCDHW,
     NDHWC,
-    CDHWN
+    CDHWN,
+    Any
 };
 
 using DataFormatTranspose = std::array<size_t, 5>;
@@ -82,35 +84,7 @@ constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
  * @return DataFormatTranspose Permutation array to achieve a transposition
  *         from src to dst DataFormat.
 */
-constexpr inline DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
-    // Permutation array from default format to src format
-    const auto srcDefToFormat = DataFormatTransposeDict[static_cast<int>(src)];
-    // Permutation array from default format to dst format
-    const auto dstDefToFormat = DataFormatTransposeDict[static_cast<int>(dst)];
-    // Compute permutation array from src format to default format:
-    DataFormatTranspose srcFormatToDef{};
-    for (size_t i = 0; i < srcDefToFormat.size(); ++i) {
-        if (srcDefToFormat[i] > 0) {
-            srcFormatToDef[srcDefToFormat[i] - 1] = i;
-        }
-        else {
-            srcFormatToDef[i] = i;
-        }
-    }
-
-    // Compute permutation array from src format to dst format:
-    DataFormatTranspose srcToDst{};
-    for (size_t i = 0; i < dstDefToFormat.size(); ++i) {
-        if (dstDefToFormat[srcFormatToDef[i]] > 0) {
-            srcToDst[i] = dstDefToFormat[srcFormatToDef[i]] - 1;
-        }
-        else {
-            srcToDst[i] = i;
-        }
-    }
-
-    return srcToDst;
-}
+DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataFormat& dst);
 
 class Data {
 public:
@@ -145,11 +119,11 @@ const char* const EnumStrings<Aidge::DataType>::data[]
     = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Ternary",
        "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16",
        "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6",
-       "UInt7", "UInt8", "UInt16", "UInt32", "UInt64"};
+       "UInt7", "UInt8", "UInt16", "UInt32", "UInt64", "Any"};
 
 template <>
 const char* const EnumStrings<Aidge::DataFormat>::data[]
-    = {"Default", "NCHW", "NHWC", "CHWN", "NCDHW", "NDHWC", "CDHWN"};
+    = {"Default", "NCHW", "NHWC", "CHWN", "NCDHW", "NDHWC", "CDHWN", "Any"};
 
 template <Aidge::DataType D> struct cpptype {
     using type = void; // Placeholder
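
`getDataFormatTranspose()` is now only declared here (the definition moves out of line). As a reminder of how the returned permutation is meant to be applied, here is a short sketch; the helper `toNHWC` is our own illustration:

``` cpp
#include <cstddef>
#include <vector>

#include "aidge/data/Data.hpp"

// Permute NCHW dims into NHWC order: out[i] = in[perm[i]].
std::vector<std::size_t> toNHWC(const std::vector<std::size_t>& nchwDims) {
    const Aidge::DataFormatTranspose perm =
        Aidge::getDataFormatTranspose(Aidge::DataFormat::NCHW, Aidge::DataFormat::NHWC);
    std::vector<std::size_t> out(nchwDims.size());
    for (std::size_t i = 0; i < out.size(); ++i) {
        out[i] = nchwDims[perm[i]]; // {N, C, H, W} -> {N, H, W, C}
    }
    return out;
}
```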
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index c7b712be460a748df12447b15883eff58abbf690..58e893ca5d5339d93799415f076dd69d54db69ca 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -36,7 +36,7 @@ namespace Aidge {
  * Contains a pointer to an actual contiguous implementation of data.
  */
 class Tensor : public Data,
-               public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
+               public Registrable<Tensor, std::tuple<std::string, DataType>, std::function<std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>> {
    private:
     DataType mDataType = DataType::Float32; /** enum to specify data type. */
     DataFormat mDataFormat = DataFormat::Default; /** enum to specify data format. */
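
The change from a raw function type to `std::function` in `Registrable` (here and in every operator header below) is worth a standalone illustration: a `std::function` value can hold capturing lambdas and other stateful callables, not only plain function pointers. Generic C++, not Aidge's actual `Registrar` API:

``` cpp
#include <cstdio>
#include <functional>
#include <map>
#include <string>

int main() {
    // The mapped type std::function<int(int)> accepts any callable of that signature.
    std::map<std::string, std::function<int(int)>> registry;

    int offset = 3; // state captured by the second creator
    registry["double"] = [](int x) { return 2 * x; };            // stateless: a function pointer would do
    registry["offset"] = [offset](int x) { return x + offset; }; // capturing: requires std::function

    std::printf("%d %d\n", registry["double"](5), registry["offset"](5)); // prints "10 8"
    return 0;
}
```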
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 3be17d6d21d18d63e75e384f2c6e037452db3a82..ecc47c74578a6ec8bba6c47c07df3f2be6d43078 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -77,7 +77,7 @@ public:
    */
   Node(std::shared_ptr<Operator> op, const std::string& name = "");
 
-  virtual ~Node() = default;
+  virtual ~Node();
 
   friend bool operator==(const Node &lhs, const Node &rhs) {
     return lhs.shared_from_this() == rhs.shared_from_this();
diff --git a/include/aidge/hook/Hook.hpp b/include/aidge/hook/Hook.hpp
index 5e00db5d68f11aadd4f3b6eb8174ba61b33e4a49..5edf231d51f913f58351b4817e145b5f48953ddd 100644
--- a/include/aidge/hook/Hook.hpp
+++ b/include/aidge/hook/Hook.hpp
@@ -24,8 +24,8 @@
 namespace Aidge {
 
 class Operator;
-class Hook : public Registrable<Hook, std::tuple<std::string>, std::shared_ptr<Hook>(const std::shared_ptr<Operator>)> {
-//class Hook : public Registrable<Hook, std::tuple<std::string>, std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>{
+class Hook : public Registrable<Hook, std::tuple<std::string>, std::function<std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>> {
+//class Hook : public Registrable<Hook, std::tuple<std::string>, std::function<std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>>{
 protected:
     const std::shared_ptr<Operator> mOperator;
 
diff --git a/include/aidge/operator/Abs.hpp b/include/aidge/operator/Abs.hpp
index 3c2f1bb388cf064be379f476f1d2df4491b57637..f1dc37003fbff9463d041030818ec0534c5ac1fd 100644
--- a/include/aidge/operator/Abs.hpp
+++ b/include/aidge/operator/Abs.hpp
@@ -25,7 +25,7 @@
 namespace Aidge {
 
 class Abs_Op : public OperatorTensor,
-    public Registrable<Abs_Op, std::string, std::shared_ptr<OperatorImpl>(const Abs_Op&)> {
+    public Registrable<Abs_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Abs_Op&)>> {
 public:
     static const std::string Type;
 
@@ -54,6 +54,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 97db476729abc07985b16de62084be5fce603bc9..daf50771703d6608dbbe90364aac8667aefbdd1d 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -24,7 +24,7 @@
 namespace Aidge {
 
 class Add_Op : public OperatorTensor,
-    public Registrable<Add_Op, std::string, std::shared_ptr<OperatorImpl>(const Add_Op&)> {
+    public Registrable<Add_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Add_Op&)>> {
 public:
     static const std::string Type;
 
@@ -55,6 +55,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input_0", "data_input_n"};
diff --git a/include/aidge/operator/And.hpp b/include/aidge/operator/And.hpp
index 04a2fab1ed3569da161049ecece85a6e906e1cd8..e4f04e2fa3ec2a4a01f023b9ab203e6b2ab36e76 100644
--- a/include/aidge/operator/And.hpp
+++ b/include/aidge/operator/And.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
  * @brief Tensor element-wise logical and operation.
  */
 class And_Op : public OperatorTensor,
-    public Registrable<And_Op, std::string, std::shared_ptr<OperatorImpl>(const And_Op&)> {
+    public Registrable<And_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const And_Op&)>> {
 public:
     static const std::string Type;
 
@@ -64,6 +64,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_1", "data_input_2"};
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index 1b11e211d23563d75bf943a96fa26bc84a3aa4b8..13f63ce98c526f0c57a363ada4e7f50ccdbfb83b 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -32,7 +32,7 @@ enum class ArgMaxAttr { Axis, KeepDims, SelectLastIndex };
  * @brief This operator has as purpose to reduce given dimension by replacing with the Max value's index.
 */
 class ArgMax_Op : public OperatorTensor,
-                public Registrable<ArgMax_Op, std::string, std::shared_ptr<OperatorImpl>(const ArgMax_Op &)> {
+                public Registrable<ArgMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ArgMax_Op &)>> {
 
 public:
     static const std::string Type;
@@ -91,6 +91,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::Axis>(); }
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index b2f4ce92580afddcc7aa3627ea0ef89d4ac3ffee..54b40907e8b4127b7b96b95b229440d782149c3d 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -28,7 +28,7 @@ enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
-                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)> {
+                public Registrable<AvgPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>> {
 
 public:
     static const std::string Type;
@@ -77,6 +77,7 @@ public:
 
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<AvgPoolingAttr::StrideDims>(); }
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 7f1f63c68a512c4b6a59a515d6130afe9696a8c2..cdac7935f6ded752201c04b2dda6cfb9e06438ec 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -28,7 +28,7 @@ enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
-                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)> {
+                public Registrable<BatchNorm_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>> {
 public:
     static const std::string Type;
 
@@ -79,6 +79,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline float& epsilon() const { return mAttributes->template getAttr<BatchNormAttr::Epsilon>(); }
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..bd14bea76937fbfc42cbafa9636df9b55832fa9d
--- /dev/null
+++ b/include/aidge/operator/BitShift.hpp
@@ -0,0 +1,126 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_BITSHIFT_H_
+#define AIDGE_CORE_OPERATOR_BITSHIFT_H_
+
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/StaticAttributes.hpp"
+
+
+namespace Aidge {
+enum class BitShiftAttr { BitShiftdirection };
+
+/**
+ * @brief Tensor BitShift Operator
+ */
+class BitShift_Op : public OperatorTensor,
+    public Registrable<BitShift_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const BitShift_Op&)>> {
+public:
+    enum BitShiftDirection { left, right };
+    static const std::string Type;
+private:
+
+    using Attributes_ = StaticAttributes<BitShiftAttr,BitShiftDirection>;
+    template <BitShiftAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+public:
+
+    BitShift_Op(BitShiftDirection direction)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+              attr<BitShiftAttr::BitShiftdirection>(direction)))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    BitShift_Op(const BitShift_Op& op)
+        : OperatorTensor(op), mAttributes(op.mAttributes)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(BitShift_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::BitShift_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<BitShift_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+   
+    /**
+     * @brief Set the backend to use for this operator.
+     *
+     * @param name Name of the backend.
+     */
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+    
+    /**
+     * @brief Retrieve the attributes of the operator.
+     *
+     * @return Attributes
+     */
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Retrieve the direction in which the shift should be applied (right or left)
+     * 
+     * @return BitShiftDirection 
+     */
+    inline BitShiftDirection& direction() const noexcept { return mAttributes->template getAttr<BitShiftAttr::BitShiftdirection>(); }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"InputTensor", "ShiftAmount"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"OutputTensor"};
+    }
+
+
+};
+/**
+ * @brief The bitwise shift operator performs an element-wise operation between the input tensor
+ * and the shift tensor, in the direction specified by "direction".
+ * @param[in] direction Direction of the bitshift (Left or Right)
+ * @param[in] name Name of the node
+ * @return std::shared_ptr<Node> 
+ */
+inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direction, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<BitShift_Op>(direction), name);
+}
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::BitShiftAttr>::data[] = {"BitShiftdirection"};
+
+}
+
+#endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
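
A small usage sketch for the new header (our example; actually running the node requires a backend implementation registered for `BitShift_Op`):

``` cpp
#include <memory>

#include "aidge/operator/BitShift.hpp"

int main() {
    using Aidge::BitShift_Op;

    // Build a node shifting the "InputTensor" input left by "ShiftAmount".
    std::shared_ptr<Aidge::Node> node =
        Aidge::BitShift(BitShift_Op::BitShiftDirection::left, "shift_left");

    // The direction attribute is exposed through the accessor declared above.
    auto op = std::static_pointer_cast<BitShift_Op>(node->getOperator());
    return op->direction() == BitShift_Op::BitShiftDirection::left ? 0 : 1;
}
```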
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index fd12f551a2251f3dfe8ea0a0d0528d9dad742e42..3fa1bb22a0dd9def11e0621b67cbd8395b5344fa 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -33,7 +33,7 @@ public:
 enum class CastAttr { TargetType };
 
 class Cast_Op : public OperatorTensor,
-    public Registrable<Cast_Op, std::string, std::unique_ptr<OperatorImpl>(const Cast_Op&)> {
+    public Registrable<Cast_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Cast_Op&)>> {
 public:
     static const std::string Type;
 
@@ -73,6 +73,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline DataType& targetType() const { return mAttributes->template getAttr<CastAttr::TargetType>(); }
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 46cd3a5a328984bde7e537d984b30e0774a3d259..98835dd2a4b02e51b50636ee8606382a50ba7b89 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -37,7 +37,7 @@ public:
 enum class ConcatAttr { Axis };
 
 class Concat_Op : public OperatorTensor,
-    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)> {
+    public Registrable<Concat_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Concat_Op&)>> {
 public:
     static const std::string Type;
 
@@ -67,6 +67,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::int32_t& axis() const { return mAttributes->template getAttr<ConcatAttr::Axis>(); }
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
index 1f62f6a62ff6fc0f37721e25967506aa12ac9704..18e626544606fd150b2843d2367aa8858669c2ba 100644
--- a/include/aidge/operator/ConstantOfShape.hpp
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -47,8 +47,8 @@ enum class ConstantOfShapeAttr {
 class ConstantOfShape_Op
     : public OperatorTensor,
       public Registrable<ConstantOfShape_Op, std::string,
-                         std::shared_ptr<OperatorImpl>(
-                             const ConstantOfShape_Op &)> {
+                         std::function<std::shared_ptr<OperatorImpl>(
+                             const ConstantOfShape_Op &)>> {
 
 public:
   // name of the type of the operation
@@ -103,6 +103,7 @@ public:
 
   void setBackend(const std::string &name,
                   DeviceIdx_t device = 0) override final;
+  std::set<std::string> getAvailableBackends() const override;
 
   inline std::shared_ptr<Attributes> attributes() const override {
     return mAttributes;
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 7366472d24b78b58aab589ea2b3ccd045e4a5ea7..cd1a57dd9ac52d2f5cdff3b5ed54c6dd2aeeed34 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -34,7 +34,7 @@ enum class ConvAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
-                public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)> {
+                public Registrable<Conv_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>> {
 
 public:
     static const std::string Type;
@@ -97,6 +97,7 @@ public:
 
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     DimSize_t inChannels() const {
         if (!getInput(1)) {
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 63d8e8419b47279c51783db057b5b1a63c7d0884..f0a55a299094add58bd3938e9cca9bbb48e21da8 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -33,7 +33,7 @@ enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
-                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)> {
+                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>> {
 public:
     static const std::string Type;
 
@@ -83,6 +83,7 @@ public:
                           const IOIndex_t outputIdx = 0) const override;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     DimSize_t nbChannels() const {
         if (!getInput(1)) {
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
index 72ff83834962c1860b135a4187e72199b04361db..856cd0e85d1abb47d3c163115bef6cbfb59bb66f 100644
--- a/include/aidge/operator/DepthToSpace.hpp
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -35,7 +35,7 @@ enum class DepthToSpaceAttr { BlockSize, Mode };
 class DepthToSpace_Op : public OperatorTensor,
                 public Registrable<DepthToSpace_Op,
                     std::string,
-                    std::shared_ptr<OperatorImpl>(const DepthToSpace_Op &)> {
+                    std::function<std::shared_ptr<OperatorImpl>(const DepthToSpace_Op &)>> {
 public:
     static const std::string Type;
     enum class Mode { DCR, CRD };
@@ -68,6 +68,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::uint32_t& blockSize() const { return mAttributes->template getAttr<DepthToSpaceAttr::BlockSize>(); }
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index b16a5e6733e8846b05e3e491cf5bc7f793d97f1c..5ed9e789deab71b107a6071ab11452c3cf73fa9d 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -25,7 +25,7 @@
 namespace Aidge {
 
 class Div_Op : public OperatorTensor,
-    public Registrable<Div_Op, std::string, std::shared_ptr<OperatorImpl>(const Div_Op&)> {
+    public Registrable<Div_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Div_Op&)>> {
 
 public:
     static const std::string Type;
@@ -57,6 +57,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_1", "data_input_2"};
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index b6cc8f30c0fff3366cb1d3fea678e4cad8f9cb10..88a4bfd29e7d27e7eaea00d967e0ba631354d253 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -25,7 +25,7 @@
 namespace Aidge {
 
 class Erf_Op : public OperatorTensor,
-    public Registrable<Erf_Op, std::string, std::shared_ptr<OperatorImpl>(const Erf_Op&)> {
+    public Registrable<Erf_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Erf_Op&)>> {
 public:
     static const std::string Type;
 
@@ -44,6 +44,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 31378532e28c05971e4e3eb5778d4821ce2b6fde..592ba4e2b796ba1aede24a737e296ddf1e285499 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
-                                 std::shared_ptr<OperatorImpl>(const FC_Op &)> {
+                                 std::function<std::shared_ptr<OperatorImpl>(const FC_Op &)>> {
 public:
     static const std::string Type;
 
@@ -60,6 +60,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     DimSize_t inChannels() const {
         if (!getInput(1)) {
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index aebe3879b94fd13c8226fffe42e513715d8e3e5a..517d63adc59ed848c53852697ab9f8511dfc2a2a 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -34,7 +34,7 @@ enum class FoldAttr { OutputDims, StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class Fold_Op : public OperatorTensor,
-                public Registrable<Fold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)> {
+                public Registrable<Fold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)>> {
 
 public:
     static const std::string Type;
@@ -78,6 +78,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, DIM>& outputDims() const { return mAttributes->template getAttr<FoldAttr::OutputDims>(); }
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index f2e3b0fe8c063a5eec5e0c2140c3b7eabf3ee68a..80dcdd67883529c710b142b6b547d4b02e85cd44 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -36,7 +36,7 @@ enum class GatherAttr { Axis, Indices, GatheredShape };
 class Gather_Op : public OperatorTensor,
                 public Registrable<Gather_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Gather_Op&)> {
+                                   std::function<std::shared_ptr<OperatorImpl>(const Gather_Op&)>> {
 public:
     static const std::string Type;
 
@@ -73,6 +73,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::int8_t& axis() const { return mAttributes -> getAttr<GatherAttr::Axis>(); }
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 41516a39723249b5b5c715a66ce3398dff8e65b1..2812da066887d63133ede2d69b5804f0b8a8101e 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 class GenericOperator_Op
     : public OperatorTensor,
-      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)> {
+      public Registrable<GenericOperator_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>> {
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
@@ -57,6 +57,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override { return std::set<std::string>(); };
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
     template <class T>
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 734e12344fed4cd25dd41e91dc8cfb18fea4fd45..ef440e8c697ff221aa8df42e459de7ac697e8a0c 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -32,8 +32,8 @@ namespace Aidge {
 class GlobalAveragePooling_Op
     : public OperatorTensor,
       public Registrable<GlobalAveragePooling_Op, std::string,
-                         std::shared_ptr<OperatorImpl>(
-                             const GlobalAveragePooling_Op &)> {
+                         std::function<std::shared_ptr<OperatorImpl>(
+                             const GlobalAveragePooling_Op &)>> {
 public:
   static const std::string Type;
 
@@ -46,6 +46,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
   void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
   static const std::vector<std::string> getInputsName() {
     return {"data_input"};
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index 81900824ed0d26572e593982fa21ed900eda88ee..dc2b2059e75711572e0f7fa94cc6ccb9f58c970b 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
 enum class GridSampleAttr { Mode, PaddingMode, AlignCorners };
 
 class GridSample_Op : public OperatorTensor,
-	public Registrable<GridSample_Op, std::string, std::shared_ptr<OperatorImpl>(const GridSample_Op&)> {
+	public Registrable<GridSample_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const GridSample_Op&)>> {
 
 public:
 	static const std::string Type;
@@ -58,6 +58,7 @@ public:
 	bool forwardDims(bool /*allowDataDependencies*/ = false) override final;
 
 	void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
 	inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 	inline Mode mode() const { return mAttributes->template getAttr<GridSampleAttr::Mode>(); }
diff --git a/include/aidge/operator/ILayerNorm.hpp b/include/aidge/operator/ILayerNorm.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f660cc64eb65770cc6cf5335d9c070b155d03c0f
--- /dev/null
+++ b/include/aidge/operator/ILayerNorm.hpp
@@ -0,0 +1,81 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 10.09.2024
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ILAYERNORM_H_
+#define AIDGE_CORE_OPERATOR_ILAYERNORM_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class ILayerNorm_Op : public OperatorTensor,
+    public Registrable<ILayerNorm_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ILayerNorm_Op&)>> {
+public:
+    static const std::string Type;
+
+    ILayerNorm_Op()
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::Param}, 1)
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ILayerNorm_Op(const ILayerNorm_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(ILayerNorm_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ILayerNorm_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ILayerNorm_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "weight", "bias"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> ILayerNorm(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<ILayerNorm_Op>(), name);
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_ILAYERNORM_H_ */
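
End of the new header. A minimal usage sketch, assuming some backend (the `"cpu"`-style kernel implied below is illustrative) has registered an implementation for `ILayerNorm_Op`:

```cpp
#include <memory>

#include "aidge/operator/ILayerNorm.hpp"

int main() {
    // Factory declared above: wraps a fresh ILayerNorm_Op in a Node.
    std::shared_ptr<Aidge::Node> norm = Aidge::ILayerNorm("ilayernorm0");
    auto op = std::static_pointer_cast<Aidge::ILayerNorm_Op>(norm->getOperator());

    // New in this changeset: enumerate the backends that registered a kernel
    // for this operator type, then pick one.
    for (const std::string& backend : op->getAvailableBackends()) {
        op->setBackend(backend);
        break;
    }
    return 0;
}
```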
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 622d6290af55ef5a717c6f5763ade5a2750fb9f0..24476f231806bf38ae48b9e2d5ec405e072afdb2 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -26,6 +26,11 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
+class Identity_OpImpl : public OperatorImpl {
+public:
+    Identity_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
 
 /**
  * @brief Identity_Op is a helper operator made to ease the declaration of MetaNodes.
@@ -35,7 +40,7 @@ namespace Aidge {
  *
  */
 class Identity_Op : public OperatorTensor,
-    public Registrable<Identity_Op, std::string, std::unique_ptr<OperatorImpl>(const Identity_Op&)> {
+    public Registrable<Identity_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Identity_Op&)>> {
 public:
     static const std::string Type;
 
@@ -54,29 +59,8 @@ public:
      */
     std::shared_ptr<Operator> clone() const override;
 
-    // bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
-
-    /**
-     * @brief Check if output dimensions have been computed.
-     * @note Since Indentity has no output Tensor, this function checks if its
-     * only input's dimensions have been computed.
-     *
-     * @return true Input has dimensions.
-     * @return false Input has no dimensions or is a nullptr.
-     */
-    bool dimsForwarded() const override final;
-
-
-    void forward() override final;
-
-    void backward() override final { }
-
-    void setBackend(const std::string& /*name*/, DeviceIdx_t /*device*/ = 0) override final {
-        // setBackend do nothing, Identity node has no backend it just pass the same Tensor
-    }
-    void setDataType(const DataType& /*dataType*/) const override final {
-        // setDatatype do nothing, Identity node has no backend it just pass the same Tensor
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 30d171eab3ee54864aae48f445e4d0f04792dd31..179eb90b39bb5d527781289b9b233d3a29d14494 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -29,7 +29,7 @@ enum class LeakyReLUAttr {
 };
 
 class LeakyReLU_Op : public OperatorTensor,
-    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)> {
+    public Registrable<LeakyReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>> {
 public:
     static const std::string Type;
 
@@ -62,6 +62,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<LeakyReLUAttr::NegativeSlope>(); }
diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Ln.hpp
index c6a9ec4c8d59800cdbcc3f0229acdbbb436cd732..22fc51664b89bcdeb5970b0cc92beafdde52e43f 100755
--- a/include/aidge/operator/Ln.hpp
+++ b/include/aidge/operator/Ln.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class Ln_Op : public OperatorTensor,
-    public Registrable<Ln_Op, std::string, std::unique_ptr<OperatorImpl>(const Ln_Op&)> {
+    public Registrable<Ln_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Ln_Op&)>> {
 public:
     static const std::string Type;
 
@@ -46,6 +46,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index f81fb7bd0a3156fcffccc10fe3d460273f353252..bf6ab84c7373962e71434050427c9b6ecae3b034 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -26,7 +26,7 @@ namespace Aidge {
 class MatMul_Op : public OperatorTensor,
               public Registrable<MatMul_Op,
                                  std::string,
-                                 std::shared_ptr<OperatorImpl>(const MatMul_Op &)> {
+                                 std::function<std::shared_ptr<OperatorImpl>(const MatMul_Op &)>> {
 public:
     static const std::string Type;
 
@@ -59,6 +59,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input1", "data_input2"};
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 3b7473a6a17e8ebf490941068c8245d5847e0299..0cc43a6fbe50849b169a59d048962668d3e4666c 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -33,7 +33,7 @@ enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
-                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)> {
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>> {
 public:
     static const std::string Type;
 
@@ -69,6 +69,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index a1d90f06f098eb7fa2fc199b595991702daf488a..2b05b5fffed98a7df99a450a5f99c88efa2f7288 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -25,19 +25,25 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-class Memorize_OpImpl : public OperatorImpl {
+class Memorize_ProdConso : public ProdConso {
 public:
-    Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Memorize_ProdConso(const Operator& op): ProdConso(op) {}
     Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
     void updateConsummerProducer() override;
+};
+
+class Memorize_OpImpl : public OperatorImpl {
+public:
+    Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Memorize_ProdConso>(mOp); }
     void forward() override;
 };
 
 enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep };
 
 class Memorize_Op : public OperatorTensor,
-    public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)> {
+    public Registrable<Memorize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Memorize_Op&)>> {
 public:
     static const std::string Type;
 
@@ -66,6 +72,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
     bool dimsForwarded() const override;
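
This hunk shows the recurring pattern of the scheduler rework in this changeset: the consumer/producer bookkeeping (`getNbRequiredData()`, `getRequiredMemory()`, ...) moves off `OperatorImpl` into a dedicated `ProdConso` object that the implementation hands out through `getProdConso()`. A standalone sketch of the shape of that split, using simplified stand-in types rather than the real Aidge classes:

```cpp
#include <iostream>
#include <memory>

// Simplified stand-ins for Aidge::ProdConso / Aidge::OperatorImpl.
struct ProdConso {
    virtual int getNbRequiredData(int /*inputIdx*/) const { return 1; }
    virtual ~ProdConso() = default;
};

// Operator-specific scheduling model, analogous to Memorize_ProdConso.
struct MemorizeLikeProdConso : ProdConso {
    int getNbRequiredData(int inputIdx) const override {
        // e.g. the recurrent input is not required on the first step
        return (inputIdx == 1) ? 0 : 1;
    }
};

struct OperatorImpl {
    virtual std::shared_ptr<ProdConso> getProdConso() const {
        return std::make_shared<ProdConso>();  // default model
    }
    virtual ~OperatorImpl() = default;
};

// The kernel only overrides the hook, as Memorize_OpImpl does above.
struct MemorizeLikeImpl : OperatorImpl {
    std::shared_ptr<ProdConso> getProdConso() const override {
        return std::make_shared<MemorizeLikeProdConso>();
    }
};

int main() {
    MemorizeLikeImpl impl;
    std::cout << impl.getProdConso()->getNbRequiredData(1) << '\n';  // prints 0
}
```

Keeping the scheduling contract in its own object lets an implementation swap models without touching its kernel, and gives the base `OperatorImpl` a sensible default (see `ProdConso::defaultModel()` in the new `ProdConso.hpp` further down).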
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index bd85346f7211701e20a443685f24d37a76ae631b..ccff976cbb7cf8efc59223dfd658ca2a4d03a80b 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -21,13 +21,14 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
 #include "aidge/scheduler/SequentialScheduler.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 class MetaOperator_Op : public OperatorTensor,
-                public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)> {
+                public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::function<std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)>> {
 public:
     // outputs shared with micro-graph output Tensors
     // Micro-graph handling:
@@ -82,6 +83,7 @@ public:
     std::string backend() const noexcept override;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     void setDataType(const DataType &datatype) const override {
         // The micro-graph should always be set to the right data type, since it
@@ -90,6 +92,8 @@ public:
         mGraph->setDataType(datatype);
     }
 
+    std::shared_ptr<Attributes> attributes() const override;
+
     Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
     Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override;
     Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override;
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index 9908911419d8ce027cdb18c4abf45a5c71be67b1..49d92cd12f68a0b23530039c1df70ced9b2d2080 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -31,7 +31,7 @@ public:
 };
 
 class Move_Op : public OperatorTensor,
-    public Registrable<Move_Op, std::tuple<std::string, std::string>, std::unique_ptr<OperatorImpl>(const Move_Op&)> {
+    public Registrable<Move_Op, std::tuple<std::string, std::string>, std::function<std::unique_ptr<OperatorImpl>(const Move_Op&)>> {
 public:
     static const std::string Type;
 
@@ -50,6 +50,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 35a4b7e061bba76f1e63343e9230eddddfde11ac..bfe4fcb0de1cb7dda4a0ea8fc7b99638bc813f47 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
  * @brief Tensor element-wise multiplication.
  */
 class Mul_Op : public OperatorTensor,
-    public Registrable<Mul_Op, std::string, std::shared_ptr<OperatorImpl>(const Mul_Op&)> {
+    public Registrable<Mul_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Mul_Op&)>> {
 public:
     static const std::string Type;
 
@@ -50,6 +50,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_1", "data_input_2"};
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 282270736fb7d8ca1fbd3f2b9d1c12bf144e6d34..93e9664e266db6a14947170d960d52f198dcdce0 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -129,9 +129,12 @@ public:
     }
 
     virtual void setBackend(const std::string& name, DeviceIdx_t device = 0) = 0;
+    void setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends);
     virtual void setDataType(const DataType& dataType) const = 0;
     virtual void setDataFormat(const DataFormat& dataFormat) const = 0;
 
+    virtual std::set<std::string> getAvailableBackends() const = 0;
+
     /**
      * @brief Set a new OperatorImpl to the Operator
      *
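
`getAvailableBackends()` joins the abstract `Operator` interface here, next to a new non-virtual `setBackend()` overload taking an ordered list of (backend, device) pairs. A sketch of how the two plausibly combine; only the signatures come from this diff, the body below is an assumption:

```cpp
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "aidge/operator/Operator.hpp"

// Hypothetical free-function version of the new overload's likely behaviour:
// walk a preference list, pick the first backend with a registered kernel.
void setBackendFromPreferences(
        Aidge::Operator& op,
        const std::vector<std::pair<std::string, Aidge::DeviceIdx_t>>& backends) {
    const std::set<std::string> available = op.getAvailableBackends();
    for (const auto& backend : backends) {
        if (available.count(backend.first) > 0) {
            op.setBackend(backend.first, backend.second);  // first match wins
            return;
        }
    }
    // No preferred backend has a kernel registered for this operator type.
}
```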
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index bdb5330a6fd02693f4d75ccba06ce613d9a0dff1..2c670bf23d4703a5a9e8502c8b356fdde32e2561 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -29,7 +29,7 @@ enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
-                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)> {
+                public Registrable<Pad_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>> {
 public:
     static const std::string Type;
 
@@ -74,6 +74,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<PadAttr::BeginEndBorders>(); }
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 41ab3c537eacc88920419cb5e0deecc4720796ba..d5898b3630721b036b3acb916e6dec87455009f7 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -24,17 +24,23 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Pop_ProdConso : public ProdConso {
+public:
+    Pop_ProdConso(const Operator& op): ProdConso(op) {}
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+};
+
 class Pop_OpImpl : public OperatorImpl {
 public:
     Pop_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Pop_ProdConso>(mOp); }
     void forward() override;
 };
 
 enum class PopAttr { ForwardStep };
 
 class Pop_Op : public OperatorTensor,
-    public Registrable<Pop_Op, std::string, std::unique_ptr<OperatorImpl>(const Pop_Op&)> {
+    public Registrable<Pop_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Pop_Op&)>> {
 public:
     static const std::string Type;
 
@@ -59,6 +65,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
     void updateConsummerProducer() override;
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index eaf4297fd8b3751463a20ae219af5c25ecd789ae..f6762dd33088f486184bdfd0a5b8dbdbd0c641da 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -25,7 +25,7 @@
 namespace Aidge {
 
 class Pow_Op : public OperatorTensor,
-    public Registrable<Pow_Op, std::string, std::shared_ptr<OperatorImpl>(const Pow_Op&)> {
+    public Registrable<Pow_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pow_Op&)>> {
 public:
     static const std::string Type;
 
@@ -57,6 +57,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input_1", "data_input_2"};
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index c52e779cbfec04f9ae6796c3bb6f21407c0cb0fb..115ddcb5549b1c0daa01b3ab67946655cda7287c 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -30,8 +30,8 @@ enum class ProdAttr { Constant };
 
 class Producer_Op
     : public OperatorTensor,
-      public Registrable<Producer_Op, std::string, std::shared_ptr<OperatorImpl>(
-                                          const Producer_Op &)> {
+      public Registrable<Producer_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(
+                                          const Producer_Op &)>> {
 public:
     static const std::string Type;
 
@@ -89,6 +89,7 @@ public:
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index cc714c4619a0f8eee7af03993700fed7489a6c0e..9b264c1d3d7955f71538dd90f105cfd7ee469d0a 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class ReLU_Op : public OperatorTensor,
-    public Registrable<ReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const ReLU_Op&)> {
+    public Registrable<ReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>> {
 public:
     static const std::string Type;
 
@@ -46,6 +46,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 43b121be2654c1dd63116075be397e421823b9b5..5d5895a8fb279f1efa5c6321614199f44402b83a 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -32,7 +32,7 @@ enum class ReduceMeanAttr { Axes, KeepDims, NoopWithEmptyAxes };
  * @brief This operator has as purpose to reduce given axes by replacing with the mean value.
 */
 class ReduceMean_Op : public OperatorTensor,
-                public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)> {
+                public Registrable<ReduceMean_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>> {
 
 public:
     static const std::string Type;
@@ -74,6 +74,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::Axes>(); }
@@ -87,6 +88,8 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+    virtual ~ReduceMean_Op() noexcept;
 };
 
 /**
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index 9d1220b6b2e7c1e8029ebe20b03d5501d90ae0f6..bae03cb7d2e3ac855537eb22e54bf706ec0e0b4a 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -33,7 +33,7 @@ enum class ReduceSumAttr { Axes, KeepDims, NoopWithEmptyAxes };
  * @brief This operator has as purpose to reduce given axes by replacing with the sum value.
 */
 class ReduceSum_Op : public OperatorTensor,
-                public Registrable<ReduceSum_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceSum_Op &)> {
+                public Registrable<ReduceSum_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceSum_Op &)>> {
 
 public:
     static const std::string Type;
@@ -92,6 +92,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::Axes>(); }
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 5bd9b3e8d56c106803bf65dc7bf595da85558a1a..721b964d3ff4cd87121d43e8719a8fde1445761b 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -32,7 +32,7 @@ public:
 enum class ReshapeAttr { Shape, AllowZero };
 
 class Reshape_Op : public OperatorTensor,
-                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)> {
+                   public Registrable<Reshape_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Reshape_Op&)>> {
 
 public:
     static const std::string Type;
@@ -47,7 +47,7 @@ private:
 public:
     Reshape_Op() = delete;
 
-    Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero);
+    Reshape_Op(const std::vector<std::int64_t>& shape = {}, bool allowzero = false);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -65,6 +65,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::vector<std::int64_t>& shape() const { return mAttributes->template getAttr<ReshapeAttr::Shape>(); }
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 622a1ff1b191aad9f3f8045380be522d32cf2845..a48b95aff2a18750d83f12a62c408ad41b20afee 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -25,7 +25,7 @@
 namespace Aidge {
 
 class Resize_Op : public OperatorTensor,
-                  public Registrable<Resize_Op, std::string, std::shared_ptr<OperatorImpl>(const Resize_Op&)>{
+                  public Registrable<Resize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Resize_Op&)>>{
 
 public:
     static const std::string Type;
@@ -49,6 +49,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         //  roi, scales, sizes, even if considered as const parameters/input
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 311dc0202d866253bb98285e77e6d6ea8b345e0f..4ef39f63a2f9af34cd3fe28b01cf2fc195bdfc6e 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -30,7 +30,7 @@ enum class ScalingAttr {
 
 class Scaling_Op
     : public OperatorTensor,
-      public Registrable<Scaling_Op, std::string, std::shared_ptr<OperatorImpl>(const Scaling_Op&)> {
+      public Registrable<Scaling_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Scaling_Op&)>> {
 public:
     static const std::string Type;
 
@@ -57,6 +57,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline float& scalingFactor() const noexcept { return mAttributes -> getAttr<ScalingAttr::ScalingFactor>(); }
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index d76a9fd069ebbda81e446e6f3486ff0ff66755bb..cfd43fa0dd5a064ee21eafc2d0f50c12ad6e3272 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -36,7 +36,7 @@ enum class ShapeAttr { Start, End };
 class Shape_Op : public OperatorTensor,
                 public Registrable<Shape_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Shape_Op&)> {
+                                   std::function<std::shared_ptr<OperatorImpl>(const Shape_Op&)>> {
 
 public:
     static const std::string Type;
@@ -66,6 +66,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::int64_t& start() const noexcept { return mAttributes -> getAttr<ShapeAttr::Start>(); }
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
index 4d3000750c2224aaea278beca4c8124e0845042e..30f1d71e0a56d92a70830a5def81040e0c5a186c 100644
--- a/include/aidge/operator/ShiftGELU.hpp
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -7,7 +7,7 @@
  *
  * SPDX-License-Identifier: EPL-2.0
  * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
- * Date: 25.06.2024
+ * Date: 10.09.2024
  *
  ********************************************************************************/
 
@@ -28,7 +28,7 @@
 namespace Aidge {
 
 class ShiftGELU_Op : public OperatorTensor,
-    public Registrable<ShiftGELU_Op, std::string, std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)> {
+    public Registrable<ShiftGELU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)>> {
 public:
     static const std::string Type;
 
@@ -48,6 +48,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
index d75e6559f5f4df9a1010d65ba97529e6165ae42f..9fbd81aedef1eb640a7ce805d745297edb640560 100644
--- a/include/aidge/operator/ShiftMax.hpp
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -7,7 +7,7 @@
  *
  * SPDX-License-Identifier: EPL-2.0
  * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
- * Date: 25.06.2024
+ * Date: 10.09.2024
  *
  ********************************************************************************/
 
@@ -28,7 +28,7 @@
 namespace Aidge {
 
 class ShiftMax_Op : public OperatorTensor,
-    public Registrable<ShiftMax_Op, std::string, std::shared_ptr<OperatorImpl>(const ShiftMax_Op&)> {
+    public Registrable<ShiftMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftMax_Op&)>> {
 public:
     static const std::string Type;
 
@@ -48,6 +48,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index b3204240cd130251fe8abe7d50bdad9b92b7558c..24bc3321673f4dcffd3e3663f7e0a0e584389492 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class Sigmoid_Op : public OperatorTensor,
-    public Registrable<Sigmoid_Op, std::string, std::unique_ptr<OperatorImpl>(const Sigmoid_Op&)> {
+    public Registrable<Sigmoid_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Sigmoid_Op&)>> {
 public:
     static const std::string Type;
 
@@ -37,6 +37,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 241e165a0e441ccb856431225ce1d6fd170a25f8..811402420df170c011e478148cf646e6c585cc84 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -29,7 +29,7 @@ enum class SliceAttr { Starts, Ends, Axes, Steps };
 
 class Slice_Op
     : public OperatorTensor,
-      public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)> {
+      public Registrable<Slice_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Slice_Op &)>> {
 public:
     static const std::string Type;
 
@@ -69,6 +69,7 @@ public:
     bool forwardDims(bool allowDataDependency = true) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::vector<std::int64_t>& starts() const noexcept { return mAttributes -> getAttr<SliceAttr::Starts>(); }
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index c221a67e31fc6de1bcb2c727854c8ebee2986ee4..72ea56dd6293e416ddcca12ac38fd57d76071354 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -29,7 +29,7 @@ enum class SoftmaxAttr { Axis };
 class Softmax_Op : public OperatorTensor,
                 public Registrable<Softmax_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Softmax_Op&)> {
+                                   std::function<std::shared_ptr<OperatorImpl>(const Softmax_Op&)>> {
 
 public:
     static const std::string Type;
@@ -57,6 +57,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 661f9e32d47c7fb7e0c111805a50c6fcc131cffe..8c3a111c42dfeb2b4e27269839e41f3b362bdda3 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -34,7 +34,7 @@ enum class SplitAttr { Axis, Split };
 
 class Split_Op
     : public OperatorTensor,
-      public Registrable<Split_Op, std::string, std::shared_ptr<OperatorImpl>(const Split_Op &)> {
+      public Registrable<Split_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Split_Op &)>> {
 
 public:
     static const std::string Type;
@@ -68,6 +68,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::int8_t& axis() const { return mAttributes->template getAttr<SplitAttr::Axis>(); }
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index ce4aaafc92d1f7d601946c02d4eb025eb735a3f9..4858cdcd164d6be0582ddabe67c780461a9667aa 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -26,7 +26,7 @@ namespace Aidge {
 class Sqrt_Op : public OperatorTensor,
                 public Registrable<Sqrt_Op,
                                 std::string,
-                                std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
+                                std::function<std::shared_ptr<OperatorImpl>(const Sqrt_Op&)>> {
 public:
     static const std::string Type;
 
@@ -45,6 +45,7 @@ public:
     std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 73321b5689c0c10d9d06ea60c551cc6dfaced149..64a775eb4209ecad0e29decd8336ebb77bbe652f 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -67,7 +67,7 @@ enum class SqueezeAttr {
 class Squeeze_Op
     : public OperatorTensor,
       public Registrable<Squeeze_Op, std::string,
-                         std::shared_ptr<OperatorImpl>(const Squeeze_Op &)> {
+                         std::function<std::shared_ptr<OperatorImpl>(const Squeeze_Op &)>> {
 
 public:
   static const std::string
@@ -122,6 +122,7 @@ public:
 
   void setBackend(const std::string &name,
                   DeviceIdx_t device = 0) override final;
+  std::set<std::string> getAvailableBackends() const override;
 
   inline std::shared_ptr<Attributes> attributes() const override {
     return mAttributes;
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index bb29ba67851bce8eed46ab1d4df3cf7a8ce91a1a..170baf6fd0f38668f64cbd36044c856fae261737 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -25,7 +25,7 @@
 namespace Aidge {
 
 class Sub_Op : public OperatorTensor,
-    public Registrable<Sub_Op, std::string, std::shared_ptr<OperatorImpl>(const Sub_Op&)> {
+    public Registrable<Sub_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Sub_Op&)>> {
 public:
     static const std::string Type;
 
@@ -48,6 +48,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_1", "data_input_2"};
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index fd05bf7c434ec2547995800f47380c53585ca6d7..f1a30e3f08ce3886cc1ca39a55a3b23979a47860 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -24,7 +24,7 @@
 namespace Aidge {
 
 class Tanh_Op : public OperatorTensor,
-    public Registrable<Tanh_Op, std::string, std::unique_ptr<OperatorImpl>(const Tanh_Op&)> {
+    public Registrable<Tanh_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Tanh_Op&)>> {
 public:
     static const std::string Type;
 
@@ -44,6 +44,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 375d6e098324516b750f8054f9214390373737e2..155627f2cfd3173ccfbbe2a1ce8c23784cd06d71 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -36,7 +36,7 @@ public:
 enum class TransposeAttr { OutputDimsOrder };
 
 class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)> {
+                public Registrable<Transpose_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Transpose_Op&)>> {
 
 public:
     static const std::string Type;
@@ -67,6 +67,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes -> getAttr<TransposeAttr::OutputDimsOrder>(); }
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index 3fda7c21405ef023f4324089e60be0330b5f34b6..09a689528a6814eca6bb56ef326e2da527f14843 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -41,7 +41,7 @@ enum class UnfoldAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class Unfold_Op : public OperatorTensor,
-                public Registrable<Unfold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM> &)> {
+                public Registrable<Unfold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM> &)>> {
 
 public:
     static const std::string Type;
@@ -77,6 +77,7 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<UnfoldAttr::StrideDims>(); }
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index 3443801bc4a4771109b54a709bd6a77a96b57274..c0710540576959b62bbdf235ff6ea15f9d18cacd 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -60,7 +60,7 @@ enum class UnsqueezeAttr {
 class Unsqueeze_Op
     : public OperatorTensor,
       public Registrable<Unsqueeze_Op, std::string,
-                         std::shared_ptr<OperatorImpl>(const Unsqueeze_Op &)> {
+                         std::function<std::shared_ptr<OperatorImpl>(const Unsqueeze_Op &)>> {
 
 public:
   static const std::string
@@ -119,6 +119,7 @@ public:
 
   void setBackend(const std::string &name,
                   DeviceIdx_t device = 0) override final;
+  std::set<std::string> getAvailableBackends() const override;
 
   inline std::shared_ptr<Attributes> attributes() const override {
     return mAttributes;
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index aea39ded3e5f2547f6f47fbc5aa27d5f1ee4821f..a9b9213e914811ccff7d1e6d8efe4fdd8a505b87 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -150,6 +150,13 @@ size_t fuseToMetaOps(std::shared_ptr<GraphView> graph, const std::string& query,
 */
 size_t convToMatMul(std::shared_ptr<GraphView> graph);
 
+/**
+ * @brief Adapt a graph to the available kernels of a backend.
+ *
+ * @param graph Graph to manipulate
+ */
+void adaptToBackend(std::shared_ptr<GraphView> graph);
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_RECIPES_H_ */
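
A hedged usage sketch for the new recipe. The call order, the `"cpu"` name, and the graph-level `setBackend()` call are illustrative; only `adaptToBackend()`'s signature comes from this diff:

```cpp
#include <memory>

#include "aidge/graph/GraphView.hpp"
#include "aidge/recipes/Recipes.hpp"

void prepareForBackend(std::shared_ptr<Aidge::GraphView> graph) {
    graph->setBackend("cpu");      // choose the target kernels first
    Aidge::adaptToBackend(graph);  // then rewrite nodes that have no exact
                                   // kernel match on that backend
}
```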
diff --git a/include/aidge/scheduler/ProdConso.hpp b/include/aidge/scheduler/ProdConso.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a7c0ed5ae73d1f891744e835f0da5ad14a37f850
--- /dev/null
+++ b/include/aidge/scheduler/ProdConso.hpp
@@ -0,0 +1,89 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_SCHEDULER_PRODCONSO_H_
+#define AIDGE_SCHEDULER_PRODCONSO_H_
+
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Types.h"
+#include "aidge/data/Elts.hpp"
+
+namespace Aidge {
+class Operator;
+
+class ProdConso {
+public:
+    ProdConso(const Operator& op, bool inPlace = false);
+
+    static std::unique_ptr<ProdConso> defaultModel(const Operator& op) {
+        return std::make_unique<ProdConso>(op, false);
+    }
+
+    static std::unique_ptr<ProdConso> inPlaceModel(const Operator& op) {
+        return std::make_unique<ProdConso>(op, true);
+    }
+
+    /**
+     * @brief Minimum amount of data from a specific input required by the
+     * implementation to be run.
+     *
+     * @param inputIdx Index of the input analysed.
+     * @return Elts_t
+     */
+    virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
+
+    // Amount of input data that cannot be overwritten during the execution.
+    virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
+
+    // Memory required at an output for a given input size.
+    virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
+
+    /**
+     * @brief Total amount of consumed data from a specific input.
+     *
+     * @param inputIdx Index of the input analysed.
+     * @return Elts_t
+     */
+    virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
+
+    /**
+     * @brief Total amount of produced data ready to be used on a specific output.
+     *
+     * @param outputIdx Index of the output analysed.
+     * @return Elts_t
+     */
+    virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
+
+    /**
+     * @brief Update the consumer-producer system by simulating the consumption and production of I/O.
+     *
+     */
+    virtual void updateConsummerProducer();
+
+    /**
+     * @brief Reset the consumer-producer system.
+     *
+     */
+    virtual void resetConsummerProducer();
+
+    virtual ~ProdConso() = default;
+
+protected:
+    const Operator &mOp;
+    const bool mInPlace;
+    std::vector<Elts_t> mNbConsumedData;
+    std::vector<Elts_t> mNbProducedData;
+};
+} // namespace Aidge
+
+#endif /* AIDGE_SCHEDULER_PRODCONSO_H_ */
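
The two static factories give backend implementations a one-liner for the common cases. A sketch, where `MyOpImpl` is a hypothetical `OperatorImpl` subclass whose element-wise kernel may safely write over its input:

```cpp
#include <memory>

#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/scheduler/ProdConso.hpp"

// Hypothetical backend implementation of some element-wise operator.
class MyOpImpl : public Aidge::OperatorImpl {
public:
    using Aidge::OperatorImpl::OperatorImpl;

    std::shared_ptr<Aidge::ProdConso> getProdConso() const override {
        // inPlaceModel(): no input memory is protected, so the output may
        // alias an input. defaultModel() is the conservative twin. The
        // returned unique_ptr converts implicitly to shared_ptr.
        return Aidge::ProdConso::inPlaceModel(mOp);
    }
};
```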
diff --git a/include/aidge/stimuli/Stimulus.hpp b/include/aidge/stimuli/Stimulus.hpp
index 80e7c76d4857f577f30b90588f4c3998be80bdb8..3def790b65f441c567e5d43150f465233cb64557 100644
--- a/include/aidge/stimuli/Stimulus.hpp
+++ b/include/aidge/stimuli/Stimulus.hpp
@@ -26,7 +26,7 @@ namespace Aidge {
  * @brief Stimulus. A class wrapping a data sample. Stimulus has two functioning modes. The first mode enables loading data samples from a dataPath and optionally storing the data in memory. The second mode enables storing a data sample that was already loaded in memory.
  * @details When Stimulus is used in the first mode, the loading function is determined automatically based on the backend and the file extension.
  */
-class Stimulus : public Registrable<Stimulus, std::tuple<std::string, std::string>, std::unique_ptr<StimulusImpl>(const std::string&)> {
+class Stimulus : public Registrable<Stimulus, std::tuple<std::string, std::string>, std::function<std::unique_ptr<StimulusImpl>(const std::string&)>> {
 private:
     /// Stimulus data path
     const std::string mDataPath;
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index 7dce3d327d42de15dc2589788b4643742ed1a463..cf71ed0b5953fa1759e04c66311d3d829a603a01 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -14,6 +14,9 @@
 
 #include <string>
 #include <set>
+#include <map>
+
+#include "aidge/utils/future_std/any.hpp"
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -63,6 +66,8 @@ public:
     */
     virtual std::set<std::string> getAttrsName() const = 0;
 
+    virtual std::map<std::string, future_std::any> getAttrs() const = 0;
+
 #ifdef PYBIND
     virtual bool hasAttrPy(const std::string& name) const = 0;
 
@@ -84,6 +89,7 @@ public:
     virtual py::dict dict() const = 0;
 
 #endif
+
     virtual ~Attributes() {}
 };
 }
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index cf7f048dbe5999f433277c46e4e3cb9798c43674..04ed58f7e636d6a0d528f1946ead110857312576 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -18,6 +18,7 @@
 #include <typeinfo>
 #include <cassert>
 #include <string>
+#include <typeindex>
 
 #include "aidge/utils/future_std/any.hpp"
 #include "aidge/utils/Attributes.hpp"
@@ -38,6 +39,9 @@ namespace Aidge {
 ///\todo managing complex types or excluding non-trivial, non-aggregate types
 class DynamicAttributes : public Attributes {
 public:
+    DynamicAttributes() = default;
+    DynamicAttributes(const std::map<std::string, future_std::any>& attrs): mAttrs(attrs) {}
+
     /**
      * \brief Returning an Attribute identified by its name
      * \tparam T expected Attribute type
@@ -48,6 +52,22 @@ public:
      */
     template<class T> const T& getAttr(const std::string& name) const
     {
+        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
+            [](const future_std::any& lhs, const future_std::any& rhs) {
+#ifdef PYBIND
+                if (lhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
+                }
+                else if (rhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
+                }
+                else
+#endif
+                {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
+                }
+            }));
+
         const auto dot = name.find('.');
         if (dot == name.npos) {
 #ifdef PYBIND
@@ -83,6 +103,22 @@ public:
     ///\param value Attribute value
     template<class T> void addAttr(const std::string& name, const T& value)
     {
+        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
+            [](const future_std::any& lhs, const future_std::any& rhs) {
+#ifdef PYBIND
+                if (lhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
+                }
+                else if (rhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
+                }
+                else
+#endif
+                {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
+                }
+            }));
+
         const auto dot = name.find('.');
         if (dot == name.npos) {
             const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
@@ -111,6 +147,22 @@ public:
     ///\param value Attribute value
     template<class T> void setAttr(const std::string& name, const T& value)
     {
+        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
+            [](const future_std::any& lhs, const future_std::any& rhs) {
+#ifdef PYBIND
+                if (lhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
+                }
+                else if (rhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
+                }
+                else
+#endif
+                {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
+                }
+            }));
+
         const auto dot = name.find('.');
         if (dot == name.npos) {
             auto res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
@@ -328,8 +380,45 @@ public:
     };
 #endif
 
+    future_std::any getAny(const std::string& name) const
+    {
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+#ifdef PYBIND
+            // If attribute does not exist in C++, it might have been created or modified in Python
+            auto it = mAttrs.find(name);
+            if (it == mAttrs.end()) {
+                auto itPy = mAttrsPy.find(name);
+                if (itPy != mAttrsPy.end()) {
+                    // Attribute exists in Python, but its type is not known
+                    // Return a std::any of py::object, which will be comparable
+                    mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(py::object),
+                        [](const future_std::any& lhs, const future_std::any& rhs) {
+                            return (future_std::any_cast<py::object>(lhs) < future_std::any_cast<py::object>(rhs));
+                        }));
+
+                    return future_std::any(itPy->second);
+                }
+            }
+#endif
+
+            return mAttrs.at(name);
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            return future_std::any_cast<const DynamicAttributes&>(mAttrs.at(ns)).getAny(nsName);
+        }
+    }
+
+    std::map<std::string, future_std::any> getAttrs() const override {
+        return mAttrs;
+    }
+
     virtual ~DynamicAttributes() {}
 
+    friend bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs);
+
 private:
 #ifdef PYBIND
     // Stores C++ attributes (copy) and Python-only attributes
@@ -345,8 +434,19 @@ private:
 #else
     std::map<std::string, future_std::any> mAttrs;
 #endif
+
+public:
+    // Stores the comparison function for each attribute type ever used
+    static std::map<std::type_index, bool(*)(const future_std::any&, const future_std::any&)> mAnyCompare;
 };
 
+inline bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs) {
+    return (lhs.mAttrs < rhs.mAttrs);
+}
+}
+
+namespace future_std {
+bool operator<(const future_std::any& lhs, const future_std::any& rhs);
 }
 
 #endif /* AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_ */
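
A standalone sketch of the comparator-registry trick introduced above: every `getAttr`/`addAttr`/`setAttr` call memoises, under `typeid(T)`, a lambda able to order two type-erased values holding a `T`, so that `operator<` over `future_std::any` can later dispatch on the stored type. C++17's `std::any` stands in for `future_std::any` here, and the Python branch is omitted:

```cpp
#include <any>
#include <cassert>
#include <map>
#include <typeindex>

// Comparator per concrete type ever stored, keyed by typeid.
static std::map<std::type_index, bool(*)(const std::any&, const std::any&)> anyCompare;

template <class T>
void rememberComparator() {
    anyCompare.emplace(typeid(T), [](const std::any& lhs, const std::any& rhs) {
        return std::any_cast<T>(lhs) < std::any_cast<T>(rhs);
    });
}

bool anyLess(const std::any& lhs, const std::any& rhs) {
    assert(lhs.type() == rhs.type());
    return anyCompare.at(lhs.type())(lhs, rhs);  // dispatch on the erased type
}

int main() {
    rememberComparator<int>();
    return anyLess(std::any(1), std::any(2)) ? 0 : 1;  // true -> exit code 0
}
```

The memoisation has to happen inside those templated accessors because they are the only places where the concrete `T` is still visible; once the value is erased, only the stored comparator can order it.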
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 872c3f6b5a258292c41428852580210ab32decbf..0468ae2616997c306bbd475fe6eb73cc033b0bcc 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -23,7 +23,7 @@
 
 #include <functional>
 #include <map>
-#include <vector>
+#include <set>
 
 namespace Aidge {
 #ifdef PYBIND
@@ -37,21 +37,21 @@ template <class DerivedClass, class Key, class Func> // curiously rucurring temp
 class Registrable {
 public:
     typedef Key registrar_key;
-    typedef std::function<Func> registrar_type;
+    typedef Func registrar_type;
 
-    static std::map<Key, std::function<Func>>& registry()
+    static std::map<Key, Func>& registry()
     {
         #ifdef PYBIND
         #define _CRT_SECURE_NO_WARNINGS
         if (Py_IsInitialized()){
             std::string name = std::string("registrar_")+typeid(Registrable<DerivedClass, Key, Func>).name();
-            static auto shared_data = reinterpret_cast<std::map<Key, std::function<Func>> *>(py::get_shared_data(name));
+            static auto shared_data = reinterpret_cast<std::map<Key, Func> *>(py::get_shared_data(name));
             if (!shared_data)
-                shared_data = static_cast<std::map<Key, std::function<Func>> *>(py::set_shared_data(name, new std::map<Key, std::function<Func>>()));
+                shared_data = static_cast<std::map<Key, Func> *>(py::set_shared_data(name, new std::map<Key, Func>()));
             return *shared_data;
         }
         #endif // PYBIND
-        static std::map<Key, std::function<Func>> rMap;
+        static std::map<Key, Func> rMap;
         return rMap;
     }
 
@@ -77,12 +77,12 @@ struct Registrar {
 
     static auto create(const registrar_key& key) {
         AIDGE_ASSERT(exists(key), "missing or invalid registrar key: {} for registrable object {}\nDid you include/import the corresponding module?\nIf so, it is possible that the object is not yet supported.", key, typeid(C).name());
-        return C::registry()[key];
+        return C::registry().at(key);
     }
-    static std::vector<registrar_key> getKeys(){
-        std::vector<registrar_key> keys;
+    static std::set<registrar_key> getKeys(){
+        std::set<registrar_key> keys;
         for(const auto& keyValue : C::registry())
-            keys.push_back(keyValue.first);
+            keys.insert(keyValue.first);
         return keys;
     }
 };
@@ -101,11 +101,14 @@ template <class C>
 void declare_registrable(py::module& m, const std::string& class_name){
     typedef typename C::registrar_key registrar_key;
     typedef typename C::registrar_type registrar_type;
-    m.def(("register_"+ class_name).c_str(), [](registrar_key& key, registrar_type function){
+    m.def(("register_"+ class_name).c_str(), [](const registrar_key& key, registrar_type function){
         Registrar<C>(key, function);
     })
     .def(("get_keys_"+ class_name).c_str(), [](){
         return Registrar<C>::getKeys();
+    })
+    .def(("get_key_value_"+ class_name).c_str(), [](const registrar_key& key){
+        return Registrar<C>::create(key);
     });
 }
 #endif
@@ -141,4 +144,13 @@ void declare_registrable(py::module& m, const std::string& class_name){
 
 }
 
+#define CONCAT(a, b) CONCAT_INNER(a, b)
+#define CONCAT_INNER(a, b) a ## b
+
+#define REGISTRAR(cls, ...) \
+    namespace { \
+    static Registrar<cls> CONCAT(CONCAT(aidge_register_ , cls), __COUNTER__)(__VA_ARGS__); \
+    } \
+    static_assert(true, "")
+
 #endif //AIDGE_CORE_UTILS_REGISTRAR_H_
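Editor's note: the new `REGISTRAR` macro wraps the common pattern of instantiating a static `Registrar<cls>` at namespace scope. `__COUNTER__` combined with the two-level `CONCAT` produces a unique variable name, so several registrations for the same class can coexist in one translation unit; the anonymous namespace keeps them internal; and the trailing `static_assert(true, "")` lets a call site end with a semicolon without pedantic warnings. A hypothetical use (the operator and factory names are invented for illustration, not part of this diff):

```cpp
// Register a "cpu" implementation factory for an invented MyOp_Op operator.
REGISTRAR(MyOp_Op, "cpu", MyOpImpl_cpu::create);

// ...which expands roughly to:
//
// namespace {
// static Registrar<MyOp_Op> aidge_register_MyOp_Op0("cpu", MyOpImpl_cpu::create);
// }
// static_assert(true, "");
```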
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 18e75b7cef5a2e9e9568a900f826a31c87012318..414381891ce52046ee7c2df5b82a17e1314773cd 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -179,6 +179,12 @@ public:
         return mAttrs;
     }
 
+    virtual std::map<std::string, future_std::any> getAttrs() const override {
+        std::map<std::string, future_std::any> attrs;
+        appendAttr(mAttrs, attrs);
+        return attrs;
+    }
+
     //////////////////////////////////////
     ///     Generic Attributes API
     //////////////////////////////////////
@@ -323,6 +329,15 @@ private:
 
         return false;
     }
+
+    template<std::size_t I = 0, typename... Tp>
+    inline typename std::enable_if<I == sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& /*t*/, std::map<std::string, future_std::any>& /*attrs*/) const {}
+
+    template<std::size_t I = 0, typename... Tp>
+    inline typename std::enable_if<I < sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& t, std::map<std::string, future_std::any>& attrs) const {
+        attrs.insert(std::make_pair(EnumStrings<ATTRS_ENUM>::data[I], future_std::any(std::get<I>(t))));
+        appendAttr<I + 1, Tp...>(t, attrs);
+    }
 
     std::tuple<T...> mAttrs;
 };
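Editor's note: `appendAttr()` above walks the attribute tuple at compile time using the classic C++14 `enable_if` recursion: the `I < sizeof...(Tp)` overload handles element `I` and recurses on `I + 1`, while the `I == sizeof...(Tp)` overload terminates the recursion. A self-contained sketch of the same idiom (C++17 code could use `if constexpr` instead):

```cpp
#include <cstddef>
#include <iostream>
#include <tuple>
#include <type_traits>

// Base case: I is past the last element, nothing left to do.
template <std::size_t I = 0, typename... Tp>
typename std::enable_if<I == sizeof...(Tp), void>::type
printEach(const std::tuple<Tp...>&) {}

// Recursive case: handle element I, then recurse on index I + 1.
template <std::size_t I = 0, typename... Tp>
typename std::enable_if<I < sizeof...(Tp), void>::type
printEach(const std::tuple<Tp...>& t) {
    std::cout << std::get<I>(t) << '\n';
    printEach<I + 1, Tp...>(t);
}

int main() {
    printEach(std::make_tuple(1, 2.5, "three"));
    return 0;
}
```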
diff --git a/pyproject.toml b/pyproject.toml
index cc0a43c83394a2dd61ae4f99572bd902eb724c9b..b838aca5ee100d182ba88b79f23f3a2ebff9acf3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -23,7 +23,7 @@ test = [
 requires = [
     "setuptools>=64",
     "setuptools_scm[toml]==7.1.0",
-    "cmake>=3.15.3.post1"
+    "cmake>=3.18.4.post1"
 ]
 build-backend = "setuptools.build_meta"
 
@@ -51,15 +51,15 @@ write_to = "aidge_core/_version.py"
 [tool.cibuildwheel]
 build-frontend = "build"
 test-requires = "pytest"
-# FIXME: The ignored export test requires a to build the generated export via cmake.
-# However due to a strange bug I haven't been able to properly link Python::Module to the export target
-# Resulting in the need to link Python::Python which is the python interpreter.
-# This suppresses the issue but sadly this target is not available on the cibuilwheel image.
-# Hence the test is ignored. If you want to try and solve this bug go on. 
-# Just take care to increment the counter just below.
-# 
-# Work time spent on this bug : 24h
-test-command = "pytest --ignore={package}/aidge_core/unit_tests/test_export.py {package}/aidge_core/unit_tests"
+# WARNING: `test_export.py` used to be skipped in the test suite because it did
+# not build when the Python embedded interpreter is unavailable, as is the case
+# in cibuildwheel containers.
+# The build system now handles this and skips the generation of a standalone
+# executable when it is not possible.
+# The root causes for this conditional build are that 1. the Python embedded
+# interpreter is not always available, and 2. the aidge_core library currently depends on it.
+# Hopefully this latter dependency can be removed in the future, simplifying the build.
+test-command = "pytest -v --capture=no {package}/aidge_core/unit_tests"
 # uncomment to run cibuildwheel locally on selected distros
 # build=[
 # "cp38-manylinux_x86_64",
diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp
index 2819c78068cf7c64eeb0dca84de4518f1c6658b9..04172c3ff68641a9fe0d14f9a326cd17e7002912 100644
--- a/python_binding/backend/pybind_OperatorImpl.cpp
+++ b/python_binding/backend/pybind_OperatorImpl.cpp
@@ -13,6 +13,7 @@
 #include <pybind11/stl.h>
 #include <string>
 
+#include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 
@@ -35,6 +36,7 @@ public:
 
         );
     }
+
     void backward() override {
         PYBIND11_OVERRIDE(
             void,
@@ -43,90 +45,57 @@ public:
 
         );
     }
-    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override {
-        PYBIND11_OVERRIDE_NAME(
-            Elts_t,
-            OperatorImpl,
-            "get_nb_required_data",
-            getNbRequiredData,
-            inputIdx
-        );
-    }
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override {
-        PYBIND11_OVERRIDE_NAME(
-            Elts_t,
-            OperatorImpl,
-            "get_nb_required_protected",
-            getNbRequiredProtected,
-            inputIdx
 
-        );
-    }
-    Elts_t getRequiredMemory(const IOIndex_t outputIdx,
-    const std::vector<DimSize_t> &inputsSize) const override {
+    std::shared_ptr<ProdConso> getProdConso() const override {
         PYBIND11_OVERRIDE_NAME(
-            Elts_t,
+            std::shared_ptr<ProdConso>,
             OperatorImpl,
-            "get_required_memory",
-            getRequiredMemory,
-            outputIdx,
-            inputsSize
-
+            "get_prod_conso",
+            getProdConso
         );
     }
-    Elts_t getNbConsumedData(const IOIndex_t inputIdx) const override {
-        PYBIND11_OVERRIDE_NAME(
-            Elts_t,
-            OperatorImpl,
-            "get_nb_consumed_data",
-            getNbConsumedData,
-            inputIdx
 
-        );
-    }
-    Elts_t getNbProducedData(const IOIndex_t outputIdx) const override {
+    std::set<ImplSpec> getAvailableImplSpecs() const noexcept override {
         PYBIND11_OVERRIDE_NAME(
-            Elts_t,
+            std::set<ImplSpec>,
             OperatorImpl,
-            "get_nb_produced_data",
-            getNbProducedData,
-            outputIdx
-
+            "get_available_impl_specs",
+            getAvailableImplSpecs
         );
     }
-    void updateConsummerProducer() override {
-        PYBIND11_OVERRIDE_NAME(
-            void,
-            OperatorImpl,
-            "update_consummer_producer",
-            updateConsummerProducer,
-
-        );
-    }
-    void resetConsummerProducer() override {
-        PYBIND11_OVERRIDE_NAME(
-            void,
-            OperatorImpl,
-            "reset_consummer_producer",
-            resetConsummerProducer,
+};
 
-        );
-    }
+// See https://pybind11.readthedocs.io/en/stable/advanced/classes.html#binding-protected-member-functions
+class OperatorImpl_Publicist : public OperatorImpl {
+public:
+    using OperatorImpl::getProdConso;
+    using OperatorImpl::getAvailableImplSpecs;
 };
 
 void init_OperatorImpl(py::module& m){
+    py::class_<ImplSpec::IOSpec>(m, "IOSpec")
+    .def(py::init<DataType, DataFormat, const std::vector<std::pair<int, int>>&>(), py::arg("type"), py::arg("format") = DataFormat::Any, py::arg("dims") = std::vector<std::pair<int, int>>{})
+    ;
+
+    py::class_<ImplSpec>(m, "ImplSpec")
+    .def(py::init<const DynamicAttributes&>(), py::arg("attr") = DynamicAttributes())
+    .def(py::init<const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("io"), py::arg("attr") = DynamicAttributes())
+    .def(py::init<const ImplSpec::IOSpec&, const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("i"), py::arg("o"), py::arg("attr") = DynamicAttributes())
+    ;
 
     py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr())
     .def(py::init<const Operator&, const std::string&>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>())
     .def("forward", &OperatorImpl::forward)
     .def("backward", &OperatorImpl::backward)
-    .def("get_nb_required_data", &OperatorImpl::getNbRequiredData)
-    .def("get_nb_required_protected", &OperatorImpl::getNbRequiredProtected)
-    .def("get_required_memory", &OperatorImpl::getRequiredMemory)
-    .def("get_nb_consumed_data", &OperatorImpl::getNbConsumedData)
-    .def("get_nb_produced_data", &OperatorImpl::getNbProducedData)
-    .def("update_consummer_producer", &OperatorImpl::updateConsummerProducer)
-    .def("reset_consummer_producer", &OperatorImpl::resetConsummerProducer)
+    .def("prod_conso", &OperatorImpl::prodConso)
+    .def("backend", &OperatorImpl::backend)
+    .def("get_operator", &OperatorImpl::getOperator)
+    .def("get_required_spec", &OperatorImpl::getRequiredSpec)
+    .def("get_best_match", &OperatorImpl::getBestMatch)
+    .def("get_adaptation", &OperatorImpl::getAdaptation)
+    .def("get_best_adaptation", &OperatorImpl::getBestAdaptation)
+    .def("get_prod_conso", &OperatorImpl_Publicist::getProdConso)
+    .def("get_available_impl_specs", &OperatorImpl_Publicist::getAvailableImplSpecs)
     ;
 }
 }
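Editor's note: `OperatorImpl_Publicist` follows the pybind11 "publicist" idiom referenced in the comment above. A derived class re-exports protected members with `using` declarations, and since access level is not part of a member's type, `&OperatorImpl_Publicist::getProdConso` is still a pointer-to-member of `OperatorImpl` that pybind11 can bind. A minimal self-contained illustration with an invented `Widget` class:

```cpp
#include <pybind11/pybind11.h>
namespace py = pybind11;

class Widget {
protected:
    int secret() const { return 42; }  // not normally bindable
};

class WidgetPublicist : public Widget {
public:
    using Widget::secret;  // re-export as public; the member's type is unchanged
};

PYBIND11_MODULE(demo_publicist, m) {
    py::class_<Widget>(m, "Widget")
        .def(py::init<>())
        // Taking the address through the publicist is allowed, and the
        // resulting pointer-to-member still belongs to Widget.
        .def("secret", &WidgetPublicist::secret);
}
```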
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index 9db0a90769c2e69b567e83559d43e4fa1430a48b..5af8d7170313658f6c3a784b1e35c0815ebf8077 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -16,71 +16,79 @@
 namespace py = pybind11;
 namespace Aidge {
 
-void init_Data(py::module& m){
-    // Define enumeration names for python as lowercase dtype name
-    // This defined enum names compatible with basic numpy dtype
+template <class T>
+void bindEnum(py::module& m, const std::string& name) {
+    // Define enumeration names for Python as the lowercase type name.
+    // This defines enum names compatible with basic numpy type
     // name such as: float32, flot64, [u]int32, [u]int64, ...
-    auto python_enum_name = [](const DataType& dtype) {
+    auto python_enum_name = [](const T& type) {
         auto str_lower = [](std::string& str) {
             std::transform(str.begin(), str.end(), str.begin(),
                            [](unsigned char c){
                                return std::tolower(c);
                            });
         };
-        auto dtype_name = std::string(Aidge::format_as(dtype));
-        str_lower(dtype_name);
-        return dtype_name;
+        auto type_name = std::string(Aidge::format_as(type));
+        str_lower(type_name);
+        return type_name;
     };
-    // Auto generate enumeration names from lowercase dtype strings
+    // Auto generate enumeration names from lowercase type strings
     std::vector<std::string> enum_names;
-    for (auto dtype_str : EnumStrings<Aidge::DataType>::data) {
-        auto dtype = static_cast<DataType>(enum_names.size());
-        auto enum_name = python_enum_name(dtype);
+    for (auto type_str : EnumStrings<T>::data) {
+        auto type = static_cast<T>(enum_names.size());
+        auto enum_name = python_enum_name(type);
         enum_names.push_back(enum_name);
     }
 
-    // Define python side enumeration aidge_core.dtype
-    auto e_dtype = py::enum_<DataType>(m, "dtype");
+    // Define the Python-side enumeration under the given name (e.g. aidge_core.dtype)
+    auto e_type = py::enum_<T>(m, name.c_str());
 
     // Add enum value for each enum name
     for (std::size_t idx = 0; idx < enum_names.size(); idx++) {
-        e_dtype.value(enum_names[idx].c_str(), static_cast<DataType>(idx));
+        e_type.value(enum_names[idx].c_str(), static_cast<T>(idx));
     }
 
     // Define str() to return the bare enum name value, it allows
-    // to compare directly for instance str(tensor.dtype())
-    // with str(nparray.dtype)
-    e_dtype.def("__str__", [enum_names](const DataType& dtype) {
-        return enum_names[static_cast<int>(dtype)];
+    // to compare directly, for instance, str(tensor.dtype())
+    // with str(nparray.dtype)
+    e_type.def("__str__", [enum_names](const T& type) {
+        return enum_names[static_cast<int>(type)];
     }, py::prepend());;
+}
 
-    // TODO : extend with more values !
-    // py::enum_<DataType>(m, "dtype")
-    // .value("float64", DataType::Float64)
-    // .value("float32", DataType::Float32)
-    // .value("float16", DataType::Float16)
-    // .value("int8", DataType::Int8)
-    // .value("int16", DataType::Int16)
-    // .value("int32", DataType::Int32)
-    // .value("int64", DataType::Int64)
-    // .value("uint8", DataType::UInt8)
-    // .value("uint16", DataType::UInt16)
-    // .value("uint32", DataType::UInt32)
-    // .value("uint64", DataType::UInt64)
-    // ;
+// TODO : extend with more values !
+// py::enum_<DataType>(m, "dtype")
+// .value("float64", DataType::Float64)
+// .value("float32", DataType::Float32)
+// .value("float16", DataType::Float16)
+// .value("int8", DataType::Int8)
+// .value("int16", DataType::Int16)
+// .value("int32", DataType::Int32)
+// .value("int64", DataType::Int64)
+// .value("uint8", DataType::UInt8)
+// .value("uint16", DataType::UInt16)
+// .value("uint32", DataType::UInt32)
+// .value("uint64", DataType::UInt64)
+// ;
 
-    py::enum_<DataFormat>(m, "dformat")
-    .value("Default", DataFormat::Default)
-    .value("NCHW", DataFormat::NCHW) // default
-    .value("NHWC", DataFormat::NHWC) 
-    .value("CHWN", DataFormat::CHWN)
-    .value("NCDHW", DataFormat::NCDHW) 
-    .value("NDHWC", DataFormat::NDHWC)
-    .value("CDHWN", DataFormat::CDHWN)
-    ;
 
-    py::class_<Data, std::shared_ptr<Data>>(m,"Data");
+// py::enum_<DataFormat>(m, "dformat")
+// .value("Default", DataFormat::Default)
+// .value("NCHW", DataFormat::NCHW) // default
+// .value("NHWC", DataFormat::NHWC) 
+// .value("CHWN", DataFormat::CHWN)
+// .value("NCDHW", DataFormat::NCDHW) 
+// .value("NDHWC", DataFormat::NDHWC)
+// .value("CDHWN", DataFormat::CDHWN)
+// ;
+
+// py::class_<Data, std::shared_ptr<Data>>(m,"Data");
 
+void init_Data(py::module& m){
+    bindEnum<DataType>(m, "dtype");
+    bindEnum<DataFormat>(m, "dformat");
+
+    py::class_<Data, std::shared_ptr<Data>>(m,"Data");
 
 }
 }
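Editor's note: `bindEnum<T>()` derives the Python enum values from `EnumStrings<T>::data` and `Aidge::format_as()`, so any enum exposed this way must provide both. A sketch of what a new enum would have to define, assuming Aidge's usual `EnumStrings` primary template (`MyEnum` and its names are invented for illustration):

```cpp
enum class MyEnum { Foo, Bar };

// One display name per enumerator, in declaration order.
template <>
const char* const EnumStrings<MyEnum>::data[] = {"Foo", "Bar"};

// Used by python_enum_name() to build the lowercase Python names.
const char* format_as(MyEnum v) {
    return EnumStrings<MyEnum>::data[static_cast<std::size_t>(v)];
}

// Then, inside an init_* function:
// bindEnum<MyEnum>(m, "myenum");  // would expose aidge_core.myenum.foo / .bar
```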
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index cdbcf3dcc8e4b7578ba88e8f8375757daaef8122..2feaa1f8b8ecd50e1f2570107af1e62fc4f1f457 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -25,7 +25,7 @@ namespace Aidge {
 
 using registrableTensor = Registrable<Tensor,
                                       std::tuple<std::string, DataType>,
-                                      std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>;
+                                      std::function<std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>;
 
 using pyTensorClass = py::class_<Tensor,
                                  std::shared_ptr<Tensor>,
@@ -585,5 +585,6 @@ void init_Tensor(py::module& m){
     // Handles python scalars and numpy scalars with a single overload
     addScalarCtor(pyClassTensor);
 
+    declare_registrable<Tensor>(m, "Tensor");
 }
 }
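Editor's note: the registrable's third template argument changes from a bare function type to `std::function<...>` because, after the `Registrar.hpp` change above, the registry stores values of type `Func` directly in a `std::map`, and a raw function type such as `R(Args...)` is not a valid mapped type. A self-contained sketch of the distinction:

```cpp
#include <functional>
#include <map>
#include <string>

using Factory = std::function<int(int)>;   // a storable callable value type
std::map<std::string, Factory> registry;   // fine: Factory is a regular type
// std::map<std::string, int(int)> broken; // ill-formed: raw function type

int main() {
    registry["double"] = [](int x) { return 2 * x; };
    return registry.at("double")(21) == 42 ? 0 : 1;  // exits with 0
}
```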
diff --git a/python_binding/data/pybind_TensorImpl.cpp b/python_binding/data/pybind_TensorImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4c664274ec2c33174f51dad34ba1591c323b2d87
--- /dev/null
+++ b/python_binding/data/pybind_TensorImpl.cpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <pybind11/operators.h>
+#include <pybind11/numpy.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/TensorImpl.hpp"
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_TensorImpl(py::module& m){
+  py::class_<TensorImpl, std::shared_ptr<TensorImpl>>(m, "TensorImpl");
+
+  py::class_<TensorImpl_cpu<double>, std::shared_ptr<TensorImpl_cpu<double>>, TensorImpl>(m, "TensorImpl_cpu_float64")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<float>, std::shared_ptr<TensorImpl_cpu<float>>, TensorImpl>(m, "TensorImpl_cpu_float32")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<half_float::half>, std::shared_ptr<TensorImpl_cpu<half_float::half>>, TensorImpl>(m, "TensorImpl_cpu_float16")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<int64_t>, std::shared_ptr<TensorImpl_cpu<int64_t>>, TensorImpl>(m, "TensorImpl_cpu_int64")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<int32_t>, std::shared_ptr<TensorImpl_cpu<int32_t>>, TensorImpl>(m, "TensorImpl_cpu_int32")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<int16_t>, std::shared_ptr<TensorImpl_cpu<int16_t>>, TensorImpl>(m, "TensorImpl_cpu_int16")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<int8_t>, std::shared_ptr<TensorImpl_cpu<int8_t>>, TensorImpl>(m, "TensorImpl_cpu_int8")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<uint64_t>, std::shared_ptr<TensorImpl_cpu<uint64_t>>, TensorImpl>(m, "TensorImpl_cpu_uint64")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<uint32_t>, std::shared_ptr<TensorImpl_cpu<uint32_t>>, TensorImpl>(m, "TensorImpl_cpu_uint32")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<uint16_t>, std::shared_ptr<TensorImpl_cpu<uint16_t>>, TensorImpl>(m, "TensorImpl_cpu_uint16")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<uint8_t>, std::shared_ptr<TensorImpl_cpu<uint8_t>>, TensorImpl>(m, "TensorImpl_cpu_uint8")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+}
+}
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 1fa552ce153b2b0f655ca9f38d1d80f62390184b..d8e77bb259cbcbae7940a09dc405bb8f50b5b79b 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -48,6 +48,16 @@ void init_Node(py::module& m) {
     :rtype: str
     )mydelimiter")
 
+    .def("create_unique_name", &Node::createUniqueName, py::arg("base_name"), 
+    R"mydelimiter(
+    Given a base name, generate a new name which is unique in all the GraphViews containing this node.
+
+    :param base_name: proposed name for the node.
+    :type base_name: str
+    :rtype: str
+    )mydelimiter")
+
     .def("__repr__", &Node::repr)
 
     .def("add_child",
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 103e7c1e4db6e197a1dac959a25d266e031d3e55..8a00a1cb4a419f1125411b5b1c823bf91570d62e 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -24,7 +24,8 @@ void declare_Add(py::module &m) {
   py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
     .def(py::init<const IOIndex_t>(), py::arg("nb_inputs"))
     .def_static("get_inputs_name", &Add_Op::getInputsName)
-    .def_static("get_outputs_name", &Add_Op::getOutputsName);
+    .def_static("get_outputs_name", &Add_Op::getOutputsName)
+    .def_readonly_static("Type", &Add_Op::Type);
 
   declare_registrable<Add_Op>(m, "AddOp");
 
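Editor's note: the `.def_readonly_static("Type", &Add_Op::Type)` line starts a pattern repeated for every operator below: the C++ static type string becomes a read-only Python class attribute, so scripts can test `node.type() == aidge_core.AddOp.Type` instead of hard-coding the string. A self-contained sketch of the mechanism with an invented class:

```cpp
#include <pybind11/pybind11.h>
#include <string>
namespace py = pybind11;

struct FooOp {
    static const std::string Type;  // stands in for e.g. Add_Op::Type
};
const std::string FooOp::Type = "Foo";

PYBIND11_MODULE(demo_type, m) {
    py::class_<FooOp>(m, "FooOp")
        .def(py::init<>())
        .def_readonly_static("Type", &FooOp::Type);
    // Python side: demo_type.FooOp.Type == "Foo"; assignment raises AttributeError.
}
```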
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 0587554b722c99d009a248ce963f80cb4fd892ec..b98a642111402050fd3cba6dd8a12b11a3bbde8a 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -45,7 +45,8 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
             py::arg("kernel_dims"),
             py::arg("stride_dims") = create_array<DimSize_t,DIM>(1))
     .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
-    .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName);
+    .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
+    .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type);
 
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 42e31de2c7c8ba440cd8e479cf9285b398970b42..9a1bdacd169beebc843448d23bdaf8502de437b4 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -30,7 +30,8 @@ void declare_BatchNormOp(py::module& m) {
             py::arg("epsilon"),
             py::arg("momentum"))
         .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName);
+        .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
+        .def_readonly_static("Type", &BatchNorm_Op<DIM>::Type);
 
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_BitShift.cpp b/python_binding/operator/pybind_BitShift.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b4f6c90e54e781b011459be6e8e6e252e7347b00
--- /dev/null
+++ b/python_binding/operator/pybind_BitShift.cpp
@@ -0,0 +1,58 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include <string>
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/BitShift.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_BitShift(py::module &m) {
+    // Binding for BitShiftOp class
+    auto pyBitShiftOp = py::class_<BitShift_Op, std::shared_ptr<BitShift_Op>, OperatorTensor>(m, "BitShiftOp", py::multiple_inheritance(), R"mydelimiter(
+        BitShiftOp is a tensor operator that performs bitwise shifts on tensor elements.
+        This class allows shifting tensor values either to the left or to the right, based on the
+        specified direction. The direction can be accessed and controlled using the
+        BitShiftDirection enum.
+        :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
+        :type direction: BitShiftDirection
+        :param name: name of the node.
+    )mydelimiter")
+        .def(py::init<BitShift_Op::BitShiftDirection>(), py::arg("direction"))
+        .def("direction", &BitShift_Op::direction, "Get the direction of the bit shift (left or right).")
+        .def_static("get_inputs_name", &BitShift_Op::getInputsName, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", &BitShift_Op::getOutputsName, "Get the names of the output tensors.");
+
+    // Enum binding under BitShiftOp class
+    py::enum_<BitShift_Op::BitShiftDirection>(pyBitShiftOp, "BitShiftDirection")
+        .value("Right", BitShift_Op::BitShiftDirection::right)
+        .value("Left", BitShift_Op::BitShiftDirection::left)
+        .export_values();
+
+    // Binding for the BitShift function
+    m.def("BitShift", &BitShift, py::arg("direction") = BitShift_Op::BitShiftDirection::right, py::arg("name") = "",
+        R"mydelimiter(
+        Create a node containing a BitShift operator, which performs bitwise
+        shifts on tensor elements, either to the left or to the right depending
+        on the specified direction.
+        :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
+        :type direction: BitShiftDirection
+        :param name: name of the node.
+    )mydelimiter");
+}
+} // namespace Aidge
\ No newline at end of file
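Editor's note: passing `pyBitShiftOp` as the scope of the `py::enum_` nests `BitShiftDirection` under the `BitShiftOp` Python class, and `export_values()` additionally injects `Left`/`Right` into that scope. A self-contained sketch of the same nesting with an invented class:

```cpp
#include <pybind11/pybind11.h>
namespace py = pybind11;

struct Shifter {
    enum class Direction { left, right };
    Direction dir = Direction::left;
};

PYBIND11_MODULE(demo_enum_scope, m) {
    auto cls = py::class_<Shifter>(m, "Shifter")
        .def(py::init<>());
    // Declaring the enum with `cls` as scope nests it: Shifter.Direction
    py::enum_<Shifter::Direction>(cls, "Direction")
        .value("Left", Shifter::Direction::left)
        .value("Right", Shifter::Direction::right)
        .export_values();  // also makes Shifter.Left / Shifter.Right available
}
```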
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 9f02e04a41b20599a6cfe878f53db04c6d5bbe34..854f3783e9961bb5fd29746b88352438a43dd6e4 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -25,7 +25,8 @@ void init_Concat(py::module& m) {
                 py::arg("nb_inputs"),
                 py::arg("axis"))
         .def_static("get_inputs_name", &Concat_Op::getInputsName)
-        .def_static("get_outputs_name", &Concat_Op::getOutputsName);
+        .def_static("get_outputs_name", &Concat_Op::getOutputsName)
+        .def_readonly_static("Type", &Concat_Op::Type);
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
 
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 61fb37e788021757fa6c3aced9a5f4c30fb60548..bc72825b2161d8733334817e095c251c788e7eba 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -45,6 +45,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
         .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
         .def("in_channels", &Conv_Op<DIM>::inChannels)
         .def("out_channels", &Conv_Op<DIM>::outChannels)
+        .def_readonly_static("Type", &Conv_Op<DIM>::Type)
         ;
 
   declare_registrable<Conv_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 080df1832bf92a9db9d26e1fa18b652dc70c2a42..377d0fca5d78dff20b8df0cc0d5521eb9a3685a2 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -39,7 +39,8 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         py::arg("dilation_dims"))
   .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
-  .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels);
+  .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels)
+  .def_readonly_static("Type", &ConvDepthWise_Op<DIM>::Type);
 
   declare_registrable<ConvDepthWise_Op<DIM>>(m, pyClassName);
   m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index 9dcb98a54596f32525d2880dd6e955d4643f6e7c..d2ad60725533be0b9db269ce5e022ac8560e1d91 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -22,7 +22,8 @@ void init_Div(py::module& m) {
     py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance())
         .def(py::init<>())
         .def_static("get_inputs_name", &Div_Op::getInputsName)
-        .def_static("get_outputs_name", &Div_Op::getOutputsName);
+        .def_static("get_outputs_name", &Div_Op::getOutputsName)
+        .def_readonly_static("Type", &Div_Op::Type);
     declare_registrable<Div_Op>(m, "DivOp");
     m.def("Div", &Div, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index c248753ca8de46293d49ce4dc614ae258c313256..6ca25f9569a53505385f37a02f3ab478a11f82a6 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -22,7 +22,8 @@ void init_Erf(py::module& m) {
     py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance())
         .def(py::init<>())
         .def_static("get_inputs_name", &Erf_Op::getInputsName)
-        .def_static("get_outputs_name", &Erf_Op::getOutputsName);
+        .def_static("get_outputs_name", &Erf_Op::getOutputsName)
+        .def_readonly_static("Type", &Erf_Op::Type);
 
     declare_registrable<Erf_Op>(m, "ErfOp");
 
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 9e0d61bc3a4d957e98db39577e120da5fe97ebea..2e9c41a16292d1e643415182d660b80105369d33 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -28,6 +28,7 @@ void declare_FC(py::module &m) {
     .def(py::init<>())
     .def_static("get_inputs_name", &FC_Op::getInputsName)
     .def_static("get_outputs_name", &FC_Op::getOutputsName)
+    .def_readonly_static("Type", &FC_Op::Type)
     .def("out_channels", &FC_Op::outChannels)
     // .def_property_readonly("a", &FC_Op::get_a)
     // .def_property_readonly("a", [](const FC_Op& self) {
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index aa831d1cfe92fb720df00bb7d8dd3af7f1c1a668..0aac0bbad69abb5faaaea3afd0183573db64b31f 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -29,7 +29,8 @@ void init_Gather(py::module& m) {
                 py::arg("indices"),
                 py::arg("gathered_shape"))
         .def_static("get_inputs_name", &Gather_Op::getInputsName)
-        .def_static("get_outputs_name", &Gather_Op::getOutputsName);
+        .def_static("get_outputs_name", &Gather_Op::getOutputsName)
+        .def_readonly_static("Type", &Gather_Op::Type);
 
     declare_registrable<Gather_Op>(m, "GatherOp");
 
diff --git a/python_binding/operator/pybind_GlobalAveragePooling.cpp b/python_binding/operator/pybind_GlobalAveragePooling.cpp
index d4d2a921addaef676913cee2a16991ad36686767..f37ac11f5c62d0334e34aff59561b2014d1977bd 100644
--- a/python_binding/operator/pybind_GlobalAveragePooling.cpp
+++ b/python_binding/operator/pybind_GlobalAveragePooling.cpp
@@ -25,7 +25,8 @@ void init_GlobalAveragePooling(py::module &m) {
                              py::multiple_inheritance())
       .def(py::init<>())
       .def_static("get_inputs_name", &GlobalAveragePooling_Op::getInputsName)
-      .def_static("get_outputs_name", &GlobalAveragePooling_Op::getOutputsName);
+      .def_static("get_outputs_name", &GlobalAveragePooling_Op::getOutputsName)
+      .def_readonly_static("Type", &GlobalAveragePooling_Op::Type);
 
   declare_registrable<GlobalAveragePooling_Op>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
index 49e74f4cbab90f141af5e76df7fbdef6e3794146..6d6c03b82ad4f905c41bb0cf849fc4e05fda4cb2 100644
--- a/python_binding/operator/pybind_GridSample.cpp
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -58,6 +58,7 @@ void declare_GridSampleOp(py::module &m) {
             py::arg("alogn_corners") = false)
         .def_static("get_inputs_name", &GridSample_Op::getInputsName)
         .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
+        .def_readonly_static("Type", &GridSample_Op::Type)
         ;
 
   declare_registrable<GridSample_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_Identity.cpp b/python_binding/operator/pybind_Identity.cpp
index 560f2889f20233ef928557aa230e6dab7f0a5d2b..7599197226b2f8734c989755c6e7d3581a52974d 100644
--- a/python_binding/operator/pybind_Identity.cpp
+++ b/python_binding/operator/pybind_Identity.cpp
@@ -22,7 +22,8 @@ void init_Identity(py::module& m) {
     py::class_<Identity_Op, std::shared_ptr<Identity_Op>, OperatorTensor>(m, "IdentityOp", py::multiple_inheritance())
         .def(py::init<>())
         .def_static("get_inputs_name", &Identity_Op::getInputsName)
-        .def_static("get_outputs_name", &Identity_Op::getOutputsName);
+        .def_static("get_outputs_name", &Identity_Op::getOutputsName)
+        .def_readonly_static("Type", &Identity_Op::Type);
 
     m.def("Identity", &Identity, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index f46106fb3fb168631c9681d90bda857183c9bc04..e031d3dfb3348c5aec5bd497b40ff261528725ad 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -22,7 +22,8 @@ void init_LeakyReLU(py::module& m) {
     py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
         .def(py::init<float>(), py::arg("negative_slope"))
         .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
-        .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName);
+        .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
+        .def_readonly_static("Type", &LeakyReLU_Op::Type);
     declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Ln.cpp b/python_binding/operator/pybind_Ln.cpp
index 0be710be1dfe1a5a83ceaf085094e8ded3f07ffd..50aa755821c257c174c4603404144dab4da26296 100755
--- a/python_binding/operator/pybind_Ln.cpp
+++ b/python_binding/operator/pybind_Ln.cpp
@@ -22,7 +22,8 @@ void init_Ln(py::module& m) {
     py::class_<Ln_Op, std::shared_ptr<Ln_Op>, OperatorTensor>(m, "LnOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Ln_Op::getInputsName)
-    .def_static("get_outputs_name", &Ln_Op::getOutputsName);
+    .def_static("get_outputs_name", &Ln_Op::getOutputsName)
+    .def_readonly_static("Type", &Ln_Op::Type);
 
     m.def("Ln", &Ln, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index 09e11f89ea579b5a3aa75f177958d981c53f1dce..f4f175afcb35eb1c10dcd1a1d9d2f2b1691dcfc0 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -24,7 +24,8 @@ void init_MatMul(py::module &m) {
   py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &MatMul_Op::getInputsName)
-    .def_static("get_outputs_name", &MatMul_Op::getOutputsName);
+    .def_static("get_outputs_name", &MatMul_Op::getOutputsName)
+    .def_readonly_static("Type", &MatMul_Op::Type);
   declare_registrable<MatMul_Op>(m, "MatMulOp");
   m.def("MatMul", &MatMul, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 2a850cd7bfe5cca21ea1ca54b5e9ad86b880bcc2..b59a4c5574ce5e56af13f9aea13e7514c9402c22 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -37,7 +37,8 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("ceil_mode"))
   .def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
-  .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
+  .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
+  .def_readonly_static("Type", &MaxPooling_Op<DIM>::Type);
   declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
diff --git a/python_binding/operator/pybind_Memorize.cpp b/python_binding/operator/pybind_Memorize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3ac1122111aae1a9b7eb353399e46562ae51b0b1
--- /dev/null
+++ b/python_binding/operator/pybind_Memorize.cpp
@@ -0,0 +1,33 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/Memorize.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Memorize(py::module& m) {
+    py::class_<Memorize_Op, std::shared_ptr<Memorize_Op>, OperatorTensor>(m, "MemorizeOp", py::multiple_inheritance())
+        .def(py::init<const std::uint32_t>(), py::arg("end_step"))
+        .def_static("get_inputs_name", &Memorize_Op::getInputsName)
+        .def_static("get_outputs_name", &Memorize_Op::getOutputsName);
+
+    declare_registrable<Memorize_Op>(m, "MemorizeOp");
+
+    m.def("Memorize", &Memorize, py::arg("end_step"), py::arg("name") = "");
+}
+
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 1658b0d959c0882d53e078f6d68b4474b34c739e..23949b5fe3b22edf5b7105abd0de29b727740e35 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -22,7 +22,8 @@ void init_Mul(py::module& m) {
     py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Mul_Op::getInputsName)
-    .def_static("get_outputs_name", &Mul_Op::getOutputsName);
+    .def_static("get_outputs_name", &Mul_Op::getOutputsName)
+    .def_readonly_static("Type", &Mul_Op::Type);
     declare_registrable<Mul_Op>(m, "MulOp");
     m.def("Mul", &Mul, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index dbf71a3cad870d848fbc2f5f67c13d5347b38b89..81a62f4ed0eb12844453581f68165a282fff9817 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -53,7 +53,8 @@ void init_Operator(py::module& m){
     )mydelimiter")
     .def("associate_input", &Operator::associateInput, py::arg("inputIdx"), py::arg("data"))
     .def("set_datatype", &Operator::setDataType, py::arg("dataType"))
-    .def("set_backend", &Operator::setBackend, py::arg("name"), py::arg("device") = 0)
+    .def("set_backend", py::overload_cast<const std::string&, DeviceIdx_t>(&Operator::setBackend), py::arg("name"), py::arg("device") = 0)
+    .def("set_backend", py::overload_cast<const std::vector<std::pair<std::string, DeviceIdx_t>>&>(&Operator::setBackend), py::arg("backends"))
     .def("forward", &Operator::forward)
     // py::keep_alive forbide Python to garbage collect the implementation lambda as long as the Operator is not deleted !
     .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>())
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 3df203ed52967e3dbc393769276015a7fe0e016f..04882b7f5b86c7c09ed8b8e5a15c4bfabd03bb55 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -37,6 +37,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("borderValue") = 0.0)
     .def_static("get_inputs_name", &Pad_Op<DIM>::getInputsName)
     .def_static("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
+    .def_readonly_static("Type", &Pad_Op<DIM>::Type)
     ;
   declare_registrable<Pad_Op<DIM>>(m, pyClassName);
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index 0c3b3f38803735d2df632496382e86a0c9f2735d..2040f642bbfc0428be48a6f7ec21fa3aed20a371 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -22,7 +22,8 @@ void init_Pop(py::module& m) {
     py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor>(m, "PopOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Pop_Op::getInputsName)
-    .def_static("get_outputs_name", &Pop_Op::getOutputsName);
+    .def_static("get_outputs_name", &Pop_Op::getOutputsName)
+    .def_readonly_static("Type", &Pop_Op::Type);
 
     m.def("Pop", &Pop, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index e5d67542cd1acc5b2982081e4cf3a91948542147..ec29e3faa7c3efbc2b2dbe23372f57c30568b769 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -22,7 +22,8 @@ void init_Pow(py::module& m) {
     py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Pow_Op::getInputsName)
-    .def_static("get_outputs_name", &Pow_Op::getOutputsName);
+    .def_static("get_outputs_name", &Pow_Op::getOutputsName)
+    .def_readonly_static("Type", &Pow_Op::Type);
     declare_registrable<Pow_Op>(m, "PowOp");
 
     m.def("Pow", &Pow, py::arg("name") = "");
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 30279dc477a0badbd5dc361ef7b5d071fa7b8cbc..3467ed970c3f830298b46897717d123a0ab11800 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -38,7 +38,8 @@ void init_Producer(py::module &m) {
         .def(py::init<const std::shared_ptr<Tensor>, bool>(), py::arg("tensor"), py::arg("constant"))
         .def("dims", &Producer_Op::dims)
         .def_static("get_inputs_name", &Producer_Op::getInputsName)
-        .def_static("get_outputs_name", &Producer_Op::getOutputsName);
+        .def_static("get_outputs_name", &Producer_Op::getOutputsName)
+        .def_readonly_static("Type", &Producer_Op::Type);
 
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(
                                         const std::shared_ptr<Tensor>,
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index d611523f15a7007b0e9ab9cce323ed9a57d8ecdf..79720845cf21103d3a9257880e8d2068673e36f0 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -22,7 +22,8 @@ void init_ReLU(py::module& m) {
     py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &ReLU_Op::getInputsName)
-    .def_static("get_outputs_name", &ReLU_Op::getOutputsName);
+    .def_static("get_outputs_name", &ReLU_Op::getOutputsName)
+    .def_readonly_static("Type", &ReLU_Op::Type);
     declare_registrable<ReLU_Op>(m, "ReLUOp");
 
     m.def("ReLU", &ReLU, py::arg("name") = "");
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 0fceed204152e214cc40495a0bafb9bfc000f0c0..028e45755fb10bb01602959f721cf003cb1e5136 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -43,6 +43,7 @@ void declare_ReduceMeanOp(py::module &m) {
     .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes") = std::vector<std::int32_t>(), py::arg("keep_dims") = true, py::arg("noop_with_empty_axes") = false)
     .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
+    .def_readonly_static("Type", &ReduceMean_Op::Type)
     ;
   declare_registrable<ReduceMean_Op>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index 89d93134ac2f590bcb067aa6936081c16fc1e2a3..c0b0e8c30ef127d5cdcaf24ded75b83f06c86588 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -22,7 +22,8 @@ void init_Reshape(py::module& m) {
     py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
         .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
         .def_static("get_inputs_name", &Reshape_Op::getInputsName)
-        .def_static("get_outputs_name", &Reshape_Op::getOutputsName);
+        .def_static("get_outputs_name", &Reshape_Op::getOutputsName)
+        .def_readonly_static("Type", &Reshape_Op::Type);
     declare_registrable<Reshape_Op>(m, "ReshapeOp");
     m.def("Reshape", &Reshape, py::arg("shape") = std::vector<std::int64_t>(), py::arg("allowzero") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
index a925af8cf357dabc09f4e8e3c39af9519b4ed550..35321f525e486107af3715ce1c09f48b7c5cd60f 100644
--- a/python_binding/operator/pybind_Resize.cpp
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -20,7 +20,8 @@ namespace Aidge {
 void init_Resize(py::module& m) {
     py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
         .def_static("get_inputs_name", &Resize_Op::getInputsName)
-        .def_static("get_outputs_name", &Resize_Op::getOutputsName);
+        .def_static("get_outputs_name", &Resize_Op::getOutputsName)
+        .def_readonly_static("Type", &Resize_Op::Type);
 
     declare_registrable<Resize_Op>(m, "ResizeOp");
 
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
index 31e6c0b08194fbb8b6ec2270e8127a2f838ba78f..22e8011a9cd37f80a0678f2629809d4412ba6fd2 100644
--- a/python_binding/operator/pybind_Scaling.cpp
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -24,7 +24,8 @@ void init_Scaling(py::module& m)
     py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance())
         .def(py::init<float, size_t, bool>(), py::arg("scaling_factor"), py::arg("nb_bits"), py::arg("is_output_unsigned"))
         .def_static("get_inputs_name", &Scaling_Op::getInputsName)
-        .def_static("get_outputs_name", &Scaling_Op::getOutputsName);
+        .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
+        .def_readonly_static("Type", &Scaling_Op::Type);
     declare_registrable<Scaling_Op>(m, "ScalingOp");
     m.def("Scaling", &Scaling, py::arg("scaling_factor") = 1.0f, py::arg("nb_bits") = 8, py::arg("is_output_unsigned") = true, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
index 4e1d4203e48f714746587c9f209b4d28bfecb439..b3511f31eeab7d5df679d16c3bfb89f51d75cdbe 100644
--- a/python_binding/operator/pybind_Shape.cpp
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -26,7 +26,8 @@ void init_Shape(py::module& m) {
                 py::arg("start"),
                 py::arg("end"))
         .def_static("get_inputs_name", &Shape_Op::getInputsName)
-        .def_static("get_outputs_name", &Shape_Op::getOutputsName);
+        .def_static("get_outputs_name", &Shape_Op::getOutputsName)
+        .def_readonly_static("Type", &Shape_Op::Type);
 
     declare_registrable<Shape_Op>(m, "ShapeOp");
 
diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp
index 0ba94c73fcd1fb435194f8485567771a147ec616..db7fc7bfb60ff8360933e5f84ab54d4cec8df724 100644
--- a/python_binding/operator/pybind_Sigmoid.cpp
+++ b/python_binding/operator/pybind_Sigmoid.cpp
@@ -22,7 +22,8 @@ void init_Sigmoid(py::module& m) {
     py::class_<Sigmoid_Op, std::shared_ptr<Sigmoid_Op>, OperatorTensor>(m, "SigmoidOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Sigmoid_Op::getInputsName)
-    .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName);
+    .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName)
+    .def_readonly_static("Type", &Sigmoid_Op::Type);
 
     m.def("Sigmoid", &Sigmoid, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index b87cc8da4874c666de21a6e798a66e3c7fad9c10..c8cae2592b966fff7ebfde1e5905ed31d5b22455 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -30,7 +30,8 @@ void init_Slice(py::module& m) {
                   py::arg("axes"),
                   py::arg("steps"))
     .def_static("get_inputs_name", &Slice_Op::getInputsName)
-    .def_static("get_outputs_name", &Slice_Op::getOutputsName);
+    .def_static("get_outputs_name", &Slice_Op::getOutputsName)
+    .def_readonly_static("Type", &Slice_Op::Type);
     declare_registrable<Slice_Op>(m, "SliceOp");
 
     m.def("Slice",
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 8b6e16d93bbee6b0517398a56de44784cd893b97..3b98ab9dfa1590093c567a363f67d32d613651a2 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -23,7 +23,8 @@ void init_Softmax(py::module& m) {
     py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
         .def(py::init<std::int32_t>(), py::arg("axis"))
         .def_static("get_inputs_name", &Softmax_Op::getInputsName)
-        .def_static("get_outputs_name", &Softmax_Op::getOutputsName);
+        .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
+        .def_readonly_static("Type", &Softmax_Op::Type);
     declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
index f63a01f9815aa59cfbad0aea36f148899f44c9ea..9b3feda9f791e65a9c32f2bda3da4da450838b40 100644
--- a/python_binding/operator/pybind_Split.cpp
+++ b/python_binding/operator/pybind_Split.cpp
@@ -27,7 +27,8 @@ void init_Split(py::module& m) {
                 py::arg("axis"),
                 py::arg("split"))
         .def_static("get_inputs_name", &Split_Op::getInputsName)
-        .def_static("get_outputs_name", &Split_Op::getOutputsName);
+        .def_static("get_outputs_name", &Split_Op::getOutputsName)
+        .def_readonly_static("Type", &Split_Op::Type);
 
     declare_registrable<Split_Op>(m, "SplitOp");
 
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index 9fae2cef29748482dfeabe173d946c6446a60a35..ba0c5aab02349df4c50f960bbeb7df2082aa9233 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -21,7 +21,8 @@ void init_Sqrt(py::module& m) {
     py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Sqrt_Op::getInputsName)
-    .def_static("get_outputs_name", &Sqrt_Op::getOutputsName);
+    .def_static("get_outputs_name", &Sqrt_Op::getOutputsName)
+    .def_readonly_static("Type", &Sqrt_Op::Type);
     declare_registrable<Sqrt_Op>(m, "SqrtOp");
     m.def("Sqrt", &Sqrt, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index 752490a72bc35ec8a0ab08dd8d51a31c887b4dc6..52a622f0fdf6480a375d17c9729017fca32b3092 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -22,7 +22,8 @@ void init_Sub(py::module& m) {
     py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Sub_Op::getInputsName)
-    .def_static("get_outputs_name", &Sub_Op::getOutputsName);
+    .def_static("get_outputs_name", &Sub_Op::getOutputsName)
+    .def_readonly_static("Type", &Sub_Op::Type);
     declare_registrable<Sub_Op>(m, "SubOp");
     m.def("Sub", &Sub, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Tanh.cpp b/python_binding/operator/pybind_Tanh.cpp
index 74cde8dd3831c8d29ca87e2314afc27276ec025f..ded15ee78951d389d614d932e4a9c22bf310b814 100644
--- a/python_binding/operator/pybind_Tanh.cpp
+++ b/python_binding/operator/pybind_Tanh.cpp
@@ -22,7 +22,8 @@ void init_Tanh(py::module& m) {
     py::class_<Tanh_Op, std::shared_ptr<Tanh_Op>, OperatorTensor>(m, "TanhOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Tanh_Op::getInputsName)
-    .def_static("get_outputs_name", &Tanh_Op::getOutputsName);
+    .def_static("get_outputs_name", &Tanh_Op::getOutputsName)
+    .def_readonly_static("Type", &Tanh_Op::Type);
 
     m.def("Tanh", &Tanh, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index c0c3ad617bef3eda3e283667944ac423cd10a622..930dd95f3c3e4b10d2b4f8b496dfbbbcc6822050 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -31,7 +31,8 @@ void declare_Transpose(py::module &m) {
     m, "TransposeOp", py::multiple_inheritance())
     .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order"))
     .def_static("get_inputs_name", &Transpose_Op::getInputsName)
-    .def_static("get_outputs_name", &Transpose_Op::getOutputsName);
+    .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
+    .def_readonly_static("Type", &Transpose_Op::Type);
   declare_registrable<Transpose_Op>(m, pyClassName);
   m.def("Transpose", &Transpose, py::arg("output_dims_order"), py::arg("name") = "");
 }
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 616a8424b1f9df7e52a8af485b1bb82235f66a2f..52c8cc8a0199ac64b0f7bae97442178614ea5622 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -21,8 +21,9 @@ void init_Data(py::module&);
 void init_Database(py::module&);
 void init_DataProvider(py::module&);
 void init_Tensor(py::module&);
-void init_OperatorImpl(py::module&);
+void init_TensorImpl(py::module&);
 void init_Attributes(py::module&);
+void init_OperatorImpl(py::module&);
 void init_Log(py::module&);
 void init_Operator(py::module&);
 void init_OperatorTensor(py::module&);
@@ -32,6 +33,7 @@ void init_And(py::module&);
 void init_ArgMax(py::module&);
 void init_AvgPooling(py::module&);
 void init_BatchNorm(py::module&);
+void init_BitShift(py::module&);
 void init_Concat(py::module&);
 void init_ConstantOfShape(py::module&);
 void init_Conv(py::module&);
@@ -47,6 +49,7 @@ void init_Identity(py::module&);
 void init_LeakyReLU(py::module&);
 void init_MatMul(py::module&);
 void init_MaxPooling(py::module&);
+void init_Memorize(py::module&);
 void init_MetaOperatorDefs(py::module&);
 void init_Mul(py::module&);
 void init_Pad(py::module&);
@@ -84,6 +87,7 @@ void init_GraphViewHelper(py::module&);
 
 void init_Scheduler(py::module&);
 void init_MemoryManager(py::module&);
+void init_ProdConso(py::module&);
 void init_TensorUtils(py::module&);
 void init_Filler(py::module&);
 
@@ -94,6 +98,8 @@ void init_Aidge(py::module& m) {
     init_Database(m);
     init_DataProvider(m);
     init_Tensor(m);
+    init_TensorImpl(m);
+    init_Attributes(m);
 
     init_Node(m);
     init_GraphView(m);
@@ -101,7 +107,6 @@ void init_Aidge(py::module& m) {
     init_Connector(m);
 
     init_OperatorImpl(m);
-    init_Attributes(m);
     init_Log(m);
     init_Operator(m);
     init_OperatorTensor(m);
@@ -111,6 +116,7 @@ void init_Aidge(py::module& m) {
     init_ArgMax(m);
     init_AvgPooling(m);
     init_BatchNorm(m);
+    init_BitShift(m);
     init_Concat(m);
     init_Conv(m);
     init_ConvDepthWise(m);
@@ -126,6 +132,7 @@ void init_Aidge(py::module& m) {
     init_LeakyReLU(m);
     init_MatMul(m);
     init_MaxPooling(m);
+    init_Memorize(m);
     init_MetaOperatorDefs(m);
     init_Mul(m);
     init_Pad(m);
@@ -158,6 +165,7 @@ void init_Aidge(py::module& m) {
     init_GraphViewHelper(m);
     init_Scheduler(m);
     init_MemoryManager(m);
+    init_ProdConso(m);
     init_TensorUtils(m);
     init_Filler(m);
 }
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index a23b54e6f02f832fbc70482329966445f723b573..6908cbd912b506a7adb7f33a02416d0173174969 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -124,6 +124,13 @@ void init_Recipes(py::module &m)
     :return: Number of sub-graph actually fused in a Meta Operator.
     :rtype: int
     )mydelimiter");
+
+  m.def("adapt_to_backend", adaptToBackend, py::arg("graph_view"), R"mydelimiter(
+    Adapt the graph to the implementations available on its backend, replacing
+    operators with adapted versions where required.
+
+    :param graph_view: Graph view on which we want to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    )mydelimiter");
 }
 
 } // namespace Aidge
diff --git a/python_binding/scheduler/pybind_ProdConso.cpp b/python_binding/scheduler/pybind_ProdConso.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..abd6d5379178916b5842095d50a1de2155345b6f
--- /dev/null
+++ b/python_binding/scheduler/pybind_ProdConso.cpp
@@ -0,0 +1,116 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+/**
+ * @brief Trampoline class for binding
+ *
+ */
+class pyProdConso: public ProdConso {
+public:
+    using ProdConso::ProdConso; // Inherit constructors
+
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_nb_required_data",
+            getNbRequiredData,
+            inputIdx
+        );
+    }
+    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_nb_required_protected",
+            getNbRequiredProtected,
+            inputIdx
+        );
+    }
+    Elts_t getRequiredMemory(const IOIndex_t outputIdx,
+    const std::vector<DimSize_t> &inputsSize) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_required_memory",
+            getRequiredMemory,
+            outputIdx,
+            inputsSize
+        );
+    }
+    Elts_t getNbConsumedData(const IOIndex_t inputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_nb_consumed_data",
+            getNbConsumedData,
+            inputIdx
+        );
+    }
+    Elts_t getNbProducedData(const IOIndex_t outputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_nb_produced_data",
+            getNbProducedData,
+            outputIdx
+        );
+    }
+    void updateConsummerProducer() override {
+        PYBIND11_OVERRIDE_NAME(
+            void,
+            ProdConso,
+            "update_consummer_producer",
+            updateConsummerProducer,
+        );
+    }
+    void resetConsummerProducer() override {
+        PYBIND11_OVERRIDE_NAME(
+            void,
+            ProdConso,
+            "reset_consummer_producer",
+            resetConsummerProducer,
+        );
+    }
+};
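+
+// A minimal sketch of a Python-side override enabled by this trampoline
+// (the subclass name and the meaning of the boolean flag are assumptions):
+//
+//     class MyProdConso(aidge_core.ProdConso):
+//         def __init__(self, op):
+//             super().__init__(op, False)  # (Operator, in-place flag)
+//
+//         def get_nb_required_data(self, input_idx):
+//             # custom consumption logic would go here; the sketch just
+//             # falls back to the default behaviour:
+//             return super().get_nb_required_data(input_idx)
+//
+// Each overridden method is dispatched by its snake_case name through
+// PYBIND11_OVERRIDE_NAME; methods left undefined fall back to ProdConso.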
+
+void init_ProdConso(py::module& m) {
+    py::class_<ProdConso, std::shared_ptr<ProdConso>, pyProdConso>(m, "ProdConso", py::dynamic_attr())
+    .def(py::init<const Operator&, bool>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>())
+    .def_static("default_model", &ProdConso::defaultModel)
+    .def_static("in_place_model", &ProdConso::inPlaceModel)
+    .def("get_nb_required_data", &ProdConso::getNbRequiredData)
+    .def("get_nb_required_protected", &ProdConso::getNbRequiredProtected)
+    .def("get_required_memory", &ProdConso::getRequiredMemory)
+    .def("get_nb_consumed_data", &ProdConso::getNbConsumedData)
+    .def("get_nb_produced_data", &ProdConso::getNbProducedData)
+    .def("update_consummer_producer", &ProdConso::updateConsummerProducer)
+    .def("reset_consummer_producer", &ProdConso::resetConsummerProducer)
+    ;
+}
+} // namespace Aidge
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index ac35ce0a62408a69637a4160c9a008aba9dceb66..472af2a9465b121593613492f5120ddc9d7fe254 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -25,6 +25,7 @@ void init_Scheduler(py::module& m){
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("graph_view", &Scheduler::graphView)
     .def("save_scheduling_diagram", &Scheduler::saveSchedulingDiagram, py::arg("file_name"))
+    .def("save_static_scheduling_diagram", &Scheduler::saveStaticSchedulingDiagram, py::arg("file_name"))
     .def("resetScheduling", &Scheduler::resetScheduling)
     .def("generate_scheduling", &Scheduler::generateScheduling)
     .def("get_static_scheduling", &Scheduler::getStaticScheduling, py::arg("step") = 0)
diff --git a/setup.py b/setup.py
index f0c41626f2fa348ac5d52778d0d865a31b4c344c..4f2e21711f193eb7d5c37ace7b5ad83ac63d3635 100644
--- a/setup.py
+++ b/setup.py
@@ -61,13 +61,14 @@ class CMakeBuild(build_ext):
             if build_gen
             else []
         )
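+        # Forward AIDGE_BUILD_TEST ("ON"/"OFF", defaults to "OFF") to the
+        # -DTEST CMake option passed below.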
+        test_onoff = os.environ.get("AIDGE_BUILD_TEST", "OFF")
 
         self.spawn(
             [
                 "cmake",
                 *build_gen_opts,
                 str(cwd),
-                "-DTEST=OFF",
+                f"-DTEST={test_onoff}",
                 f"-DCMAKE_INSTALL_PREFIX:PATH={install_path}",
                 f"-DCMAKE_BUILD_TYPE={compile_type}",
                 "-DPYBIND=ON",
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index d992703fedb224e6650ce2ad50317cda3bae650f..0fa2cfdadb3af350a5668444c0a330e023818a41 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -14,106 +14,345 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Transpose.hpp"
+#include "aidge/operator/Cast.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
+Aidge::ImplSpec::ImplSpec(const DynamicAttributes& attrs_):
+    attrs(attrs_) {}
+Aidge::ImplSpec::ImplSpec(const IOSpec& io, const DynamicAttributes& attrs_):
+    inputs(1, io), outputs(1, io), attrs(attrs_) {}
+Aidge::ImplSpec::ImplSpec(const IOSpec& i, const IOSpec& o, const DynamicAttributes& attrs_):
+    inputs(1, i), outputs(1, o), attrs(attrs_) {}
+Aidge::ImplSpec::ImplSpec(const std::vector<IOSpec>& i, const std::vector<IOSpec>& o, const DynamicAttributes& attrs_):
+    inputs(i), outputs(o), attrs(attrs_) {}
+Aidge::ImplSpec::ImplSpec(const Aidge::ImplSpec&) = default;
+Aidge::ImplSpec::~ImplSpec() noexcept = default;
+
 Aidge::OperatorImpl::OperatorImpl(const Operator& op, const std::string& backend):
     mOp(op),
-    mBackend(backend),
-    mNbConsumedData(mOp.nbInputs(), Elts_t::NoneElts()),
-    mNbProducedData(mOp.nbOutputs(), Elts_t::NoneElts())
+    mBackend(backend)
 {
     //ctor
 }
 
-Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    if (mOp.getRawInput(inputIdx)) {
-        const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
-        if (!input->undefined()) {
-            // Known amount of data: requires the whole tensor by default
-            return Elts_t::DataElts(input->size());
+std::shared_ptr<Aidge::ProdConso> Aidge::OperatorImpl::prodConso() {
+    if (!mProdConso) {
+        mProdConso = getProdConso();
+    }
+    return mProdConso;
+}
+
+Aidge::ImplSpec Aidge::OperatorImpl::getRequiredSpec() const {
+    const auto& opTensor = dynamic_cast<const OperatorTensor&>(mOp);
+
+    ImplSpec requiredSpec;
+    // Inputs specs
+    for (size_t i = 0; i < opTensor.nbInputs(); ++i) {
+        if (opTensor.getInput(i)) {
+            std::vector<std::pair<int, int>> dims;
+            for (auto dim : opTensor.getInput(i)->dims()) {
+                dims.push_back(std::make_pair<int, int>(dim, dim));
+            }
+
+            requiredSpec.inputs.push_back({opTensor.getInput(i)->dataType(), opTensor.getInput(i)->dataFormat(), dims});
         }
         else {
-            // Unknown amount of data: require a single token by default
-            return Elts_t::TokenElts(1);
+            requiredSpec.inputs.push_back({DataType::Any});
         }
     }
+    // Outputs specs
+    for (size_t i = 0; i < opTensor.nbOutputs(); ++i) {
+        std::vector<std::pair<int, int>> dims;
+        for (auto dim : opTensor.getOutput(i)->dims()) {
+            dims.push_back(std::make_pair<int, int>(dim, dim));
+        }
 
-    // Input not connected, meaning it is an optional input: do no require anything!
-    return Elts_t::NoneElts();
+        requiredSpec.outputs.push_back({opTensor.getOutput(i)->dataType(), opTensor.getOutput(i)->dataFormat(), dims});
+    }
+    // Attributes
+    if (!mOp.isAtomic()) {
+        requiredSpec.attrs.setAttr("type:!", mOp.type()); // :! mandatory qualifier
+    }
+    else {
+        requiredSpec.attrs.setAttr("type", mOp.type());
+    }
+    return requiredSpec;
 }
 
-Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
-    if (mOp.getRawInput(inputIdx)) {
-        const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
-        if (!input->undefined()) {
-            // Known amount of data: protect the whole tensor by default
-            return Elts_t::DataElts(input->size());
+Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs) const {
+    Log::debug("getBestMatch() for requirements: {}", requiredSpecs);
+
+    const auto availableSpecsSet = getAvailableImplSpecs();
+    const std::vector<ImplSpec> availableSpecs(availableSpecsSet.begin(), availableSpecsSet.end());
+    std::vector<int> matchingSpecs(availableSpecs.size(), -1);
+
+    for (size_t s = 0; s < availableSpecs.size(); ++s) {
+        auto spec = availableSpecs[s];
+        bool match = true;
+        int priority = 0;
+
+        // Check inputs
+        for (size_t i = 0; i < requiredSpecs.inputs.size(); ++i) {
+            const auto inputSpec = (i < spec.inputs.size()) ? spec.inputs[i] : spec.inputs.back();
+            if (!checkIOSpec(requiredSpecs.inputs[i], inputSpec)) {
+                match = false;
+                break;
+            }
         }
-        else {
-            // Unknown amount of data: protect a single token by default
-            // (this does not really make sense for now, as getNbRequiredProtected()
-            // is supposed to give a precise amount of data to protect for
-            // memory management purpose...)
-            return Elts_t::TokenElts(1);
+
+        // Check outputs
+        for (size_t i = 0; i < requiredSpecs.outputs.size(); ++i) {
+            const auto outputSpec = (i < spec.outputs.size()) ? spec.outputs[i] : spec.outputs.back();
+            if (!checkIOSpec(requiredSpecs.outputs[i], outputSpec)) {
+                match = false;
+                break;
+            }
+        }
+
+        // Check attributes
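+        // Qualifier grammar, as parsed below: a bare "name" is optional with
+        // priority 0; "name:!" is mandatory (a missing attribute or value
+        // mismatch rejects the spec); "name:N" (e.g. "name:2") is optional
+        // and raises the candidate's priority to N on a value match.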
+        for (const auto& attrName : requiredSpecs.attrs.getAttrsName()) {
+            std::string name = attrName;
+            std::string qualifier;
+            const auto qualifierPos = std::find_if(attrName.begin(), attrName.end(),
+                [](char c) { return c == ':'; });
+            if (qualifierPos != attrName.end()) {
+                name = attrName.substr(0, qualifierPos - attrName.begin());
+                qualifier = attrName.substr(qualifierPos - attrName.begin() + 1);
+            }
+
+            const bool mandatory = (qualifier == "!");
+            if (mandatory) {
+                // Required attribute:
+                if (!spec.attrs.hasAttr(name)) {
+                    // Missing attribute
+                    match = false;
+                    break;
+                }
+                else if (requiredSpecs.attrs.getAny(attrName) < spec.attrs.getAny(name)
+                    || spec.attrs.getAny(name) < requiredSpecs.attrs.getAny(attrName))
+                {
+                    // Attribute value mismatch
+                    match = false;
+                    break;
+                }
+            }
+            else {
+                const int attrPriority = (!qualifier.empty()) ? std::stoi(qualifier) : 0;
+
+                if (spec.attrs.hasAttr(name)
+                    && !(requiredSpecs.attrs.getAny(attrName) < spec.attrs.getAny(name))
+                    && !(spec.attrs.getAny(name) < requiredSpecs.attrs.getAny(attrName)))
+                {
+                    // Attribute value match
+                    priority = std::max(priority, attrPriority);
+                }
+            }
         }
+
+        if (match) {
+            matchingSpecs[s] = priority;
+        }
+
+        Log::debug("  {}:{} - {}", (match) ? "MATCH" : "MISMATCH", priority, spec);
     }
 
-    // Input not connected, meaning it is an optional input: do no require anything!
-    return Elts_t::NoneElts();
+    // Return best match
+    const auto bestMatch = std::max_element(matchingSpecs.begin(), matchingSpecs.end());
+    if (bestMatch != matchingSpecs.end() && *bestMatch >= 0) {
+        const auto bestSpecIdx = bestMatch - matchingSpecs.begin();
+        return availableSpecs[bestSpecIdx];
+    }
+
+    // If there is no match, return the required specs for the registrar, which
+    // will throw a "missing or invalid registrar key"
+    return requiredSpecs;
 }
 
-Aidge::Elts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    if (mOp.getRawOutput(outputIdx)) {
-        const auto output = std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx));
-        if (!output->undefined()) {
-            // Known amount of data: requires the whole tensor by default,
-            // regardless of available data on inputs
-            return Elts_t::DataElts(output->size());
+bool Aidge::OperatorImpl::checkIOSpec(const ImplSpec::IOSpec& required, const ImplSpec::IOSpec& spec) const {
+    // Check type
+    if (required.type != DataType::Any
+        && spec.type != DataType::Any
+        && required.type != spec.type)
+    {
+        return false;
+    }
+
+    // Check format
+    if (required.format != DataFormat::Any
+        && spec.format != DataFormat::Any
+        && required.format != spec.format)
+    {
+        const auto transpose = getDataFormatTranspose(required.format, spec.format);
+        std::vector<size_t> identity(transpose.size());
+        std::iota(std::begin(identity), std::end(identity), 0);
+
+        if (!std::equal(transpose.begin(), transpose.end(), identity.begin())) {
+            return false;
         }
-        else {
-            // Unknown amount of data: require a single token by default
-            // (this does not really make sense for now, as getRequiredMemory()
-            // is supposed to give a precise amount of data to allocate for
-            // memory management purpose...)
-            return Elts_t::TokenElts(1);
+    }
+
+    // Check dims
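+    // Each dim spec is an inclusive [min, max] range, where -1 means "any";
+    // the available spec must cover the whole required range to match.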
+    if (!required.dims.empty() && !spec.dims.empty()) {
+        if (required.dims.size() != spec.dims.size()) {
+            return false;
+        }
+
+        for (size_t dim = 0; dim < required.dims.size(); ++dim) {
+            const auto requiredDim = required.dims[dim];
+            const auto specDim = spec.dims[dim];
+
+            if (requiredDim.first != -1
+                && specDim.first != -1
+                && !(specDim.first <= requiredDim.first && specDim.second >= requiredDim.second))
+            {
+                return false;
+            }
         }
     }
 
-    // Output not set, meaning it is an optional output: do no require anything!
-    return Elts_t::NoneElts();
+    return true;
 }
 
-Aidge::Elts_t Aidge::OperatorImpl::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(),
-        "input index ({}) is out of bound ({}) for operator type {}",
-        inputIdx, mNbConsumedData.size(), mOp.type());
-    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
-}
+std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const {
+    auto op = std::static_pointer_cast<OperatorTensor>(mOp.clone());
+    auto node = std::make_shared<Node>(op);
 
-Aidge::Elts_t Aidge::OperatorImpl::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
-    AIDGE_ASSERT(static_cast<std::size_t>(outputIdx) < mNbProducedData.size(),
-        "output index ({}) is out of bound ({}) for operator type {}",
-        outputIdx, mNbProducedData.size(), mOp.type());
-    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
-}
+    // Adapt inputs
+    for (size_t i = 0; i < requiredSpecs.inputs.size(); ++i) {
+        const auto IOSpec = (i < spec.inputs.size()) ? spec.inputs[i] : spec.inputs.back();
+        const ImplSpec::IOSpec& requiredIOSpec = requiredSpecs.inputs[i];
+        std::shared_ptr<Node> parent = node;
+
+        // Input type
+        if (requiredIOSpec.type != DataType::Any
+            && IOSpec.type != DataType::Any
+            && requiredIOSpec.type != IOSpec.type)
+        {
+            const auto cast = Cast(IOSpec.type);
+            cast->addChild(parent, 0, i);
+
+            op->getInput(i)->setDataType(IOSpec.type);
+        }
+
+        // Input format
+        if (requiredIOSpec.format != DataFormat::Any
+            && IOSpec.format != DataFormat::Any
+            && requiredIOSpec.format != IOSpec.format)
+        {
+            const auto transpose = getDataFormatTranspose(requiredIOSpec.format, IOSpec.format);
+            auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
+            transposeOp->getOperator()->setDataFormat(IOSpec.format);
+            transposeOp->getOperator()->setDataType(IOSpec.type);
+            transposeOp->addChild(parent, 0, i);
+
+            op->getInput(i)->setDataFormat(IOSpec.format);
+        }
+
+        // Input dims
+        if (!requiredIOSpec.dims.empty() && !IOSpec.dims.empty()) {
+            if (requiredIOSpec.dims.size() != IOSpec.dims.size()) {
+                return nullptr;
+            }
+
+            for (size_t dim = 0; dim < requiredIOSpec.dims.size(); ++dim) {
+                const auto requiredDim = requiredIOSpec.dims[dim];
+                const auto specDim = IOSpec.dims[dim];
 
-void Aidge::OperatorImpl::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx) {
-        // each input is consumed by the minimum amount for a forward pass
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));
+                if (requiredDim.first != -1
+                    && specDim.first != -1
+                    && !(specDim.first <= requiredDim.first && specDim.second >= requiredDim.second))
+                {
+                    return nullptr;
+                }
+            }
+        }
     }
 
-    for (std::size_t outputIdx = 0; outputIdx < mNbProducedData.size(); ++outputIdx) {
-        mNbProducedData[outputIdx] += getRequiredMemory(outputIdx, {});
+    // Adapt outputs
+    for (size_t i = 0; i < requiredSpecs.outputs.size(); ++i) {
+        const auto IOSpec = (i < spec.outputs.size()) ? spec.outputs[i] : spec.outputs.back();
+        const ImplSpec::IOSpec& requiredIOSpec = requiredSpecs.outputs[i];
+        std::shared_ptr<Node> parent = node;
+
+        // Output type
+        if (requiredIOSpec.type != DataType::Any
+            && IOSpec.type != DataType::Any
+            && requiredIOSpec.type != IOSpec.type)
+        {
+            const auto cast = Cast(requiredIOSpec.type);
+            parent->addChild(cast, i, 0);
+
+            op->getOutput(i)->setDataType(IOSpec.type);
+        }
+
+        // Output format
+        if (requiredIOSpec.format != DataFormat::Any
+            && IOSpec.format != DataFormat::Any
+            && requiredIOSpec.format != IOSpec.format)
+        {
+            const auto transpose = getDataFormatTranspose(IOSpec.format, requiredIOSpec.format);
+            auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
+            transposeOp->getOperator()->setDataFormat(requiredIOSpec.format);
+            transposeOp->getOperator()->setDataType(requiredIOSpec.type);
+            parent->addChild(transposeOp, i, 0);
+
+            op->getOutput(i)->setDataFormat(IOSpec.format);
+        }
+
+        // Output dims
+        if (!requiredIOSpec.dims.empty() && !IOSpec.dims.empty()) {
+            if (requiredIOSpec.dims.size() != IOSpec.dims.size()) {
+                return nullptr;
+            }
+
+            for (size_t dim = 0; dim < requiredIOSpec.dims.size(); ++dim) {
+                const auto requiredDim = requiredIOSpec.dims[dim];
+                const auto specDim = IOSpec.dims[dim];
+
+                if (requiredDim.first != -1
+                    && specDim.first != -1
+                    && !(specDim.first <= requiredDim.first && specDim.second >= requiredDim.second))
+                {
+                    return nullptr;
+                }
+            }
+        }
     }
+
+    return MetaOperator(std::string("Adapted_" + op->type()).c_str(), getConnectedGraphView(node));
 }
 
-void Aidge::OperatorImpl::resetConsummerProducer(){
-    std::fill(mNbConsumedData.begin(), mNbConsumedData.end(), Elts_t::NoneElts());
-    std::fill(mNbProducedData.begin(), mNbProducedData.end(), Elts_t::NoneElts());
+std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getBestAdaptation(const ImplSpec& requiredSpecs) const {
+    const auto availableSpecs = getAvailableImplSpecs();
+    Log::debug("Adapt operator type {}: {} impl. available", mOp.type(), availableSpecs.size());
+
+    using AdaptationCost = int;
+    std::map<std::shared_ptr<Node>, AdaptationCost> adaptations;
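+    // Cost heuristic: the number of nodes in the adapted micro-graph, so the
+    // adaptation inserting the fewest Transpose/Cast nodes wins.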
+
+    for (const auto& availableSpec : availableSpecs) {
+        auto adaptation = getAdaptation(availableSpec, requiredSpecs);
+
+        if (adaptation) {
+            auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(adaptation->getOperator())->getMicroGraph();
+            adaptations.insert(std::make_pair(adaptation, microGraph->getNodes().size()));
+        }
+    }
+
+    Log::debug("Adapt operator type {}: found {} possible adaptations", mOp.type(), adaptations.size());
+
+    if (!adaptations.empty()) {
+        // Return best adaptation (with min. AdaptationCost)
+        const auto bestAdaptation = std::min_element(adaptations.begin(), adaptations.end(),
+            [](const auto& lhs, const auto& rhs) { return lhs.second < rhs.second; });
+        return bestAdaptation->first;
+    }
+
+    return nullptr;
 }
 
 void Aidge::OperatorImpl::forward() {
@@ -123,3 +362,11 @@ void Aidge::OperatorImpl::forward() {
 void Aidge::OperatorImpl::backward() {
     AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented yet for operator of type {}", mOp.type());
 }
+
+std::shared_ptr<Aidge::ProdConso> Aidge::OperatorImpl::getProdConso() const {
+    return std::make_shared<ProdConso>(mOp);
+}
+
+std::set<Aidge::ImplSpec> Aidge::OperatorImpl::getAvailableImplSpecs() const {
+    return std::set<ImplSpec>();
+}
diff --git a/src/data/Data.cpp b/src/data/Data.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..62a883d08a401e02c86408214a061f893ffbfb4a
--- /dev/null
+++ b/src/data/Data.cpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/data/Data.hpp"
+
+Aidge::DataFormatTranspose Aidge::getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
+    // Permutation array from default format to src format
+    const auto srcDefToFormat = DataFormatTransposeDict[static_cast<int>(src)];
+    // Permutation array from default format to dst format
+    const auto dstDefToFormat = DataFormatTransposeDict[static_cast<int>(dst)];
+    // Compute permutation array from src format to default format:
+    DataFormatTranspose srcFormatToDef{};
+    for (size_t i = 0; i < srcDefToFormat.size(); ++i) {
+        if (srcDefToFormat[i] > 0) {
+            srcFormatToDef[srcDefToFormat[i] - 1] = i;
+        }
+        else {
+            srcFormatToDef[i] = i;
+        }
+    }
+
+    // Compute permutation array from src format to dst format:
+    DataFormatTranspose srcToDst{};
+    for (size_t i = 0; i < dstDefToFormat.size(); ++i) {
+        if (dstDefToFormat[srcFormatToDef[i]] > 0) {
+            srcToDst[i] = dstDefToFormat[srcFormatToDef[i]] - 1;
+        }
+        else {
+            srcToDst[i] = i;
+        }
+    }
+
+    return srcToDst;
+}
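+
+// Worked example (a sketch, assuming the usual 4D layouts): for
+// getDataFormatTranspose(NCHW, NHWC) the result is {0, 2, 3, 1}, i.e. a
+// NCHW dims vector {N, C, H, W} is reordered as
+// {dims[0], dims[2], dims[3], dims[1]} = {N, H, W, C}.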
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index b203b60bb32f0feb9b76d73cd6953cf81bea5e56..5a3eb695d7288c6414c01a82b36638f8b93d6b5f 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -650,9 +650,12 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
 
 std::pair<std::vector<Aidge::NodePtr>, size_t> Aidge::GraphView::getRankedNodes() const {
   std::set<NodePtr> nodesToRank(mNodes);
-  nodesToRank.erase(mRootNode);
   std::vector<NodePtr> rankedNodes;
-  rankedNodes.push_back(mRootNode);
+
+  if (mRootNode) {
+    nodesToRank.erase(mRootNode);
+    rankedNodes.push_back(mRootNode);
+  }
 
   for (size_t curNodeIdx = 0; curNodeIdx < rankedNodes.size(); ++curNodeIdx) {
     NodePtr curNode = rankedNodes[curNodeIdx];
@@ -1416,7 +1419,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
   }
 
   // For each node, convert old node -> new node connections
-  for (auto &oldToNewNode : oldToNewNodes) {
+  for (const auto &oldToNewNode : oldToNewNodes) {
     if (oldToNewNode.second == nullptr) {
       continue;  // deleted node
     }
@@ -1424,7 +1427,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
     // Connect parent nodes. Nodes that were removed with cloneNode() are set to nullptr
     size_t parentId = 0;
     for (auto parent : oldToNewNode.first->inputs()) {
-      if (parent.first != nullptr) {
+      if (parent.first != nullptr && inView(parent.first)) {
         while (oldToNewNodes[parent.first] == nullptr) {
           // Find next valid parent in line, going backward in the graph
           AIDGE_INTERNAL_ASSERT(parent.first->getChildren().size() == 1);
@@ -1460,7 +1463,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
     newGraph->add(oldToNewNodes[mRootNode], false);
   }
 
-  for (auto &oldToNewNode : oldToNewNodes) {
+  for (const auto &oldToNewNode : oldToNewNodes) {
     if (oldToNewNode.second == nullptr)
       continue;  // deleted node
 
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 382052535cc6b5cd8089f720b8fa9f8d3a0ebce1..b2ceb903d51dbb880979cd2191825a6310f9e5ff 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -73,13 +73,24 @@ void Aidge::Node::setName(const std::string& name) {
     mName = name;
 }
 
-std::string Aidge::Node::createUniqueName(std::string name){
-    for (auto graphView : views()){
-        if (graphView->inView(name)){
-            return createUniqueName(name.append("_"));
+std::string Aidge::Node::createUniqueName(std::string baseName)
+{
+    int index = 0;
+    bool nameAlreadyUsed = true;
+    std::string newName;
+    while (nameAlreadyUsed) {
+        std::string suffix = "_" + std::to_string(index);
+        newName = (index == 0) ? baseName : baseName + suffix;
+        nameAlreadyUsed = false;
+        for (auto graphView : views()) {
+            if (graphView->inView(newName)) {
+                nameAlreadyUsed = true;
+                break;
+            }
         }
+        index++;
     }
-    return name;
+    return newName;
 }
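+// Example: if "conv" and "conv_1" already exist in one of the node's views,
+// createUniqueName("conv") returns "conv_2"; if "conv" is free, it is
+// returned unchanged.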
 
 ///////////////////////////////////////////////////////
@@ -430,6 +441,9 @@ std::set<Aidge::NodePtr> Aidge::Node::getNodeDelta(int delta, std::set<Aidge::No
     return out;
 }
 
+
+Aidge::Node::~Node() = default;
+
 // namespace Aidge {
 // std::ostream& operator << (std::ostream& os, Aidge::Node& n) {
 //     using namespace std;
diff --git a/src/operator/Abs.cpp b/src/operator/Abs.cpp
index a8ee706f6c993362e2569b6be86f5e17545ae679..1dd7836ad220d031d60356a5663db84adaa486ec 100644
--- a/src/operator/Abs.cpp
+++ b/src/operator/Abs.cpp
@@ -23,3 +23,7 @@ void Aidge::Abs_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(Abs_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::Abs_Op::getAvailableBackends() const {
+    return Registrar<Abs_Op>::getKeys();
+}
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index f9dc3335a3b62e87edf33d25c5a516a63c4129a0..033c476c8a9e865fdf9d5670e295c3e4fb6101b3 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -85,6 +85,10 @@ void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Add_Op::getAvailableBackends() const {
+    return Registrar<Add_Op>::getKeys();
+}
+
 std::shared_ptr<Aidge::Node> Aidge::Add(const IOIndex_t nbIn, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
 }
\ No newline at end of file
diff --git a/src/operator/And.cpp b/src/operator/And.cpp
index 43aeebe24ef0e6d0e0b820d1459f25d64e7054a7..aebd5a71725f0999635f3844d8b2589bfb885138 100644
--- a/src/operator/And.cpp
+++ b/src/operator/And.cpp
@@ -56,3 +56,7 @@ void Aidge::And_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(And_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::And_Op::getAvailableBackends() const {
+    return Registrar<And_Op>::getKeys();
+}
diff --git a/src/operator/ArgMax.cpp b/src/operator/ArgMax.cpp
index 58ade4754a013a65af80e5b754d0d44ad3b18189..4808b730d2261ba0c1ea6d0d09871b1f322fc8fb 100644
--- a/src/operator/ArgMax.cpp
+++ b/src/operator/ArgMax.cpp
@@ -50,4 +50,8 @@ bool Aidge::ArgMax_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::ArgMax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(ArgMax_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
-}
\ No newline at end of file
+}
+
+std::set<std::string> Aidge::ArgMax_Op::getAvailableBackends() const {
+    return Registrar<ArgMax_Op>::getKeys();
+}
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 296ae789197f88c655c0097d94b370ef91f0189f..f8c8e5e3f32fff8306184dfdf3baa87392479ebf 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -113,6 +113,11 @@ void Aidge::AvgPooling_Op<DIM>::setBackend(const std::string &name, Aidge::Devic
     mOutputs[0]->setBackend(name, device);
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::AvgPooling_Op<DIM>::getAvailableBackends() const {
+    return Registrar<AvgPooling_Op<DIM>>::getKeys();
+}
+
 template class Aidge::AvgPooling_Op<1>;
 template class Aidge::AvgPooling_Op<2>;
 template class Aidge::AvgPooling_Op<3>;
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index a81cfc132773134889a5164762091229759b4f38..bcf3b29c45abe2c40788fd1ec0bad87db8ee227b 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -95,6 +95,11 @@ void Aidge::BatchNorm_Op<DIM>::setBackend(const std::string &name, Aidge::Device
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::BatchNorm_Op<DIM>::getAvailableBackends() const {
+    return Registrar<BatchNorm_Op<DIM>>::getKeys();
+}
+
 template class Aidge::BatchNorm_Op<2>;
 template class Aidge::BatchNorm_Op<3>;
 template class Aidge::BatchNorm_Op<4>;
diff --git a/src/operator/BitShift.cpp b/src/operator/BitShift.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7595590f7811f08eb2b790a259cff6a8ee72ffbf
--- /dev/null
+++ b/src/operator/BitShift.cpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>    // std::size_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/operator/BitShift.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::BitShift_Op::Type = "BitShift";
+
+bool Aidge::BitShift_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (!inputsAssociated()) {
+        return false;
+    }
+
+    const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+    const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+    std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+    const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+    std::size_t out_id = outDims.size() - 1;
+    std::size_t low_id = lowDims.size() - 1;
+    std::size_t i = 0;
+
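+    // Standard right-aligned broadcasting: e.g. dims {2, 3, 4} and {3, 1}
+    // broadcast to an output of dims {2, 3, 4}.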
+    while (i++ < lowDims.size()) {
+        if (outDims[out_id] == 1) {
+            outDims[out_id] = lowDims[low_id];
+        }
+        else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for BitShift Operation: {} for input#0 vs {} for input#1",
+                inputsDims0, inputsDims1);
+        }
+        --out_id;
+        --low_id;
+    }
+    mOutputs[0]->resize(outDims);
+    return true;
+}
+
+void Aidge::BitShift_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(BitShift_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::BitShift_Op::getAvailableBackends() const {
+    return Registrar<BitShift_Op>::getKeys();
+}
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index b6164a77cb47e0b9127fa4b02255ed0991805fe7..54eef17b67b320ef244881cee44ed8cabaa9bf47 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -47,6 +47,10 @@ void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Cast_Op::getAvailableBackends() const {
+    return Registrar<Cast_Op>::getKeys();
+}
+
 std::shared_ptr<Aidge::Node> Aidge::Cast(const Aidge::DataType targetType, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
 }
\ No newline at end of file
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index c78afa8665322a9cbca42a3326d527c1ebd949d4..55efdd51d56f7db4f64880b967def661e5354af5 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -134,6 +134,10 @@ void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Concat_Op::getAvailableBackends() const {
+    return Registrar<Concat_Op>::getKeys();
+}
+
 /////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Concat(const Aidge::IOIndex_t nbIn, const std::int32_t axis, const std::string& name) {
diff --git a/src/operator/ConstantOfShape.cpp b/src/operator/ConstantOfShape.cpp
index 4c245d27d1c4b5c9865d298ff7b8647a9ba5ec0d..7fe9dc1309080f844961a8e8a28c4a05964ae741 100644
--- a/src/operator/ConstantOfShape.cpp
+++ b/src/operator/ConstantOfShape.cpp
@@ -64,5 +64,9 @@ void ConstantOfShape_Op::setBackend(const std::string &name,
   value().setBackend(name,device);
 }
 
+std::set<std::string> Aidge::ConstantOfShape_Op::getAvailableBackends() const {
+  return Registrar<ConstantOfShape_Op>::getKeys();
+}
+
 } // namespace Aidge
 
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 92f4ec593a1dcb26a5a16ffb527667e39502e547..e055c7e5ebb9a6cff9f774da444cc582ed7de34c 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -43,16 +43,17 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // first check weight since it defines inChannels and outChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
-                    "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
+                    "Wrong weight Tensor dimension: {} for Conv{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
         // check data
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
                     (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
-                    "Wrong input size for Conv operator.");
+                    "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), inChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
         // check optional bias
         if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == outChannels()),
-                    "Wrong bias size for Conv operator.");
+                    "Wrong bias size ({}) for Conv operator. Expected dims are [{}].", getInput(2)->dims(), outChannels());
+
         std::array<DimSize_t, DIM + 2> outputDims{};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
@@ -157,6 +158,11 @@ void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::Conv_Op<DIM>::getAvailableBackends() const {
+    return Registrar<Conv_Op<DIM>>::getKeys();
+}
+
 template class Aidge::Conv_Op<1>;
 template class Aidge::Conv_Op<2>;
 
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index 9e95e78ea6867c41a332916b352f091ad528894a..f4d524356bd207a7ed101c2887c2fcda53f3bb83 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -44,16 +44,17 @@ bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // first check weight since it defines nbChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
-                    "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
+                    "Wrong weight Tensor dimension: {} for ConvDepthWise{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
         // check data
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
                     (getInput(0)->template dims<DIM+2>()[1] == nbChannels()),
-                    "Wrong input size for Conv operator.");
+                    "Wrong input size ({}) for ConvDepthWise operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), nbChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
         // check optional bias
         if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == nbChannels()),
-                    "Wrong bias size for Conv operator.");
+                    "Wrong bias size ({}) for ConvDepthWise operator. Expected dims are [{}].", getInput(2)->dims(), nbChannels());
+
         std::array<DimSize_t, DIM + 2> outputDims = {};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
@@ -156,6 +157,11 @@ void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::De
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::ConvDepthWise_Op<DIM>::getAvailableBackends() const {
+    return Registrar<ConvDepthWise_Op<DIM>>::getKeys();
+}
+
 template class Aidge::ConvDepthWise_Op<1>;
 template class Aidge::ConvDepthWise_Op<2>;
 
diff --git a/src/operator/DepthToSpace.cpp b/src/operator/DepthToSpace.cpp
index 0c858548ec484c34a651efa4adec1cde7ccb9e54..6b8d05625b99aec05be4f531460a5d25c120a5e0 100644
--- a/src/operator/DepthToSpace.cpp
+++ b/src/operator/DepthToSpace.cpp
@@ -113,6 +113,10 @@ void Aidge::DepthToSpace_Op::setBackend(const std::string& name, Aidge::DeviceId
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::DepthToSpace_Op::getAvailableBackends() const {
+    return Registrar<DepthToSpace_Op>::getKeys();
+}
+
 //////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::DepthToSpace(const std::uint32_t blockSize,
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 2140b17a3abee329effaae63fada187fc522495f..96eea3df966b273445be8a6e9d9a5acf2d6fafb2 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -57,6 +57,10 @@ void Aidge::Div_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Div_Op::getAvailableBackends() const {
+    return Registrar<Div_Op>::getKeys();
+}
+
 ///////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Div(const std::string& name) {
diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp
index ed1f79f79a3011f72da1a1804d84960595f880c0..bd5f76f8aa7c0889311e4f922fec8d20168e24b5 100644
--- a/src/operator/Erf.cpp
+++ b/src/operator/Erf.cpp
@@ -38,6 +38,10 @@ void Aidge::Erf_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Erf_Op::getAvailableBackends() const {
+    return Registrar<Erf_Op>::getKeys();
+}
+
 /////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Erf(const std::string& name) {
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 577a1842d76d3f58763ccd598205935e2c6d6eb4..dd3ed7aba65cf1875d691d9bc2c8c94bb03856c7 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -91,6 +91,10 @@ void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device
     }
 }
 
+std::set<std::string> Aidge::FC_Op::getAvailableBackends() const {
+    return Registrar<FC_Op>::getKeys();
+}
+
 std::shared_ptr<Aidge::Node> Aidge::FC(const Aidge::DimSize_t inChannels,
                                        const Aidge::DimSize_t outChannels,
                                        bool noBias,
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index 1a2ec88bbfb2bfed134e779619a0a3f0604ce155..99ccb7505cd959178e4bd7132e32552ea5a72ecf 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -82,6 +82,11 @@ void Aidge::Fold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::Fold_Op<DIM>::getAvailableBackends() const {
+    return Registrar<Fold_Op<DIM>>::getKeys();
+}
+
 template class Aidge::Fold_Op<2>;
 
 ///////////////////////////////////////
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 00d471f6dc3e1417e4b343002b12a26260030d30..0ebc3e3bc81b15d9414d01f12a2768be6a7ddc42 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -142,6 +142,10 @@ void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Gather_Op::getAvailableBackends() const {
+    return Registrar<Gather_Op>::getKeys();
+}
+
 /////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Gather(std::int8_t axis,
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index e7b2bdffb979fe377de5c7bd1e86147874e7d043..bbcfd0d28ca039318647d206af876727793e1bfc 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -57,6 +57,10 @@ void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::GlobalAveragePooling_Op::getAvailableBackends() const {
+    return Registrar<GlobalAveragePooling_Op>::getKeys();
+}
+
 ////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::GlobalAveragePooling(const std::string &name) {
diff --git a/src/operator/GridSample.cpp b/src/operator/GridSample.cpp
index fa1efc75a4c0a85717343ce4fcdea1a8adcfb4e7..d26679f8337390879c8f4c4d10deb883fb40e6da 100644
--- a/src/operator/GridSample.cpp
+++ b/src/operator/GridSample.cpp
@@ -95,6 +95,10 @@ void Aidge::GridSample_Op::setBackend(const std::string &name, Aidge::DeviceIdx_
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::GridSample_Op::getAvailableBackends() const {
+    return Registrar<GridSample_Op>::getKeys();
+}
 
 ////////////////////////////////////////////////
 
diff --git a/src/operator/ILayerNorm.cpp b/src/operator/ILayerNorm.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..daa7ecf86b7ea9a9b10b962d356581f926e92eed
--- /dev/null
+++ b/src/operator/ILayerNorm.cpp
@@ -0,0 +1,56 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 10.09.2024
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ILayerNorm.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ILayerNorm_Op::Type = "ILayerNorm";
+
+void Aidge::ILayerNorm_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
+    AIDGE_ASSERT(inputIdx < 3, "Operator {} supports only {} inputs", type(), nbInputs());
+    AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
+    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    if (inputIdx == 0 && getInput(0)->nbDims() == 1)
+        mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
+}
+
+bool Aidge::ILayerNorm_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        const DimSize_t nbFeatures = getInput(0)->dims()[1];
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            if (inputCategory(i) == InputCategory::Param && getInput(i)->size() != nbFeatures) {
+                getInput(i)->resize({getInput(0)->dims()[1]});
+            }
+        }
+        mOutputs[0]->resize(getInput(0)->dims());
+        return true;
+    }
+    return false;
+}
+
+void Aidge::ILayerNorm_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(ILayerNorm_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+    getInput(1)->setBackend(name, device);
+    getInput(2)->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::ILayerNorm_Op::getAvailableBackends() const {
+    return Registrar<ILayerNorm_Op>::getKeys();
+}
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index 2f60eb2fd9c5d43c60ae7ee3af49c3b2e407a1fe..f0b8720bc1e22d8d6308460eabe436db8a4c9f6d 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -13,35 +13,37 @@
 
 #include "aidge/operator/Identity.hpp"
 
+void Aidge::Identity_OpImpl::forward() {
+    const Identity_Op& op = dynamic_cast<const Identity_Op&>(mOp);
+    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+}
+
+//////////////////////////////////////////////////
+
 const std::string Aidge::Identity_Op::Type = "Identity";
 
 Aidge::Identity_Op::Identity_Op()
     : OperatorTensor(Type, {InputCategory::Data}, 1)
 {
-    mImpl = std::make_shared<OperatorImpl>(*this);
+    mImpl = std::make_shared<Identity_OpImpl>(*this);
 }
 
 Aidge::Identity_Op::Identity_Op(const Aidge::Identity_Op& op)
     : OperatorTensor(op)
 {
-    mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
+    mImpl = std::make_shared<Identity_OpImpl>(*this, op.backend());
 }
 
 std::shared_ptr<Aidge::Operator> Aidge::Identity_Op::clone() const {
     return std::make_shared<Identity_Op>(*this);
 }
 
-bool Aidge::Identity_Op::dimsForwarded() const {
-    const auto& input0 = getInput(0);
-    return input0 ? (input0->undefined() ? false :
-                            input0->dims() == getOutput(0)->dims()) :
-                                false;
+void Aidge::Identity_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    mOutputs[0]->setBackend(name, device);
 }
 
-void Aidge::Identity_Op::forward() {
-    // Perform a shallow copy
-    *(mOutputs[0]) = *(mInputs[0]);
-    runHooks();
+std::set<std::string> Aidge::Identity_Op::getAvailableBackends() const {
+    return Registrar<Identity_Op>::getKeys();
 }
 
 std::shared_ptr<Aidge::Node> Aidge::Identity(const std::string& name) {
diff --git a/src/operator/LeakyReLU.cpp b/src/operator/LeakyReLU.cpp
index 9def23758d5f779f14dec2ee19199fe0f48c4980..dea73f3101887c5213a02b029d344a34f74ba4af 100644
--- a/src/operator/LeakyReLU.cpp
+++ b/src/operator/LeakyReLU.cpp
@@ -38,6 +38,10 @@ void Aidge::LeakyReLU_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::LeakyReLU_Op::getAvailableBackends() const {
+    return Registrar<LeakyReLU_Op>::getKeys();
+}
+
 /////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::LeakyReLU(float negativeSlope, const std::string& name) {
diff --git a/src/operator/Ln.cpp b/src/operator/Ln.cpp
index 31012cbb1eec22f8dc02497f9e46b88ec713eabe..90ae8d8c7dac464665828248c923a1f278dad79b 100755
--- a/src/operator/Ln.cpp
+++ b/src/operator/Ln.cpp
@@ -38,6 +38,10 @@ void Aidge::Ln_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Ln_Op::getAvailableBackends() const {
+    return Registrar<Ln_Op>::getKeys();
+}
+
 /////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Ln(const std::string& name) {
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index c95fe544cbd29f715e8bd7caae58193deaac6331..668ffd04b7acb0e72b4a3313805fa89ca3466f32 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -97,6 +97,10 @@ void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::MatMul_Op::getAvailableBackends() const {
+    return Registrar<MatMul_Op>::getKeys();
+}
+
 ////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::MatMul(const std::string& name) {
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
index 85f2dd930f2d35b9d9e9ea597b588637a56cb952..5ce137fe6b6c0e4b7150bfc0f1182f6f8ee94850 100644
--- a/src/operator/MaxPooling.cpp
+++ b/src/operator/MaxPooling.cpp
@@ -83,6 +83,11 @@ void Aidge::MaxPooling_Op<DIM>::setBackend(const std::string &name, Aidge::Devic
     mOutputs[0]->setBackend(name, device);
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::MaxPooling_Op<DIM>::getAvailableBackends() const {
+    return Registrar<MaxPooling_Op<DIM>>::getKeys();
+}
+
 template class Aidge::MaxPooling_Op<1>;
 template class Aidge::MaxPooling_Op<2>;
 template class Aidge::MaxPooling_Op<3>;
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index f713fdaad793aebebf5047d4ebf1dfd5aca10cd1..c3d64d5bc66bc00e1ed67fc6158f656c75fb2b82 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -20,7 +20,7 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::Memorize_OpImpl::getNbRequiredData(
+Aidge::Elts_t Aidge::Memorize_ProdConso::getNbRequiredData(
     Aidge::IOIndex_t inputIdx) const
 {
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
@@ -35,11 +35,11 @@ Aidge::Elts_t Aidge::Memorize_OpImpl::getNbRequiredData(
         return Elts_t::NoneElts();
     }
     else {
-        return OperatorImpl::getNbRequiredData(inputIdx);
+        return ProdConso::getNbRequiredData(inputIdx);
     }
 }
 
-Aidge::Elts_t Aidge::Memorize_OpImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+Aidge::Elts_t Aidge::Memorize_ProdConso::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                          const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
     assert(mOp.getRawOutput(outputIdx) && "requires valid output");
 
@@ -53,8 +53,8 @@ Aidge::Elts_t Aidge::Memorize_OpImpl::getRequiredMemory(const Aidge::IOIndex_t o
     }
 }
 
-void Aidge::Memorize_OpImpl::updateConsummerProducer() {
-    OperatorImpl::updateConsummerProducer();
+void Aidge::Memorize_ProdConso::updateConsummerProducer() {
+    ProdConso::updateConsummerProducer();
 
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
     AIDGE_ASSERT(op.endStep() == 0 || op.scheduleStep() <= op.endStep(), "cannot update consumer producer anymore, number of cycles exceeded");
@@ -153,6 +153,10 @@ void Aidge::Memorize_Op::forward() {
     mAttributes->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
 }
 
+std::set<std::string> Aidge::Memorize_Op::getAvailableBackends() const {
+    return Registrar<Memorize_Op>::getKeys();
+}
+
 /////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Memorize(const std::uint32_t endStep, const std::string& name) {
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index fc094464bdce9473c40c9056e0f384400c4af72a..e3acba9b4cccdf525d80f85344ba500cc7ac885f 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -18,6 +18,7 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 
 Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph)
     : OperatorTensor(type, [graph]() {
@@ -87,9 +88,36 @@ void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceId
     mGraph->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::MetaOperator_Op::getAvailableBackends() const {
+    std::set<std::string> backendsList;
+    for (const auto& tupleKey : Registrar<MetaOperator_Op>::getKeys()) {
+        if (std::get<1>(tupleKey) == type()) {
+            backendsList.insert(std::get<0>(tupleKey));
+        }
+    }
+    return backendsList;
+}
+
+std::shared_ptr<Aidge::Attributes> Aidge::MetaOperator_Op::attributes() const {
+    auto attrs = std::make_shared<DynamicAttributes>();
+
+    for (const auto& node : mGraph->getRankedNodesName("{3}")) {
+        const auto attributes = node.first->getOperator()->attributes();
+        if (attributes) {
+            const auto nodeAttrs = DynamicAttributes(attributes->getAttrs());
+            attrs->addAttr(node.first->type() + "#" + node.second, nodeAttrs);
+            if (node.second == "0") {
+                attrs->addAttr(node.first->type(), nodeAttrs);
+            }
+        }
+    }
+
+    return attrs;
+}
+
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
     if (mImpl) {
-        return mImpl->getNbRequiredData(inputIdx);
+        return mImpl->prodConso()->getNbRequiredData(inputIdx);
     }
     else {
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
@@ -104,7 +132,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx
 
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredProtected(const IOIndex_t inputIdx) const {
     if (mImpl) {
-        return mImpl->getNbRequiredProtected(inputIdx);
+        return mImpl->prodConso()->getNbRequiredProtected(inputIdx);
     }
     else {
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
@@ -119,7 +147,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredProtected(const IOIndex_t inp
 
 Aidge::Elts_t Aidge::MetaOperator_Op::getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const {
     if (mImpl) {
-        return mImpl->getRequiredMemory(outputIdx, inputsSize);
+        return mImpl->prodConso()->getRequiredMemory(outputIdx, inputsSize);
     }
     else {
         const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
@@ -134,7 +162,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getRequiredMemory(const IOIndex_t outputId
 
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbConsumedData(IOIndex_t inputIdx) const {
     if (mImpl) {
-        return mImpl->getNbConsumedData(inputIdx);
+        return mImpl->prodConso()->getNbConsumedData(inputIdx);
     }
     else {
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
@@ -149,7 +177,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbConsumedData(IOIndex_t inputIdx) cons
 
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbProducedData(IOIndex_t outputIdx) const {
     if (mImpl) {
-        return mImpl->getNbProducedData(outputIdx);
+        return mImpl->prodConso()->getNbProducedData(outputIdx);
     }
     else {
         const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
@@ -164,7 +192,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbProducedData(IOIndex_t outputIdx) con
 
 void Aidge::MetaOperator_Op::resetConsummerProducer() {
     if (mImpl) {
-        mImpl->resetConsummerProducer();
+        mImpl->prodConso()->resetConsummerProducer();
     }
     else {
         if (!mScheduler) {
@@ -178,7 +206,7 @@ void Aidge::MetaOperator_Op::resetConsummerProducer() {
 
 void Aidge::MetaOperator_Op::updateConsummerProducer() {
     if (mImpl) {
-        mImpl->updateConsummerProducer();
+        mImpl->prodConso()->updateConsummerProducer();
     }
     else {
         if (!mScheduler) {
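The new `MetaOperator_Op::attributes()` aggregates the inner graph's node attributes under namespaced keys built as `type() + "#" + rank`, with the first instance of each type also exposed under the bare type name. A hedged sketch of the resulting access pattern (the key format comes from the code above; the node type and the `hasAttr()`/`getAttr<T>()` accessors are assumptions):

``` cpp
// Illustrative sketch: reading a nested attribute of a meta-operator.
#include <memory>

#include "aidge/operator/MetaOperator.hpp"
#include "aidge/utils/DynamicAttributes.hpp"

void inspectMetaOp(const std::shared_ptr<Aidge::MetaOperator_Op>& metaOp) {
    // attributes() above builds a DynamicAttributes, so this cast is assumed safe.
    const auto attrs =
        std::static_pointer_cast<Aidge::DynamicAttributes>(metaOp->attributes());
    // "Conv#1" = attributes of the "Conv" node ranked 1 inside the meta-operator;
    // rank 0 is also reachable under plain "Conv". hasAttr()/getAttr<T>() are
    // assumed accessors of DynamicAttributes.
    if (attrs->hasAttr("Conv#1")) {
        const auto convAttrs = attrs->getAttr<Aidge::DynamicAttributes>("Conv#1");
        (void)convAttrs;  // ... inspect the nested attributes here ...
    }
}
```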
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index 4190c10a06458036f2cd8953156b969afa51bebf..adabcd0d359927693965cec1987d2fad083328b9 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -50,6 +50,15 @@ void Aidge::Move_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Move_Op::getAvailableBackends() const {
+    std::set<std::string> backendsList;
+    for (const auto& tupleKey : Registrar<Move_Op>::getKeys()) {
+        backendsList.insert(std::get<0>(tupleKey));
+        backendsList.insert(std::get<1>(tupleKey));
+    }
+    return backendsList;
+}
+
 ////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Move(const std::string& name) {
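Unlike the single-keyed operators, `Move_Op` is registered under a two-part key, one backend for the source and one for the destination, which is why `getAvailableBackends()` above inserts both tuple elements. A self-contained sketch of that flattening (the key shape is inferred from the `std::get<0>`/`std::get<1>` calls above; the function itself is illustrative):

``` cpp
// Illustrative sketch: flatten (source, destination) backend pairs into one set,
// mirroring Move_Op::getAvailableBackends() above.
#include <set>
#include <string>
#include <tuple>
#include <vector>

std::set<std::string> flattenMoveKeys(
        const std::vector<std::tuple<std::string, std::string>>& keys) {
    std::set<std::string> backends;
    for (const auto& key : keys) {
        backends.insert(std::get<0>(key));  // source backend, e.g. "cpu"
        backends.insert(std::get<1>(key));  // destination backend, e.g. "cuda"
    }
    return backends;
}
```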
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index e2e32805f6fde7ab6831fe4756ca60ad42c3925a..3f163c9d6a572cc488c621a0ec6819ea68143304 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -71,6 +71,10 @@ void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Mul_Op::getAvailableBackends() const {
+    return Registrar<Mul_Op>::getKeys();
+}
+
 ///////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Mul(const std::string& name) {
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index 317bbd364572f49a714e328bf33f3cd58c19215f..f15a7dc3899a7bc864e8e76ff0946fb70584bf05 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
@@ -33,35 +34,35 @@ Aidge::Operator::~Operator() noexcept = default;
 
 Aidge::Elts_t Aidge::Operator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredData(): an implementation is required for {}!", type());
-    return mImpl->getNbRequiredData(inputIdx);
+    return mImpl->prodConso()->getNbRequiredData(inputIdx);
 }
 
 Aidge::Elts_t Aidge::Operator::getNbRequiredProtected(const Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredProtected(): an implementation is required for {}!", type());
-    return mImpl->getNbRequiredProtected(inputIdx);
+    return mImpl->prodConso()->getNbRequiredProtected(inputIdx);
 }
 
 Aidge::Elts_t Aidge::Operator::getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const {
     AIDGE_ASSERT(mImpl != nullptr, "getRequiredMemory(): an implementation is required for {}!", type());
-    return mImpl->getRequiredMemory(outputIdx, inputsSize);
+    return mImpl->prodConso()->getRequiredMemory(outputIdx, inputsSize);
 }
 
 Aidge::Elts_t Aidge::Operator::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbConsumedData(): an implementation is required for {}!", type());
-    return mImpl->getNbConsumedData(inputIdx);
+    return mImpl->prodConso()->getNbConsumedData(inputIdx);
 }
 
 Aidge::Elts_t Aidge::Operator::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbProducedData(): an implementation is required for {}!", type());
-    return mImpl->getNbProducedData(outputIdx);
+    return mImpl->prodConso()->getNbProducedData(outputIdx);
 }
 void Aidge::Operator::updateConsummerProducer(){
     AIDGE_ASSERT(mImpl != nullptr, "updateConsummerProducer(): an implementation is required for {}!", type());
-    mImpl->updateConsummerProducer();
+    mImpl->prodConso()->updateConsummerProducer();
 }
 void Aidge::Operator::resetConsummerProducer(){
     AIDGE_ASSERT(mImpl != nullptr, "resetConsummerProducer(): an implementation is required for {}!", type());
-    mImpl->resetConsummerProducer();
+    mImpl->prodConso()->resetConsummerProducer();
 }
 
 void Aidge::Operator::runHooks() const {
@@ -79,3 +80,17 @@ void Aidge::Operator::backward() {
     AIDGE_ASSERT(mImpl != nullptr, "backward(): an implementation is required for {}!", type());
     mImpl->backward(); 
 }
+
+void Aidge::Operator::setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends) {
+    const auto& availableBackends = getAvailableBackends();
+    // By default, try to set the last backend anyway
+    auto selectedBackend = backends.back();
+    for (const auto& backend : backends) {
+        if (availableBackends.find(backend.first) != availableBackends.end()) {
+            selectedBackend = backend;
+            break;
+        }
+    }
+
+    setBackend(selectedBackend.first, selectedBackend.second);
+}
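The new `Operator::setBackend()` overload takes an ordered preference list and falls back to the last entry when none is registered, so a caller can express "prefer CUDA, otherwise CPU" in one call. A usage sketch (the backend names and device index are illustrative):

``` cpp
// Illustrative usage of the preference-list overload added above.
#include <memory>

#include "aidge/operator/Operator.hpp"

void pickBackend(const std::shared_ptr<Aidge::Operator>& op) {
    // Tries "cuda" first; if it is not among getAvailableBackends(), tries
    // "cpu"; if neither is available, still attempts "cpu" (the last entry)
    // so that the usual missing-implementation error reporting kicks in.
    op->setBackend({{"cuda", 0}, {"cpu", 0}});
}
```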
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
index 5b1428c160f976a043bb5cbe6fc6cb3351bab336..39f61e328bd3f98bc836604462bbfc064fbb93be 100644
--- a/src/operator/Pad.cpp
+++ b/src/operator/Pad.cpp
@@ -53,6 +53,11 @@ void Aidge::Pad_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::Pad_Op<DIM>::getAvailableBackends() const {
+    return Registrar<Pad_Op<DIM>>::getKeys();
+}
+
 template class Aidge::Pad_Op<1>;
 template class Aidge::Pad_Op<2>;
 
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 5d32a06fd01d8674d8e072f14838f3fd80d1f30a..cd5b18759cdd743f292054bca91ffee5da722ea6 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -20,7 +20,7 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::Pop_OpImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+Aidge::Elts_t Aidge::Pop_ProdConso::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getRawInput(inputIdx) && "requires valid input");
 
     const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
@@ -88,6 +88,10 @@ void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Pop_Op::getAvailableBackends() const {
+    return Registrar<Pop_Op>::getKeys();
+}
+
 void Aidge::Pop_Op::forward() {
     Operator::forward();
     ++mAttributes->template getAttr<PopAttr::ForwardStep>();
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 1602c8c2aa28e305b340888cb3a77cb4d2fc4293..ada71d6cc56c6d88ff64bf720595b220b296801d 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -56,6 +56,10 @@ void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Pow_Op::getAvailableBackends() const {
+    return Registrar<Pow_Op>::getKeys();
+}
+
 ////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Pow(const std::string& name) {
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index e5c4a3e9e18af8b3236b612db2b959f5ce4ec30a..fdba4ac2e22d857a31779df2e5ff789c3eb92f5c 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -84,6 +84,10 @@ void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Producer_Op::getAvailableBackends() const {
+    return Registrar<Producer_Op>::getKeys();
+}
+
 void Aidge::Producer_Op::forward() {
     if (!backend().empty()) {
         mImpl->forward();
diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp
index 03f9e0679facc452d5a8bdc71707a824240f15ac..bda26fa3332ee914325820f47d0babcb622905c8 100644
--- a/src/operator/ReLU.cpp
+++ b/src/operator/ReLU.cpp
@@ -38,6 +38,10 @@ void Aidge::ReLU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::ReLU_Op::getAvailableBackends() const {
+    return Registrar<ReLU_Op>::getKeys();
+}
+
 /////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ReLU(const std::string& name) {
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 2a215d897884e936aa9265e5ae16b1774d94bae6..7935edb050824af92a8f130f975aa09e41ca875f 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -94,6 +94,12 @@ void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::ReduceMean_Op::getAvailableBackends() const {
+    return Registrar<ReduceMean_Op>::getKeys();
+}
+
+Aidge::ReduceMean_Op::~ReduceMean_Op() noexcept = default;
+
 ////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ReduceMean(const std::vector<std::int32_t> &axes,
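The out-of-line `~ReduceMean_Op() noexcept = default;` added above is the usual C++ idiom for classes whose header only forward-declares a member's type: the defaulted destructor must be instantiated in the .cpp, where the complete type is visible. A self-contained sketch of the idiom, with made-up names (whether this is the exact motivation here is an assumption):

``` cpp
// Sketch of the out-of-line defaulted destructor idiom (all names made up).
#include <memory>

class Impl;  // forward declaration only: Impl is incomplete at this point

class Widget {
public:
    Widget();
    ~Widget() noexcept;           // declared in the "header" part, not defaulted here
private:
    std::unique_ptr<Impl> mImpl;  // deleting an Impl requires its complete type
};

// In the ".cpp" part, once Impl's full definition is visible:
class Impl { /* full definition */ };
Widget::Widget() : mImpl(std::make_unique<Impl>()) {}
Widget::~Widget() noexcept = default;  // instantiated here, where Impl is complete
```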
diff --git a/src/operator/ReduceSum.cpp b/src/operator/ReduceSum.cpp
index aa8271f4c1696d46274e536e14d255525d848f80..0786f53c6b761e5cd9020352a2ecb92469a609d7 100644
--- a/src/operator/ReduceSum.cpp
+++ b/src/operator/ReduceSum.cpp
@@ -69,4 +69,8 @@ bool Aidge::ReduceSum_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::ReduceSum_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(ReduceSum_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
-}
\ No newline at end of file
+}
+
+std::set<std::string> Aidge::ReduceSum_Op::getAvailableBackends() const {
+    return Registrar<ReduceSum_Op>::getKeys();
+}
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 5139a0b0c98b11a0cbf6770397be56c830d0aa49..0fa9a62816a36ad3afece02052224c966ee121a3 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -136,6 +136,10 @@ void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t d
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Reshape_Op::getAvailableBackends() const {
+    return Registrar<Reshape_Op>::getKeys();
+}
+
 //////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Reshape(const std::vector<std::int64_t>& shape,
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index f3a69848ebd3cb7dbfb43788d16030e21e071b9c..9e5762452e382a31c1e5da25708507653da2e474 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -149,6 +149,10 @@ void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     }
 }
 
+std::set<std::string> Aidge::Resize_Op::getAvailableBackends() const {
+    return Registrar<Resize_Op>::getKeys();
+}
+
 /////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Resize(const std::string &name) {
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
index a53695b58aab9ea8a50e15638b4c50d42cf444dd..5ac08cd2245e0caa3ca7072c70ccc69bcfcf9558 100644
--- a/src/operator/Scaling.cpp
+++ b/src/operator/Scaling.cpp
@@ -48,6 +48,10 @@ void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t d
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Scaling_Op::getAvailableBackends() const {
+    return Registrar<Scaling_Op>::getKeys();
+}
+
 ////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Scaling(float scalingFactor,
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index f2ad1005907b71ee279b9d9bc9853b667108855c..29a9ee6252a0c2baa6e07bc56e60650685db6bdd 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -91,6 +91,10 @@ void Aidge::Shape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Shape_Op::getAvailableBackends() const {
+    return Registrar<Shape_Op>::getKeys();
+}
+
 //////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Shape(const std::int64_t start, const std::int64_t end, const std::string& name) {
diff --git a/src/operator/ShiftGELU.cpp b/src/operator/ShiftGELU.cpp
index 63480ffccaaf78b2dd951c75b3830a8dfede7d99..bd229e6cf58a430922d08cff5301aa16ef636d5e 100644
--- a/src/operator/ShiftGELU.cpp
+++ b/src/operator/ShiftGELU.cpp
@@ -42,6 +42,10 @@ void Aidge::ShiftGELU_Op::setBackend(const std::string& name, DeviceIdx_t device
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::ShiftGELU_Op::getAvailableBackends() const {
+    return Registrar<ShiftGELU_Op>::getKeys();
+}
+
 ///////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ShiftGELU(const std::string& name) {
diff --git a/src/operator/ShiftMax.cpp b/src/operator/ShiftMax.cpp
index 5b0dd7ace0984c2397ef3a7bb4ef7a5526f4f288..58d4bf46100ce116ad4a179e972cbef81bc5b5c1 100644
--- a/src/operator/ShiftMax.cpp
+++ b/src/operator/ShiftMax.cpp
@@ -46,6 +46,10 @@ void Aidge::ShiftMax_Op::setBackend(const std::string& name, DeviceIdx_t device)
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::ShiftMax_Op::getAvailableBackends() const {
+    return Registrar<ShiftMax_Op>::getKeys();
+}
+
 /////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ShiftMax(const std::string& name) {
diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp
index aa112378fde50c7f36c63b8c0a8d00ed0baab12b..d97f8c52341dee4e6e0840afa6e023d8a4e3fd52 100644
--- a/src/operator/Sigmoid.cpp
+++ b/src/operator/Sigmoid.cpp
@@ -42,6 +42,10 @@ void Aidge::Sigmoid_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t d
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Sigmoid_Op::getAvailableBackends() const {
+    return Registrar<Sigmoid_Op>::getKeys();
+}
+
 ///////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Sigmoid(const std::string& name) {
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index bd7a4750dcbb129b56c541b3e75c2ec6faa7d55a..3bdee8c13c1759261140d634940b0a4e81210084 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -212,6 +212,10 @@ void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Slice_Op::getAvailableBackends() const {
+    return Registrar<Slice_Op>::getKeys();
+}
+
 ////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Slice(const std::vector<std::int64_t>& starts,
diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp
index f425d6fffb8934f00b1c503c1d296b8318377cb0..ad894c5e56a674a452d0388f88a7e4ad268dd216 100644
--- a/src/operator/Softmax.cpp
+++ b/src/operator/Softmax.cpp
@@ -46,6 +46,10 @@ void Aidge::Softmax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t d
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Softmax_Op::getAvailableBackends() const {
+    return Registrar<Softmax_Op>::getKeys();
+}
+
 ////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Softmax(std::int32_t axis, const std::string& name) {
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index 9c56c6a2a28c6acb8c3943cd859fdbe78fd2cd1b..e3ed13588d8c2b5ddde91d37fc926d675f0666a3 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -37,15 +37,15 @@ void Aidge::Split_OpImpl::forward() {
     const std::size_t stride_post = std::accumulate(dims.crbegin(), dims.crbegin() + dims.size() -1 - axis, 1, std::multiplies<std::size_t>());
     for (auto i = 0; i < op.nbOutputs(); ++i)
     {
-        DimIdx_t chunkIdxOnAxis = std::accumulate(splits.cbegin(), splits.cbegin() + i, 0) * stride_post;
-        DimIdx_t offset = 0;
+        DimSize_t chunkIdxOnAxis = std::accumulate(splits.cbegin(), splits.cbegin() + i, 0) * stride_post;
+        DimSize_t offset = 0;
         for (std::size_t j = 0; j < stride_pre; ++j)
         {
             // Compute chunk position in input tensor
-            DimIdx_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
+            DimSize_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
             // Copy chunk in output
             op.getOutput(i)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(idx),
-                                             splits[i] * stride_post, offset);
+                                            splits[i] * stride_post, offset);
             offset += splits[i] * stride_post;
         }
 
@@ -167,6 +167,10 @@ void Aidge::Split_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
 
 }
 
+std::set<std::string> Aidge::Split_Op::getAvailableBackends() const {
+    return Registrar<Split_Op>::getKeys();
+}
+
 ////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Split(Aidge::DimSize_t nbOutput,
diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp
index 3af75a6ca19e301f6c14e1b5fd03d693c161dcc5..bd3286f098cd5c6985d7f33f88b723523ef94765 100644
--- a/src/operator/Sqrt.cpp
+++ b/src/operator/Sqrt.cpp
@@ -41,6 +41,10 @@ void Aidge::Sqrt_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Sqrt_Op::getAvailableBackends() const {
+    return Registrar<Sqrt_Op>::getKeys();
+}
+
 ////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Sqrt(const std::string& name) {
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
index df81ef3ec980b5cf8bd9f8bd39d093cee529cf75..a8b20d21ae1f6c7bfba1a9e52d039f292b6aa62e 100644
--- a/src/operator/Squeeze.cpp
+++ b/src/operator/Squeeze.cpp
@@ -152,6 +152,10 @@ void Squeeze_Op::setBackend(const std::string &name,
   mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Squeeze_Op::getAvailableBackends() const {
+  return Registrar<Squeeze_Op>::getKeys();
+}
+
 void Aidge::Squeeze_OpImpl::forward() {
   const Squeeze_Op &op_ = static_cast<const Squeeze_Op &>(mOp);
   // Check if input is provided
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index ee4fd5b0887c5d9fafa3acd5822334dba4070aa8..ca7348b3b415375c09ac1cfd69ac3d6f6e3488eb 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -72,6 +72,10 @@ void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Sub_Op::getAvailableBackends() const {
+    return Registrar<Sub_Op>::getKeys();
+}
+
 //////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Sub(const std::string& name) {
diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp
index 1f936b6c8c5f61d86e2832c4bee7b943fa8268a1..fe295ab71b67e8e62562066b1464ffba6e8ae404 100644
--- a/src/operator/Tanh.cpp
+++ b/src/operator/Tanh.cpp
@@ -41,6 +41,10 @@ void Aidge::Tanh_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Tanh_Op::getAvailableBackends() const {
+    return Registrar<Tanh_Op>::getKeys();
+}
+
 ////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Tanh(const std::string& name) {
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index bd1acee8a820ad2e3e54b7b0b21f979fc9ce1feb..0cb1717f1c96c393b8845db129eee1429966cd98 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -79,6 +79,10 @@ void Aidge::Transpose_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Transpose_Op::getAvailableBackends() const {
+    return Registrar<Transpose_Op>::getKeys();
+}
+
 //////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Transpose(const std::vector<Aidge::DimSize_t> &outputDimsOrder,
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
index 2b12f33585a7388bd2411a8ae84ef43915516024..53b8bd5442081e601a55853115f44067ae17fc2b 100644
--- a/src/operator/Unfold.cpp
+++ b/src/operator/Unfold.cpp
@@ -138,6 +138,11 @@ void Aidge::Unfold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx
     mOutputs[0]->setBackend(name, device);
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::Unfold_Op<DIM>::getAvailableBackends() const {
+    return Registrar<Unfold_Op<DIM>>::getKeys();
+}
+
 template class Aidge::Unfold_Op<2>;
 
 ///////////////////////////////////////////////////////////
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
index e88e0f8ca861f4f7765ae3ca71bf864c20b54461..43afd160e03395c65c4dcbe5504cb865da4ed8d8 100644
--- a/src/operator/Unsqueeze.cpp
+++ b/src/operator/Unsqueeze.cpp
@@ -116,6 +116,10 @@ void Unsqueeze_Op::setBackend(const std::string &name,
   mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Unsqueeze_Op::getAvailableBackends() const {
+  return Registrar<Unsqueeze_Op>::getKeys();
+}
+
 void Aidge::Unsqueeze_OpImpl::forward() {
   const Unsqueeze_Op &op_ = static_cast<const Unsqueeze_Op &>(mOp);
   // Check if input is provided
diff --git a/src/recipes/AdaptToBackend.cpp b/src/recipes/AdaptToBackend.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e625a52f6545c3b2b34f85745fd88087a1b9883b
--- /dev/null
+++ b/src/recipes/AdaptToBackend.cpp
@@ -0,0 +1,38 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Matching.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+void Aidge::adaptToBackend(std::shared_ptr<GraphView> graphView) {
+    const auto nodes = graphView->getNodes();
+    for (auto node : nodes) {
+        auto impl = node->getOperator()->getImpl();
+        AIDGE_ASSERT(impl, "Missing implementation for node {} (of type {})",
+            node->name(), node->type());
+        auto adaptedNode = impl->getBestAdaptation(impl->getRequiredSpec());
+
+        if (adaptedNode == nullptr) {
+            Log::notice("Unable to adapt node {} (of type {}) to backend {}",
+                node->name(), node->type(), impl->backend());
+        }
+        else if (!adaptedNode->getOperator()->isAtomic()) {
+            Log::info("Adapted node {} (of type {}) to backend {}",
+                node->name(), node->type(), impl->backend());
+            AIDGE_ASSERT(GraphView::replace({node}, {adaptedNode}), "Unable to replace adapted node!");
+        }
+    }
+}
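The new `adaptToBackend()` recipe walks the graph and, when an implementation reports a better-matching non-atomic adaptation (typically a meta-operator wrapping the original node), swaps it in via `GraphView::replace()`. A hedged usage sketch (it assumes every node already has an implementation, as the `AIDGE_ASSERT` above enforces; the backend name is illustrative):

``` cpp
// Illustrative usage of the adaptToBackend() recipe added above.
#include <memory>

#include "aidge/graph/GraphView.hpp"
#include "aidge/recipes/Recipes.hpp"

void adaptGraph(const std::shared_ptr<Aidge::GraphView>& graph) {
    graph->setBackend("cpu", 0);   // give every node an implementation first
    Aidge::adaptToBackend(graph);  // swap in backend-specific adaptations where available
}
```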
diff --git a/src/scheduler/ParallelScheduler.cpp b/src/scheduler/ParallelScheduler.cpp
index 4e515099006b9e0588eafc7e981c5f5e80bbe97d..1d70646b70091e2e3ff6f03b8ee82ae62aeb1e43 100644
--- a/src/scheduler/ParallelScheduler.cpp
+++ b/src/scheduler/ParallelScheduler.cpp
@@ -127,7 +127,12 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std::
         // in the next step
         for (size_t i = 0; i < staticSchedule.size(); ) {
             auto runnable = staticSchedule[i];
-            if (!pool.busy() && runnable->early <= latest) {
+            if (runnable->early > latest) {
+                // No more node can be run at this step (latest)
+                break;
+            }
+
+            if (!pool.busy()) {
                 // Check that potential preceding non-critical nodes are finished
                 bool ready = true;
                 for (auto elt : runnable->laterThan) {
@@ -168,9 +173,17 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std::
                 }
             }
             else {
-                // Thread pool is already full or no more node can be run at
-                // this step (latest)
-                break;
+                // Thread pool is already full
+                bool ready = true;
+                for (auto elt : mustFinish) {
+                    ready = ready && finished.at(elt);
+                }
+                if (!ready) {
+                    std::this_thread::yield();
+                }
+                else {
+                    break;
+                }
             }
         }
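The rework separates the two exit reasons the old condition `!pool.busy() && runnable->early <= latest` conflated: if the next runnable cannot start at this step, the loop breaks immediately; if the pool is merely busy, it now yields until the nodes that must finish at this step are done instead of breaking prematurely. A stripped-down sketch of that wait pattern (`mustFinish` and `finished` mirror the names above; the types are simplified and the real scheduler synchronizes the `finished` updates, which this sketch elides):

``` cpp
// Stripped-down sketch of the busy-pool wait introduced above.
#include <map>
#include <set>
#include <thread>

bool allFinished(const std::set<int>& mustFinish, const std::map<int, bool>& finished) {
    for (const int elt : mustFinish) {
        if (!finished.at(elt)) {
            return false;  // at least one required node is still running
        }
    }
    return true;
}

void waitForStep(const std::set<int>& mustFinish, const std::map<int, bool>& finished) {
    // Spin-yield until every node that must finish at the current step is done.
    while (!allFinished(mustFinish, finished)) {
        std::this_thread::yield();  // let worker threads make progress
    }
}
```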
 
diff --git a/src/scheduler/ProdConso.cpp b/src/scheduler/ProdConso.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a3bff53c3643a5da361dec5944f47a27f148a995
--- /dev/null
+++ b/src/scheduler/ProdConso.cpp
@@ -0,0 +1,117 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <algorithm>  // std::fill
+#include <string>
+
+#include "aidge/scheduler/ProdConso.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+Aidge::ProdConso::ProdConso(const Operator& op, bool inPlace):
+    mOp(op),
+    mInPlace(inPlace),
+    mNbConsumedData(mOp.nbInputs(), Elts_t::NoneElts()),
+    mNbProducedData(mOp.nbOutputs(), Elts_t::NoneElts())
+{
+    //ctor
+}
+
+Aidge::Elts_t Aidge::ProdConso::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+    if (mOp.getRawInput(inputIdx)) {
+        const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
+        if (!input->undefined()) {
+            // Known amount of data: requires the whole tensor by default
+            return Elts_t::DataElts(input->size());
+        }
+        else {
+            // Unknown amount of data: require a single token by default
+            return Elts_t::TokenElts(1);
+        }
+    }
+
+    // Input not connected, meaning it is an optional input: do not require anything!
+    return Elts_t::NoneElts();
+}
+
+Aidge::Elts_t Aidge::ProdConso::getNbRequiredProtected(IOIndex_t inputIdx) const {
+    if (mOp.getRawInput(inputIdx)) {
+        const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
+        if (!input->undefined()) {
+            // Known amount of data: protect the whole tensor by default
+            return Elts_t::DataElts((mInPlace) ? 0 : input->size());
+        }
+        else {
+            // Unknown amount of data: protect a single token by default
+            // (this does not really make sense for now, as getNbRequiredProtected()
+            // is supposed to give a precise amount of data to protect for
+            // memory management purpose...)
+            return Elts_t::TokenElts((mInPlace) ? 0 : 1);
+        }
+    }
+
+    // Input not connected, meaning it is an optional input: do not require anything!
+    return Elts_t::NoneElts();
+}
+
+Aidge::Elts_t Aidge::ProdConso::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                  const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
+    if (mOp.getRawOutput(outputIdx)) {
+        const auto output = std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx));
+        if (!output->undefined()) {
+            // Known amount of data: requires the whole tensor by default,
+            // regardless of available data on inputs
+            return Elts_t::DataElts(output->size());
+        }
+        else {
+            // Unknown amount of data: require a single token by default
+            // (this does not really make sense for now, as getRequiredMemory()
+            // is supposed to give a precise amount of data to allocate for
+            // memory management purpose...)
+            return Elts_t::TokenElts(1);
+        }
+    }
+
+    // Output not set, meaning it is an optional output: do not require anything!
+    return Elts_t::NoneElts();
+}
+
+Aidge::Elts_t Aidge::ProdConso::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
+    AIDGE_ASSERT(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(),
+        "input index ({}) is out of bounds ({}) for operator type {}",
+        inputIdx, mNbConsumedData.size(), mOp.type());
+    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
+}
+
+Aidge::Elts_t Aidge::ProdConso::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
+    AIDGE_ASSERT(static_cast<std::size_t>(outputIdx) < mNbProducedData.size(),
+        "output index ({}) is out of bounds ({}) for operator type {}",
+        outputIdx, mNbProducedData.size(), mOp.type());
+    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
+}
+
+void Aidge::ProdConso::updateConsummerProducer(){
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx) {
+        // each input is consumed by the minimum amount for a forward pass
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));
+    }
+
+    for (std::size_t outputIdx = 0; outputIdx < mNbProducedData.size(); ++outputIdx) {
+        mNbProducedData[outputIdx] += getRequiredMemory(outputIdx, {});
+    }
+}
+
+void Aidge::ProdConso::resetConsummerProducer(){
+    std::fill(mNbConsumedData.begin(), mNbConsumedData.end(), Elts_t::NoneElts());
+    std::fill(mNbProducedData.begin(), mNbProducedData.end(), Elts_t::NoneElts());
+}
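`ProdConso` now carries the default producer-consumer bookkeeping that previously lived in `OperatorImpl`, hence the `mImpl->prodConso()->...` delegation throughout this diff; operators with non-default scheduling such as `Memorize` and `Pop` subclass it. A hedged sketch of such a subclass (the constructor shape matches `ProdConso::ProdConso` above; the policy itself is made up):

``` cpp
// Illustrative subclass: consume inputs token-by-token instead of requiring
// whole tensors. The override point matches the virtual method defined above.
#include "aidge/scheduler/ProdConso.hpp"

class StreamingProdConso : public Aidge::ProdConso {
public:
    StreamingProdConso(const Aidge::Operator& op)
        : Aidge::ProdConso(op, /*inPlace=*/false) {}

    Aidge::Elts_t getNbRequiredData(const Aidge::IOIndex_t inputIdx) const override {
        // Require only one token per forward pass, regardless of tensor size.
        (void)inputIdx;
        return Aidge::Elts_t::TokenElts(1);
    }
};
```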
diff --git a/src/utils/DynamicAttributes.cpp b/src/utils/DynamicAttributes.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..909d3bb2f5fda977ac497a19e1a1088eb52cfc88
--- /dev/null
+++ b/src/utils/DynamicAttributes.cpp
@@ -0,0 +1,31 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/utils/DynamicAttributes.hpp"
+
+std::map<std::type_index, bool(*)(const future_std::any&, const future_std::any&)> Aidge::DynamicAttributes::mAnyCompare;
+
+bool future_std::operator<(const future_std::any& lhs, const future_std::any& rhs) {
+    if (lhs.type() == rhs.type()) {
+        return Aidge::DynamicAttributes::mAnyCompare.at(lhs.type())(lhs, rhs);
+    }
+#ifdef PYBIND
+    else if (lhs.type() == typeid(py::object)) {
+        return Aidge::DynamicAttributes::mAnyCompare.at(rhs.type())(lhs, rhs);
+    }
+    else if (rhs.type() == typeid(py::object)) {
+        return Aidge::DynamicAttributes::mAnyCompare.at(lhs.type())(lhs, rhs);
+    }
+#endif
+    else {
+        return (lhs.type().before(rhs.type()));
+    }
+}
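The `future_std::operator<` overload makes type-erased attribute values usable as ordered keys: same-type values go through a per-type comparator registered in `mAnyCompare`, and a `py::object` on either side defers to the comparator of the concrete C++ type when bindings are enabled. A small usage sketch (where exactly the comparators get registered is an assumption):

``` cpp
// Illustrative sketch: heterogeneous attribute values stored behind
// future_std::any become orderable through the operator< defined above.
#include <string>

#include "aidge/utils/DynamicAttributes.hpp"

void attrsOrderingDemo() {
    Aidge::DynamicAttributes attrs;
    // addAttr() presumably registers a comparator for each stored C++ type
    // in mAnyCompare; the exact registration point is an assumption here.
    attrs.addAttr("alpha", 0.5f);
    attrs.addAttr("label", std::string("x"));
    // Any ordered lookup over the stored values now goes through the
    // future_std::operator< defined in this file.
}
```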
diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp
index ae8816e78b6fc7b8f2288b6873642f0729e195b6..da32a8e0ec6a3c9f27da5c47f9e6166c1fc879bc 100644
--- a/src/utils/Log.cpp
+++ b/src/utils/Log.cpp
@@ -89,7 +89,7 @@ void Aidge::Log::log(Level level, const std::string& msg) {
             fmt::println("Context: {}", context);
         }
 
-        fmt::println(mFile.get(), msg);
+        fmt::println(mFile.get(), "{}", msg);
     }
 }
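The one-line Log fix matters because `fmt` treats its first string argument as a format specification: a logged message containing literal braces would previously be re-parsed as a format string and could throw. A minimal standalone illustration (the message is made up):

``` cpp
// Minimal illustration of the bug fixed above: user-supplied strings must be
// passed as arguments, never as the fmt format string itself.
#include <string>

#include <fmt/core.h>

void logDemo() {
    const std::string msg = "tensor dims {1, 3, 224, 224}";
    // fmt::print(fmt::runtime(msg));  // throws: "{1, ...}" parsed as a format spec
    fmt::print("{}\n", msg);           // safe: msg is treated as plain data
}
```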
 
diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt
index 9280d5fbdfd0a6a35724e5afd5caf672fefd8bf8..fd96b060630c162e93143e8f51019a0ce3e82cc9 100644
--- a/unit_tests/CMakeLists.txt
+++ b/unit_tests/CMakeLists.txt
@@ -55,7 +55,7 @@ target_link_options(tests${module_name} PUBLIC $<$<OR:$<CXX_COMPILER_ID:Clang>,$
 
 endif()
 
-target_link_libraries(tests${module_name} PUBLIC ${module_name})
+target_link_libraries(tests${module_name} PRIVATE ${module_name})
 
 target_link_libraries(tests${module_name} PRIVATE Catch2::Catch2WithMain)
 
diff --git a/unit_tests/operator/Test_BitShift_Op.cpp b/unit_tests/operator/Test_BitShift_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..39916e4e75779ecc63680b43ece8ccd2bdc667c9
--- /dev/null
+++ b/unit_tests/operator/Test_BitShift_Op.cpp
@@ -0,0 +1,134 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <cstdint>  // std::uint16_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/BitShift.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] BitShift_Op(forwardDims)", "[BitShift][forwardDims]") 
+{
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Shift Operator
+    std::shared_ptr<Node> myShift = BitShift(BitShift_Op::BitShiftDirection::right);
+    auto op = std::static_pointer_cast<OperatorTensor>(myShift->getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0, T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op->associateInput(1, T1);
+
+    SECTION("BitShiftOP Test dimensions [Scalar]") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+
+    SECTION("BitShiftOP Test dimensions [Same Size]") {
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+    }
+    SECTION("BitShiftOP Test dimensions [Broadcast]") {
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+        }
+    }
+    SECTION("BitShiftOP Test dimensions [Wrong Dimensions]") {
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->forwardDims());
+        }
+    }
+}
+} // namespace Aidge