diff --git a/aidge_core/__init__.py b/aidge_core/__init__.py
index a89e2f6adb0bf56df5182783c388196679d1ad42..3a8e4ff6b8ac4f69d3bc140de9de2edcb8f12c1b 100644
--- a/aidge_core/__init__.py
+++ b/aidge_core/__init__.py
@@ -7,6 +7,7 @@ http://www.eclipse.org/legal/epl-2.0.
 
 SPDX-License-Identifier: EPL-2.0
 """
-from .aidge_core import *
-from .export import ExportNode, generate_file, generate_str
+from .aidge_core import *  # import the .so generated by PyBind
+from .export_utils import ExportNode, generate_file, generate_str
+from .aidge_export_aidge import *
 from . import utils
diff --git a/aidge_core/aidge_export_aidge/__init__.py b/aidge_core/aidge_export_aidge/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5d6f96b3300c0e86147baac30d7cae8a3a0b798
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/__init__.py
@@ -0,0 +1,8 @@
+from pathlib import Path
+
+# Constants
+FILE = Path(__file__).resolve()
+ROOT_EXPORT = FILE.parents[0]
+
+from .operator_export import *
+from .export import export
diff --git a/aidge_core/aidge_export_aidge/export.py b/aidge_core/aidge_export_aidge/export.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0e859b71e77bb03b95acb6ca75dcf09a8af0722
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/export.py
@@ -0,0 +1,104 @@
+import aidge_core
+import shutil
+import os
+from pathlib import Path
+from .utils import supported_operators, OPERATORS_REGISTRY
+from . import ROOT_EXPORT
+
+
+from aidge_core import ExportNode, generate_file
+
+
+
+def export(export_folder: str,
+           graph_view: aidge_core.GraphView,
+           enable_python_binding: bool = True,
+           ):
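+    """Export a GraphView as a standalone Aidge C++ project.
+
+    :param export_folder: path of the folder where the export is generated
+    :param graph_view: graph to export
+    :param enable_python_binding: if True, also generate the pybind11 binding files
+    """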
+    export_folder_path = Path(export_folder)
+    export_name = export_folder_path.name
+
+    ### Create the export folder ###
+    os.makedirs(export_folder, exist_ok=True)
+
+    ### Copy static files ###
+    shutil.copytree(ROOT_EXPORT / "static/include",
+                    export_folder_path / "include", dirs_exist_ok=True)
+    shutil.copytree(ROOT_EXPORT / "static/cmake",
+                    export_folder_path / "cmake", dirs_exist_ok=True)
+    shutil.copyfile(ROOT_EXPORT / "static/CMakeLists.txt",
+                    export_folder_path / "CMakeLists.txt")
+    shutil.copyfile(ROOT_EXPORT / "static/version.txt",
+                    export_folder_path / "version.txt")
+    shutil.copyfile(ROOT_EXPORT / "static/README.md",
+                    export_folder_path / "README.md")
+    shutil.copyfile(ROOT_EXPORT / "static/main.cpp",
+                    export_folder_path / "main.cpp")
+    shutil.copyfile(ROOT_EXPORT / "static/export-config.cmake.in",
+                    export_folder_path / f"{export_name}-config.cmake.in")
+
+    # Create project_name file
+    with open(export_folder_path / "project_name.txt", "w") as f:
+        f.write(export_name)
+
+    # Add files related to the Python binding if it is enabled
+    if enable_python_binding:
+        os.makedirs(export_folder_path / "python_binding", exist_ok=True)
+        generate_file(
+            export_folder_path / "python_binding/pybind.cpp",
+            ROOT_EXPORT / "templates/pybind.jinja",
+            name=export_name,
+        )
+        # TODO: Add a main.py file ?
+
+    ### Generate an export for each node and the dnn file ###
+    list_configs = []  # List of headers to include in dnn.cpp to access attributes and parameters
+    list_actions = []  # List of strings used to construct the graph
+    set_operator = set()
+    # Queue of Aidge nodes to explore; guarantees a topological exploration of the graph
+    open_nodes = list(graph_view.get_input_nodes())
+    # List of Aidge nodes already explored
+    closed_nodes = []
+
+    while open_nodes:
+        node = open_nodes.pop(0)
+        if node in closed_nodes:
+            continue  # Node already converted, moving on ...
+        parents_not_converted = False
+        # Check all parents have been converted
+        for parent in node.get_parents():
+            if parent is not None and \
+                    parent not in closed_nodes:
+                # If a parent has not been converted yet, push the current node back
+                if not parents_not_converted:
+                    open_nodes.insert(0, node)
+                    parents_not_converted = True
+                # Push the unconverted parent onto the stack as the next node to convert
+                open_nodes.insert(0, parent)
+        if parents_not_converted:
+            continue
+        # The next nodes to process are the children of the current node
+        open_nodes += list(node.get_children())
+
+        if node.type() in supported_operators():
+            set_operator.add(node.type())
+            op = OPERATORS_REGISTRY[node.type()](node)
+
+            # TODO: list_configs and list_actions don't need to be passed as arguments
+            # Export the configuration
+            list_configs = op.export(export_folder_path, list_configs)
+
+            # Add forward kernel
+            list_actions = op.forward(list_actions)
+        else:
+            raise RuntimeError(f"Operator: {node.type()} is not supported")
+        closed_nodes.append(node)
+    # Generate full dnn.cpp
+    aidge_core.generate_file(
+        export_folder_path / "src/dnn.cpp",
+        ROOT_EXPORT / "templates/dnn.jinja",
+        headers=list_configs,
+        operators=set_operator,
+        actions=list_actions,
+    )
diff --git a/aidge_core/aidge_export_aidge/operator_export/__init__.py b/aidge_core/aidge_export_aidge/operator_export/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..37d674ac84f72d643ba1a628a86fbcde9780f4a4
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/__init__.py
@@ -0,0 +1,14 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+from pathlib import Path
+
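+# Automatically discover the operator export modules of this package so that
+# `from .operator_export import *` imports them and runs their @operator_register decorators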
+DIR_PATH = Path(__file__).parent
+modules = [Path(module).stem for module in DIR_PATH.glob("*.py")]
+__all__ = [f for f in modules if f != "__init__"]
diff --git a/aidge_core/aidge_export_aidge/operator_export/conv.py b/aidge_core/aidge_export_aidge/operator_export/conv.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb7092fb18982a3cc3f11a1ca47394ce2f77d0b6
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/conv.py
@@ -0,0 +1,31 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import ExportNode, generate_file, generate_str
+from pathlib import Path
+
+@operator_register("Conv")
+class Conv(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        include_path = f"attributes/{self.name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/attributes/conv.jinja",
+            name=self.name,
+            **self.attributes
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/conv.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/fc.py b/aidge_core/aidge_export_aidge/operator_export/fc.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcd528528707dc6eec917790b46e509c2984fa66
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/fc.py
@@ -0,0 +1,37 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import ExportNode, generate_file, generate_str
+from pathlib import Path
+
+@operator_register("FC")
+class FC(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        include_path = f"attributes/{self.name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/attributes/fc.jinja",
+            name=self.name,
+            InChannels=self.inputs_dims[1][1],
+            OutChannels=self.operator.out_channels(),
+            **self.attributes
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/fc.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/maxpooling.py b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c63e71b423b90f62536cafd25c61101e76e0562
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
@@ -0,0 +1,32 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import ExportNode, generate_file, generate_str
+from pathlib import Path
+
+@operator_register("MaxPooling")
+class MaxPooling(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        include_path = f"attributes/{self.name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/attributes/maxpooling.jinja",
+            name=self.name,
+            **self.attributes
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/maxpooling.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/producer.py b/aidge_core/aidge_export_aidge/operator_export/producer.py
new file mode 100644
index 0000000000000000000000000000000000000000..d082e9726b7ca33fbe6f4692bf7b55930b69cb9d
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/producer.py
@@ -0,0 +1,65 @@
+from aidge_core.aidge_export_aidge.utils import operator_register
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import dtype, ExportNode, generate_file, generate_str
+import numpy as np
+from pathlib import Path
+
+# Convert aidge datatype to C++ type
+datatype_converter = {
+    dtype.float64 : "double",
+    dtype.float32 : "float",
+    dtype.float16 : "half_float::half",
+    dtype.int8    : "int8_t",
+    dtype.int16   : "int16_t",
+    dtype.int32   : "int32_t",
+    dtype.int64   : "int64_t",
+    dtype.uint8   : "uint8_t",
+    dtype.uint16  : "uint16_t",
+    dtype.uint32  : "uint32_t",
+    dtype.uint64  : "uint64_t"
+}
+
+
+@operator_register("Producer")
+class Producer(ExportNode):
+    """
+    If the export operators are ever standardized,
+    this class should simply inherit from ProducerCPP.
+    """
+    def __init__(self, node):
+        super().__init__(node)
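+        # The exported tensor is named after the (single) child node consuming this Producer's output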
+        child, in_idx = self.node.output(0)[0]
+        self.tensor_name = f"{child.name()}_{in_idx}"
+        self.values = np.array(self.operator.get_output(0))
+
+    def export(self, export_folder:Path, list_configs:list):
+        assert len(self.node.output(0)) == 1
+
+        include_path = f"parameters/{self.tensor_name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        aidge_tensor = self.operator.get_output(0)
+        aidge_type = aidge_tensor.dtype()
+        if aidge_type in datatype_converter:
+            datatype = datatype_converter[aidge_type]
+        else:
+            raise RuntimeError(f"No conversion found for data type {aidge_type}.")
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/parameter.jinja",
+            dims = aidge_tensor.dims(),
+            data_t = datatype, # TODO : get data from producer
+            name = self.tensor_name,
+            values = str(aidge_tensor)
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/producer.jinja",
+            name=self.name,
+            tensor_name=self.tensor_name,
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/relu.py b/aidge_core/aidge_export_aidge/operator_export/relu.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0f4f6afdc35737a8967f51c1859bda0c9773f88
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/relu.py
@@ -0,0 +1,21 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core import ExportNode, generate_str
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from pathlib import Path
+
+@operator_register("ReLU")
+class ReLU(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/relu.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/sub.py b/aidge_core/aidge_export_aidge/operator_export/sub.py
new file mode 100644
index 0000000000000000000000000000000000000000..efcdd0924fbcf6944b0fb95a967e1a3e16ccc3c5
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/sub.py
@@ -0,0 +1,21 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core import ExportNode, generate_str
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from pathlib import Path
+
+@operator_register("Sub")
+class Sub(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/sub.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/static/CMakeLists.txt b/aidge_core/aidge_export_aidge/static/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c52e09e82699b1f2c0f8e10ed9f4ab83fbb0f10f
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/CMakeLists.txt
@@ -0,0 +1,148 @@
+cmake_minimum_required(VERSION 3.15)
+
+
+file(STRINGS "${CMAKE_SOURCE_DIR}/version.txt" version)
+file(STRINGS "${CMAKE_SOURCE_DIR}/project_name.txt" project)
+
+message(STATUS "Project name: ${project}")
+message(STATUS "Project version: ${version}")
+
+# Note: the project name is ${project} and the Python module name is also ${project}
+set(module_name _${project}) # target name
+
+project(${project})
+set(CMAKE_CXX_STANDARD 14)
+
+##############################################
+# Define options
+option(PYBIND "python binding" ON)
+option(WERROR "Warning as error" OFF)
+option(TEST "Enable tests" ON)
+option(COVERAGE "Enable coverage" OFF)
+option(ENABLE_ASAN "Enable ASan (AddressSanitizer) for runtime analysis of memory use (over/underflow, memory leak, ...)" OFF)
+
+##############################################
+# Import utils CMakeLists
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
+include(PybindModuleCreation)
+
+if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
+    Include(CodeCoverage)
+endif()
+
+##############################################
+# Find system dependencies
+find_package(aidge_core REQUIRED)
+# Example of adding a dependency on the CPU backend
+# find_package(aidge_backend_cpu REQUIRED)
+
+##############################################
+# Create target and set properties
+file(GLOB_RECURSE src_files "src/*.cpp")
+file(GLOB_RECURSE inc_files "include/*.hpp")
+
+add_library(${module_name} ${src_files} ${inc_files})
+target_link_libraries(${module_name}
+    PUBLIC
+        _aidge_core # _ is added because we link against the target, not the project
+        # _aidge_backend_cpu  # Example of adding a dependency on the CPU backend
+)
+
+# Set target properties
+set_property(TARGET ${module_name} PROPERTY POSITION_INDEPENDENT_CODE ON)
+
+if( ${ENABLE_ASAN} )
+    message("Building ${module_name} with ASAN.")
+    set(SANITIZE_FLAGS -fsanitize=address -fno-omit-frame-pointer)
+    target_link_libraries(${module_name}
+        PUBLIC
+            -fsanitize=address
+    )
+    target_compile_options(${module_name}
+        PRIVATE
+            ${SANITIZE_FLAGS}
+    )
+endif()
+
+target_include_directories(${module_name}
+    PUBLIC
+        $<INSTALL_INTERFACE:include>
+        $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+    PRIVATE
+        ${CMAKE_CURRENT_SOURCE_DIR}/src
+)
+
+# PYTHON BINDING
+if (PYBIND)
+    generate_python_binding(${project} ${module_name})
+
+    # Handles Python + pybind11 headers dependencies
+    target_link_libraries(${module_name}
+        PUBLIC
+            pybind11::pybind11
+        )
+endif()
+
+target_compile_features(${module_name} PRIVATE cxx_std_14)
+
+target_compile_options(${module_name} PRIVATE
+    $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
+    -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror>>)
+target_compile_options(${module_name} PRIVATE
+    $<$<CXX_COMPILER_ID:MSVC>:
+    /W4>)
+
+if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
+    append_coverage_compiler_flags()
+endif()
+
+##############################################
+# Installation instructions
+
+include(GNUInstallDirs)
+set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR}/cmake/${project})
+
+install(TARGETS ${module_name} EXPORT ${project}-targets
+  LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+  ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+  RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+)
+
+install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+
+# Export the targets to a script
+
+install(EXPORT ${project}-targets
+ FILE "${project}-targets.cmake"
+ DESTINATION ${INSTALL_CONFIGDIR}
+ COMPONENT ${module_name}
+)
+
+# Create a ConfigVersion.cmake file
+include(CMakePackageConfigHelpers)
+write_basic_package_version_file(
+    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config-version.cmake"
+    VERSION ${version}
+    COMPATIBILITY AnyNewerVersion
+)
+
+configure_package_config_file("${project}-config.cmake.in"
+    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config.cmake"
+    INSTALL_DESTINATION ${INSTALL_CONFIGDIR}
+)
+
+# Install the config, config-version and custom find modules
+install(FILES
+    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config.cmake"
+    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config-version.cmake"
+    DESTINATION ${INSTALL_CONFIGDIR}
+)
+
+##############################################
+## Exporting from the build tree
+export(EXPORT ${project}-targets
+    FILE "${CMAKE_CURRENT_BINARY_DIR}/${project}-targets.cmake")
+
+# Compile executable
+add_executable(main main.cpp)
+target_link_libraries(main PUBLIC _aidge_core ${module_name})
diff --git a/aidge_core/aidge_export_aidge/static/README.md b/aidge_core/aidge_export_aidge/static/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1ce48d5274cbc2f007bde7c7be01e41e617cb19a
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/README.md
@@ -0,0 +1,5 @@
+To compile:
+
+> mkdir build && cd build
+> cmake -DCMAKE_INSTALL_PREFIX:PATH=<path/to/aidge/install> ..
+> make all install
diff --git a/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake b/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..87e70fc38c9e4ec4ddb44cbe5d7fb2a31c2e94d6
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
@@ -0,0 +1,21 @@
+function(generate_python_binding name target_to_bind) 
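+    # Fetch pybind11 and build a Python module ${name} from python_binding/*.cpp, linked against ${target_to_bind}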
+    add_definitions(-DPYBIND)
+    Include(FetchContent)
+
+    FetchContent_Declare(
+    PyBind11
+    GIT_REPOSITORY https://github.com/pybind/pybind11.git
+    GIT_TAG        v2.10.4 # or a later release
+    )
+
+    # Use the new FindPython mode (recommended). Requires CMake 3.15+
+    find_package(Python COMPONENTS Interpreter Development)
+    FetchContent_MakeAvailable(PyBind11)
+
+    message(STATUS "Creating binding for module ${name}")
+    file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
+
+    pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO_EXTRAS required for pip install
+    target_include_directories(${name} PUBLIC "python_binding")
+    target_link_libraries(${name} PUBLIC ${target_to_bind})
+endfunction()
diff --git a/aidge_core/aidge_export_aidge/static/export-config.cmake.in b/aidge_core/aidge_export_aidge/static/export-config.cmake.in
new file mode 100644
index 0000000000000000000000000000000000000000..f3604be11c27d86caf1ad8a48b333b9bd8f30625
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/export-config.cmake.in
@@ -0,0 +1,3 @@
+include(${CMAKE_CURRENT_LIST_DIR}/aidge_backend_cpu-config-version.cmake)
+
+include(${CMAKE_CURRENT_LIST_DIR}/aidge_backend_cpu-targets.cmake)
diff --git a/aidge_core/aidge_export_aidge/static/include/dnn.hpp b/aidge_core/aidge_export_aidge/static/include/dnn.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3a4d5c02eceee1054c93b8ad635a71d3d04ac2fc
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/include/dnn.hpp
@@ -0,0 +1,17 @@
+#ifndef DNN_HPP
+#define DNN_HPP
+#include <aidge/graph/GraphView.hpp>
+/**
+ * @brief This file contains everything related to the construction of the
+ * neural network.
+ *
+ */
+
+/**
+ * @brief This function generates the exported Aidge::GraphView.
+ *
+ * @return std::shared_ptr<Aidge::GraphView>
+ */
+std::shared_ptr<Aidge::GraphView> generateModel();
+
+#endif /* DNN_HPP */
diff --git a/aidge_core/aidge_export_aidge/static/main.cpp b/aidge_core/aidge_export_aidge/static/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ab8bac1851b6d2dae4bf97bd3af10e19e0b71c1e
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/main.cpp
@@ -0,0 +1,16 @@
+#include <iostream>
+#include <aidge/backend/cpu.hpp>
+
+#include "include/dnn.hpp"
+
+int main()
+{
+
+    std::cout << "BEGIN" << std::endl;
+
+    std::shared_ptr<Aidge::GraphView> graph = generateModel();
+
+    std::cout << "END" << std::endl;
+
+    return 0;
+}
diff --git a/aidge_core/aidge_export_aidge/static/project_name.txt b/aidge_core/aidge_export_aidge/static/project_name.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d5637593fe045bb602bd181bf0f242f0943fb9fd
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/project_name.txt
@@ -0,0 +1 @@
+export
diff --git a/aidge_core/aidge_export_aidge/static/python_binding/pybind.cpp b/aidge_core/aidge_export_aidge/static/python_binding/pybind.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..072fc4cd6012996a4fcda1d18b8244209c69797a
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/python_binding/pybind.cpp
@@ -0,0 +1,14 @@
+#include <pybind11/pybind11.h>
+
+#include "dnn.hpp"
+
+namespace py = pybind11;
+
+
+void init_export(py::module& m){
+    m.def("generate_model", generateModel);
+}
+
+PYBIND11_MODULE(export, m) {
+    init_export(m);
+}
diff --git a/aidge_core/aidge_export_aidge/static/version.txt b/aidge_core/aidge_export_aidge/static/version.txt
new file mode 100644
index 0000000000000000000000000000000000000000..77d6f4ca23711533e724789a0a0045eab28c5ea6
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/version.txt
@@ -0,0 +1 @@
+0.0.0
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..48d07e8db8d5fb116148e9d41100fffa01fcf622
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
@@ -0,0 +1,17 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+#define _{{name|upper}}_IN_CHANNELS  {{InChannels}}
+#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}}
+
+{% for i in range(KernelDims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}}
+{%- endfor %}
+{% for i in range(StrideDims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}}
+{%- endfor %}
+{% for i in range(DilationDims|length) %}
+#define _{{name|upper}}_DILATION_{{i}} {{DilationDims[i]}}
+{%- endfor %}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..e292f9b611978877c47b15e91f926f30d27a1cc5
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
@@ -0,0 +1,7 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+#define _{{name|upper}}_IN_CHANNELS  {{InChannels}}
+#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..d258f580e6ff9c523a87b834fdccf2f3b14fb133
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
@@ -0,0 +1,13 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+{% for i in range(KernelDims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}}
+{%- endfor %}
+{% for i in range(StrideDims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}}
+{%- endfor %}
+
+#define _{{name|upper}}_CEIL_MODE {{CeilMode|int}}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/dnn.jinja b/aidge_core/aidge_export_aidge/templates/dnn.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..5da46b2d8a439a359dfb1c7ec8ebc18e8d516767
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/dnn.jinja
@@ -0,0 +1,36 @@
+/********************************************************************************
+ * This file has been generated by the Aidge export.
+ ********************************************************************************/
+
+/*** STD INCLUDES ***/
+#include <memory>  // std::shared_ptr
+
+/*** AIDGE INCLUDES ***/
+#include <aidge/graph/GraphView.hpp>  // Aidge::GraphView
+#include <aidge/graph/Node.hpp>       // Aidge::Node
+#include <aidge/graph/OpArgs.hpp>     // Aidge::Sequential
+
+/*** AIDGE OPERATORS ***/
+{%- for operator in operators %}
+#include <aidge/operator/{{operator}}.hpp>
+{%- endfor %}
+
+/*** OPERATOR ATTRIBUTES & PARAMETERS ***/
+{%- for header in headers %}
+#include "{{ header }}"
+{%- endfor %}
+
+/*** HEADER ***/
+#include "dnn.hpp"
+
+
+std::shared_ptr<Aidge::GraphView> generateModel() {
+    /*** BUILDING GRAPH ***/
+    std::shared_ptr<Aidge::GraphView> graph = std::make_shared<Aidge::GraphView>();
+
+    {%- for action in actions %}
+    {{ action }}
+    {%- endfor %}
+
+    return graph;
+}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8e841ea2a10c71b884736dcbd7cfd03b52c5ad4f
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
@@ -0,0 +1,7 @@
+{# NOTE: Trying a shorter notation like {%- for input in inputs if input[0] %}
+would mess up loop.index, as inputs set to None would not increment it! #}
+{%- for input in inputs %}
+{%- if input[0] %}
+{{input[0]}}->addChild({{name}}, {{input[1]}}, {{loop.index - 1}}); {# NOTE: loop.index begins at 1 #}
+{%- endif %}
+{%- endfor %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..a805f8065e87244bf0546ca42d294b86f144a26d
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
@@ -0,0 +1,26 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Conv(
+            _{{name|upper}}_IN_CHANNELS,
+            _{{name|upper}}_OUT_CHANNELS,
+            {
+            {%- for i in range(KernelDims|length) -%}
+                _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            "{{name}}",
+            {
+            {%- for i in range(StrideDims|length) -%}
+                _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            {
+            {%- for i in range(DilationDims|length) -%}
+                _{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            }
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..df6dbc83492174fc49348b8073deb47a5deca313
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja
@@ -0,0 +1,11 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::FC(
+            _{{name|upper}}_IN_CHANNELS,
+            _{{name|upper}}_OUT_CHANNELS,
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..c6587c128509712e1a8e903e7484476548e9347d
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
@@ -0,0 +1,20 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::MaxPooling(
+            {
+            {%- for i in range(KernelDims|length) -%}
+                _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            "{{name}}",
+            {
+            {%- for i in range(StrideDims|length) -%}
+                _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            _{{name|upper}}_CEIL_MODE
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/producer.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/producer.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8e0a465a044ebfcc249206f9f5886c7a38fc3252
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/producer.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Producer(
+            {{tensor_name}},
+            "{{name}}"
+        );
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/relu.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/relu.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8fd58a30bddd39647dd3b25b2982e67174220381
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/relu.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::ReLU(
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/sub.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/sub.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..d9417751e4117cb296ec874d946b93c2c64df614
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/sub.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Sub(
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/parameter.jinja b/aidge_core/aidge_export_aidge/templates/parameter.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..11a407cc89f72f24167871a594decc6d90ab489d
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/parameter.jinja
@@ -0,0 +1,11 @@
+#ifndef EXPORT_PARAMETERS_{{name|upper}}_H
+#define EXPORT_PARAMETERS_{{name|upper}}_H
+
+#include <aidge/data/Tensor.hpp>
+#include <memory>
+
+std::shared_ptr<Aidge::Tensor> {{name}} = std::make_shared<Aidge::Tensor>(Aidge::Array{{dims|length}}D<{{data_t}}, {{ dims|join(", ") }}> {
+{{ values }}
+});
+
+#endif /* EXPORT_PARAMETERS_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/pybind.jinja b/aidge_core/aidge_export_aidge/templates/pybind.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..9b48cc97f242813cc33b77bfc028e85c08b0cec7
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/pybind.jinja
@@ -0,0 +1,14 @@
+#include <pybind11/pybind11.h>
+
+#include "dnn.hpp"
+
+namespace py = pybind11;
+
+
+void init_{{name}}(py::module& m){
+    m.def("generate_model", generateModel);
+}
+
+PYBIND11_MODULE({{name}}, m) {
+    init_{{name}}(m);
+}
diff --git a/aidge_core/aidge_export_aidge/utils/__init__.py b/aidge_core/aidge_export_aidge/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecdf2aec2692a48e108d5f4ad05ed05803319525
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/utils/__init__.py
@@ -0,0 +1,11 @@
+from .operator_registry import *
+
+def parse_node_input(node_inputs: list) -> list:
+    """Parse node intputs in order to adapt the list for Jinja.
+
+    :param node_inputs: return of node.inputs()
+    :type node_inputs: list of tuple of aidge_core.Node, output idx.
+    :return: list of tuple of node name, output idx.
+    :rtype: list
+    """
+    return [None if parent_node is None else (parent_node.name(), outId) for parent_node, outId in node_inputs]
diff --git a/aidge_core/aidge_export_aidge/utils/operator_registry.py b/aidge_core/aidge_export_aidge/utils/operator_registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd6fbaaceeba9c2125b38354eca9cc116acd29b1
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/utils/operator_registry.py
@@ -0,0 +1,18 @@
+OPERATORS_REGISTRY = {}
+
+def operator_register(*args):
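+    """Decorator registering an ExportNode subclass under one or more operator type names.
+
+    Registered classes are stored in OPERATORS_REGISTRY and looked up by node.type()
+    during export, e.g. @operator_register("Conv").
+    """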
+
+    key_list = list(args)
+
+    def decorator(operator):
+        def wrapper(*args, **kwargs):
+            return operator(*args, **kwargs)
+
+        for key in key_list:
+            OPERATORS_REGISTRY[key] = operator
+
+        return wrapper
+    return decorator
+
+def supported_operators():
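+    """Return the list of operator type names currently registered for export."""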
+    return list(OPERATORS_REGISTRY.keys())
diff --git a/aidge_core/export/code_generation.py b/aidge_core/export/code_generation.py
deleted file mode 100644
index b18b5476f8e083bcbe3d4f6c4a57132ebe7b780f..0000000000000000000000000000000000000000
--- a/aidge_core/export/code_generation.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-from jinja2 import Environment, FileSystemLoader
-
-
-def generate_file(file_path: str, template_path: str, **kwargs) -> None:
-    """Generate a file at `file_path` using the jinja template located at `file_path`.
-
-    kwargs are used to fill the template.
-
-    :param file_path: path where to generate the file
-    :type file_path: str
-    :param template_path: Path to the template to use for code generation
-    :type template_path: str
-    """
-    # Get directory name of the file
-    dirname = os.path.dirname(file_path)
-
-    # If directory doesn't exist, create it
-    if not os.path.exists(dirname):
-        os.makedirs(dirname)
-
-    # Get directory name and name of the template
-    template_dir = os.path.dirname(template_path)
-    template_name = os.path.basename(template_path)
-
-    # Select template
-    template = Environment(loader=FileSystemLoader(
-        template_dir)).get_template(template_name)
-
-    # Generate file
-    content = template.render(kwargs)
-    with open(file_path, mode="w", encoding="utf-8") as message:
-        message.write(content)
-
-def generate_str(template_path:str, **kwargs) -> str:
-    """Generate a string using the jinja template located at `file_path`.
-    kwargs are used to fill the template.
-
-    :param template_path: Path to the template to use for code generation
-    :type template_path: str
-    :return: A string of the interpreted template
-    :rtype: str
-    """
-    dirname = os.path.dirname(template_path)
-    filename = os.path.basename(template_path)
-    template = Environment(loader=FileSystemLoader(dirname)).get_template(filename)
-    return template.render(kwargs)
diff --git a/aidge_core/export/__init__.py b/aidge_core/export_utils/__init__.py
similarity index 100%
rename from aidge_core/export/__init__.py
rename to aidge_core/export_utils/__init__.py
diff --git a/aidge_core/export_utils/code_generation.py b/aidge_core/export_utils/code_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..a02fc0966702cec7a2cbe33f8411bb71e3035e90
--- /dev/null
+++ b/aidge_core/export_utils/code_generation.py
@@ -0,0 +1,46 @@
+from pathlib import Path
+from jinja2 import Environment, FileSystemLoader
+from typing import Union
+
+
+def generate_file(file_path: Union[Path, str], template_path: Union[Path, str], **kwargs) -> None:
+    """Generate a file at `file_path` using the jinja template located at `file_path`.
+
+    kwargs are used to fill the template.
+
+    :param file_path: path where to generate the file
+    :type file_path: pathlib.Path or str
+    :param template_path: Path to the template to use for code generation
+    :type template_path: pathlib.Path or str
+    """
+    # Convert str -> Path for compatibility !
+    if isinstance(file_path, str):
+        file_path = Path(file_path)
+    if isinstance(template_path, str):
+        template_path = Path(template_path)
+    # Make dir
+    file_path.parent.mkdir(parents=True, exist_ok=True)
+
+    # Select template
+    template = Environment(loader=FileSystemLoader(
+        template_path.parent)).get_template(template_path.name)
+
+    # Generate file
+    with open(file_path, mode="w", encoding="utf-8") as file:
+        file.write(template.render(kwargs))
+
+
+def generate_str(template_path: Union[Path, str], **kwargs) -> str:
+    """Generate a string using the jinja template located at `file_path`.
+    kwargs are used to fill the template.
+
+    :param template_path: Path to the template to use for code generation
+    :type template_path: pathlib.Path or str
+    :return: A string of the interpreted template
+    :rtype: str
+    """
+    # Convert str -> Path for compatibility !
+    if isinstance(template_path, str):
+        template_path = Path(template_path)
+    return Environment(loader=FileSystemLoader(
+        template_path.parent)).get_template(template_path.name).render(kwargs)
diff --git a/aidge_core/export/node_export.py b/aidge_core/export_utils/node_export.py
similarity index 87%
rename from aidge_core/export/node_export.py
rename to aidge_core/export_utils/node_export.py
index b009aa46e6660d3af063b0314ca2a393388e2635..7aceaa0ccc1f07674241d6f35bbeff90330f2596 100644
--- a/aidge_core/export/node_export.py
+++ b/aidge_core/export_utils/node_export.py
@@ -20,10 +20,7 @@ class ExportNode(ABC):
         self.node = aidge_node
         self.operator = aidge_node.get_operator()
         self.name = self.node.name()
-        self.attributes = {} # Attributes are auto fetched from aidge operators
-        if isinstance(self.operator, Attributes):
-            for attr_name in self.operator.get_attrs_name():
-                self.attributes[attr_name] = self.operator.get_attr(attr_name)
+        self.attributes = self.operator.attr.dict() if self.operator.attr is not None else {} # Attributes are automatically fetched from the aidge operator
 
         # rename is_leaf ?
         self.is_last = len(self.node.get_children()) == 0
diff --git a/aidge_core/unit_tests/static/main.cpp b/aidge_core/unit_tests/static/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..06171e2a036a18b0dea3dca40de34c296d99222d
--- /dev/null
+++ b/aidge_core/unit_tests/static/main.cpp
@@ -0,0 +1,19 @@
+/*
+Example main.cpp used to test aidge export.
+This file is copied in the test export.
+*/
+#include <iostream>
+
+#include "include/dnn.hpp"
+
+int main()
+{
+
+    std::cout << "BEGIN" << std::endl;
+
+    std::shared_ptr<Aidge::GraphView> graph = generateModel();
+
+    std::cout << "END" << std::endl;
+
+    return 0;
+}
diff --git a/aidge_core/unit_tests/test_export.py b/aidge_core/unit_tests/test_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..3061d7940603b8eeca2c07368bc1bbd6756fa1f3
--- /dev/null
+++ b/aidge_core/unit_tests/test_export.py
@@ -0,0 +1,83 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+from functools import reduce
+import pathlib
+import os
+import sys
+import subprocess
+import shutil
+import numpy as np
+
+def initFiller(model):
+    # Initialize parameters (weights and biases)
+    for node in model.get_nodes():
+        if node.type() == "Producer":
+            prod_op = node.get_operator()
+            value = prod_op.get_output(0)
+            value.set_backend("cpu")
+            tuple_out = node.output(0)[0]
+            # Note: there is no Conv in the current test network; the Conv branches are kept as an example
+            if tuple_out[0].type() == "Conv" and tuple_out[1]==1:
+                # Conv weight
+                aidge_core.xavier_uniform_filler(value)
+            elif tuple_out[0].type() == "Conv" and tuple_out[1]==2:
+                # Conv bias
+                aidge_core.constant_filler(value, 0.01)
+            elif tuple_out[0].type() == "FC" and tuple_out[1]==1:
+                # FC weight
+                aidge_core.normal_filler(value)
+            elif tuple_out[0].type() == "FC" and tuple_out[1]==2:
+                # FC bias
+                aidge_core.constant_filler(value, 0.01)
+            else:
+                pass
+
+
+class test_export(unittest.TestCase):
+    """Test aidge export
+    """
+    def setUp(self):
+        self.EXPORT_PATH = pathlib.Path("myexport")
+    def tearDown(self):
+        pass
+
+    def test_generate_export(self):
+        # Create model
+
+        model = aidge_core.sequential([
+            aidge_core.FC(in_channels=32*32*3, out_channels=512, name="InputNode"),
+            aidge_core.ReLU(name="Relu0"),
+            aidge_core.FC(in_channels=512, out_channels=256, name="FC1"),
+            aidge_core.ReLU(name="Relu1"),
+            aidge_core.FC(in_channels=256, out_channels=128, name="FC2"),
+            aidge_core.ReLU(name="Relu2"),
+            aidge_core.FC(in_channels=128, out_channels=10, name="OutputNode"),
+        ])
+
+        initFiller(model)
+
+        # Export model
+        aidge_core.export(self.EXPORT_PATH, model)
+        self.assertTrue(self.EXPORT_PATH.is_dir(), "Export folder has not been generated")
+        os.makedirs(self.EXPORT_PATH / "build", exist_ok=True)
+
+        # Test compilation of export
+        install_path = os.path.join(sys.prefix, "lib", "libAidge") if "AIDGE_INSTALL" not in os.environ else os.environ["AIDGE_INSTALL"]
+
+        shutil.copyfile(pathlib.Path(__file__).parent / "static/main.cpp", self.EXPORT_PATH / "main.cpp")
+
+        subprocess.check_call(['cmake', str(self.EXPORT_PATH.absolute()), f'-DCMAKE_INSTALL_PREFIX:PATH={install_path}'], cwd=str(self.EXPORT_PATH / "build"))
+        subprocess.check_call(['make', 'all', 'install'], cwd=str(self.EXPORT_PATH / "build"))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py
index 6e0c1f9b9a0828e266ef3bf19ee75df3e275b282..26d60f2fbaf0f3903baf191cf0a2ad5550fb3275 100644
--- a/aidge_core/unit_tests/test_impl.py
+++ b/aidge_core/unit_tests/test_impl.py
@@ -41,6 +41,7 @@ class test_OperatorImpl(unittest.TestCase):
         generic_matmul_op = matmul.get_operator()
         generic_matmul_op.set_forward_dims(lambda x: x)
         generic_matmul_op.set_impl(testImpl(generic_matmul_op))
+        generic_matmul_op.set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
         generic_matmul_op.forward()
         self.assertEqual(GLOBAL_CPT, 1)
 
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index 164aee726255e0478b629ee853d9a1f619945f3a..5b25eb7975d439816dbf91cc95b462f217fd0227 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -30,42 +30,39 @@ class test_operator_binding(unittest.TestCase):
         self.assertNotEqual(gop.name(), "")
 
     def test_param_bool(self):
-        self.generic_operator.add_attr("bool", True)
-        self.assertEqual(self.generic_operator.has_attr("bool"), True)
-        self.assertEqual(self.generic_operator.get_attr("bool"), True)
-        self.assertEqual(self.generic_operator.get_attr_type("bool"), "bool")
-        self.assertEqual(self.generic_operator.get_attrs_name(), {"bool"})
-        self.generic_operator.del_attr("bool")
-        self.assertEqual(self.generic_operator.has_attr("bool"), False)
-        self.assertEqual(len(self.generic_operator.get_attrs_name()), 0)
+        self.generic_operator.attr.add_attr("bool", True)
+        self.assertEqual(self.generic_operator.attr.has_attr("bool"), True)
+        self.assertEqual(self.generic_operator.attr.get_attr("bool"), True)
+        self.generic_operator.attr.del_attr("bool")
+        self.assertEqual(self.generic_operator.attr.has_attr("bool"), False)
 
     def test_param_int(self):
-        self.generic_operator.add_attr("int", 1)
-        self.assertEqual(self.generic_operator.get_attr("int"), 1)
+        self.generic_operator.attr.add_attr("int", 1)
+        self.assertEqual(self.generic_operator.attr.get_attr("int"), 1)
 
     def test_param_float(self):
-        self.generic_operator.add_attr("float", 2.0)
-        self.assertEqual(self.generic_operator.get_attr("float"), 2.0)
+        self.generic_operator.attr.add_attr("float", 2.0)
+        self.assertEqual(self.generic_operator.attr.get_attr("float"), 2.0)
 
     def test_param_str(self):
-        self.generic_operator.add_attr("str", "value")
-        self.assertEqual(self.generic_operator.get_attr("str"), "value")
+        self.generic_operator.attr.add_attr("str", "value")
+        self.assertEqual(self.generic_operator.attr.get_attr("str"), "value")
 
     def test_param_l_int(self):
-        self.generic_operator.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
-        self.assertEqual(self.generic_operator.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.generic_operator.attr.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
 
     def test_param_l_bool(self):
-        self.generic_operator.add_attr("l_bool", [True, False, False, True])
-        self.assertEqual(self.generic_operator.get_attr("l_bool"), [True, False, False, True])
+        self.generic_operator.attr.add_attr("l_bool", [True, False, False, True])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_bool"), [True, False, False, True])
 
     def test_param_l_float(self):
-        self.generic_operator.add_attr("l_float", [2.0, 1.0])
-        self.assertEqual(self.generic_operator.get_attr("l_float"), [2.0, 1.0])
+        self.generic_operator.attr.add_attr("l_float", [2.0, 1.0])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_float"), [2.0, 1.0])
 
     def test_param_l_str(self):
-        self.generic_operator.add_attr("l_str", ["ok"])
-        self.assertEqual(self.generic_operator.get_attr("l_str"), ["ok"])
+        self.generic_operator.attr.add_attr("l_str", ["ok"])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_str"), ["ok"])
 
     def test_dynamicattribute_binding(self):
         # Check original C++ attributes are binded
@@ -76,20 +73,20 @@ class test_operator_binding(unittest.TestCase):
         self.assertEqual(attrs.get_attr("b"), "test")
         self.assertEqual(attrs.has_attr("c"), True)
         self.assertEqual(attrs.get_attr("c"), [True, False, True])
-        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c"})
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c"})
         self.assertEqual(attrs.has_attr("d"), False)
 
         # Add Python attributes
         attrs.add_attr("d", 18.56)
         self.assertEqual(attrs.get_attr("d"), 18.56)
         self.assertEqual(attrs.has_attr("d"), True)
-        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c", "d"})
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "d"})
         self.assertEqual(attrs.has_attr("e"), False)
 
         # Check that added Python attribute is accessible in C++
         # Return the value of an attribute named "d" of type float64 (double in C++)
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 18.56)
-        attrs.set_attr("d", 23.89)
+        attrs.d = 23.89
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
 
     def test_forward_dims(self):
@@ -129,18 +126,18 @@ class test_operator_binding(unittest.TestCase):
         myVar = 2
         myBool = True
         # Test dynamic attribute set
-        gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", myVar=myVar).get_operator()
-        gop.myBool = myBool
+        gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", my_var=myVar).get_operator()
+        gop.attr.my_bool = myBool
         # Test variable set by kwargs
-        self.assertEqual(gop.myVar, myVar)
+        self.assertEqual(gop.attr.my_var, myVar)
         # Test set attr
-        self.assertEqual(gop.myBool, myBool)
+        self.assertEqual(gop.attr.my_bool, myBool)
 
         # Test static attribute set !
         prod = aidge_core.Producer([1]).get_operator()
-        self.assertEqual(prod.Constant, False)
-        prod.Constant = True # By default Constant is False
-        self.assertEqual(prod.Constant, True)
+        self.assertEqual(prod.attr.constant, False)
+        prod.attr.constant = True # By default Constant is False
+        self.assertEqual(prod.attr.constant, True)
 
 
 
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index e4d2cb4faca3dda64cff6aea541c30787c23d0ad..7c3bc0f6f68506c02af1723b263455a9c72b1f3a 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -27,17 +27,15 @@ class test_attributes(unittest.TestCase):
         out_channels = 8
         k_dims = [2, 2]
         conv_op = aidge_core.Conv2D(in_channels , out_channels, k_dims).get_operator()
-        self.assertEqual(conv_op.get_attr("InChannels"), in_channels)
-        self.assertEqual(conv_op.get_attr("OutChannels"), out_channels)
-        self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
+        self.assertEqual(conv_op.in_channels(), in_channels)
+        self.assertEqual(conv_op.out_channels(), out_channels)
+        self.assertEqual(conv_op.attr.get_attr("kernel_dims"), k_dims)
 
     def test_fc(self):
         in_channels = 4
         out_channels = 8
-        nb_bias = True
-        fc_op = aidge_core.FC(in_channels, out_channels, nb_bias).get_operator()
-        self.assertEqual(fc_op.get_attr("OutChannels"), out_channels)
-        self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
+        fc_op = aidge_core.FC(in_channels, out_channels).get_operator()
+        self.assertEqual(fc_op.out_channels(), out_channels)
 
     def test_producer_1D(self):
         dims = [5]
@@ -67,7 +65,7 @@ class test_attributes(unittest.TestCase):
     def test_leaky_relu(self):
         negative_slope = 0.25
         leakyrelu_op = aidge_core.LeakyReLU(negative_slope).get_operator()
-        self.assertEqual(leakyrelu_op.get_attr("NegativeSlope"), negative_slope)
+        self.assertEqual(leakyrelu_op.attr.get_attr("negative_slope"), negative_slope)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/aidge_core/unit_tests/test_recipes.py b/aidge_core/unit_tests/test_recipes.py
index 240bcd9501aa1fd64985fa59c87f01dfdf9343aa..8a0a470221e118fd450be7a7bf1bf6ede2df6178 100644
--- a/aidge_core/unit_tests/test_recipes.py
+++ b/aidge_core/unit_tests/test_recipes.py
@@ -49,23 +49,19 @@ class test_recipes(unittest.TestCase):
         add0 = aidge_core.Add(2, name="Add0")
         matmul1 = aidge_core.MatMul(name="MatMul1")
         add1 = aidge_core.Add(2, name="Add1")
-
-        graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
-
         w0 = aidge_core.Producer([1, 1], name="W0")
-        w0.add_child(matmul0, 0, 1)
-        graph_view.add(w0)
-
+        w0.add_child(matmul0, 0, 0)
         b0 = aidge_core.Producer([1], name="B0")
         b0.add_child(add0, 0, 1)
-        graph_view.add(b0)
-
         w1 = aidge_core.Producer([1, 1], name="W1")
-        w1.add_child(matmul1, 0, 1)
-        graph_view.add(w1)
-
+        w1.add_child(matmul1, 0, 0)
         b1 = aidge_core.Producer([1], name="B1")
         b1.add_child(add1, 0, 1)
+
+        graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
+        graph_view.add(w0)
+        graph_view.add(b0)
+        graph_view.add(w1)
         graph_view.add(b1)
 
         old_nodes = graph_view.get_nodes()
diff --git a/aidge_core/unit_tests/test_tensor.py b/aidge_core/unit_tests/test_tensor.py
index d479c98b20534daa804f6019b63d528883c2b568..6348ba8dd1a635ce0299760b6fd31dcef58716cf 100644
--- a/aidge_core/unit_tests/test_tensor.py
+++ b/aidge_core/unit_tests/test_tensor.py
@@ -42,7 +42,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Int32)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int32)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n)
         for i,j in zip(t.dims(), np_array.shape):
@@ -62,7 +62,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Int64)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int64)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n)
         for i,j in zip(t.dims(), np_array.shape):
@@ -73,7 +73,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Float32)
+        self.assertEqual(t.dtype(), aidge_core.dtype.float32)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference
         for i,j in zip(t.dims(), np_array.shape):
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 931b1b26a04e8886c211d77f8b0147c2140d350a..651a5de69596ee867a97b06ba683f49b05a41303 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -59,9 +59,12 @@
 #include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Reshape.hpp"
+#include "aidge/operator/Resize.hpp"
+#include "aidge/operator/Shape.hpp"
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/operator/Slice.hpp"
 #include "aidge/operator/Softmax.hpp"
+#include "aidge/operator/Split.hpp"
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/operator/Sub.hpp"
 #include "aidge/operator/Transpose.hpp"
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index f3fa4ef5164a2eed7caaa7baa7f83e7ed00403b8..57c6c385d5fdcc9f2439983bd04cc8ece0d8d8f5 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -182,6 +182,17 @@ public:
     */
     inline std::size_t size() const noexcept { return mNbElts; }
 
+    /**
+     * @brief Return the current capacity of the tensor, i.e. the actual memory
+     * currently being allocated. It can be different from the size:
+     * - Capacity can be 0 if the tensor memory was not yet initialized (because
+     *   of lazy initialization, memory is allocated only when it needs to be
+     *   accessed the first time).
+     * - Capacity can be > size if the tensor was downsized but memory was not
+     *   reallocated.
+    */
+    virtual std::size_t capacity() const noexcept = 0;
+
     /**
      * @brief Return the size (in bytes) of one element (scalar).
     */
@@ -190,9 +201,7 @@ public:
     /**
      * @brief Set every element of the implementation to zero.
      */
-    virtual void zeros() {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Function not implented");
-    }
+    virtual void zeros() = 0;
 
     const std::string backend() const { return mBackend; }
 
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 7cd8c67262221fbf9c1b2415ebf98db56274cce5..ff5a4fc4b8fe728efd517a74d3a9613a97e8809b 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -43,6 +43,8 @@ public:
         return std::make_shared<TensorImpl_cpu<T>>(device, dims);
     }
 
+    inline std::size_t capacity() const noexcept override final { return mData.size(); }
+
     inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
 
     void zeros() override final;
@@ -51,16 +53,16 @@ public:
         const T* srcT = static_cast<const T *>(src);
         T* dstT = static_cast<T *>(rawPtr(offset));
 
-        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
-        AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "TensorImpl_cpu<{}>::copy(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
+        AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "TensorImpl_cpu<{}>::copy(): overlapping copy is not supported", typeid(T).name());
         std::copy(srcT, srcT + length, dstT);
     }
 
     void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final;
 
     void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
-        AIDGE_ASSERT(device.first == Backend, "backend must match");
-        AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
+        AIDGE_ASSERT(device.first == Backend, "TensorImpl_cpu<{}>::copyFromDevice(): backend must match", typeid(T).name());
+        AIDGE_ASSERT(device.second == 0, "TensorImpl_cpu<{}>::copyFromDevice(): device ({}) cannot be != 0 for CPU backend", typeid(T).name(), device.second);
         copy(src, length, offset);
     }
 
@@ -70,7 +72,7 @@ public:
 
     void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
         const T* src = static_cast<const T*>(rawPtr(offset));
-        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "TensorImpl_cpu<{}>::copyToHost(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
         std::copy(src, src + length, static_cast<T *>(dst));
     }
 
@@ -80,7 +82,7 @@ public:
     };
 
     const void *rawPtr(NbElts_t offset = 0) const override final {
-        AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const rawPtr");
+        AIDGE_ASSERT(mData.size() >= mNbElts, "TensorImpl_cpu<{}>::rawPtr(): accessing uninitialized const rawPtr", typeid(T).name());
         return (mData.data() + offset);
     };
 
@@ -90,12 +92,12 @@ public:
     };
 
     const void *hostPtr(NbElts_t offset = 0) const override final {
-        AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const hostPtr");
+        AIDGE_ASSERT(mData.size() >= mNbElts, "TensorImpl_cpu<{}>::hostPtr(): accessing uninitialized const hostPtr", typeid(T).name());
         return (mData.data() + offset);
     };
 
     void setRawPtr(void *ptr, NbElts_t length) override final {
-        AIDGE_ASSERT(length >= mNbElts, "trying to set raw pointer of insufficient capacity");
+        AIDGE_ASSERT(length >= mNbElts, "TensorImpl_cpu<{}>::setRawPtr(): trying to set raw pointer (length: {}) of insufficient capacity (required: {})", typeid(T).name(), length, mNbElts);
         mData = future_std::span<T>(static_cast<T *>(ptr), length);
         mDataOwner.reset();
     };
@@ -106,7 +108,7 @@ private:
     void lazyInit() {
         if (mData.size() < mNbElts) {
             // Need more data, a re-allocation will occur
-            AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
+            AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "TensorImpl_cpu<{}>: trying to enlarge non-owned data", typeid(T).name());
             mDataOwner.reset(new T[mNbElts]);
             mData = future_std::span<T>(mDataOwner.get(), mNbElts);
         }
@@ -125,9 +127,9 @@ static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
 static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
         {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
-        {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create);
+        {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<int64_t>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
-        {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
+        {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int32_t>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
         {"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index a6ff03d36b662f4420424f930401844de25036d2..eaadc7a7ca5fa85672619fb2d3b5b17590fd3778 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -12,6 +12,12 @@
 #ifndef AIDGE_DATA_H_
 #define AIDGE_DATA_H_
 
+#include <cstdint>
+#include <fmt/format.h>
+#include <string>
+#include <tuple>
+#include <array>
+
 #include "aidge/data/half.hpp"
 #include "aidge/utils/Attributes.hpp"
 
@@ -45,6 +51,67 @@ enum class DataType {
     UInt64
 };
 
+enum class DataFormat {
+    Default,
+    NCHW,
+    NHWC,
+    CHWN,
+    NCDHW,
+    NDHWC,
+    CDHWN
+};
+
+using DataFormatTranspose = std::array<size_t, 5>;
+// Permutation arrays to obtain each DataFormat from the default format (same order as the DataFormat enum)
+constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
+    // Important: in this array only, dimension index must start at 1, not 0!
+    // (0 is the default value)
+    {},
+    {1, 2, 3, 4},
+    {1, 3, 4, 2},
+    {2, 3, 4, 1},
+    {1, 2, 3, 4, 5},
+    {1, 3, 4, 5, 2},
+    {2, 3, 4, 5, 1}
+}};
+
+/**
+ * Get the DataFormatTranspose array to transpose data from src to dst DataFormat.
+ * @param src Source DataFormat
+ * @param dst Destination DataFormat
+ * @return DataFormatTranspose Permutation array to achieve a transposition
+ *         from src to dst DataFormat.
+*/
+constexpr inline DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
+    // Permutation array from default format to src format
+    const auto srcDefToFormat = DataFormatTransposeDict[static_cast<int>(src)];
+    // Permutation array from default format to dst format
+    const auto dstDefToFormat = DataFormatTransposeDict[static_cast<int>(dst)];
+    // Compute permutation array from src format to default format:
+    DataFormatTranspose srcFormatToDef{};
+    for (size_t i = 0; i < srcDefToFormat.size(); ++i) {
+        if (srcDefToFormat[i] > 0) {
+            srcFormatToDef[srcDefToFormat[i] - 1] = i;
+        }
+        else {
+            srcFormatToDef[i] = i;
+        }
+    }
+
+    // Compute permutation array from src format to dst format:
+    DataFormatTranspose srcToDst{};
+    for (size_t i = 0; i < dstDefToFormat.size(); ++i) {
+        if (dstDefToFormat[srcFormatToDef[i]] > 0) {
+            srcToDst[i] = dstDefToFormat[srcFormatToDef[i]] - 1;
+        }
+        else {
+            srcToDst[i] = i;
+        }
+    }
+
+    return srcToDst;
+}
+
 class Data {
 public:
     Data(const std::string& type): mType(type) {};
@@ -64,14 +131,14 @@ template <typename T> struct NativeType { static const Aidge::DataType type; };
 template <> const Aidge::DataType NativeType<double>::type = Aidge::DataType::Float64;
 template <> const Aidge::DataType NativeType<float>::type = Aidge::DataType::Float32;
 template <> const Aidge::DataType NativeType<half_float::half>::type = Aidge::DataType::Float16;
-template <> const Aidge::DataType NativeType<int8_t>::type = Aidge::DataType::Int8;
-template <> const Aidge::DataType NativeType<int16_t>::type = Aidge::DataType::Int16;
-template <> const Aidge::DataType NativeType<int32_t>::type = Aidge::DataType::Int32;
-template <> const Aidge::DataType NativeType<int64_t>::type = Aidge::DataType::Int64;
-template <> const Aidge::DataType NativeType<uint8_t>::type = Aidge::DataType::UInt8;
-template <> const Aidge::DataType NativeType<uint16_t>::type = Aidge::DataType::UInt16;
-template <> const Aidge::DataType NativeType<uint32_t>::type = Aidge::DataType::UInt32;
-template <> const Aidge::DataType NativeType<uint64_t>::type = Aidge::DataType::UInt64;
+template <> const Aidge::DataType NativeType<std::int8_t>::type = Aidge::DataType::Int8;
+template <> const Aidge::DataType NativeType<std::int16_t>::type = Aidge::DataType::Int16;
+template <> const Aidge::DataType NativeType<std::int32_t>::type = Aidge::DataType::Int32;
+template <> const Aidge::DataType NativeType<std::int64_t>::type = Aidge::DataType::Int64;
+template <> const Aidge::DataType NativeType<std::uint8_t>::type = Aidge::DataType::UInt8;
+template <> const Aidge::DataType NativeType<std::uint16_t>::type = Aidge::DataType::UInt16;
+template <> const Aidge::DataType NativeType<std::uint32_t>::type = Aidge::DataType::UInt32;
+template <> const Aidge::DataType NativeType<std::uint64_t>::type = Aidge::DataType::UInt64;
 
 template <>
 const char* const EnumStrings<Aidge::DataType>::data[]
@@ -79,10 +146,33 @@ const char* const EnumStrings<Aidge::DataType>::data[]
        "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16",
        "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6",
        "UInt7", "UInt8", "UInt16", "UInt32", "UInt64"};
+
+template <>
+const char* const EnumStrings<Aidge::DataFormat>::data[]
+    = {"Default", "NCHW", "NHWC", "CHWN", "NCDHW", "NDHWC", "CDHWN"};
+
+template <Aidge::DataType D> struct cpptype {
+    using type = void; // Placeholder
+};
+template <> struct cpptype<Aidge::DataType::Float16> { using type = half_float::half; };
+template <> struct cpptype<Aidge::DataType::Float32> { using type = float; };
+template <> struct cpptype<Aidge::DataType::Float64> { using type = double; };
+template <> struct cpptype<Aidge::DataType::Int8> { using type = std::int8_t; };
+template <> struct cpptype<Aidge::DataType::Int16> { using type = std::int16_t; };
+template <> struct cpptype<Aidge::DataType::Int32> { using type = std::int32_t; };
+template <> struct cpptype<Aidge::DataType::Int64> { using type = std::int64_t; };
+template <> struct cpptype<Aidge::DataType::UInt8> { using type = std::uint8_t; };
+template <> struct cpptype<Aidge::DataType::UInt16> { using type = std::uint16_t; };
+template <> struct cpptype<Aidge::DataType::UInt32> { using type = std::uint32_t; };
+template <> struct cpptype<Aidge::DataType::UInt64> { using type = std::uint64_t; };
+
+template <Aidge::DataType D> using cpptype_t = typename cpptype<D>::type;
 }
 
+
 namespace Aidge {
 inline auto format_as(DataType dt) { return EnumStrings<Aidge::DataType>::data[static_cast<int>(dt)]; }
+inline auto format_as(DataFormat df) { return EnumStrings<Aidge::DataFormat>::data[static_cast<int>(df)]; }
 }
 
 #endif /* AIDGE_DATA_H_ */
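
A minimal sketch (not part of the patch) of how the new getDataFormatTranspose() helper can be used; the printed values follow directly from the permutation dictionary above.

#include <cstddef>
#include <iostream>
#include "aidge/data/Data.hpp"

int main() {
    // Each entry maps a destination axis to a source axis. For NCHW -> NHWC the
    // first four entries are {0, 2, 3, 1}: axes are reordered as (N, H, W, C).
    // The unused fifth slot of the 5-element array stays at its own index.
    const auto perm = Aidge::getDataFormatTranspose(Aidge::DataFormat::NCHW,
                                                    Aidge::DataFormat::NHWC);
    for (std::size_t axis : perm) {
        std::cout << axis << ' ';  // prints: 0 2 3 1 4
    }
    std::cout << '\n';
    return 0;
}
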
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 3dbf54a5fa58be40b08f58d760f3991586203825..80f8408a01be5a9b1f485251af0b13b8069404c5 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -24,10 +24,7 @@
 
 #include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
-#include "aidge/operator/Add.hpp"
-#include "aidge/operator/Div.hpp"
-#include "aidge/operator/Mul.hpp"
-#include "aidge/operator/Sub.hpp"
+
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ArrayHelpers.hpp"
@@ -42,6 +39,7 @@ class Tensor : public Data,
                public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
    private:
     DataType mDataType = DataType::Float32; /** enum to specify data type. */
+    DataFormat mDataFormat = DataFormat::Default; /** enum to specify data format. */
     std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
     std::vector<DimSize_t> mStrides; /** Stride dimensions of the tensor. */
     std::shared_ptr<TensorImpl> mImpl = nullptr; /** Pointer to the actual data implementation. */
@@ -61,9 +59,10 @@ class Tensor : public Data,
      * @brief Construct a new empty Tensor object.
      * It has the features of an undefined scalar.
      */
-    Tensor(DataType dtype = DataType::Float32)
+    Tensor(DataType dtype = DataType::Float32, DataFormat dformat = DataFormat::Default)
         : Data(Type),
           mDataType(dtype),
+          mDataFormat(dformat),
           mDims(std::vector<DimSize_t>({})),
           mStrides({1}),
           mSize(1)
@@ -83,6 +82,7 @@ class Tensor : public Data,
     Tensor(T val)
         : Data(Type),
           mDataType(NativeType<VT>::type),
+          mDataFormat(DataFormat::Default),
           mDims({}),
           mStrides({1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
@@ -112,6 +112,7 @@ class Tensor : public Data,
     constexpr Tensor(Array1D<T, SIZE_0> &&arr)
         : Data(Type),
           mDataType(NativeType<T>::type),
+          mDataFormat(DataFormat::Default),
           mDims({SIZE_0}),
           mStrides({1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
@@ -130,6 +131,7 @@ class Tensor : public Data,
     constexpr Tensor(Array2D<T, SIZE_0, SIZE_1> &&arr)
         : Data(Type),
           mDataType(NativeType<T>::type),
+          mDataFormat(DataFormat::Default),
           mDims({SIZE_0, SIZE_1}),
           mStrides({SIZE_1, 1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1})),
@@ -148,6 +150,7 @@ class Tensor : public Data,
     constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr)
         : Data(Type),
           mDataType(NativeType<T>::type),
+          mDataFormat(DataFormat::Default),
           mDims({SIZE_0, SIZE_1, SIZE_2}),
           mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2})),
@@ -167,6 +170,7 @@ class Tensor : public Data,
     constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr)
         : Data(Type),
           mDataType(NativeType<T>::type),
+          mDataFormat(DataFormat::Default),
           mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
           mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
@@ -244,19 +248,7 @@ class Tensor : public Data,
      * @param other
      * @return Tensor
      */
-    Tensor operator+(const Tensor& other) const {
-        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
-        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
-        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend");
-        auto add_ = Add_Op(2);
-        add_.associateInput(0, std::make_shared<Tensor>(*this));
-        add_.associateInput(1, std::make_shared<Tensor>(other));
-        add_.setDataType(dataType());
-        add_.setBackend(mImpl->backend());
-        add_.forward();
-        // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-        return add_.getOutput(0)->clone();
-    }
+    Tensor operator+(const Tensor& other) const;
 
     /**
      * @brief Element-wise substraction operation for two ``Tensor``s.
@@ -267,19 +259,7 @@ class Tensor : public Data,
      * @param other
      * @return Tensor
      */
-    Tensor operator-(const Tensor& other) const {
-        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
-        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
-        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend");
-        auto sub_ = Sub_Op();
-        sub_.associateInput(0, std::make_shared<Tensor>(*this));
-        sub_.associateInput(1, std::make_shared<Tensor>(other));
-        sub_.setDataType(dataType());
-        sub_.setBackend(mImpl->backend());
-        sub_.forward();
-        // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-        return sub_.getOutput(0)->clone();
-    }
+    Tensor operator-(const Tensor& other) const;
 
     /**
      * @brief Element-wise multiplication operation for two ``Tensor``s.
@@ -290,19 +270,7 @@ class Tensor : public Data,
      * @param other
      * @return Tensor
      */
-    Tensor operator*(const Tensor& other) const {
-        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
-        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
-        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend");
-        auto mul_ = Mul_Op();
-        mul_.associateInput(0, std::make_shared<Tensor>(*this));
-        mul_.associateInput(1, std::make_shared<Tensor>(other));
-        mul_.setDataType(dataType());
-        mul_.setBackend(mImpl->backend());
-        mul_.forward();
-        // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-        return mul_.getOutput(0)->clone();
-    }
+    Tensor operator*(const Tensor& other) const;
 
     /**
      * @brief Element-wise division operation for two ``Tensor``s.
@@ -313,19 +281,13 @@ class Tensor : public Data,
      * @param other
      * @return Tensor
      */
-    Tensor operator/(const Tensor& other) const {
-        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
-        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
-        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend");
-        auto div_ = Div_Op();
-        div_.associateInput(0, std::make_shared<Tensor>(*this));
-        div_.associateInput(1, std::make_shared<Tensor>(other));
-        div_.setDataType(dataType());
-        div_.setBackend(mImpl->backend());
-        div_.forward();
-        // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-        return div_.getOutput(0)->clone();
-    }
+    Tensor operator/(const Tensor& other) const;
+
+    /**
+     * @brief Element-wise sqrt operation for Tensor.
+     * @return Tensor
+     */
+    Tensor sqrt() const;
 
     ~Tensor() noexcept;
 
@@ -390,6 +352,12 @@ public:
      */
     constexpr DataType dataType() const noexcept { return mDataType; }
 
+    /**
+     * @brief Get the data format enum.
+     * @return constexpr DataFormat
+     */
+    constexpr DataFormat dataFormat() const noexcept { return mDataFormat; }
+
     /**
      * @brief Set the DataType of the Tensor and converts data
      * if the Tensor has already been initialized and copyCast is true.
@@ -408,6 +376,23 @@ public:
         mDataType = dt;
     }
 
+    /**
+     * @brief Set the DataFormat of the Tensor and transpose data, only
+     * if the Tensor has already been initialized and copyTrans is true.
+     * In this case, a transposition occurs only if both previous format and
+     * new format are different from DataFormat::Default.
+     * @param df New DataFormat
+     * @param copyTrans If true (default), when both previous format and new
+     *                  format are different from DataFormat::Default, previous
+     *                  data is copy-transposed.
+     */
+    void setDataFormat(const DataFormat df, bool copyTrans = true) {
+        if (mImpl && copyTrans && (dataFormat() != df) && df != DataFormat::Default && dataFormat() != DataFormat::Default) {
+            copyTranspose(*this, getDataFormatTranspose(dataFormat(), df));
+        }
+        mDataFormat = df;
+    }
+
     /**
      * @brief Get the Impl object
      * @return constexpr const std::shared_ptr<TensorImpl>&
@@ -474,6 +459,18 @@ public:
      */
     constexpr std::size_t size() const noexcept { return mSize; }
 
+    /**
+     * @brief Return the current capacity of the tensor, i.e. the actual memory
+     * currently being allocated. It can be different from the size:
+     * - Capacity can be 0 if the tensor memory was not yet initialized (because
+     *   of lazy initialization, memory is allocated only when it needs to be
+     *   accessed the first time).
+     * - Capacity can be > size if the tensor was downsized but memory was not
+     *   reallocated.
+    */
+    inline std::size_t capacity() const noexcept { return mImpl->capacity(); }
+
+
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
      * If the overall size is not changed (meaning we actually only performed a
@@ -553,31 +550,30 @@ public:
 
     inline void print() const { fmt::print("{}\n", toString()); }
 
-    std::shared_ptr<Tensor> grad() {
-        return mGrad;
-    }
-    void setGrad(std::shared_ptr<Tensor> newGrad) {
-        mGrad = newGrad;
-    }
-
     /**
-     * @brief Associate the gradient with a Tensor instance and set its implementation
-     * if none was previously set.
+     * @brief Get the gradient Tensor. If it is not initialized yet, a Tensor instance
+     * is created and its implementation is set if none was previously set.
      * @note Dimensions for the Tensor instance are copied from the original current Tensor.
      * @note If a Tensor instance was already associated, only the implementation is created
      * with values set to 0.
      * @note If Tensor instance and implementation already existed for the gradient
      * nothing is done.
      */
-    void initGrad() {
+    std::shared_ptr<Tensor> grad() {
         if (!mGrad) {
             mGrad = std::make_shared<Tensor>(mDims);
         }
         if (!mGrad->hasImpl()) {
             mGrad->setDataType(dataType());
+            mGrad->setDataFormat(dataFormat());
             mGrad->setBackend(hasImpl() ? mImpl->backend() : "cpu");
             mGrad->zeros();
         }
+        return mGrad;
+    }
+
+    void setGrad(std::shared_ptr<Tensor> newGrad) {
+        mGrad = newGrad;
     }
 
     /**
@@ -686,6 +682,13 @@ public:
     */
     void copyFrom(const Tensor& src);
 
+    /**
+     * Transpose data from another Tensor (which can be itself).
+     * @param src Source tensor to copy from.
+    */
+    void copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose);
+    void copyTranspose(const Tensor& src, const DataFormatTranspose& transpose);
+
     /**
      * Copy-cast data from a Tensor.
      * @param src Source tensor to copy-cast from.
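
A short usage sketch (not part of the patch) exercising the new capacity(), setDataFormat() and lazy grad() APIs; the tensor shape below is purely illustrative.

#include <cassert>
#include "aidge/data/Tensor.hpp"

void tensorFormatSketch() {
    using namespace Aidge;
    Tensor t = Array4D<float, 1, 2, 3, 3>{};  // zero-filled 1x2x3x3 CPU tensor
    assert(t.capacity() == t.size());         // storage already allocated by the Array constructor

    t.setDataFormat(DataFormat::NCHW);  // previous format is Default: only tagged, no transposition
    t.setDataFormat(DataFormat::NHWC);  // NCHW -> NHWC: data is copy-transposed in place

    auto grad = t.grad();  // lazily created: same dims, dtype, format and backend, zero-filled
    assert(grad->size() == t.size());
}
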
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index c9a4c11d780a41a1620518047d66a7de2d7b55fa..682634015376bf309a015046decfa40a36e2b177 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -20,9 +20,18 @@
 #include <utility>
 #include <vector>
 
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <fmt/format.h>
+#endif
+
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
 namespace Aidge {
 enum class DataType;
 
@@ -103,6 +112,13 @@ public:
     */
     bool inView(const NodePtr& nodePtr) const;
 
+    /**
+     * Check whether a node with the given name is in the current GraphView.
+     * @param nodeName Name of the node to test the existence of.
+     * @return true if the GraphView contains a Node with the name ``nodeName``.
+     */
+    bool inView(const std::string& nodeName) const;
+
     inline NodePtr rootNode() const noexcept {
         return mRootNode;
     }
@@ -208,14 +224,21 @@ public:
 
     /**
      * @brief Compute dimensions of input/output Tensors for each Operator of the
-     * GraphView object's Nodes.
+     * GraphView object's Nodes, by calling Node::forwardDims().
+     * This function ensures the following conditions:
+     * - Every node's forwardDims() is called, regardless of whether dims were previously forwarded or not;
+     * - forwardDims() calls are made in node-dependency order, because if dims have changed
+     *   at any point in the graph, the change must be propagated correctly to all succeeding nodes;
+     * - Cyclic dependencies are handled correctly (currently only induced by the Memorize_Op).
      */
     bool forwardDims(const std::vector<std::vector<DimSize_t>>& dims = {}, bool allowDataDependency = false);
 
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
-    /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
+    /** @brief Set the same data type for each Operator of the GraphView object's Nodes. */
     void setDataType(const DataType& datatype) const;
+    /** @brief Set the same data format for each Operator of the GraphView object's Nodes. */
+    void setDataFormat(const DataFormat& dataformat) const;
 
 ///////////////////////////////////////////////////////
 //        TOPOLOGY
@@ -252,7 +275,7 @@ public:
      * @brief Get the Nodes pointed to by the GraphView object.
      * @return std::set<NodePtr>
      */
-    inline const std::set<NodePtr>& getNodes() const { return mNodes; }
+    inline const std::set<NodePtr>& getNodes() const noexcept { return mNodes; }
 
     /**
      * @brief Get the operator with the corresponding name if it is in the
@@ -336,7 +359,8 @@ public:
      * @param other_graph GraphView containing the Nodes to include.
      * @return true if graph ordering is unique (meaning inputs/outputs order is well defined).
      */
-    bool add(std::shared_ptr<GraphView> otherGraph);
+    bool add(std::shared_ptr<GraphView> otherGraph,
+             bool includeLearnableParam = true);
 
     /**
      * @brief Include a Node in the current GraphView and link it to another
@@ -445,8 +469,8 @@ public:
      * @return true replacement has been performed
      * @return false no replacement has been performed
      */
-    static bool replace(const std::shared_ptr<GraphView>& oldG, const std::shared_ptr<GraphView>& newG);
     static bool replace(const std::set<NodePtr>& oldNodes, const std::set<NodePtr>& newNodes);
+    static bool replace(const std::shared_ptr<GraphView>& oldG, const std::shared_ptr<GraphView>& newG);
 
     /**
      * @brief Clone the GraphView with shared Operators. It is a new GraphView, with cloned Nodes, but the new Nodes refer to the same Operators as the original ones.
@@ -494,6 +518,11 @@ public:
      */
     void updateInputsOutputs();
 
+#ifdef PYBIND
+    std::string repr() const {
+        return fmt::format("GraphView(name='{}', Nodes: {} (inputs: {}, outputs: {}))", name(), mNodes.size(), mInputNodes.size(), mOutputNodes.size());
+    }
+#endif
 private:
 ///////////////////////////////////////////////////////
 //        TENSOR MANAGEMENT
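
A brief sketch (not part of the patch) of the GraphView additions; "conv0" is a hypothetical node name and the input dims are illustrative.

#include <memory>
#include "aidge/data/Data.hpp"
#include "aidge/graph/GraphView.hpp"

void configureGraph(std::shared_ptr<Aidge::GraphView> graph) {
    graph->setDataFormat(Aidge::DataFormat::NCHW);  // same data format for every operator
    graph->forwardDims({{1, 3, 224, 224}});         // dims forwarded in node-dependency order
    if (graph->inView("conv0")) {                   // new name-based membership check
        // ... act on the node, e.g. retrieve it by name ...
    }
}
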
diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..31bae71e9b433d1b82ffe62d93837f440c8a936f
--- /dev/null
+++ b/include/aidge/graph/Matching.hpp
@@ -0,0 +1,225 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_GRAPH_MATCHING_H_
+#define AIDGE_CORE_GRAPH_MATCHING_H_
+
+#include <map>
+#include <memory>
+#include <set>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+
+namespace Aidge {
+/**
+ * A simple experimental graph matching class which works by direct, single pass
+ * parse and match, without constructing any intermediate representation.
+ * Due to its single pass nature, it has some constrains on how the queries must
+ * be formulated.
+*/
+class SinglePassGraphMatching {
+public:
+    struct Context {
+        std::string query;
+        bool firstSequence = true;
+        bool firstNode = true;
+        bool inSequence = false;
+        bool lookForChild = true;
+        bool singleOutput = true;
+        IOIndex_t edgeLeftIdx = 0;
+        IOIndex_t edgeRightIdx = 0;
+
+        // For check & debug purpose:
+        size_t depth = 0;
+        std::set<std::string> anchors;
+    };
+
+    struct MatchingResult {
+        // Mutable is required to allow modifying MatchingResult members with a std::set
+        // iterator. Any change should not modify the set ordering.
+        // We use graph->rootNode() as the std::set key, which is garanteed
+        // to never change after insertion!
+        mutable std::shared_ptr<GraphView> graph;
+        mutable std::map<std::string, std::map<std::string, NodePtr>> anchors;
+        mutable NodePtr startNode;
+
+        MatchingResult() {
+            graph = std::make_shared<GraphView>();
+        }
+
+        MatchingResult(const MatchingResult& result) {
+            graph = std::make_shared<GraphView>(*(result.graph.get()));
+            anchors = result.anchors;
+            startNode = result.startNode;
+        }
+
+        MatchingResult& operator=(const MatchingResult& result) {
+            graph = std::make_shared<GraphView>(*(result.graph.get()));
+            anchors = result.anchors;
+            startNode = result.startNode;
+            return *this;
+        }
+    };
+
+    SinglePassGraphMatching(std::shared_ptr<GraphView> graph) : mGraph(graph) {}
+
+    /**
+     * Matches a query by direct, single pass parse and match.
+     * The returned matches are non-ordered and therefore stored in a std::set.
+     * 
+     * Some rules:
+     * - The first node of the first sequence is the root node and cannot be optional
+     *   WRONG: Conv?->ReLU (will throw an error)
+     *   GOOD: ReLU<-Conv?
+     * 
+     * - The first node of any further sequence must be an existing anchor
+     *   (the anchor cannot be in the middle of the sequence)
+     *   WRONG: Conv->ReLU;Pad->Conv (will throw an error)
+     *          Pad->Conv;Conv->ReLU (will throw an error)
+     *   GOOD: Conv#->ReLU;Conv#<-Pad
+     *         Pad->Conv#;Conv#->ReLU
+     * 
+     * - Any node already matched cannot be matched again (except for anchors)
+     * 
+     * - By default, an edge matches the first output to the first input.
+     *   EXAMPLE: ReLU->Conv is equivalent to ReLU-0-0>Conv
+     *            To match the second input, use ReLU-0-1>Conv (or ReLU-1>Conv)
+     *            To match the second output, use ReLU-1-0>Conv
+     *            To match any input and/or any output, use *, like ReLU-1-*>Conv
+     *            or ReLU-*-0>Conv or ReLU-*-*>Conv
+     *            The same is true for the "<-" edge syntax.
+     * 
+     * - When several nodes could match for a given node query, the first one
+     *   not already in the matching result is matched, following the 
+     *   children/parents ordered node list
+     *   EXAMPLE: Producer in "Conv<*-Producer" will match the weights Producer first
+     *   EXAMPLE: Producer in "Conv#<1-.;Conv#<*-Producer" will match the bias Producer
+     *            because the weights Producer has already been matched
+     * 
+     * - One always matches a sub-graph: additional connections can exist anywhere
+     *   in the matched sub-graph
+     *   EXAMPLE: "Add<*-." will match the Add operator and its first input, any
+     *            additional inputs will not be included in the result
+     *   EXAMPLE: "(Add#<*-.)+" will match the Add operator and all of its inputs
+     *            Note that the anchor is required since we intend to match several
+     *            inputs of the same node!
+     * 
+     * - In Aidge, a node output can be connected to multiple other nodes. In
+     *   your query, you can allow it or not, with the "~" or "-" modifier.
+     *   EXAMPLE: "Conv->ReLU" will match the Conv that are **only** connected
+     *            to a ReLU node at their output #0.
+     *            "Conv~>ReLU" will match all the Conv connected to a ReLU even
+     *            if they are also connected to other nodes at the same output #0.
+     *   When implementing a match & replace recipe, beware that you don't break
+     *   branches in the middle of your matching result if you use "~"!
+     * 
+     * - The matching results can be overlapping, meaning that some nodes may be
+     *   found in multiple results. Some results may be subsets of other results.
+     *   EXAMPLE: assume graph Conv#1->ReLU#1->Conv#2->ReLU#2
+     *            "Conv->ReLU?->Conv?->ReLU?" will return both
+     *            Conv#1->ReLU#1->Conv#2->ReLU#2 and Conv#2->ReLU#2
+     *   To avoid this behavior, set the disjoint argument to true. In this case,
+     *   only Conv#1->ReLU#1->Conv#2->ReLU#2 will be kept in the example above.
+     * 
+     * - Whitespace is allowed anywhere in the query
+     * 
+     * QUERY = SEQ | NODE_OR_BLOCK (';' (SEQ | NODE_OR_BLOCK))*
+     * 
+     * @param query The query to search.
+     * @param disjoint If true, only keep the longest disjoint (non-overlapping) matches.
+     * @return Set of matches, each stored in a MatchingResult struct.
+    */
+    std::set<MatchingResult> match(const std::string& query, bool disjoint = false);
+
+    /**
+     * Filter to keep only the longest disjoint (non-overlapping) matches.
+    */
+    std::set<MatchingResult> filterLonguestDisjoint(const std::set<MatchingResult>& matches);
+
+    inline void addNodeLambda(const std::string& name, bool(func)(const NodePtr&)) {
+        mLambda[name] = func;
+    }
+
+private:
+    std::shared_ptr<GraphView> mGraph;
+    std::map<std::string, bool(*)(const NodePtr&)> mLambda;
+
+    /**
+     * QUANTIFIER = '?' | '*' | '+' | ('{' [0-9]+ '}')
+     * NODE_OR_BLOCK = (BLOCK | NODE) QUANTIFIER?
+    */
+    bool matchNodeOrBlock(Context& ctx, std::set<MatchingResult>& matches);
+
+    /**
+     * BLOCK = '(' SEQ | PAR | BLOCK | ALT | NODE ')'
+    */
+    bool matchBlock(Context& ctx, std::set<MatchingResult>& matches);
+
+    /**
+     * SEQ = NODE_OR_BLOCK (EDGE NODE_OR_BLOCK)+
+    */
+    bool matchSequence(Context& ctx, std::set<MatchingResult>& matches);
+
+    /**
+     * PAR = NODE_OR_BLOCK ('&' NODE_OR_BLOCK)+
+    */
+    bool matchParallel(Context& ctx, std::set<MatchingResult>& matches);
+
+    /**
+     * ALT = NODE_OR_BLOCK ('|' NODE_OR_BLOCK)+
+    */
+    bool matchAlternative(Context& ctx, std::set<MatchingResult>& matches);
+
+    /**
+     * IO_INDEX_ANY = '*'
+     * IO_INDEX = IO_INDEX_ANY | [0-9]+
+     * CHILD_EDGE = ('-' | '~') (IO_INDEX '-')? IO_INDEX? '>'
+     * PARENT_EDGE = '<' (IO_INDEX '-')? IO_INDEX? ('-' | '~')
+     * EDGE = CHILD_EDGE | PARENT_EDGE
+    */
+    bool matchEdge(Context& ctx, std::set<MatchingResult>& matches);
+
+    /**
+     * TYPE = [A-Za-z0-9_]+
+     * ANCHOR = [A-Za-z0-9_]+
+     * LAMBDA = [A-Za-z0-9_]+
+     * NODE = (TYPE | '.') ('#' ANCHOR)? ('[' LAMBDA ']')?
+    */
+    bool matchNode(Context& ctx, std::set<MatchingResult>& matches);
+
+    inline void removeWhiteSpace(std::string& str) {
+        str.erase(str.begin(),
+            std::find_if(str.begin(),
+                        str.end(),
+                        [](char c) { return !std::isspace(c); }));
+    }
+
+    struct CompareMatchingResultSize {
+        bool operator()(const MatchingResult& lhs, const MatchingResult& rhs) const {
+            // Some matches size could be the same
+            if (lhs.graph->getNodes().size() == rhs.graph->getNodes().size()) {
+                // In this case, use rootNode which is guaranteed to be different!
+                return lhs.graph->rootNode() < rhs.graph->rootNode();
+            }
+
+            return lhs.graph->getNodes().size() > rhs.graph->getNodes().size();
+        }
+    };
+};
+
+inline bool operator<(const Aidge::SinglePassGraphMatching::MatchingResult& lhs, const Aidge::SinglePassGraphMatching::MatchingResult& rhs) {
+    // The rootNode of each match is guaranteed to be different!
+    return lhs.graph->rootNode() < rhs.graph->rootNode();
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CORE_GRAPH_MATCHING_H_ */
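
A usage sketch (not part of the patch) of the new SinglePassGraphMatching class, based on the query syntax documented above; the lambda name "named" and the surrounding function are illustrative.

#include <memory>
#include "aidge/graph/Matching.hpp"

void matchConvReLU(std::shared_ptr<Aidge::GraphView> model) {
    Aidge::SinglePassGraphMatching matcher(model);

    // Node lambdas add custom predicates, referenced as "[named]" in the query.
    // A capture-less lambda converts to the expected function pointer.
    matcher.addNodeLambda("named", [](const Aidge::NodePtr& node) {
        return !node->name().empty();
    });

    // Match every named Conv whose output #0 is only connected to a ReLU ("->");
    // use "~>" instead to also allow other children on the same output.
    const auto results = matcher.match("Conv[named]->ReLU", /*disjoint=*/true);
    for (const auto& res : results) {
        // res.graph holds the matched sub-graph, res.startNode its query root node.
        (void)res;
    }
}
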
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 2a0a4a3b703670c8ace05e03fc5c797fe861a423..f694a1234b6037a0ae75a89380af9747765e290c 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -19,10 +19,19 @@
 #include <vector>
 #include <utility>
 
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <fmt/format.h>
+#endif
+
 #include "aidge/graph/Connector.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
 
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
 namespace Aidge {
 
 using NodePtr = std::shared_ptr<Node>;
@@ -100,6 +109,17 @@ public:
    */
   void setName(const std::string &name);
 
+  /**
+   * @brief Given the parameter name, generate a new name which is unique
+   * in all the GraphViews containing this node.
+   * To generate the new name, the method is called recursively, appending
+   * the character ``_`` until the name no longer collides with an existing
+   * one, which is the exit condition.
+   * @param name Base name to make unique.
+   * @return A name unique in all the GraphViews containing this node.
+  */
+  std::string createUniqueName(std::string name);
+
   /**
    * @brief Type of the node.
    * @return std::string
@@ -173,9 +193,14 @@ public:
    */
   inline IOIndex_t getFirstFreeDataInput() const {
     IOIndex_t i = 0;
-    for (; (i < nbData()) && (input(i).second != gk_IODefaultIndex); ++i) {}
-    // assert((i<nbData()) && "No free data input for Node");
-    return (i < nbData()) ? i : gk_IODefaultIndex;
+    for (; i < nbInputs(); ++i) {
+      if ((inputCategory(i) == InputCategory::Data || inputCategory(i) == InputCategory::OptionalData)
+        && input(i).second == gk_IODefaultIndex)
+      {
+        break;
+      }
+    }
+    return (i < nbInputs()) ? i : gk_IODefaultIndex;
   }
 
 
@@ -207,13 +232,12 @@ public:
   inline IOIndex_t nbInputs() const noexcept { return getOperator()->nbInputs(); }
 
   /**
-   * @brief Number of input specifically for data.
+   * @brief Category of a specific input (Data or Param, optional or not).
    * Data inputs exclude inputs expecting parameters (weights or bias).
-   * @details [data, data, weight, bias] => 2
-   * @return IOIndex_t
+   * @return InputCategory
    */
-  inline IOIndex_t nbData() const noexcept {
-    return getOperator()->nbData();
+  inline InputCategory inputCategory(IOIndex_t idx) const {
+    return getOperator()->inputCategory(idx);
   }
 
   /**
@@ -241,7 +265,9 @@ public:
   inline std::set<std::shared_ptr<GraphView>> views() const noexcept {
     std::set<std::shared_ptr<GraphView>> res;
     for (const auto &v : mViews) {
-      res.insert(v.lock());
+      if (auto p = v.lock()) {
+        res.insert(p);
+      }
     }
     return res;
   }
@@ -406,6 +432,27 @@ public:
 
   std::set<NodePtr> getNodeDelta(int delta,std::set<Aidge::NodePtr> nodeSee);
 
+#ifdef PYBIND
+    std::string repr() const {
+        std::string nodeString{fmt::format("Node(name='{}', optype='{}'", name(), type())};
+        if (mParents.size() > 0) {
+            std::vector<std::int8_t> connectedParents(mParents.size(), 0);
+            for (std::size_t i = 0; i < nbInputs(); ++i) {
+                if (mParents[i])
+                    connectedParents[i] = std::int8_t(1);
+            }
+            nodeString = fmt::format("{}, parents: {}", nodeString, connectedParents);
+        }
+        if (mChildren.size() > 0) {
+            std::vector<std::vector<std::int8_t>> connectedChildren{};
+            for (std::size_t i = 0; i < nbOutputs(); ++i) {
+                connectedChildren.push_back(std::vector<std::int8_t>(mChildren[i].size(), std::int8_t(1)));
+            }
+            nodeString = fmt::format("{}, children: {}", nodeString, connectedChildren);
+        }
+        return fmt::format("{})", nodeString);
+    }
+#endif
 
 private:
   ///////////////////////////////////////////////////////
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 4ac14bdaecd16e90586d14699f3b6f1bd6d88cab..0e709afe9f175443a28947be7f4c3f5b01f5e362 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -29,7 +29,7 @@ public:
     static const std::string Type;
 
     Add_Op(const IOIndex_t nbIn)
-        : OperatorTensor(Type, nbIn, 0, 1)
+        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
     {
         if (nbIn == 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
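
The OperatorTensor constructor now takes the list of input categories instead of the former (nbData, nbParam, nbOut) counts. A hypothetical operator declaration (sketch, not part of the patch; other required members such as clone() are elided):

#include "aidge/operator/OperatorTensor.hpp"

class MyOp_Op : public Aidge::OperatorTensor {
public:
    static const std::string Type;

    MyOp_Op()
        : OperatorTensor(Type,
                         {Aidge::InputCategory::Data,           // input #0: activation
                          Aidge::InputCategory::Param,          // input #1: weight
                          Aidge::InputCategory::OptionalParam}, // input #2: optional bias
                         1)                                     // one output
    {}
    // ... clone(), setBackend(), attribute plumbing, etc. ...
};
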
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index af2993d67f16df498f13a0489a3837a8f9fc4a75..06ee4327e2f2d4df32c2decd73841bdf5f79a739 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -28,27 +28,31 @@ enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
-                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
-                public StaticAttributes<AvgPoolingAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>> {
+                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)> {
 
 public:
     static const std::string Type;
 
-    AvgPooling_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<AvgPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>>;
     template <AvgPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    AvgPooling_Op() = delete;
+
 
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
-                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {}
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+                        attr<AvgPoolingAttr::StrideDims>(stride_dims),
+                        attr<AvgPoolingAttr::KernelDims>(kernel_dims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -76,6 +80,10 @@ public:
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<AvgPoolingAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<AvgPoolingAttr::KernelDims>(); }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
@@ -101,18 +109,19 @@ inline std::shared_ptr<Node> AvgPooling(
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
     return AvgPooling(to_array(kernel_dims), name, stride_dims);
 }
+}  // namespace Aidge
 
 extern template class Aidge::AvgPooling_Op<1>;
 extern template class Aidge::AvgPooling_Op<2>;
 extern template class Aidge::AvgPooling_Op<3>;
 extern template class Aidge::AvgPooling_Op<4>;
 
-}  // namespace Aidge
-
 namespace {
 template <>
-const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
-                                                          "KernelDims"};
+const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
+    "StrideDims",
+    "KernelDims"
+};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
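
With attributes moved into a shared StaticAttributes object, each operator now exposes typed accessors plus a generic attributes() override. A reading sketch (not part of the patch):

#include "aidge/operator/AvgPooling.hpp"

void inspect(const Aidge::AvgPooling_Op<2>& op) {
    const auto& kernel = op.kernelDims();   // std::array<DimSize_t, 2>
    const auto& stride = op.strideDims();   // std::array<DimSize_t, 2>
    auto attrs = op.attributes();           // shared Attributes object, e.g. for
                                            // string-based access from the bindings
    (void)kernel; (void)stride; (void)attrs;
}
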
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index aa53f8c43f0be2a0e094946d66fd263bc19e39f5..b5b64eb428d709e804dd9f6711530b348e0be747 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -28,21 +28,31 @@ enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
-                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
-                public StaticAttributes<BatchNormAttr, float, float> {
+                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    BatchNorm_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<BatchNormAttr, float, float>;
     template <BatchNormAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    BatchNorm_Op() = delete;
 
     constexpr BatchNorm_Op(float epsilon, float momentum)
-        : OperatorTensor(Type, 1, 4, 1),
-          Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
-                           attr<BatchNormAttr::Momentum>(momentum)) {}
+        : OperatorTensor(Type,
+                            {InputCategory::Data,
+                                InputCategory::Param,
+                                InputCategory::Param,
+                                InputCategory::Param,
+                                InputCategory::Param},
+                            1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<BatchNormAttr::Epsilon>(epsilon),
+            attr<BatchNormAttr::Momentum>(momentum))) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -72,6 +82,10 @@ public:
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& epsilon() const { return mAttributes->template getAttr<BatchNormAttr::Epsilon>(); }
+    inline float& momentum() const { return mAttributes->template getAttr<BatchNormAttr::Momentum>(); }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "scale", "shift", "mean", "variance"};
     }
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 6efbc0a214dde3ca969226f734b5ee903fe5ab50..6911053932afff6675be4eb2c713d8d3cd34b462 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -19,8 +19,8 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -30,21 +30,31 @@ public:
     void forward() override;
 };
 
+enum class CastAttr { TargetType };
+
 class Cast_Op : public OperatorTensor,
     public Registrable<Cast_Op, std::string, std::unique_ptr<OperatorImpl>(const Cast_Op&)> {
 public:
     static const std::string Type;
 
-    Cast_Op() : OperatorTensor(Type, 1, 0, 1) {
-        mImpl = std::make_shared<Cast_OpImpl>(*this);
-    }
+private:
+    using Attributes_ = StaticAttributes<CastAttr, DataType>;
+    template <CastAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Cast_Op() = delete;
+
+    Cast_Op(const DataType targetType);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Cast_Op(const Cast_Op& op)
-        : OperatorTensor(op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Cast_Op, *this, op.backend());
@@ -64,6 +74,9 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline DataType& targetType() const { return mAttributes->template getAttr<CastAttr::TargetType>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -72,9 +85,15 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Cast(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Cast_Op>(), name);
+
+inline std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
 }
+} // namespace Aidge
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::CastAttr>::data[] = { "TargetType" };
 }
 
-#endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
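
Since Cast_Op now carries a TargetType attribute, the Cast factory requires the target data type up front (sketch, not part of the patch; the node name is illustrative):

#include <memory>
#include "aidge/operator/Cast.hpp"

std::shared_ptr<Aidge::Node> makeCast() {
    return Aidge::Cast(Aidge::DataType::Float32, "cast_to_fp32");
}
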
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index a9a4c9253f3af9f9cd82390256ec70d066017cc5..8341a93fe66d260ae3687170629b8759d0305a9c 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -28,25 +28,32 @@
 namespace Aidge {
 class Concat_OpImpl : public OperatorImpl {
 public:
-    Concat_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Concat_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend)
+    {}
     void forward() override;
 };
 
 enum class ConcatAttr { Axis };
 
 class Concat_Op : public OperatorTensor,
-    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)>,
-    public StaticAttributes<ConcatAttr, DimSize_t> {
+    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<ConcatAttr, DimSize_t>;
+private:
+    using Attributes_ = StaticAttributes<ConcatAttr, std::int32_t>;
     template <ConcatAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    Concat_Op(const IOIndex_t nbIn, const DimSize_t axis)
-        : OperatorTensor(Type, nbIn, 0, 1),
-          Attributes_(attr<ConcatAttr::Axis>(axis))
+public:
+    Concat_Op() = delete;
+
+    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis)
+        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConcatAttr::Axis>(axis)))
     {
         if (nbIn == 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
@@ -60,7 +67,7 @@ public:
      */
     Concat_Op(const Concat_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Concat_Op, *this, op.backend());
@@ -82,6 +89,9 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int32_t& axis() const { return mAttributes->template getAttr<ConcatAttr::Axis>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input_0", "data_input_n"};
     }
@@ -90,7 +100,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const DimIdx_t axis = 0, const std::string& name = "") {
+inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
 }
 }
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index d6a0df5ab472c4a728e5b5042258d6d2bd34f871..87ff5854b310ca472994bd6b68fd6ae58d31e806 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -30,62 +30,42 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, NoBias };
+enum class ConvAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
-                public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
-                public StaticAttributes<ConvAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        DimSize_t,
-                                        DimSize_t,
-                                        std::array<DimSize_t, DIM>,
-                                        bool> {
+                public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)> {
 
 public:
     static const std::string Type;
 
-    Conv_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ConvAttr,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
-                                        DimSize_t,
-                                        DimSize_t,
-                                        std::array<DimSize_t, DIM>,
-                                        bool>;
+                                        std::array<DimSize_t, DIM>>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    constexpr Conv_Op(DimSize_t inChannels,
-                      DimSize_t outChannels,
-                      const std::array<DimSize_t, DIM> &kernelDims,
+public:
+    Conv_Op() = delete;
+
+    constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                      bool noBias = false)
-        : OperatorTensor(Type, 1, 2, 1),
-          Attributes_(attr<ConvAttr::StrideDims>(strideDims),
-                      attr<ConvAttr::DilationDims>(dilationDims),
-                      attr<ConvAttr::InChannels>(inChannels),
-                      attr<ConvAttr::OutChannels>(outChannels),
-                      attr<ConvAttr::KernelDims>(kernelDims),
-                      attr<ConvAttr::NoBias>(noBias)) {}
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConvAttr::StrideDims>(strideDims),
+            attr<ConvAttr::DilationDims>(dilationDims),
+            attr<ConvAttr::KernelDims>(kernelDims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Conv_Op(const Conv_Op<DIM>& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Conv_Op(const Conv_Op<DIM>& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -108,116 +88,35 @@ public:
 
     // }
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        // check inputs have been associated
-        bool associated = true;
-        for (IOIndex_t i = 0; i < 3; ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
-            AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
-                     (getInput(0)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()),
-                     "Wrong input size for Conv operator.");
-            AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)) &&
-                        (getInput(1)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()) &&
-                        (getInput(1)->template dims<DIM+2>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
-                        "Wrong weight size for Conv operator.");
-            if(!this->template getAttr<ConvAttr::NoBias>())
-                AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
-                        (getInput(2)->template dims<1>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
-                        "Wrong bias size for Conv operator.");
-            std::array<DimSize_t, DIM + 2> outputDims{};
-            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-            for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
-                                                       (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
-                                               1;
-
-                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                              static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
-            }
-
-            outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-        }
-
-        return associated;
-    }
-
-    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
+    std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
     computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
                           const std::vector<DimSize_t>& outputDims,
-                          const IOIndex_t outputIdx = 0) const override {
-        if (outputIdx != 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-        }
-        if (firstEltDims.size() != outputDims.size()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
-        }
-        if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
-            // Offset
-            auto inputIdxDims = firstEltDims; // batch idx is the same
-            inputIdxDims[1] = 0; // each channel is used so start with the first one
-
-            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
-                }
-            }
-
-            // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-            // Input
-            // same batch value, every input channel is used
-            std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
-            for (DimIdx_t i = 0; i < DIM; ++i) {
-                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
-                            + 1
-                            + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-                inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
-            }
+                          const IOIndex_t outputIdx = 0) const override;
 
-            // Weight
-            // same output value, every input channel is used
-            std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
-            for (std::size_t i = 0; i < DIM; ++i) {
-                weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
-            }
-            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
-            weightIdxDims[0] = firstEltDims[1];
 
-            // Result
-            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
-            // Bias
-            if (! this->template getAttr<ConvAttr::NoBias>()){
-                const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-                const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
-                res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
-            }
-            return res;
+    DimSize_t inChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of input channel imposed.");
         }
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+        return getInput(1)->template dims<DIM+2>()[1];
     }
 
+    DimSize_t outChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of output channel imposed.");
+        }
+        return getInput(1)->template dims<DIM+2>()[0];
+    }
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvAttr::KernelDims>(); }
 
-        // By default, automatically set backend for weight and bias inputs
-        getInput(1)->setBackend(name, device);
-        getInput(2)->setBackend(name, device);
-    }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
@@ -227,8 +126,6 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string Conv_Op<DIM>::Type = "Conv";
 
 /**
  * @brief Perform a convolution on the input Tensor.
@@ -252,10 +149,11 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   bool noBias = false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims, noBias), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
-
+    if (!noBias) {
+        addProducer(conv, 2, {outChannels}, "b"); // already sets bias dims
+    }
     return conv;
 }
 
@@ -274,15 +172,15 @@ inline std::shared_ptr<Node> Conv(
 }
 }  // namespace Aidge
 
+extern template class Aidge::Conv_Op<1>;
+extern template class Aidge::Conv_Op<2>;
+
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
     "StrideDims",
     "DilationDims",
-    "InChannels",
-    "OutChannels",
-    "KernelDims",
-    "NoBias"
+    "KernelDims"
 };
 }
 
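// ----------------------------------------------------------------------------
// Usage sketch (illustration only): InChannels, OutChannels and NoBias are no
// longer attributes of Conv_Op. Channel counts are deduced from the weight
// tensor (input #1) created by the factory, and the bias producer is only
// added when noBias is false. The brace-initialised kernel below relies on the
// C-style array helper overload of Conv() for DIM deduction (assumed present,
// as in the existing header).
#include <memory>
#include "aidge/operator/Conv.hpp"

void convChannelsSketch() {
    // 2D convolution: 3 input channels, 16 output channels, 3x3 kernel.
    auto node = Aidge::Conv(3, 16, {3, 3}, "conv_example");
    auto op   = std::static_pointer_cast<Aidge::Conv_Op<2>>(node->getOperator());
    // The weight producer created by the factory already carries its dims
    // {16, 3, 3, 3}, so the new accessors can recover the channel counts.
    const auto inCh  = op->inChannels();   // expected: 3
    const auto outCh = op->outChannels();  // expected: 16
    (void)inCh; (void)outCh;
}
// ----------------------------------------------------------------------------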
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 2337ff66f00b932a190d5b1735d53df3da8ffdbf..c8a83ff7de62a61e8125eac29d61c3938115cd09 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -29,57 +29,42 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, NoBias };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
-                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
-                public StaticAttributes<ConvDepthWiseAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       DimSize_t,
-                                       std::array<DimSize_t, DIM>,
-                                       bool> {
+                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    ConvDepthWise_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
-                                             DimSize_t,
-                                             std::array<DimSize_t, DIM>,
-                                             bool>;
+                                             std::array<DimSize_t, DIM>>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    constexpr ConvDepthWise_Op(const DimSize_t nbChannels,
-                               const std::array<DimSize_t, DIM> &kernel_dims,
+public:
+
+    ConvDepthWise_Op() = delete;
+
+    constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                               bool no_bias=false)
-        : OperatorTensor(Type, 1, 2, 1),
-          Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
-                      attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-                      attr<ConvDepthWiseAttr::Channels>(nbChannels),
-                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
-                      attr<ConvDepthWiseAttr::NoBias>(no_bias)) {}
+                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
+            attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
+            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -90,106 +75,26 @@ public:
     }
 
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        // check inputs have been associated
-        // TODO : add a check of inputs dimensions ?
-        bool associated = true;
-        for (IOIndex_t i = 0; i < 3; ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
-            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-
-            for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
-                                                       (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
-                                               1;
-
-                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                              static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
-            }
-            // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
-            // if (mInputs[1]->empty()) {
-            //     mInputs[1]->resize(weightDims);
-            // }
-            // if (mInputs[2]->empty()) {
-            //     mInputs[2]->resize({mInputs[0]->dims()[1]});
-            // }
-            outputDims[1] = inputDims[1];
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-        }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-        return associated;
-    }
+    std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
+    computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
+                          const std::vector<DimSize_t>& outputDims,
+                          const IOIndex_t outputIdx = 0) const override;
 
-    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
-        if (outputIdx != 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-        }
-        if (firstEltDims.size() != outputDims.size()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
-        }
-        if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
-            // Offset
-            auto inputIdxDims = firstEltDims; // batch idx is the same
-
-            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
-                }
-            }
-
-            // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-            // Input
-            // same batch value
-            std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
-            for (DimIdx_t i = 0; i < DIM; ++i) {
-                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
-                            + 1
-                            + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-                inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
-            }
-
-            // Weight
-            std::vector<DimSize_t> weightDims{outputDims[1], 1};
-            for (std::size_t i = 0; i < DIM; ++i) {
-                weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
-            }
-            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
-            weightIdxDims[0] = firstEltDims[1];
-
-
-            // Result
-            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
-            // Bias
-            if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
-                const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-                const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
-                res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
-            }
-            return res;
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+
+    DimSize_t nbChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of channel imposed.");
         }
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+        return getInput(1)->template dims<DIM+2>()[0];
     }
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-
-        // By default, automatically set backend for weight and bias inputs
-        getInput(1)->setBackend(name, device);
-        getInput(2)->setBackend(name, device);
-    }
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
@@ -199,9 +104,6 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
-
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            const std::array<DimSize_t, DIM> &kernelDims,
@@ -211,9 +113,11 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            bool noBias=false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims, noBias), name);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
+    if (!noBias) {
+        addProducer(convDW, 2, {nbChannels}, "b");
+    }
     return convDW;
 }
 
@@ -231,10 +135,13 @@ inline std::shared_ptr<Node> ConvDepthWise(
 }
 }  // namespace Aidge
 
+extern template class Aidge::ConvDepthWise_Op<1>;
+extern template class Aidge::ConvDepthWise_Op<2>;
+
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels",
-                                                          "KernelDims", "NoBias"};
+const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims",
+                                                          "KernelDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 566f4a6ae69b090b3a035b034406d463eeb77317..3edb4a28851cffe060886a4660d6b524eb9b814a 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -30,7 +30,7 @@ class Div_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Div_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Div_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index 5ec10522e889bb1188b2304940fd892c0928b414..f615fedeef6fea59d2177cf886e8d910f064f5c2 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -29,7 +29,7 @@ class Erf_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Erf_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Erf_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index b97874f4e0deafd685453b3ce9865e65fafe7561..01da37a05414c5994ace767770e7c26fc8cd4646 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -24,26 +24,15 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class FCAttr { OutChannels, NoBias };
-
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
-                                 std::shared_ptr<OperatorImpl>(const FC_Op &)>,
-              public StaticAttributes<FCAttr, DimSize_t, bool> {
+                                 std::shared_ptr<OperatorImpl>(const FC_Op &)> {
 public:
     static const std::string Type;
 
-    FC_Op() = delete;
-
-    using Attributes_ = StaticAttributes<FCAttr, DimSize_t, bool>;
-    template <FCAttr e> using attr = typename Attributes_::template attr<e>;
-
-    FC_Op(DimSize_t out_channels, bool noBias)
-    : OperatorTensor(Type, 1, 2, 1),
-      Attributes_(
-        attr<FCAttr::OutChannels>(out_channels),
-        attr<FCAttr::NoBias>(noBias))
+    FC_Op()
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1)
     {}
 
     /**
@@ -51,12 +40,11 @@ public:
      * @param op Operator to copy.
      */
     FC_Op(const FC_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
+        : OperatorTensor(op)
     {
-        if (op.mImpl){
+        if (op.mImpl) {
             SET_IMPL_MACRO(FC_Op, *this, op.backend());
-        }else{
+        } else {
             mImpl = nullptr;
         }
     }
@@ -75,6 +63,13 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    DimSize_t outChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Fully Connected (FC) operator has no weight Tensor associated so no specific number of output channel imposed.");
+        }
+        return getInput(1)->template dims<2>()[0];
+    }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "weight", "bias"};
     }
@@ -83,19 +78,15 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
+inline std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(outChannels, noBias), name);
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), name);
     addProducer(fc, 1, {outChannels, inChannels}, "w");
-    addProducer(fc, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
+    if (!noBias) {
+        addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
+    }
     return fc;
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
-                                                        "NoBias"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
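// ----------------------------------------------------------------------------
// Usage sketch (illustration only): FCAttr (OutChannels, NoBias) is gone. The
// output channel count is now recovered from the weight tensor (input #1)
// through the new outChannels() accessor; the bias producer exists only when
// noBias is false.
#include <memory>
#include "aidge/operator/FC.hpp"

void fcSketch() {
    // 64 -> 10 fully connected layer without bias.
    auto node = Aidge::FC(64, 10, /*noBias=*/true, "fc_example");
    auto op   = std::static_pointer_cast<Aidge::FC_Op>(node->getOperator());
    // The factory created the weight producer with dims {outChannels, inChannels}.
    const auto outCh = op->outChannels();  // expected: 10
    (void)outCh;
}
// ----------------------------------------------------------------------------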
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 7534b66951cc9d8074d0af7742ba5165013431f5..3e9b780732fa9144f2e58bef854d1b42d063d0bf 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_CORE_OPERATOR_GATHER_H_
 #define AIDGE_CORE_OPERATOR_GATHER_H_
 
-#include <cstdint>  // std::int64_t
+#include <cstdint>  // std::int8_t, std::int64_t
 #include <memory>
 #include <string>
 #include <vector>
@@ -31,27 +31,36 @@ public:
     void forward() override;
 };
 
-enum class GatherAttr { Indices, GatheredShape, Axis };
+enum class GatherAttr { Axis, Indices, GatheredShape };
 
 class Gather_Op : public OperatorTensor,
                 public Registrable<Gather_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Gather_Op&)>,
-                public StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t> {
-
+                                   std::shared_ptr<OperatorImpl>(const Gather_Op&)> {
 public:
     static const std::string Type;
 
+    using Attributes_ = StaticAttributes<GatherAttr,
+                                            std::int8_t,
+                                            std::vector<int64_t>,
+                                            std::vector<DimSize_t>>;
+private:
+    template <GatherAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
     Gather_Op() = delete;
 
-    using Attributes_ = StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t>;
-    template <GatherAttr e> using attr = typename Attributes_::template attr<e>;
-    Gather_Op(const std::vector<std::int64_t>& indices, const std::vector<DimSize_t>& gatheredShape, std::int64_t axis)
-            : OperatorTensor(Type, 1, 0, 1),
-            Attributes_(
-                attr<GatherAttr::Indices>(indices),
-                attr<GatherAttr::GatheredShape>(gatheredShape),
-                attr<GatherAttr::Axis>(axis))
+    Gather_Op(std::int8_t axis,
+              const std::vector<int64_t>& indices,
+              const std::vector<DimSize_t>& gatheredShape)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+            attr<GatherAttr::Axis>(axis),
+            attr<GatherAttr::Indices>(indices),
+            attr<GatherAttr::GatheredShape>(gatheredShape)))
     {
         mImpl = std::make_shared<Gather_OpImpl>(*this);
     }
@@ -62,7 +71,7 @@ public:
      */
     Gather_Op(const Gather_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Gather_Op, *this, op.backend());
@@ -80,26 +89,32 @@ public:
         return std::make_shared<Gather_Op>(*this);
     }
 
+    bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int8_t& axis() const { return mAttributes -> getAttr<GatherAttr::Axis>(); }
+    inline std::vector<int64_t>& indices() const { return mAttributes -> getAttr<GatherAttr::Indices>(); }
+    inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes -> getAttr<GatherAttr::GatheredShape>(); }
+
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input", "indices"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
 };
 
-inline std::shared_ptr<Node> Gather( const std::vector<std::int64_t>& indices, const std::vector<DimSize_t>& gatheredShape, std::int64_t axis = 0, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Gather_Op>(indices, gatheredShape, axis), name);
+inline std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int64_t>& indices = {}, const std::vector<DimSize_t>& gatheredShape = {}, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Gather_Op>(axis, indices, gatheredShape), name);
 }
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Indices", "GatheredShape", "Axis"};
+const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Axis", "Indices", "GatheredShape"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
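// ----------------------------------------------------------------------------
// Usage sketch (illustration only): the attribute order is now
// {Axis, Indices, GatheredShape}, the factory takes the axis first, and the
// indices may alternatively be fed at runtime through the new optional
// "indices" input.
#include <cstdint>
#include <memory>
#include "aidge/operator/Gather.hpp"

void gatherSketch() {
    // Gather elements 0 and 2 along axis 1; gatheredShape is left empty here.
    auto node = Aidge::Gather(/*axis=*/1, /*indices=*/{0, 2}, /*gatheredShape=*/{}, "gather_example");
    auto op   = std::static_pointer_cast<Aidge::Gather_Op>(node->getOperator());
    const std::int8_t axis = op->axis();  // new getter, replaces getAttr<GatherAttr::Axis>()
    (void)axis;
}
// ----------------------------------------------------------------------------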
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index f0b7e92d708dfef65eea0ec7649ccc8716533679..8196c4268e669001d99f25ed2cead546e1141aa7 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -26,16 +26,28 @@
 namespace Aidge {
 class GenericOperator_Op
     : public OperatorTensor,
-      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
-      public DynamicAttributes {
+      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)> {
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
     ComputeDimsFunc mForwardDims;
 
+    const std::shared_ptr<DynamicAttributes> mAttributes;
+
 public:
+    GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut)
+        : OperatorTensor(type, inputsCategory, nbOut), mAttributes(std::make_shared<DynamicAttributes>())
+    {
+        mImpl = std::make_shared<OperatorImpl>(*this);
+    }
+
     GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
-        : OperatorTensor(type, nbData, nbParam, nbOut)
+        : OperatorTensor(type, [nbData, nbParam]() {
+                                std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
+                                inputsCategory.resize(nbData + nbParam, InputCategory::Param);
+                                return inputsCategory;
+                            }(), nbOut),
+          mAttributes(std::make_shared<DynamicAttributes>())
     {
         mImpl = std::make_shared<OperatorImpl>(*this);
     }
@@ -45,7 +57,8 @@ public:
      * @param op Operator to copy.
      */
     GenericOperator_Op(const GenericOperator_Op& op)
-        : OperatorTensor(op)
+        : OperatorTensor(op),
+          mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
     {
         mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
     }
@@ -63,10 +76,23 @@ public:
 public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    bool dimsForwarded() const override final;
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
-    void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); }
-    void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); }
+    template <class T>
+    inline T& getAttr(const std::string& name)
+    { return mAttributes -> template getAttr<T>(name); }
+    template <class T>
+    inline const T& getAttr(const std::string& name) const
+    { return mAttributes -> template getAttr<T>(name); }
+
+    ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
+    ///\tparam T expected Attribute type
+    ///\param name Attribute name
+    ///\param value Attribute value
+    template <class T>
+    inline void addAttr(const std::string& name, const T& value) const
+    { mAttributes -> template addAttr<T>(name, value); }
 
     // Helper functions that can be used with setForwardDims():
     static const ComputeDimsFunc Identity;
@@ -76,6 +102,20 @@ public:
     }
 };
 
+/**
+ * @brief Fictitious custom operator not associated with any implementation.
+ * Allows importing unknown operators and simulating new ones.
+ * @param type Type of the fictitious operator.
+ * @param inputCategory List of inputs with their categories.
+ * @param nbOut Number of output data.
+ * @param name (optional) name of the Operator.
+ * @return std::shared_ptr<Node> Node associated with the Generic Operator.
+ */
+inline std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector<InputCategory>& inputCategory, IOIndex_t nbOut,
+                                             const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, inputCategory, nbOut), name);
+}
+
 /**
  * @brief Fictive custom operator not associated with any implementation.
  * Allows to import unknown operators and simulate new ones.
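// ----------------------------------------------------------------------------
// Usage sketch (illustration only): GenericOperator_Op no longer inherits
// DynamicAttributes; attributes live in a shared DynamicAttributes object
// reached through addAttr()/getAttr(), and inputs can be described per
// category instead of (nbData, nbParam). "MyCustomOp" is an arbitrary example
// type name.
#include <memory>
#include <string>
#include "aidge/operator/GenericOperator.hpp"

void genericOperatorSketch() {
    // Custom operator: one mandatory and one optional data input, one output.
    auto node = Aidge::GenericOperator("MyCustomOp",
            {Aidge::InputCategory::Data, Aidge::InputCategory::OptionalData},
            1, "custom_example");
    auto op = std::static_pointer_cast<Aidge::GenericOperator_Op>(node->getOperator());
    op->addAttr<float>("scale", 0.5f);                 // stored in the shared DynamicAttributes
    const float scale = op->getAttr<float>("scale");   // read back by name
    (void)scale;
}
// ----------------------------------------------------------------------------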
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 74529a0ba9481bf6280df8d3ce496f67635a5aef..8bb738e8b57598e4256d3850fc791976e73c834c 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -37,7 +37,7 @@ class GlobalAveragePooling_Op
 public:
   static const std::string Type;
 
-  GlobalAveragePooling_Op() : OperatorTensor(Type, 1, 0, 1) {}
+  GlobalAveragePooling_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
   GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op)
       : OperatorTensor(op) {
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 367aa4e2d68fb1095b1e3b3be76f6ab59439e47f..393798da2fc26b3ef3f5e4cfe54f69fd82174a5f 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -27,6 +27,8 @@
 
 namespace Aidge {
 
+
+
 /**
  * @brief Identity_Op is a helper operator made to ease the declaration of MetaNodes.
  * This Operator has no Implementation, it just forward its input Tensor.
@@ -40,7 +42,7 @@ public:
     static const std::string Type;
 
     Identity_Op()
-        : OperatorTensor(Type, 1, 0, 1)
+        : OperatorTensor(Type, {InputCategory::Data}, 1)
     {
         mImpl = std::make_shared<OperatorImpl>(*this);
     }
@@ -63,7 +65,7 @@ public:
         return std::make_shared<Identity_Op>(*this);
     }
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
+    // bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
 
     /**
      * @brief Check if output dimensions have been computed.
@@ -74,7 +76,7 @@ public:
      * @return false Input has no dimensions or is a nullptr.
      */
     bool dimsForwarded() const override final {
-        return mInputs[0] ? !mInputs[0]->empty() : false;
+        return mInputs[0] ? (mInputs[0]->empty() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
     }
 
 
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 83a7c30fce7e0f68576f367d4b0bfe48edf4b3b6..294e7ebb009ff184c9150d2aa18067a15deeba22 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -12,16 +12,16 @@
 #ifndef AIDGE_CORE_OPERATOR_LEAKYRELU_H_
 #define AIDGE_CORE_OPERATOR_LEAKYRELU_H_
 
-#include <vector>
 #include <memory>
+#include <vector>
 
-#include "aidge/utils/StaticAttributes.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -30,20 +30,24 @@ enum class LeakyReLUAttr {
 };
 
 class LeakyReLU_Op : public OperatorTensor,
-    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
-    public StaticAttributes<LeakyReLUAttr, float> {
+    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)> {
 public:
     static const std::string Type;
 
-    LeakyReLU_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<LeakyReLUAttr, float>;
     template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    LeakyReLU_Op() = delete;
 
     LeakyReLU_Op(float negativeSlope)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(
-            attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(
+            std::make_shared<Attributes_>(
+                attr<LeakyReLUAttr::NegativeSlope>(negativeSlope)))
     {}
 
     /**
@@ -52,7 +56,7 @@ public:
      */
     LeakyReLU_Op(const LeakyReLU_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
@@ -76,6 +80,9 @@ public:
         mOutputs[0]->setBackend(name, device);
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<LeakyReLUAttr::NegativeSlope>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Ln.hpp
new file mode 100755
index 0000000000000000000000000000000000000000..d4010471c9af853556dbe1d60c8585d12f8fc638
--- /dev/null
+++ b/include/aidge/operator/Ln.hpp
@@ -0,0 +1,73 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_LN_H_
+#define AIDGE_CORE_OPERATOR_LN_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Ln_Op : public OperatorTensor,
+    public Registrable<Ln_Op, std::string, std::unique_ptr<OperatorImpl>(const Ln_Op&)> {
+public:
+    static const std::string Type;
+
+    Ln_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Ln_Op(const Ln_Op& op)
+        : OperatorTensor(op)
+    {
+       if (op.mImpl){
+            SET_IMPL_MACRO(Ln_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Ln_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Ln_Op>(*this);
+    }
+
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Ln(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Ln_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_LN_H_ */
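// ----------------------------------------------------------------------------
// Usage sketch (illustration only): Ln is a new element-wise natural-logarithm
// operator with a single data input and no attributes, following the same
// pattern as Erf. A backend implementation (e.g. from aidge_backend_cpu) is
// assumed to be registered before setBackend() is called.
#include <memory>
#include "aidge/operator/Ln.hpp"

void lnSketch() {
    auto node = Aidge::Ln("ln_example");
    // node->getOperator()->setBackend("cpu");  // once an implementation is registered
    (void)node;
}
// ----------------------------------------------------------------------------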
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 580d720e617e5b20c0acc7ce5e7f200fe5b25606..be460ee88bd79592e29581f6acd64813ecc39bec 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -30,7 +30,7 @@ class MatMul_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    MatMul_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    MatMul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 8aff1582604a9e23e248e7c01521567483c793ad..082aa26bbdf1d55dcae29d1ffb2b9810db8b17d0 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -34,30 +34,31 @@ enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
-                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
-                public StaticAttributes<MaxPoolingAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       bool> {
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    MaxPooling_Op() = delete;
-
     using Attributes_ = StaticAttributes<MaxPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              bool>;
+
+private:
     template <MaxPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    MaxPooling_Op() = delete;
 
     constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                             bool ceil_mode = false)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
-                      attr<MaxPoolingAttr::KernelDims>(kernel_dims),
-                      attr<MaxPoolingAttr::CeilMode>(ceil_mode))
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<MaxPoolingAttr::StrideDims>(stride_dims),
+            attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+            attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
         {}
 
     /**
@@ -66,7 +67,7 @@ public:
      */
     MaxPooling_Op(const MaxPooling_Op<DIM>& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl) {
             SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
@@ -85,25 +86,22 @@ public:
 
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        if (!getInput(0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-        }
-        if (!(getInput(0)->empty())) {
+        if (inputsAssociated()) {
             std::array<DimSize_t, DIM + 2> outputDims{};
             const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
             std::function<float(float)> roundingFunction;
-            if (this->template getAttr<MaxPoolingAttr::CeilMode>()) {
+            if (mAttributes->template getAttr<MaxPoolingAttr::CeilMode>()) {
                 roundingFunction = [](float x) { return std::ceil(x); };
             } else {
                 roundingFunction = [](float x) { return std::floor(x); };
             }
 
-            for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
+            for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                             roundingFunction(static_cast<float>(inputDims[dim+2] -
-                                                                    this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
-                                            static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
+                                                                    mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
+                                            static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = inputDims[1];
             outputDims[0] = inputDims[0];
@@ -119,6 +117,11 @@ public:
         mOutputs[0]->setBackend(name, device);
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<MaxPoolingAttr::KernelDims>(); }
+    inline bool& ceilMode() const { return mAttributes->template getAttr<MaxPoolingAttr::CeilMode>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
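// ----------------------------------------------------------------------------
// Usage sketch (illustration only): forwardDims() keeps the same spatial
// formula, out[d+2] = 1 + round((in[d+2] - kernel[d]) / stride[d]), with
// round = ceil or floor depending on CeilMode; only the attribute access moved
// to the shared mAttributes object. For in = 7, kernel = 2, stride = 2:
// floor gives 3, ceil gives 4.
#include <array>
#include <memory>
#include "aidge/operator/MaxPooling.hpp"

void maxPoolingSketch() {
    // 2x2 max-pooling, stride 2, ceil mode enabled (constructor shown above).
    auto op = std::make_shared<Aidge::MaxPooling_Op<2>>(
        std::array<Aidge::DimSize_t, 2>{2, 2},   // kernel
        std::array<Aidge::DimSize_t, 2>{2, 2},   // stride
        true);                                   // ceil_mode
    const bool ceil = op->ceilMode();  // new getter, replaces getAttr<MaxPoolingAttr::CeilMode>()
    (void)ceil;
}
// ----------------------------------------------------------------------------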
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 6b0ace2eb09fde069f8b9b104f92fc33811c25aa..d6af56f2faad18b9e39c793ea68e39eac4dd2f01 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -37,20 +37,25 @@ public:
 enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep };
 
 class Memorize_Op : public OperatorTensor,
-    public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)>,
-    public StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int> {
+    public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int>;
+private:
+    using Attributes_ = StaticAttributes<MemorizeAttr, std::uint32_t, std::uint32_t, std::uint32_t>;
     template <MemorizeAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    Memorize_Op(const unsigned int endStep)
-        : OperatorTensor(Type, 1, 1, 2),
-          Attributes_(attr<MemorizeAttr::ScheduleStep>(0),
-                      attr<MemorizeAttr::ForwardStep>(0),
-                      attr<MemorizeAttr::EndStep>(endStep))
+public:
+    Memorize_Op() = delete;
+
+    Memorize_Op(const std::uint32_t endStep)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 2),
+          mAttributes(std::make_shared<Attributes_>(
+                        attr<MemorizeAttr::ScheduleStep>(0),
+                        attr<MemorizeAttr::ForwardStep>(0),
+                        attr<MemorizeAttr::EndStep>(endStep)))
     {
         mOutputs[1] = mOutputs[0];
     }
@@ -62,7 +67,7 @@ public:
      */
     Memorize_Op(const Memorize_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl) {
             SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
@@ -87,6 +92,11 @@ public:
     void updateConsummerProducer() override;
     void forward() override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& scheduleStep() const { return mAttributes->template getAttr<MemorizeAttr::ScheduleStep>(); }
+    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<MemorizeAttr::ForwardStep>(); }
+    inline std::uint32_t& endStep() const { return mAttributes->template getAttr<MemorizeAttr::EndStep>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "data_input_init"};
     }
@@ -95,7 +105,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Memorize(const unsigned int endStep, const std::string& name = "") {
+inline std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
 }
 }  // namespace Aidge
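// ----------------------------------------------------------------------------
// Usage sketch (illustration only): Memorize_Op now stores its steps as
// std::uint32_t in a shared attribute object, exposed through scheduleStep(),
// forwardStep() and endStep(); the factory still only takes the end step.
#include <cstdint>
#include <memory>
#include "aidge/operator/Memorize.hpp"

void memorizeSketch() {
    auto node = Aidge::Memorize(/*endStep=*/5, "mem_example");
    auto op   = std::static_pointer_cast<Aidge::Memorize_Op>(node->getOperator());
    const std::uint32_t end  = op->endStep();      // 5
    const std::uint32_t step = op->forwardStep();  // starts at 0
    (void)end; (void)step;
}
// ----------------------------------------------------------------------------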
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index a411101618a5f4acaf070516d67691a6b55e3ff5..744564b4bd591d84b871a6af71c4a54589103485 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -72,17 +72,9 @@ public:
 
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
     void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
-    void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final;
 
     bool forwardDims(bool allowDataDependency = false) override final {
-        // Check first that all required inputs are available, otherwise
-        // mGraph->forwardDims() will fail!
-        bool forwarded = true;
-        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-            forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
-        }
-
-        if (forwarded) {
+        if (inputsAssociated()) {
             // Forward dims of micro-graph
             return mGraph->forwardDims({}, allowDataDependency);
         }
@@ -116,9 +108,10 @@ public:
     Elts_t getNbProducedData(IOIndex_t outputIdx) const override;
 
     void updateConsummerProducer() override;
+    void resetConsummerProducer() override;
     void forward() override;
     void backward() override {
-        assert(false && "not implemented");
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented yet for a MetaOperator");
     }
 
     inline bool isAtomic() const noexcept override final { return false; }
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index fb3aa6384fc703d758cb8753dcf54c4694f96bd4..51681629cbae215fd529b6e7bb568d07264dd63e 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -12,22 +12,26 @@
 #ifndef AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
 #define AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
 
+#include <array>
+#include <memory>
+#include <string>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp" // Sequential
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/Pad.hpp"
-#include "aidge/operator/Memorize.hpp"
-#include "aidge/operator/Add.hpp"
-#include "aidge/operator/Mul.hpp"
-#include "aidge/operator/FC.hpp"
-#include "aidge/operator/Identity.hpp"
-#include "aidge/operator/Concat.hpp"
-#include "aidge/operator/Tanh.hpp"
 #include "aidge/operator/Sigmoid.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
 
 namespace Aidge {
+
+
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
                                   DimSize_t out_channels,
@@ -40,14 +44,29 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
 
     auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
-    addProducer(metaOp, 2, {out_channels}, "b");
+    if (!no_bias) {
+        addProducer(metaOp, 2, {out_channels}, "b");
+    }
     return metaOp;
 }
 
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+{
+    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
+
+    return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
+}
+
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> PaddedConv(
@@ -63,6 +82,8 @@ inline std::shared_ptr<Node> PaddedConv(
     return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
 }
 
+////////////////////////////////////////////////////////////////////////////////
+
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
@@ -74,14 +95,29 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
 
     auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w");
-    addProducer(metaOp, 2, {nb_channels}, "b");
+    if (!no_bias) {
+        addProducer(metaOp, 2, {nb_channels}, "b");
+    }
     return metaOp;
 }
 
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+{
+    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
+
+    return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
+}
+
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> PaddedConvDepthWise(
@@ -96,30 +132,29 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(
     return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
 }
 
+////////////////////////////////////////////////////////////////////////////////
+
+
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+extern std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
-{
-    auto graph = Sequential({
-        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
-        AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
-    });
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
 
-    return MetaOperator("PaddedAvgPooling", graph, name);
-}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+extern std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
-inline std::shared_ptr<Node> PaddedAvgPooling(
-    DimSize_t const (&kernel_dims)[DIM],
+extern std::shared_ptr<Node> PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
-{
-    return PaddedAvgPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
-}
+    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
+
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
@@ -136,6 +171,20 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &
     return MetaOperator("PaddedMaxPooling", graph, name);
 }
 
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<MetaOperator_Op> PaddedMaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
+                                  bool ceil_mode = false)
+{
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, ""),
+        MaxPooling(kernel_dims, "", stride_dims, ceil_mode)
+    });
+
+    return std::make_shared<MetaOperator_Op>("PaddedMaxPooling", graph);
+}
+
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> PaddedMaxPooling(
@@ -148,115 +197,16 @@ inline std::shared_ptr<Node> PaddedMaxPooling(
     return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims, ceil_mode);
 }
 
-inline std::shared_ptr<Node> LSTM(DimSize_t in_channels,
-                                  DimSize_t hidden_channels,
-                                  DimSize_t seq_length,
-                                  bool noBias = false,
-                                  const std::string& name = "")
-{
-    // Construct micro-graph
-    auto input = Identity((!name.empty()) ? name + "_input" : "");
-    auto hiddenState = Memorize(seq_length, (!name.empty()) ? name + "_hidden_state" : "");
-    auto cellState = Memorize(seq_length, (!name.empty()) ? name + "_cell_state" : "");
-    auto add = Add(2, (!name.empty()) ? name + "_add" : "");
-
-    // Forget gate
-    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_forgetGateX" : "");
-    input->addChild(forgetGateX, 0, 0);
-    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_forgetGateH" : "");
-    hiddenState->addChild(forgetGateH, 1, 0);
-    auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : "");
-    forgetGateX->addChild(forgetGate, 0, 0);
-    forgetGateH->addChild(forgetGate, 0, 1);
-    auto forgetGateAct = Sigmoid((!name.empty()) ? name + "_forgetGateAct" : "");
-    auto forgetGateMul = Mul((!name.empty()) ? name + "_forgetGateMul" : "");
-    forgetGate->addChild(forgetGateAct, 0, 0);
-    forgetGateAct->addChild(forgetGateMul, 0, 0);
-    forgetGateMul->addChild(add, 0, 0);
-    cellState->addChild(forgetGateMul, 1, 1);
-
-    // Input gate
-    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_inputGateX" : "");
-    input->addChild(inputGateX, 0, 0);
-    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_inputGateH" : "");
-    hiddenState->addChild(inputGateH, 1, 0);
-    auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : "");
-    inputGateX->addChild(inputGate, 0, 0);
-    inputGateH->addChild(inputGate, 0, 1);
-    auto inputGateAct = Sigmoid((!name.empty()) ? name + "_inputGateAct" : "");
-    auto inputGateMul = Mul((!name.empty()) ? name + "_inputGateMul" : "");
-    inputGate->addChild(inputGateAct, 0, 0);
-    inputGateAct->addChild(inputGateMul, 0, 0);
-    inputGateMul->addChild(add, 0, 1);
-
-    // Candidate for cell update
-    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_cellCandidateX" : "");
-    input->addChild(cellCandidateX, 0, 0);
-    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_cellCandidateH" : "");
-    hiddenState->addChild(cellCandidateH, 1, 0);
-    auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : "");
-    cellCandidateX->addChild(cellCandidate, 0, 0);
-    cellCandidateH->addChild(cellCandidate, 0, 1);
-    auto cellCandidateAct = Tanh((!name.empty()) ? name + "_cellCandidateAct" : "");
-    cellCandidate->addChild(cellCandidateAct, 0, 0);
-    cellCandidateAct->addChild(inputGateMul, 0, 1);
-
-    // Output gate
-    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_outputGateX" : "");
-    input->addChild(outputGateX, 0, 0);
-    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_outputGateH" : "");
-    hiddenState->addChild(outputGateH, 1, 0);
-    auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : "");
-    outputGateX->addChild(outputGate, 0, 0);
-    outputGateH->addChild(outputGate, 0, 1);
-    auto outputGateAct = Sigmoid((!name.empty()) ? name + "_outputGateAct" : "");
-    auto outputGateMul = Mul((!name.empty()) ? name + "_outputGateMul" : "");
-    outputGate->addChild(outputGateAct, 0, 0);
-    outputGateAct->addChild(outputGateMul, 0, 0);
-
-    // Updated cell state to help determine new hidden state
-    auto cellUpdatedAct = Tanh((!name.empty()) ? name + "_cellUpdatedAct" : "");
-    add->addChild(cellUpdatedAct, 0, 0);
-    cellUpdatedAct->addChild(outputGateMul, 0, 1);
-    outputGateMul->addChild(hiddenState, 0, 0);
-    add->addChild(cellState, 0, 0);
-
-    std::shared_ptr<GraphView> microGraph = std::make_shared<GraphView>();
-    microGraph->add(input);
-    microGraph->add({hiddenState, cellState, add,
-        forgetGateX, forgetGateH, forgetGate, forgetGateAct, forgetGateMul,
-        inputGateX, inputGateH, inputGate, inputGateAct, inputGateMul,
-        cellCandidateX, cellCandidateH, cellCandidate, cellCandidateAct,
-        outputGateX, outputGateH, outputGate, outputGateAct, outputGateMul,
-        cellUpdatedAct}, false);
-
-    microGraph->setOrderedInputs({{input, 0},
-        {inputGateX, 1}, {outputGateX, 1}, {forgetGateX, 1}, {cellCandidateX, 1},
-        {inputGateH, 1}, {outputGateH, 1}, {forgetGateH, 1}, {cellCandidateH, 1},
-        {inputGateX, 2}, {outputGateX, 2}, {forgetGateX, 2}, {cellCandidateX, 2},
-        {inputGateH, 2}, {outputGateH, 2}, {forgetGateH, 2}, {cellCandidateH, 2},
-        {hiddenState, 1}, {cellState, 1}});
-    microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
-
-    auto metaOp = MetaOperator("LSTM", microGraph, name);
-    addProducer(metaOp, 1, {hidden_channels, in_channels}, "wi");
-    addProducer(metaOp, 2, {hidden_channels, in_channels}, "wo");
-    addProducer(metaOp, 3, {hidden_channels, in_channels}, "wf");
-    addProducer(metaOp, 4, {hidden_channels, in_channels}, "wc");
-    addProducer(metaOp, 5, {hidden_channels, hidden_channels}, "ri");
-    addProducer(metaOp, 6, {hidden_channels, hidden_channels}, "ro");
-    addProducer(metaOp, 7, {hidden_channels, hidden_channels}, "rf");
-    addProducer(metaOp, 8, {hidden_channels, hidden_channels}, "rc");
-    addProducer(metaOp, 9, {(noBias ? 0 : hidden_channels)}, "wbi");
-    addProducer(metaOp, 10, {(noBias ? 0 : hidden_channels)}, "wbo");
-    addProducer(metaOp, 11, {(noBias ? 0 : hidden_channels)}, "wbf");
-    addProducer(metaOp, 12, {(noBias ? 0 : hidden_channels)}, "wbc");
-    addProducer(metaOp, 13, {(noBias ? 0 : hidden_channels)}, "rbi");
-    addProducer(metaOp, 14, {(noBias ? 0 : hidden_channels)}, "rbo");
-    addProducer(metaOp, 15, {(noBias ? 0 : hidden_channels)}, "rbf");
-    addProducer(metaOp, 16, {(noBias ? 0 : hidden_channels)}, "rbc");
-    return metaOp;
-}
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Node> LSTM(DimSize_t in_channels,
+                           DimSize_t hidden_channels,
+                           DimSize_t seq_length,
+                           bool noBias = false,
+                           const std::string& name = "");
+
+std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length);
+
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */
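A sketch (assumed usage, not part of the patch) of the PaddedConv helper after this change: the inner Conv_Op no longer receives channel counts, and the bias producer on input #2 is only attached when no_bias is false. Channel counts, kernel size and strides below are arbitrary example values.

    #include "aidge/operator/MetaOperatorDefs.hpp"

    void paddedConvSketch() {
        // 3 -> 16 channels, 3x3 kernel, symmetric padding of 1: a bias producer is attached.
        auto convWithBias = Aidge::PaddedConv(3, 16, {3, 3}, "conv1",
                                              {1, 1},          // stride
                                              {1, 1, 1, 1});   // padding (begin/end per spatial dim)
        // With no_bias = true, no Producer is connected to input #2.
        auto convNoBias = Aidge::PaddedConv(3, 16, {3, 3}, "conv2",
                                            {1, 1}, {1, 1, 1, 1}, {1, 1}, /*no_bias=*/true);
    }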
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index e9bcaa871619828a50dcd407d39744e7983fe2c4..cf5a3f188424fc52849eab580cce624ff714c729 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -35,7 +35,7 @@ class Move_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Move_Op() : OperatorTensor(Type, 1, 0, 1) {
+    Move_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {
         mImpl = std::make_shared<Move_OpImpl>(*this);
     }
 
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index f53a38a82a6771e416435222137e72366f5f69f3..e61393b28fc45bf46487ac2277753dec1b297b81 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -32,7 +32,7 @@ class Mul_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Mul_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Mul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 3ee2342297208f6f4e4b061409bc5071c811d2ac..adec17d07f39727a0c75d32fa24bcc624aa66e1a 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -18,11 +18,21 @@
 #include <utility>
 #include <cstddef>
 
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <fmt/format.h>
+#endif
+
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/hook/Hook.hpp"
 
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
 namespace Aidge {
 
 enum class OperatorType {
@@ -30,6 +40,13 @@ enum class OperatorType {
     Tensor
 };
 
+enum class InputCategory {
+    Data,
+    Param,
+    OptionalData,
+    OptionalParam
+};
+
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
     std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
@@ -38,17 +55,15 @@ protected:
 private:
     std::string mType;
     const OperatorType mOperatorType;
-    const IOIndex_t mNbData;
-    const IOIndex_t mNbParam;
+    const std::vector<InputCategory> mInputsCategory;
     const IOIndex_t mNbOut;
 
 public:
     Operator() = delete;
-    Operator(const std::string& type, const IOIndex_t nbData, const IOIndex_t nbParam, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
+    Operator(const std::string& type, const std::vector<InputCategory>& inputsCategory, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
     : mType(type),
       mOperatorType(operatorType),
-      mNbData(nbData),
-      mNbParam(nbParam),
+      mInputsCategory(inputsCategory),
       mNbOut(nbOut)
     {
         // ctor
@@ -57,8 +72,7 @@ public:
     Operator(const Operator& op):
         std::enable_shared_from_this<Operator>(),
         mOperatorType(op.mOperatorType),
-        mNbData(op.mNbData),
-        mNbParam(op.mNbParam),
+        mInputsCategory(op.mInputsCategory),
         mNbOut(op.mNbOut)
     {
         mType = op.mType;
@@ -73,12 +87,14 @@ public:
 public:
     virtual std::shared_ptr<Operator> clone() const = 0;
 
+    virtual std::shared_ptr<Attributes> attributes() const { return nullptr; }
     /**
      * @brief Set the specified input with a shallow copy.
      * @param inputIdx Index of the input to set.
      * @param data Data to copy.
      */
     virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
+    virtual void resetInput(const IOIndex_t inputIdx) = 0;
 
     /**
      * @brief Set the specified input value by performing a deep copy of the given data.
@@ -87,7 +103,6 @@ public:
      * @param data Data to copy.
      */
     virtual void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
-    virtual void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) = 0;
     virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0;
         /**
      * @brief Set the specified output value by performing a deep copy of the given data.
@@ -95,7 +110,6 @@ public:
      * @param inputIdx Index of the input to set.
      */
     virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) = 0;
-    virtual void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) = 0;
     virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
 
     std::shared_ptr<Hook> getHook(const std::string& hookName) {
@@ -116,6 +130,7 @@ public:
 
     virtual void setBackend(const std::string& name, DeviceIdx_t device = 0) = 0;
     virtual void setDataType(const DataType& dataType) const = 0;
+    virtual void setDataFormat(const DataFormat& dataFormat) const = 0;
 
     /**
      * @brief Set a new OperatorImpl to the Operator
@@ -180,11 +195,14 @@ public:
         return mOperatorType;
     }
 
+    inline InputCategory inputCategory(IOIndex_t idx) const {
+        AIDGE_ASSERT(idx < mInputsCategory.size(), "Input #{} out of range (number of inputs is {})", idx, mInputsCategory.size());
+        return mInputsCategory[idx];
+    }
+
     virtual inline bool isAtomic() const noexcept { return true; }
 
-    inline IOIndex_t nbInputs() const noexcept { return mNbData+mNbParam; };
-    inline IOIndex_t nbData() const noexcept { return mNbData; };
-    inline IOIndex_t nbParam() const noexcept { return mNbParam; };
+    inline IOIndex_t nbInputs() const noexcept { return mInputsCategory.size(); };
     inline IOIndex_t nbOutputs() const noexcept { return mNbOut; };
 
     static const std::vector<std::string> getInputsName() {
@@ -193,6 +211,17 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {};
     }
+
+#ifdef PYBIND
+    std::string repr() const {
+        return fmt::format("Operator(type = '{}', nb_in = {}, nb_out = {}, attr = {}, backend = {})",
+                    type(),
+                    nbInputs(),
+                    nbOutputs(),
+                    (attributes() ? attributes()->repr() : "None"),
+                    (mImpl ? "'"+backend()+"'" : "None"));
+    }
+#endif
 };
 } // namespace Aidge
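A sketch (hypothetical operator, not part of the patch) of how a constructor now declares its inputs with the new InputCategory vector instead of the former data/parameter counts:

    // Hypothetical MyOp_Op constructor: one InputCategory entry per input, then the output count.
    MyOp_Op() : OperatorTensor(Type, {Aidge::InputCategory::Data,           // input #0: activation
                                      Aidge::InputCategory::Param,          // input #1: weight
                                      Aidge::InputCategory::OptionalParam}, // input #2: bias (may be absent)
                               1) {}
    // nbInputs() == 3, and inputCategory(2) == InputCategory::OptionalParam.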
 
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index a493793278d42904d8a62e31571720f94ff1655d..657a6d8ab6124b8919a3ac8fea5b6bfa6c4254b9 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -40,7 +40,7 @@ protected:
 public:
     OperatorTensor() = delete;
 
-    OperatorTensor(const std::string& type, const IOIndex_t nbData, const IOIndex_t nbParam,
+    OperatorTensor(const std::string& type, const std::vector<InputCategory>& inputsCategory,
                    const IOIndex_t nbOut);
 
     OperatorTensor(const OperatorTensor& other);
@@ -51,19 +51,18 @@ public:
     ///////////////////////////////////////////////////
     virtual void associateInput(const IOIndex_t inputIdx,
                                 const std::shared_ptr<Data>& data) override;
+    void resetInput(const IOIndex_t inputIdx) override final;
     ///////////////////////////////////////////////////
 
     ///////////////////////////////////////////////////
     // Tensor access
     // input management
     void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override;
-    void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override;
     const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const;
     std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final;
 
     // output management
     void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override;
-    void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override;
     virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const;
     std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final;
     ///////////////////////////////////////////////////
@@ -85,8 +84,12 @@ public:
     ///////////////////////////////////////////////////
 
     virtual void setDataType(const DataType& dataType) const override;
-    
+    virtual void setDataFormat(const DataFormat& dataFormat) const override;
+
     virtual void forward() override;
+
+protected:
+    bool inputsAssociated(bool checkNonEmpty = true) const;
 };
 }  // namespace Aidge
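A sketch (hypothetical operator, not part of the patch) of the forwardDims() pattern enabled by the new protected inputsAssociated() helper, which replaces the per-operator input-checking loops removed elsewhere in this patch:

    // Hypothetical MyOp_Op: resize the output once all mandatory inputs are set and non-empty.
    bool MyOp_Op::forwardDims(bool /*allowDataDependency*/) {
        if (inputsAssociated()) {               // checks presence (and non-emptiness) of required inputs
            mOutputs[0]->resize(getInput(0)->dims());
            return true;
        }
        return false;
    }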
 
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index a4e4ebdce801971de118ca8a263999046a13777d..5fd0f93986206e6cd958a85055159783eeb8bc8f 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -13,16 +13,16 @@
 #define AIDGE_CORE_OPERATOR_PAD_H_
 
 #include <array>
-#include <numeric>
+#include <memory>
+#include <string>
 #include <vector>
-#include <cmath>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -31,30 +31,31 @@ enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
-                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
-                public StaticAttributes<PadAttr,
-                                       std::array<DimSize_t, 2*DIM>,
-                                       PadBorderType,
-                                       double> {
+                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    Pad_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<PadAttr,
-                                             std::array<DimSize_t, 2*DIM>,
-                                             PadBorderType,
-                                             double>;
+                                            std::array<DimSize_t, 2*DIM>,
+                                            PadBorderType,
+                                            double>;
     template <PadAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    Pad_Op() = delete;
 
     constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                      const PadBorderType &borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
-                           attr<PadAttr::BorderType>(borderType),
-                           attr<PadAttr::BorderValue>(borderValue)) {}
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<PadAttr::BeginEndBorders>(beginEndTuples),
+            attr<PadAttr::BorderType>(borderType),
+            attr<PadAttr::BorderValue>(borderValue))) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -62,7 +63,7 @@ public:
      */
     Pad_Op(const Pad_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {}
 
     /**
@@ -75,28 +76,22 @@ public:
 
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        bool associated = true;
-        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
+        if (inputsAssociated()) {
             std::array<DimSize_t, DIM + 2> outputDims{};
             const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
 
             for (std::size_t dim = 0; dim < DIM; ++dim) {
-                outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
+                outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
                                     + inputDims[dim+2]
-                                    + this->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
+                                    + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
             }
             outputDims[1] = inputDims[1];
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
+            return true;
         }
 
-        return associated;
+        return false;
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
@@ -104,6 +99,11 @@ public:
         mOutputs[0]->setBackend(name, device);
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<PadAttr::BeginEndBorders>(); }
+    inline PadBorderType& borderType() const noexcept { return mAttributes->template getAttr<PadAttr::BorderType>(); }
+    inline double& borderValue() const noexcept { return mAttributes->template getAttr<PadAttr::BorderValue>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -112,9 +112,6 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string Pad_Op<DIM>::Type = "Pad";
-
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                                            const std::string& name = "",
@@ -137,6 +134,9 @@ inline std::shared_ptr<Node> Pad(
 }
 }  // namespace Aidge
 
+extern template class Aidge::Pad_Op<1>;
+extern template class Aidge::Pad_Op<2>;
+
 namespace {
 template <>
 const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndBorders", "BorderType", "BorderValue"};
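The extern template declarations above imply (an assumption, since the corresponding source file is not shown in this patch) that Pad_Op<1> and Pad_Op<2> are now explicitly instantiated once in a dedicated translation unit, roughly along these lines:

    // Presumed contents of a Pad.cpp-style translation unit (hypothetical file):
    #include <string>
    #include "aidge/operator/Pad.hpp"

    template <Aidge::DimIdx_t DIM>
    const std::string Aidge::Pad_Op<DIM>::Type = "Pad";

    template class Aidge::Pad_Op<1>;
    template class Aidge::Pad_Op<2>;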
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 2219f30ec9db7acf55491882a78e7a1ed2931cf0..575d56b455940ea98571110dbaa9a83de09fef37 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -34,18 +34,19 @@ public:
 enum class PopAttr { ForwardStep };
 
 class Pop_Op : public OperatorTensor,
-    public Registrable<Pop_Op, std::string, std::unique_ptr<OperatorImpl>(const Pop_Op&)>,
-    public StaticAttributes<PopAttr, unsigned int> {
+    public Registrable<Pop_Op, std::string, std::unique_ptr<OperatorImpl>(const Pop_Op&)> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<PopAttr, unsigned int>;
-    template <PopAttr e>
-    using attr = typename Attributes_::template attr<e>;
+private:
+    using Attributes_ = StaticAttributes<PopAttr, std::uint32_t>;
+    template <PopAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
+public:
     Pop_Op()
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<PopAttr::ForwardStep>(0))
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0)))
     {
         mImpl = std::make_shared<Pop_OpImpl>(*this);
     }
@@ -56,7 +57,7 @@ public:
      */
     Pop_Op(const Pop_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Pop_Op, *this, op.backend());
@@ -80,6 +81,9 @@ public:
     void updateConsummerProducer() override;
     void forward() override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<PopAttr::ForwardStep>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index 08c4de2a254dd267eda4040b54108f93a0c2d922..ee5c01c2121d68a7988dc686c4dbb4bbf7331c84 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -29,7 +29,7 @@ class Pow_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Pow_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Pow_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 23825079673129ea08aa7da40b21a8cc921d6ba0..9e3bdd1ba2f601da27dea3a6a01131a0c8191eb4 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -31,20 +31,24 @@ enum class ProdAttr { Constant };
 class Producer_Op
     : public OperatorTensor,
       public Registrable<Producer_Op, std::string, std::shared_ptr<OperatorImpl>(
-                                          const Producer_Op &)>,
-      public StaticAttributes<ProdAttr, bool> {
+                                          const Producer_Op &)> {
 public:
     static const std::string Type;
 
+private:
     using Attributes_ = StaticAttributes<ProdAttr, bool>;
-    template <ProdAttr e>
-    using attr = typename Attributes_::template attr<e>;
+    template <ProdAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Producer_Op() = delete;
 
     template <std::size_t DIM>
     Producer_Op(const std::array<DimSize_t, DIM>& dims,
                 bool constant = false)
-        : OperatorTensor(Type, 0, 0, 1),
-          Attributes_(attr<ProdAttr::Constant>(constant))
+        : OperatorTensor(Type, {}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ProdAttr::Constant>(constant)))
     {
         mOutputs[0]->resize(dims);
         mImpl = std::make_shared<OperatorImpl>(*this);
@@ -95,6 +99,9 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {};
     }
@@ -107,15 +114,9 @@ public:
     void backward() override final {
         // fmt::print("Basic Producer backward() function.\n");
     }
-    void setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) override {
-        if (getAttr<ProdAttr::Constant>()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
-        }
-        OperatorTensor::setOutput(outputIdx, std::move(data));
-    }
 
     void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) override {
-        if (getAttr<ProdAttr::Constant>()) {
+        if (mAttributes->template getAttr<ProdAttr::Constant>()) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
         }
         OperatorTensor::setOutput(outputIdx, data);
@@ -139,19 +140,20 @@ inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, cons
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) {
+std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) {
     assert(inputIdx != gk_IODefaultIndex);
     static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
     const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
     auto prod = Producer(dims, prodName);
     prod->addChild(otherNode, 0, inputIdx);
     otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
+    return prod;
 }
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <std::size_t DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
-    addProducer(otherNode, inputIdx, to_array(dims), extension);
+std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
+    return addProducer(otherNode, inputIdx, to_array(dims), extension);
 }
 } // namespace Aidge
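A sketch (not part of the patch; the shapes are arbitrary) showing that addProducer() now returns the Producer node it creates, so callers can keep a handle on it:

    #include "aidge/operator/Memorize.hpp"
    #include "aidge/operator/Producer.hpp"

    void addProducerSketch() {
        auto mem  = Aidge::Memorize(/*endStep=*/3, "mem");       // input #1 expects an init tensor (Param)
        auto init = Aidge::addProducer(mem, 1, {1, 16}, "init"); // now returns the new Producer node
        init->getOperator()->setDataType(Aidge::DataType::Float32);
    }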
 
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 963de31c49f48784e92434b2b563d6c008e2d4fd..40b5d581d53521e6086d24c5ecc53f725dd9f252 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -30,7 +30,7 @@ class ReLU_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    ReLU_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    ReLU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index ff8d8b0696aafdab48cd37d049fa0473078d7ea6..3fcf19ffd13645fb28b6efcfefaf8e347b148c89 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -29,22 +29,28 @@ namespace Aidge {
 enum class ReduceMeanAttr { Axes, KeepDims };
 
 class ReduceMean_Op : public OperatorTensor,
-                public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>,
-                public StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t> {
+                public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)> {
 
-   public:
+public:
     static const std::string Type;
 
-    ReduceMean_Op() = delete;
-
-    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t>;
+private:
+    using Attributes_ = StaticAttributes<ReduceMeanAttr,
+                                            std::vector<std::int32_t>,
+                                            DimSize_t>;
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    ReduceMean_Op() = delete;
 
     ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<ReduceMeanAttr::Axes>(axes),
-                      attr<ReduceMeanAttr::KeepDims>(keep_dims)) {}
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ReduceMeanAttr::Axes>(axes),
+            attr<ReduceMeanAttr::KeepDims>(keep_dims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -52,7 +58,7 @@ class ReduceMean_Op : public OperatorTensor,
      */
     ReduceMean_Op(const ReduceMean_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
@@ -73,6 +79,11 @@ class ReduceMean_Op : public OperatorTensor,
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes->getAttr<ReduceMeanAttr::Axes>(); }
+    inline DimSize_t& keepDims() const noexcept { return mAttributes->getAttr<ReduceMeanAttr::KeepDims>(); }
+
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 49ddfc4d76a0602c58c0c768b04ed4b4202f028d..4ea0cca30089555ff7979f141f94e5c84f04ffa1 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -29,24 +29,29 @@ public:
     void forward() override;
 };
 
-enum class ReshapeAttr { Shape };
+enum class ReshapeAttr { Shape, AllowZero };
 
 class Reshape_Op : public OperatorTensor,
-                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)>,
-                   public StaticAttributes<ReshapeAttr, std::vector<std::int64_t>> {
+                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)> {
 
 public:
     static const std::string Type;
 
-    Reshape_Op() = delete;
+private:
+    using Attributes_ = StaticAttributes<ReshapeAttr,
+                                            std::vector<std::int64_t>,
+                                            bool>;
+    template <ReshapeAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    using Attributes_ = StaticAttributes<ReshapeAttr, std::vector<std::int64_t>>;
-    template <ReshapeAttr e>
-    using attr = typename Attributes_::template attr<e>;
+public:
+    Reshape_Op() = delete;
 
-    Reshape_Op(const std::vector<std::int64_t>& shape)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<ReshapeAttr::Shape>(shape))
+    Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ReshapeAttr::Shape>(shape),
+            attr<ReshapeAttr::AllowZero>(allowzero)))
     {
         mImpl = std::make_shared<Reshape_OpImpl>(*this);
     }
@@ -57,7 +62,7 @@ public:
      */
     Reshape_Op(const Reshape_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
@@ -75,10 +80,15 @@ public:
         return std::make_shared<Reshape_Op>(*this);
     }
 
+    bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
+    std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int64_t>& shape() const { return mAttributes->template getAttr<ReshapeAttr::Shape>(); }
+    inline bool& allowZero() const { return mAttributes->template getAttr<ReshapeAttr::AllowZero>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -87,16 +97,17 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape,
-                                   		const std::string &name = "") {
+inline std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
+                                     bool allowzero = false,
+                                     const std::string &name = "") {
     // FIXME: properly handle default w&b initialization in every cases
-    return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape), name);
+    return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape, allowzero), name);
 }
 }  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = { "Shape" };
+const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = { "Shape", "AllowZero" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_RESHAPE_H_ */
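A sketch (not part of the patch; the shape values are arbitrary) of the extended Reshape factory: the target shape may now be left empty and provided through the new optional second input, and allowzero is exposed as an attribute.

    #include "aidge/operator/Reshape.hpp"

    void reshapeSketch() {
        // Static shape given as an attribute; allowzero kept at its default (false).
        auto flatten = Aidge::Reshape({1, -1}, /*allowzero=*/false, "flatten");
        // Shape left empty: expected to come from the optional second data input at runtime.
        auto dynamic = Aidge::Reshape({}, false, "reshape_dyn");
    }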
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..565affc57ae8e7b1838466733b0f5d8fa8e1a6d6
--- /dev/null
+++ b/include/aidge/operator/Resize.hpp
@@ -0,0 +1,88 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_RESIZE_H_
+#define AIDGE_CORE_OPERATOR_RESIZE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Resize_Op : public OperatorTensor,
+                  public Registrable<Resize_Op, std::string, std::shared_ptr<OperatorImpl>(const Resize_Op&)>{
+
+public:
+    static const std::string Type;
+
+    Resize_Op()
+        : OperatorTensor(Type,
+            {InputCategory::Data,
+                InputCategory::OptionalData,
+                InputCategory::OptionalData,
+                InputCategory::OptionalData},
+            1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+
+    Resize_Op(const Resize_Op& op)
+        : OperatorTensor(op)
+    {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Resize_Op, *this, op.backend());
+        }
+        else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Resize_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Resize_Op>(*this);
+    }
+
+    bool dimsForwarded() const override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    static const std::vector<std::string> getInputsName(){
+        // roi, scales and sizes are exposed as optional data inputs, even though they are usually constant parameters
+        return {"data_input", "roi", "scales", "sizes"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Resize(const std::string &name = "") {
+
+    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
+}
+
+}  // namespace Aidge
+
+
+#endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index c864bd045d8a5a1fc5f4ee591d1d81fcaf241bac..7d8e11b31546cd87a8d6b2d36e2929c9ef6df7a2 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -12,39 +12,42 @@
 #ifndef AIDGE_CORE_OPERATOR_SCALING_H_
 #define AIDGE_CORE_OPERATOR_SCALING_H_
 
+#include <cstddef>  // std::size_t
 #include <vector>
 #include <memory>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 enum class ScalingAttr {
-    scalingFactor, quantizedNbBits, isOutputUnsigned
+    ScalingFactor, QuantizedNbBits, IsOutputUnsigned
 };
 
-class Scaling_Op 
+class Scaling_Op
     : public OperatorTensor,
-      public Registrable<Scaling_Op, std::string, std::shared_ptr<OperatorImpl>(const Scaling_Op&)>,
-      public StaticAttributes<ScalingAttr, float, size_t, bool> {
+      public Registrable<Scaling_Op, std::string, std::shared_ptr<OperatorImpl>(const Scaling_Op&)> {
 public:
     static const std::string Type;
 
-    Scaling_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ScalingAttr, float, std::size_t, bool>;
     template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Scaling_Op() = delete;
 
     Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(
-            attr<ScalingAttr::scalingFactor>(scalingFactor),
-            attr<ScalingAttr::quantizedNbBits>(nbBits),
-            attr<ScalingAttr::isOutputUnsigned>(isOutputUnsigned))
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ScalingAttr::ScalingFactor>(scalingFactor),
+            attr<ScalingAttr::QuantizedNbBits>(nbBits),
+            attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
     {}
 
     /**
@@ -53,7 +56,7 @@ public:
      */
     Scaling_Op(const Scaling_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
@@ -72,6 +75,11 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& scalingFactor() const noexcept { return mAttributes->getAttr<ScalingAttr::ScalingFactor>(); }
+    inline std::size_t& quantizedNbBits() const noexcept { return mAttributes->getAttr<ScalingAttr::QuantizedNbBits>(); }
+    inline bool& isOutputUnsigned() const noexcept { return mAttributes->getAttr<ScalingAttr::IsOutputUnsigned>(); }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
@@ -85,10 +93,10 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
 }
 */
-inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, 
-                                     std::size_t quantizedNbBits=8, 
-                                     bool isOutputUnsigned=true, 
-                                     const std::string& name = "") 
+inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
+                                     std::size_t quantizedNbBits=8,
+                                     bool isOutputUnsigned=true,
+                                     const std::string& name = "")
 {
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
 }
@@ -97,7 +105,7 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
 namespace {
 template <>
 const char* const EnumStrings<Aidge::ScalingAttr>::data[]
-    = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"};
+    = {"ScalingFactor", "QuantizedNbBits", "IsOutputUnsigned"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6d2d1b5e7c212fafa5ad6457d9e0a260e96b1c90
--- /dev/null
+++ b/include/aidge/operator/Shape.hpp
@@ -0,0 +1,111 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SHAPE_H_
+#define AIDGE_CORE_OPERATOR_SHAPE_H_
+
+#include <cstdint>  // std::int64_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+class Shape_OpImpl : public OperatorImpl {
+public:
+    Shape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
+enum class ShapeAttr { Start, End };
+
+class Shape_Op : public OperatorTensor,
+                public Registrable<Shape_Op,
+                                   std::string,
+                                   std::shared_ptr<OperatorImpl>(const Shape_Op&)> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<ShapeAttr, std::int64_t, std::int64_t>;
+    template <ShapeAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Shape_Op() = delete;
+
+    Shape_Op(const std::int64_t start, const std::int64_t end)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ShapeAttr::Start>(start),
+            attr<ShapeAttr::End>(end)))
+    {
+        mImpl = std::make_shared<Shape_OpImpl>(*this);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Shape_Op(const Shape_Op& op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
+    {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Shape_Op, *this, op.backend());
+        }
+        else {
+            mImpl = std::make_shared<Shape_OpImpl>(*this);
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Shape_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Shape_Op>(*this);
+    }
+
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int64_t& start() const noexcept { return mAttributes->getAttr<ShapeAttr::Start>(); }
+    inline std::int64_t& end() const noexcept { return mAttributes->getAttr<ShapeAttr::End>(); }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
+}
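+// Illustrative usage only (not part of the operator definition above): build a
+// Shape node that emits the dimensions of its input; the exact Start/End
+// semantics follow the forwardDims() implementation (not shown here).
+//   auto shapeNode = Shape(/*start=*/0, /*end=*/-1, "input_shape");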
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"Start", "End"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SHAPE_H_ */
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..879edcac6a7ed9a78a2db8d82994071a6cf09635
--- /dev/null
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -0,0 +1,75 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SHIFTGELU_H_
+#define AIDGE_CORE_OPERATOR_SHIFTGELU_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class ShiftGELU_Op : public OperatorTensor,
+    public Registrable<ShiftGELU_Op, std::string, std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)> {
+public:
+    static const std::string Type;
+
+    ShiftGELU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ShiftGELU_Op(const ShiftGELU_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ShiftGELU_Op, *this, op.backend());
+        }else{
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ShiftGELU_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ShiftGELU_Op>(*this);
+    }
+
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> ShiftGELU(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<ShiftGELU_Op>(), name);
+}
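+// Illustrative usage only: insert a ShiftGELU activation after an existing node
+// (`previousNode` is a hypothetical name).
+//   auto act = ShiftGELU("gelu_0");
+//   previousNode->addChild(act);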
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SHIFTGELU_H_ */
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f171130213b2e51ca8fc9905d93944198f849ce7
--- /dev/null
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -0,0 +1,75 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SHIFTMAX_H_
+#define AIDGE_CORE_OPERATOR_SHIFTMAX_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class ShiftMax_Op : public OperatorTensor,
+    public Registrable<ShiftMax_Op, std::string, std::shared_ptr<OperatorImpl>(const ShiftMax_Op&)> {
+public:
+    static const std::string Type;
+
+    ShiftMax_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ShiftMax_Op(const ShiftMax_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ShiftMax_Op, *this, op.backend());
+        }else{
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ShiftMax_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ShiftMax_Op>(*this);
+    }
+
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> ShiftMax(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<ShiftMax_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SHIFTMAX_H_ */
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index bea9fc45eaa7f17f71963106b5bd3e1340a48a92..ae82d4a3a2d29755bba22b9a4194284310ac4f84 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -30,7 +30,7 @@ class Sigmoid_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Sigmoid_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Sigmoid_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 757e08fe97dd1cc572c08ac7c2b454daa234bdc1..7d425a0f3589e74b54ee0834fdc4291ea7f49bad 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -24,35 +24,36 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-class Slice_OpImpl : public OperatorImpl {
-public:
-    Slice_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-    void forward() override;
-};
 
-enum class SliceAttr { Starts, Ends, Axes };
+enum class SliceAttr { Starts, Ends, Axes, Steps };
 
 class Slice_Op
     : public OperatorTensor,
-      public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)>,
-      public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>> {
+      public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)> {
 public:
     static const std::string Type;
 
+private:
+    using Attributes_ = StaticAttributes<SliceAttr,
+                                            std::vector<std::int64_t>,
+                                            std::vector<std::int64_t>,
+                                            std::vector<std::int8_t>,
+                                            std::vector<std::int64_t>>;
+    template <SliceAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
     Slice_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>>;
-    template <SliceAttr e>
-    using attr = typename Attributes_::template attr<e>;
+    Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>& ends, const std::vector<std::int8_t>& axes, const std::vector<std::int64_t>& steps)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<SliceAttr::Starts>(starts),
+            attr<SliceAttr::Ends>(ends),
+            attr<SliceAttr::Axes>(axes),
+            attr<SliceAttr::Steps>(steps)))
+    {}
 
-    Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>&  ends, const std::vector<std::int64_t>& axes)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<SliceAttr::Starts>(starts),
-                      attr<SliceAttr::Ends>(ends),
-                      attr<SliceAttr::Axes>(axes))
-    {
-        mImpl = std::make_shared<Slice_OpImpl>(*this);
-    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
@@ -61,13 +62,13 @@ public:
      */
     Slice_Op(const Slice_Op &op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Slice_Op, *this, op.backend());
         }
         else {
-            mImpl = std::make_shared<Slice_OpImpl>(*this);
+            mImpl = nullptr;
         }
     }
 
@@ -78,43 +79,43 @@ public:
      */
     std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
 
-    bool forwardDims(bool allowDataDependency = false) override final;
+    bool dimsForwarded() const override final;
+    bool forwardDims(bool allowDataDependency = true) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int64_t>& starts() const noexcept { return mAttributes -> getAttr<SliceAttr::Starts>(); }
+    inline std::vector<std::int64_t>& ends() const noexcept { return mAttributes -> getAttr<SliceAttr::Ends>(); }
+    inline std::vector<std::int8_t>& axes() const noexcept { return mAttributes -> getAttr<SliceAttr::Axes>(); }
+    inline std::vector<std::int64_t>& steps() const noexcept { return mAttributes -> getAttr<SliceAttr::Steps>(); }
+
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input", "starts", "ends", "axes", "steps"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
 };
 
 /**
  * @brief Extract a sub-Tensor from a bigger original Tensor.
- * @param starts Indexes for each dimension of the first element.
- * Can be a negative value. Negative values start their reference from the last index.
- * ``-1`` referes to the last index of a dimension.
- * @param ends Indexes for each dimension of the last element.
- * Can be a negative value. Negative values start their reference from the last index.
- * ``-1`` referes to the last index of a dimension.
- * @param axes Dimensions for which start/end indexes apply. Not specifying a dimensions
- * means the whole dimensions is extracted.
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
-inline std::shared_ptr<Node> Slice(const std::vector<std::int64_t> starts,
-                                   const std::vector<std::int64_t> ends,
-                                   const std::vector<std::int64_t> axes,
+inline std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
+                                   const std::vector<std::int64_t>& ends = {},
+                                   const std::vector<std::int8_t>& axes = {},
+                                   const std::vector<std::int64_t>& steps = {},
                                    const std::string &name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes), name);
+    return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes, steps), name);
 }
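+// Illustrative usage only: with the new signature, slicing parameters can either
+// be passed here as attributes or fed at runtime through the optional inputs
+// listed in getInputsName() ("starts", "ends", "axes", "steps").
+//   auto slice = Slice({0, 0}, {2, -1}, {0, 1}, {1, 1}, "crop");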
 }  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "Starts", "Ends", "Axes" };
+const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "Starts", "Ends", "Axes", "Steps" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SLICE_H_ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index d48dbc2b60e46eb5c074b8adae065383e29b1769..70f3a561ae5c9ba4720de8419bcd5aaf32a51e47 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -24,24 +24,29 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class SoftmaxAttr { AxisIdx };
+enum class SoftmaxAttr { Axis };
 
 class Softmax_Op : public OperatorTensor,
                 public Registrable<Softmax_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Softmax_Op&)>,
-                public StaticAttributes<SoftmaxAttr, int> {
+                                   std::shared_ptr<OperatorImpl>(const Softmax_Op&)> {
 
 public:
     static const std::string Type;
 
+private:
+    using Attributes_ = StaticAttributes<SoftmaxAttr, std::int32_t>;
+    template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
     Softmax_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SoftmaxAttr, int>;
-    template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
-    Softmax_Op(int axis)
-            :  OperatorTensor(Type, 1, 0, 1),
-            Attributes_(attr<SoftmaxAttr::AxisIdx>(axis)) {}
+    Softmax_Op(std::int32_t axis)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+                attr<SoftmaxAttr::Axis>(axis)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -49,7 +54,7 @@ public:
      */
     Softmax_Op(const Softmax_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
@@ -68,6 +73,10 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<SoftmaxAttr::Axis>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -76,7 +85,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Softmax(int axis, const std::string& name = "") {
+inline std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..42baf66e6722c6f9a0d3f40f12d4f4685fcc6980
--- /dev/null
+++ b/include/aidge/operator/Split.hpp
@@ -0,0 +1,111 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SPLIT_H_
+#define AIDGE_CORE_OPERATOR_SPLIT_H_
+
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+class Split_OpImpl : public OperatorImpl {
+public:
+    Split_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
+enum class SplitAttr { Axis, Split };
+
+class Split_Op
+    : public OperatorTensor,
+      public Registrable<Split_Op, std::string, std::shared_ptr<OperatorImpl>(const Split_Op &)>,
+      public StaticAttributes<SplitAttr, std::int8_t, std::vector<DimSize_t>> {
+
+public:
+    static const std::string Type;
+
+    Split_Op() = delete;
+
+    using Attributes_ = StaticAttributes<SplitAttr, std::int8_t, std::vector<DimSize_t>>;
+    template <SplitAttr e> using attr = typename Attributes_::template attr<e>;
+    Split_Op(std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
+          Attributes_(attr<SplitAttr::Axis>(axis),
+                      attr<SplitAttr::Split>(split))
+    {
+        mImpl = std::make_shared<Split_OpImpl>(*this);
+    }
+
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
+     * input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Split_Op(const Split_Op &op)
+        : OperatorTensor(op),
+          Attributes_(op)
+    {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Split_Op, *this, op.backend());
+        }
+        else {
+            mImpl = std::make_shared<Split_OpImpl>(*this);
+        }
+    }
+public:
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Split_Op
+     */
+    std::shared_ptr<Operator> clone() const override { return std::make_shared<Split_Op>(*this); }
+
+    bool dimsForwarded() const override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "split"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output_0", "data_output_n"};
+    }
+
+};
+
+/**
+ * @brief Split a Tensor into multiple sub-Tensors along a given axis.
+ * @param nbOutput Number of output Tensors to produce.
+ * @param axis Axis along which the input Tensor is split.
+ * @param split Sizes of each output chunk along ``axis`` (optional).
+ * @param name Name of the Operator.
+ * @return std::shared_ptr<Node> A Node containing the Operator.
+ */
+inline std::shared_ptr<Node> Split(DimSize_t nbOutput,
+                                   std::int8_t axis = 0,
+                                   const std::vector<DimSize_t>& split = {},
+                                   const std::string &name = "") {
+    return std::make_shared<Node>(std::make_shared<Split_Op>(axis, nbOutput, split), name);
+}
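+// Illustrative usage only: split a tensor into three chunks along axis 1, either
+// with explicit chunk sizes as below or, if `split` is left empty, with sizes
+// resolved when dimensions are forwarded.
+//   auto splitNode = Split(/*nbOutput=*/3, /*axis=*/1, /*split=*/{8, 8, 16}, "split_op");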
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::SplitAttr>::data[] = { "Axis", "Split" };
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SPLIT_H_ */
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index f5ffa431192d73a703c1ce973cb485dadb31420d..05b20286bc3f576d4e43fbece26ae270b3e583e6 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -33,7 +33,7 @@ public:
 public:
     static const std::string Type;
 
-    Sqrt_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Sqrt_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index d89584871a05194dbb316132215515d3c3d860fe..ba5a021c30f13bbc2ae73c90078548c5b677a3a5 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -34,7 +34,7 @@ public:
 public:
     static const std::string Type;
 
-    Sub_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Sub_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index 3fd5377d30cfff864743dcab2da9e690e26e5263..b5f183a90aeeb4ef424c318e8942a818b568b44a 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -28,7 +28,7 @@ class Tanh_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Tanh_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Tanh_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 16ac2794a283d817f6a4e1586349e55ec626167e..72096448ebf0e00d73e33bdab094ca7f0b7d0633 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -17,7 +17,6 @@
 #include <numeric>
 #include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
@@ -26,32 +25,37 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-class Transpose_OpImpl : public OperatorImpl {
+class TransposeImpl : public OperatorImpl {
 public:
-    Transpose_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    TransposeImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend)
+    {}
     void forward() override;
 };
 
 enum class TransposeAttr { OutputDimsOrder };
 
 class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)>,
-                public StaticAttributes<TransposeAttr, std::vector<DimSize_t>> {
+                public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)> {
 
-   public:
+public:
     static const std::string Type;
 
-    Transpose_Op() = delete;
 
+private:
     using Attributes_ = StaticAttributes<TransposeAttr, std::vector<DimSize_t>>;
-    template <TransposeAttr e>
-    using attr = typename Attributes_::template attr<e>;
+    template <TransposeAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Transpose_Op() = delete;
 
-    Transpose_Op(const std::vector<DimSize_t> &output_dims_order)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<TransposeAttr::OutputDimsOrder>(output_dims_order))
+    Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder)))
     {
-        mImpl = std::make_shared<Transpose_OpImpl>(*this);
+        mImpl = std::make_shared<TransposeImpl>(*this);
     }
 
     /**
@@ -60,13 +64,13 @@ class Transpose_Op : public OperatorTensor,
      */
     Transpose_Op(const Transpose_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
         }
         else {
-            mImpl = std::make_shared<Transpose_OpImpl>(*this);
+            mImpl = std::make_shared<TransposeImpl>(*this);
         }
     }
 
@@ -82,6 +86,9 @@ class Transpose_Op : public OperatorTensor,
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes -> getAttr<TransposeAttr::OutputDimsOrder>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -90,9 +97,9 @@ class Transpose_Op : public OperatorTensor,
     }
 };
 
-inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &output_dims_order,
+inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
                                            const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Transpose_Op>(output_dims_order), name);
+    return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
 }
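+// Illustrative usage only, assuming OutputDimsOrder lists, for each output
+// dimension, the input dimension it is taken from (e.g. NCHW -> NHWC):
+//   auto toNHWC = Transpose({0, 2, 3, 1}, "to_nhwc");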
 }  // namespace Aidge
 
diff --git a/include/aidge/recipes/GraphViewHelper.hpp b/include/aidge/recipes/GraphViewHelper.hpp
index a2c571bf4ed164729f7c3416c814b913b4d07e6f..3b8ba7627362c945a6bfbe587ec952fdda013e98 100644
--- a/include/aidge/recipes/GraphViewHelper.hpp
+++ b/include/aidge/recipes/GraphViewHelper.hpp
@@ -39,8 +39,6 @@ std::set<std::shared_ptr<Tensor>> producers(std::shared_ptr<GraphView> graphview
  */
 std::set<std::shared_ptr<Tensor>> parameters(std::shared_ptr<GraphView> graphview);
 
-void compile_gradient(std::shared_ptr<Aidge::GraphView> gv);
-
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_GRAPHVIEWHELPER_H_ */
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index 97c608cd38ca76a4f40b8fb02282751a97ceed4e..48137610fe74fc8839c2e5dcf6db1df10e29d420 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -44,24 +44,30 @@ void fuseMulAdd(std::shared_ptr<Node> matmul,std::shared_ptr<Node> add);
  */
 void fuseMulAdd(std::shared_ptr<GraphView> graphView);
 
-// REMOVE Dropout
-
 /**
- * @brief Remove ``Dropout`` Node.
+ * @brief Remove a node type.
  *
- * @param nodes Node to remove.
+ * @param graphView Graph view to use graph matching on, in order to apply transformations.
+ * @param type Type of the nodes to remove.
+ * @param incProducers If true, also remove the producers attached to the removed nodes.
+ * @return size_t Number of nodes removed.
  */
-void removeDropout(std::shared_ptr<Node> dropout);
-
-
-void removeDropout(std::shared_ptr<MatchSolution> solution);
+size_t removeNode(std::shared_ptr<GraphView> graphView, const std::string& type, bool incProducers = false);
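+// Illustrative usage only (the node type string is a hypothetical example):
+//   const size_t nbRemoved = removeNode(graphView, "Flatten");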
 
 /**
  * @brief Remove ``Dropout`` Node.
  *
  * @param graphView Graph view to use graph matching on, in order to apply transformations.
+ * @return size_t Number of ``Dropout`` nodes removed
  */
-void removeDropout(std::shared_ptr<GraphView> graphView);
+size_t removeDropout(std::shared_ptr<GraphView> graphView);
+
+/**
+ * Remove all identity nodes
+ * @param graph Graph to manipulate
+ * @return size_t Number of identity nodes removed
+*/
+size_t removeIdentity(std::shared_ptr<GraphView> graph);
 
 // REMOVE FLATTEN + FC -> FC
 
@@ -111,11 +117,17 @@ std::set<std::shared_ptr<Node>> getConvHorizontalTiling(const std::shared_ptr<No
 
 
 /**
- * Add Convert operators where needed to ensure no conversion needs to be done
+ * Add Cast and Move operators where needed to ensure no conversion needs to be done
  * at the Operator level.
 */
 void explicitCastMove(std::shared_ptr<GraphView> graphView);
 
+/**
+ * Add Transpose operators where needed to ensure no transposition needs to be done
+ * at the Operator level.
+*/
+void explicitTranspose(std::shared_ptr<GraphView> graphView);
+
 /**
  * Flatten the graph by replacing the meta operators by their micro graph.
  * @param recursive If true, recursively replace meta operators until there is
@@ -123,6 +135,15 @@ void explicitCastMove(std::shared_ptr<GraphView> graphView);
 */
 void expandMetaOps(std::shared_ptr<GraphView> graph, bool recursive = false);
 
+/**
+ * Fuse each sub-graph matching a query into a Meta Operator.
+ * @param graph Graph to manipulate
+ * @param query Sub-graph matching query
+ * @param type Type name of the resulting meta operators
+ * @return size_t Number of replacements
+*/
+size_t fuseToMetaOps(std::shared_ptr<GraphView> graph, const std::string& query, const std::string& type = "");
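+// Illustrative usage only; the query string below is a hypothetical example of
+// the graph-matching syntax, fusing each MatMul followed by an Add into a meta
+// operator of type "FC":
+//   const size_t nbFused = fuseToMetaOps(graph, "MatMul->Add", "FC");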
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_RECIPES_H_ */
diff --git a/include/aidge/scheduler/MemoryManager.hpp b/include/aidge/scheduler/MemoryManager.hpp
index 360b01f76e7a9b51f36b83d4d35286eced35016a..94add56e8afdebb8e42f7ae49a32da2aeed9e9cb 100644
--- a/include/aidge/scheduler/MemoryManager.hpp
+++ b/include/aidge/scheduler/MemoryManager.hpp
@@ -182,16 +182,7 @@ public:
                         const std::shared_ptr<MemorySpace>& p1);
     };
 
-    struct CompByNodeName {
-        bool operator()(const std::shared_ptr<Node>& lhs,
-                        const std::shared_ptr<Node>& rhs) const
-        {
-            return lhs->name() < rhs->name();
-        }
-    };
-
-    typedef std::map<std::shared_ptr<Node>, std::vector<MemoryPlane>,
-        CompByNodeName> MemMap_T;
+    typedef std::map<std::shared_ptr<Node>, std::vector<MemoryPlane>> MemMap_T;
 
 public:
     MemoryManager(): mClock(0) {}
diff --git a/include/aidge/scheduler/SequentialScheduler.hpp b/include/aidge/scheduler/SequentialScheduler.hpp
index a7929fde8a2affdd562d70d11a7c809aaf3357d0..35dafead6dc424550df7d83d54f5ec998c3b4d86 100644
--- a/include/aidge/scheduler/SequentialScheduler.hpp
+++ b/include/aidge/scheduler/SequentialScheduler.hpp
@@ -54,7 +54,7 @@ public:
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    void backward(bool instantiateGrad = true);
+    void backward();
 
 private:
     SchedulingPolicy mSchedulingPolicy;
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index 927686cfd5cca910c5ffb25364ae4bc971ad18bf..c1f6a8a7f704b4bd813983cb178d9e5acba5a5e1 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -12,15 +12,14 @@
 #ifndef AIDGE_CORE_UTILS_ATTRIBUTES_H_
 #define AIDGE_CORE_UTILS_ATTRIBUTES_H_
 
-#ifdef PYBIND
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#endif
-#include <vector>
 #include <string>
 #include <set>
 
 #ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <fmt/format.h>
+
 namespace py = pybind11;
 #endif
 
@@ -36,26 +35,53 @@ namespace Aidge {
 template<class T, std::size_t N>
 constexpr std::size_t size(T (&)[N]) { return N; }
 
+
 /* This abstract class allows to avoid binding Attributes.
 *  Otherwise we would need to bind every template possible of Attributes.
 *  Every operators can access the methods of this class by inheriting from
 *  Attributes in the binding code.
 */
 class Attributes {
+protected:
+    /**
+     * @brief Convert snake_case to PascalCase.
+     * @param snakeCase string to convert.
+    */
+    static std::string snakeToPascal(const std::string& snakeCase);
+
+
+    /**
+     * @brief Convert PascalCase to snake_case.
+     * @param pascalCase string to convert.
+    */
+    static std::string pascalToSnake(const std::string& pascalCase);
+
+    /**
+     * @brief Check whether a given string is in PascalCase.
+     * @param str String to check.
+     */
+    static bool isPascalCase(const std::string& str);
+
+    /**
+     * @brief Check whether a given string is in snake_case.
+     * @param str String to check.
+     */
+    static bool isSnakeCase(const std::string& str);
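+    // Expected mapping (illustrative): snakeToPascal("kernel_dims") == "KernelDims"
+    // and pascalToSnake("KernelDims") == "kernel_dims"; C++ keeps PascalCase
+    // attribute names while the Python bindings expose snake_case ones.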
+
 public:
     /**
      * @brief Check if the attribute exists.
      * @param name Name of the attribute to check.
      * @return bool True if the attribute exists, false otherwise.
     */
-    virtual bool hasAttr(const std::string& name) const = 0;
+    virtual bool hasAttr(const std::string& /*name*/) const = 0;
 
     /**
      * @brief Get the (implementation defined) name of the type of an attribute, returned by std::type_info::name.
      * @param name Name of the attribute.
      * @return std::string Name of the type as returned by std::type_info::name.
     */
-    virtual std::string getAttrType(const std::string& name) const = 0;
+    virtual std::string getAttrType(const std::string& /*name*/) const = 0;
 
     /**
      * @brief Get the attribute's name list.
@@ -64,16 +90,25 @@ public:
     virtual std::set<std::string> getAttrsName() const = 0;
 
 #ifdef PYBIND
+    virtual bool hasAttrPy(const std::string& name) const = 0;
+
     /* Bindable get function, does not require any templating.
     *  This is thanks to py::object which allows the function to
     *  be agnostic of its return type.
     */
-    virtual py::object getAttrPy(const std::string& name) const = 0;
+    virtual py::object getAttrPy(const std::string& name) const  = 0;
     /* Bindable set function, does not require any templating.
     *  This is thanks to py::object which allows the function to
     *  be agnostic of ``value`` type.
     */
-    virtual void setAttrPy(const std::string& name, py::object&& value) = 0;
+    virtual void setAttrPy(const std::string& /*name*/, py::object&& /*value*/) = 0;
+
+    virtual std::string str() const = 0;
+
+    virtual std::string repr() const = 0;
+
+    virtual py::dict dict() const = 0;
+
 #endif
     virtual ~Attributes() {}
 };
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 113377b33d9827c3428eeb0adc92111f75c22abb..c5054eb2fd2e8bfa5e7fca898f343ce630643dbd 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -48,11 +48,12 @@ public:
      */
     template<class T> T& getAttr(const std::string& name)
     {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
 #ifdef PYBIND
         // If attribute does not exist in C++, it might have been created or modified in Python
         auto it = mAttrs.find(name);
         if (it == mAttrs.end()) {
-            auto itPy = mAttrsPy.find(name);
+            auto itPy = mAttrsPy.find(pascalToSnake(name));
             if (itPy != mAttrsPy.end()) {
                 // Insert the attribute back in C++
                 mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
@@ -65,11 +66,12 @@ public:
 
     template<class T> const T& getAttr(const std::string& name) const
     {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
 #ifdef PYBIND
         // If attribute does not exist in C++, it might have been created or modified in Python
         auto it = mAttrs.find(name);
         if (it == mAttrs.end()) {
-            auto itPy = mAttrsPy.find(name);
+            auto itPy = mAttrsPy.find(pascalToSnake(name));
             if (itPy != mAttrsPy.end()) {
                 // Insert the attribute back in C++
                 mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
@@ -86,6 +88,7 @@ public:
     ///\param value Attribute value
     template<class T> void addAttr(const std::string& name, const T& value)
     {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
         const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
         AIDGE_ASSERT(res.second, "attribute already exists");
 
@@ -93,7 +96,7 @@ public:
         // We cannot handle Python object if the Python interpreter is not running
         if (Py_IsInitialized()) {
             // Keep a copy of the attribute in py::object that is updated every time
-            mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+            mAttrsPy.emplace(std::make_pair(pascalToSnake(name), py::cast(value)));
         }
 #endif
     }
@@ -129,7 +132,8 @@ public:
 #ifdef PYBIND
     void addAttrPy(const std::string& name, py::object&& value)
     {
-        auto it = mAttrs.find(name);
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python.");
+        auto it = mAttrs.find(snakeToPascal(name));
         AIDGE_ASSERT(it == mAttrs.end(), "attribute already exists");
 
         const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
@@ -138,26 +142,52 @@ public:
 
     void setAttrPy(const std::string& name, py::object&& value) override final
     {
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python.");
         auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
         if (!resPy.second)
             resPy.first->second = std::move(value);
 
         // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
-        mAttrs.erase(name);
+        const std::string pascalName = snakeToPascal(name);
+        mAttrs.erase(pascalName);
+    }
+
+    py::dict dict() const override {
+        py::dict attributes;
+        for (const auto& elt : mAttrsPy) {
+            const std::string snakeName = pascalToSnake(elt.first);
+            attributes[snakeName.c_str()] = elt.second;
+        }
+        return attributes;
+    }
+
+    std::string str() const override {
+        return repr();
     }
+
+    std::string repr() const override {
+        // Build the representation from the string form of the underlying py::dict
+        return fmt::format("AttrDict({})", static_cast<std::string>(py::str(dict())));
+    }
+
 #endif
 
     //////////////////////////////////////
     ///     Generic Attributes API
     //////////////////////////////////////
     bool hasAttr(const std::string& name) const override final {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
+        return (mAttrs.find(name) != mAttrs.cend());
+    }
+
 #ifdef PYBIND
+    bool hasAttrPy(const std::string& name) const override final {
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python.");
         // Attributes might have been created in Python, the second condition is necessary.
-        return (mAttrs.find(name) != mAttrs.end() || mAttrsPy.find(name) != mAttrsPy.end());
-#else
-        return (mAttrs.find(name) != mAttrs.end());
-#endif
+        return (mAttrs.find(snakeToPascal(name)) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
     }
+#endif
 
     std::string getAttrType(const std::string& name) const override final {
         // In order to remain consistent between C++ and Python, with or without PyBind, the name of the type is:
@@ -195,7 +225,7 @@ public:
      * generic type caster for std::any is not feasable.
      * The strategy here is to keep a copy of each attribute in py::object that is updated every time.
     */
-    py::object getAttrPy(const std::string& name) const override final {
+    inline py::object getAttrPy(const std::string& name) const override final {
         return mAttrsPy.at(name);
     };
 #endif
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index a01f81629c8425f9d860bf1ea03bfe421dbd04fa..a400f8046d07df4ff4493470737f5c4d42945db7 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -14,6 +14,7 @@
 #define AIDGE_LOG_H_
 
 #include <memory>
+#include <vector>
 
 #include <fmt/format.h>
 #include <fmt/ranges.h>
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index b0acdaff7cb75afec78f0564fb95c98f2b32f47b..47bb05ce7c435eda7e975a58544a416182c3853b 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -77,7 +77,7 @@ struct Registrar {
 
     static auto create(const registrar_key& key){
         const auto it = C::registry().find(key);
-        AIDGE_ASSERT(it != C::registry().cend(), "missing or invalid registrar key: {}\nDid you include/import the corresponding module?", key);
+        AIDGE_ASSERT(it != C::registry().cend(), "missing or invalid registrar key: {} for registrable object {}\nDid you include/import the corresponding module?\nIf so, it is possible that the object is not yet supported.", key, typeid(C).name());
 
         return (*it).second;
     }
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 6bf59155373cf73d158fce4eb5bda58f7d279e69..8fc88ff79c50751ba7b79662fc9fc430d4ed601d 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -12,11 +12,16 @@
 #ifndef AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
 #define AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
 
-#include <tuple>
+#include <array>
 #include <cassert>
 #include <cstddef>
+#include <string>
+#include <tuple>
 #include <typeinfo>
-#include <array>
+
+#ifdef PYBIND
+#include <fmt/format.h>
+#endif
 
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
@@ -149,8 +154,9 @@ public:
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
     }
 
-    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
-    constexpr typename std::enable_if<(SIZE > 0), const std::type_info&>::type getAttrType(std::size_t i) const {
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value,
+                std::enable_if_t<(SIZE > 0), bool> = true>
+    constexpr const std::type_info& getAttrType(std::size_t i) const {
         if (i == SIZE-1) {
             return typeid(typename std::tuple_element<SIZE-1,std::tuple<T...>>::type);
         }
@@ -159,8 +165,9 @@ public:
         }
     }
 
-    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
-    [[noreturn]] typename std::enable_if<(SIZE == 0), const std::type_info&>::type getAttrType(std::size_t /*i*/) const {
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value,
+                std::enable_if_t<(SIZE == 0), bool> = true>
+    [[noreturn]] const std::type_info& getAttrType(std::size_t /*i*/) const {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
     }
 
@@ -173,6 +180,7 @@ public:
     //////////////////////////////////////
     // Runtime existance check with name
     bool hasAttr(const std::string& name) const override final {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
                 return true;
@@ -182,6 +190,20 @@ public:
         return false;
     }
 
+#ifdef PYBIND
+    bool hasAttrPy(const std::string& name) const override final {
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python, got '{}'.", name);
+        const std::string pascalName = snakeToPascal(name);
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (pascalName == EnumStrings<ATTRS_ENUM>::data[i]) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+#endif
+
     // Runtime type access with name
     std::string getAttrType(const std::string& name) const override final {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
@@ -212,28 +234,40 @@ public:
     static std::set<std::string> staticGetAttrsName() {
         std::set<std::string> attrsName;
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            attrsName.insert(EnumStrings<ATTRS_ENUM>::data[i]);
+            attrsName.insert(pascalToSnake(std::string(EnumStrings<ATTRS_ENUM>::data[i])));
         }
         return attrsName;
     }
 
 
     py::object getAttrPy(const std::string& name) const override {
+        if (name == "__dict__") {
+            return py::none();
+        }
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python, got '{}'.", name);
+        const std::string pascalName = snakeToPascal(name);
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+            if (pascalName == EnumStrings<ATTRS_ENUM>::data[i]) {
                 // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
-                // Normal accessor would not work has we convert the tuple to a py::object which can be anything
+                // Normal accessor would not work as we convert the tuple to a py::object which can be anything
                 return py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
             }
         }
-
-        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name);
+        // IPython probes special attributes (e.g. "_ipython_canary_method_should_not_exist_"),
+        // so unknown names must raise an AttributeError on the Python side rather than abort.
+        throw py::attribute_error(fmt::format("attribute \"{}\" not found.", name));
     }
 
 
     void setAttrPy(const std::string& name, py::object&& value) override final{
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python, got '{}'.", name);
+        const std::string pascalName = snakeToPascal(name);
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+            if (pascalName == EnumStrings<ATTRS_ENUM>::data[i]) {
                 // Cannot update attribute using reference has it would require templating
                 // Use a dirty
                 auto tmpAttr = py::cast(mAttrs);
@@ -242,8 +276,42 @@ public:
                 return;
             }
         }
-        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name);
+        throw py::attribute_error(fmt::format("attribute \"{}\" not found.", name));
+    }
+
+    py::dict dict() const override {
+        py::dict attributes;
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            const std::string snakeName = pascalToSnake(EnumStrings<ATTRS_ENUM>::data[i]);
+                // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
+                // Normal accessor would not work as we convert the tuple to a py::object which can be anything
+            attributes[snakeName.c_str()] = py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
+        }
+        return attributes;
     }
+
+    std::string str() const override {
+        return repr();
+    }
+
+    std::string repr() const override {
+        // Build the representation from the string form of the underlying py::dict
+        return fmt::format("AttrDict({})", static_cast<std::string>(py::str(dict())));
+    }
+
+    std::size_t len() const {
+        return size(EnumStrings<ATTRS_ENUM>::data);
+    }
+
     #endif
 
 private:
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index bca246c94434b280a12d070526ad4ffb2c7fbe7b..c6595360b17ee08eaa82d483987914adc67b60a8 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -10,6 +10,7 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+
 #include "aidge/data/Data.hpp"
 
 namespace py = pybind11;
@@ -17,16 +18,18 @@ namespace Aidge {
 
 void init_Data(py::module& m){
     // TODO : extend with more values !
-    py::enum_<DataType>(m, "DataType")
-    .value("Float64", DataType::Float64)
-    .value("Float32", DataType::Float32)
-    .value("Float16", DataType::Float16)
-    .value("Int8", DataType::Int8)
-    .value("Int32", DataType::Int32)
-    .value("Int64", DataType::Int64)
-    .value("UInt8", DataType::UInt8)
-    .value("UInt32", DataType::UInt32)
-    .value("UInt64", DataType::UInt64)
+    py::enum_<DataType>(m, "dtype")
+    .value("float64", DataType::Float64)
+    .value("float32", DataType::Float32)
+    .value("float16", DataType::Float16)
+    .value("int8", DataType::Int8)
+    .value("int16", DataType::Int16)
+    .value("int32", DataType::Int32)
+    .value("int64", DataType::Int64)
+    .value("uint8", DataType::UInt8)
+    .value("uint16", DataType::UInt16)
+    .value("uint32", DataType::UInt32)
+    .value("uint64", DataType::UInt64)
     ;
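+    // Python-side effect (illustrative): values are now exposed in lowercase,
+    // e.g. dtype.float32 instead of the former DataType.Float32.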
 
     py::class_<Data, std::shared_ptr<Data>>(m,"Data");
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 3c2120565e1637697e5258723b1b366a520fdf80..83bb4afeacdd6de181fd6738edad2229736854c8 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -73,21 +73,33 @@ void init_Tensor(py::module& m){
         (m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
 
     pyClassTensor.def(py::init<>())
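+    // Elementwise arithmetic exposed to Python (illustrative): `a + b`, `a - b`,
+    // `a * b`, `a / b` and `a.sqrt()` dispatch to the Tensor overloads bound below.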
+    .def(py::self + py::self)
+    .def(py::self - py::self)
+    .def(py::self * py::self)
+    .def(py::self / py::self)
+    .def("sqrt", &Tensor::sqrt)
     .def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true)
     .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
     .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
     .def("grad", &Tensor::grad)
     .def("set_grad", &Tensor::setGrad)
     .def("dtype", &Tensor::dataType)
-    .def("init_grad", &Tensor::initGrad)
     .def("size", &Tensor::size)
+    .def("capacity", &Tensor::capacity)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize)
     .def("has_impl", &Tensor::hasImpl)
     .def("get_coord", &Tensor::getCoord)
     .def("get_idx", &Tensor::getIdx)
     .def_static("get_available_backends", &Tensor::getAvailableBackends)
     .def("__str__", [](Tensor& b) {
-        return b.toString();
+        if (b.empty()) {
+            return std::string("{}");
+        } else {
+            return b.toString();
+        }
+    })
+    .def("__repr__", [](Tensor& b) {
+        return fmt::format("Tensor(dims = {}, dtype = {})", b.dims(), std::string(EnumStrings<DataType>::data[static_cast<int>(b.dataType())]));
     })
     .def("__len__", [](Tensor& b) -> size_t{
         return b.size();
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 1000374454020625aada7f2043893b229deec833..2930383817d1555d51b8bddd8eff6402240e905a 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -30,6 +30,12 @@ void init_GraphView(py::module& m) {
           :param path: save location
           :type path: str
           )mydelimiter")
+          .def("in_view", (bool (GraphView::*)(const NodePtr&) const) &GraphView::inView)
+          .def("in_view", (bool (GraphView::*)(const std::string&) const) &GraphView::inView)
+          .def("root_node", &GraphView::rootNode)
+          .def("set_root_node", &GraphView::setRootNode, py::arg("node"))
+          .def("__repr__", &GraphView::repr)
+          .def("__len__", [](const GraphView& g){ return g.getNodes().size(); })
           .def("log_outputs", &GraphView::logOutputs, py::arg("path"))
           .def("get_ordered_inputs", &GraphView::getOrderedInputs)
           .def("get_ordered_outputs", &GraphView::getOrderedOutputs)
@@ -61,13 +67,15 @@ void init_GraphView(py::module& m) {
           :type include_learnable_parameters: bool, optional
           )mydelimiter")
 
-          .def("add", (bool (GraphView::*)(std::shared_ptr<GraphView>)) & GraphView::add,
-               py::arg("other_graph"),
+          .def("add", (bool (GraphView::*)(std::shared_ptr<GraphView>, bool)) & GraphView::add,
+               py::arg("other_graph"), py::arg("include_learnable_parameters") = true,
           R"mydelimiter(
           Include a GraphView to the current GraphView object.
 
           :param other_graph: GraphView to add
           :type other_graph: GraphView
+          :param include_learnable_parameters: include non-data inputs, like weights and biases, default True.
+          :type include_learnable_parameters: bool, optional
           )mydelimiter")
 
           .def("add_child",
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index b22ebdd0f6cdb5bd738cd164b3fc2e9fe36d9987..06c171214d5df261e5df832179a0fa69420aab7d 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -48,6 +48,8 @@ void init_Node(py::module& m) {
     :rtype: str
     )mydelimiter")
 
+    .def("__repr__", &Node::repr)
+
     .def("add_child",
          (void (Node::*)(std::shared_ptr<Node>, const IOIndex_t, IOIndex_t)) &
                  Node::addChild,
@@ -132,11 +134,12 @@ void init_Node(py::module& m) {
     :rtype: int
     )mydelimiter")
 
-    .def("get_nb_data", &Node::nbData,
+    .def("input_category", &Node::inputCategory, py::arg("idx"),
     R"mydelimiter(
-    Number of data inputs.
+    Category of a specific input (Data or Param, optional or not).
+    Data inputs exclude inputs expecting parameters (weights or bias).
 
-    :rtype: int
+    :rtype: InputCategory
     )mydelimiter")
 
     .def("get_nb_outputs", &Node::nbOutputs,
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index c3eeb192a88163be96f973a55e6ef7cc60ec48af..103e7c1e4db6e197a1dac959a25d266e031d3e55 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -9,11 +9,11 @@
  *
  ********************************************************************************/
 
+#include <memory>
+
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Add.hpp"
-#include "aidge/data/Tensor.hpp"
-#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
@@ -22,13 +22,17 @@ namespace Aidge {
 
 void declare_Add(py::module &m) {
   py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
-  .def("get_inputs_name", &Add_Op::getInputsName)
-  .def("get_outputs_name", &Add_Op::getOutputsName);
+    .def(py::init<const IOIndex_t>(), py::arg("nb_inputs"))
+    .def_static("get_inputs_name", &Add_Op::getInputsName)
+    .def_static("get_outputs_name", &Add_Op::getOutputsName);
+
   declare_registrable<Add_Op>(m, "AddOp");
-  m.def("Add", &Add, py::arg("nbIn"), py::arg("name") = "");
+
+  m.def("Add", &Add, py::arg("nb_inputs"), py::arg("name") = "");
 }
 
 void init_Add(py::module &m) {
   declare_Add(m);
 }
+
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 5d72a3507c4926412b48cda42b1c3bcbe10e9460..0587554b722c99d009a248ce963f80cb4fd892ec 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -21,23 +21,34 @@
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/StaticAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
+
   const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D");
-  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
-    m, pyClassName.c_str(),
-    py::multiple_inheritance())
-  .def(py::init<const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &>(),
-        py::arg("kernel_dims"),
-        py::arg("stride_dims"))
-  .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
-  .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
+  const std::string pyStaticAttrClassName("StaticAttributes" + pyClassName);
+//   py::class_<StaticAttributes<AvgPoolingAttr,
+//                                              std::array<DimSize_t, DIM>,
+//                                              std::array<DimSize_t, DIM>>,
+//     std::shared_ptr<StaticAttributes<AvgPoolingAttr,
+//                                              std::array<DimSize_t, DIM>,
+//                                              std::array<DimSize_t, DIM>>>, Attributes>(m, pyStaticAttrClassName.c_str());
+
+  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor>(
+        m, pyClassName.c_str(),
+        py::multiple_inheritance())
+    .def(py::init<const std::array<DimSize_t, DIM> &,
+                  const std::array<DimSize_t, DIM> &>(),
+            py::arg("kernel_dims"),
+            py::arg("stride_dims") = create_array<DimSize_t,DIM>(1))
+    .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
+    .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName);
+
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
+
   m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims) {
@@ -48,11 +59,13 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
-
 }
 
 
 void init_AvgPooling(py::module &m) {
+//   py::enum_<AvgPoolingAttr>(m, "_AvgPoolingAttr")
+    // .value("kernel_dims", AvgPoolingAttr::KernelDims)
+    // .value("stride_dims", AvgPoolingAttr::StrideDims);
   declare_AvgPoolingOp<1>(m);
   declare_AvgPoolingOp<2>(m);
   declare_AvgPoolingOp<3>(m);
@@ -61,4 +74,5 @@ void init_AvgPooling(py::module &m) {
   // m.def("AvgPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
   // (&)[1])>(&AvgPooling));
 }
+
 } // namespace Aidge
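
With this change the AvgPooling operator constructor defaults `stride_dims` to 1 in every dimension; a hedged sketch of the Python side:

```python
import aidge_core

pool_op = aidge_core.AvgPoolingOp2D(kernel_dims=[2, 2])          # stride defaults to [1, 1]
pool_node = aidge_core.AvgPooling2D([2, 2], name="pool0",
                                    stride_dims=[2, 2])          # factory keeps its signature
```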
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 9640141e03bcd811f5ce24c544c5cdbc9fe6b2f3..42e31de2c7c8ba440cd8e479cf9285b398970b42 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -9,9 +9,10 @@
  *
  ********************************************************************************/
 
-#include <pybind11/pybind11.h>
 #include <string>
 
+#include <pybind11/pybind11.h>
+
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
@@ -23,19 +24,21 @@ namespace Aidge {
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
     const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), py::multiple_inheritance())
-    .def(py::init<float, float>(),
-        py::arg("epsilon"),
-        py::arg("momentum"))
-    .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
-    .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
-    .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor>(
+    m, pyClassName.c_str(), py::multiple_inheritance())
+        .def(py::init<float, float>(),
+            py::arg("epsilon"),
+            py::arg("momentum"))
+        .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
+        .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName);
+
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
-    m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nbFeatures"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
+    m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nb_features"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
 
 void init_BatchNorm(py::module &m) {
     declare_BatchNormOp<2>(m);
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 756686c209c33fe03f7bda4bbb53d8c3c71e8b4c..9f02e04a41b20599a6cfe878f53db04c6d5bbe34 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -20,12 +20,16 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Concat(py::module& m) {
-    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, Attributes, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Concat_Op::getInputsName)
-    .def("get_outputs_name", &Concat_Op::getOutputsName)
-    .def("attributes_name", &Concat_Op::staticGetAttrsName);
+    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
+        .def(py::init<const IOIndex_t, const int>(),
+                py::arg("nb_inputs"),
+                py::arg("axis"))
+        .def_static("get_inputs_name", &Concat_Op::getInputsName)
+        .def_static("get_outputs_name", &Concat_Op::getOutputsName);
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
-    m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = "");
+
+    m.def("Concat", &Concat, py::arg("nb_inputs"), py::arg("axis"), py::arg("name") = "");
 }
+
 }  // namespace Aidge
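
Sketch of the renamed Concat arguments (`nbIn` becomes `nb_inputs`) and the new operator constructor:

```python
import aidge_core

cat_node = aidge_core.Concat(nb_inputs=2, axis=1, name="cat0")
cat_op = aidge_core.ConcatOp(nb_inputs=2, axis=1)
```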
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index adb0e108c409032c7e132016f5b92ed9f9233491..61fb37e788021757fa6c3aced9a5f4c30fb60548 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -27,27 +27,27 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
   const std::string pyClassName("ConvOp" + std::to_string(DIM) + "D");
-  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>(
+  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
-  .def(py::init<DimSize_t,
-                DimSize_t,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                bool>(),
-        py::arg("in_channels"),
-        py::arg("out_channels"),
-        py::arg("kernel_dims"),
-        py::arg("stride_dims"),
-        py::arg("dilation_dims"),
-        py::arg("no_bias"))
-    .def("get_inputs_name", &Conv_Op<DIM>::getInputsName)
-    .def("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
-    .def("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
-    ;
-  declare_registrable<Conv_Op<DIM>>(m, pyClassName);
+        .def(py::init([](const std::vector<DimSize_t>& kernel_dims,
+                         const std::vector<DimSize_t> &stride_dims,
+                         const std::vector<DimSize_t> &dilation_dims) {
+            AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+            AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+            AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+            return new Conv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        }), py::arg("kernel_dims"),
+            py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
+        .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
+        .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
+        .def("in_channels", &Conv_Op<DIM>::inChannels)
+        .def("out_channels", &Conv_Op<DIM>::outChannels)
+        ;
 
+  declare_registrable<Conv_Op<DIM>>(m, pyClassName);
 
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
                                                          DimSize_t out_channels,
@@ -74,6 +74,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
 void init_Conv(py::module &m) {
   declare_ConvOp<1>(m);
   declare_ConvOp<2>(m);
-  declare_ConvOp<3>(m);
+//   declare_ConvOp<3>(m);
 }
+
 } // namespace Aidge
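
The Conv operator class no longer takes channel counts: it is built from kernel, stride and dilation dims (stride and dilation default to 1), while `in_channels()`/`out_channels()` are exposed as methods, presumably resolved later from the associated weight tensor. A minimal sketch:

```python
import aidge_core

conv_op = aidge_core.ConvOp2D(kernel_dims=[3, 3])   # stride_dims/dilation_dims default to [1, 1]
print(aidge_core.ConvOp2D.get_inputs_name())        # static helper listing the expected inputs
```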
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 19b3332a84037185afdc87fd90cb9c8fea2e64f8..080df1832bf92a9db9d26e1fa18b652dc70c2a42 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -28,22 +28,19 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
   const std::string pyClassName("ConvDepthWiseOp" + std::to_string(DIM) + "D");
-  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
+  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
-  .def(py::init<const DimSize_t,
+  .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                bool>(),
-        py::arg("nb_channels"),
+                const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("dilation_dims"),
-        py::arg("no_bias"))
-  .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
-  .def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
+        py::arg("dilation_dims"))
+  .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
+  .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
+  .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels);
+
   declare_registrable<ConvDepthWise_Op<DIM>>(m, pyClassName);
   m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
                                                                   const std::vector<DimSize_t>& kernel_dims,
@@ -69,7 +66,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
 void init_ConvDepthWise(py::module &m) {
   declare_ConvDepthWiseOp<1>(m);
   declare_ConvDepthWiseOp<2>(m);
-  declare_ConvDepthWiseOp<3>(m);
+//   declare_ConvDepthWiseOp<3>(m);
 
   // FIXME:
   // m.def("ConvDepthWise1D", static_cast<NodeAPI(*)(const char*, int, int, int const
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index e9bf26b629aa05090c9601103676cbc12ff4c88d..9dcb98a54596f32525d2880dd6e955d4643f6e7c 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -20,9 +20,11 @@ namespace Aidge {
 
 void init_Div(py::module& m) {
     py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Div_Op::getInputsName)
-    .def("get_outputs_name", &Div_Op::getOutputsName);
+        .def(py::init<>())
+        .def_static("get_inputs_name", &Div_Op::getInputsName)
+        .def_static("get_outputs_name", &Div_Op::getOutputsName);
     declare_registrable<Div_Op>(m, "DivOp");
     m.def("Div", &Div, py::arg("name") = "");
 }
+
 }  // namespace Aidge
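
The same pattern is applied to the element-wise operators: a default constructor plus static name helpers. Illustrative sketch:

```python
import aidge_core

div_op = aidge_core.DivOp()                 # direct construction is now possible
div_node = aidge_core.Div(name="div0")      # factory unchanged
print(aidge_core.DivOp.get_inputs_name())   # static, callable without an instance
```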
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index c5fd53f2a665b5b816a3778e6f874cd04956e99e..c248753ca8de46293d49ce4dc614ae258c313256 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -20,9 +20,12 @@ namespace Aidge {
 
 void init_Erf(py::module& m) {
     py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Erf_Op::getInputsName)
-    .def("get_outputs_name", &Erf_Op::getOutputsName);
+        .def(py::init<>())
+        .def_static("get_inputs_name", &Erf_Op::getInputsName)
+        .def_static("get_outputs_name", &Erf_Op::getOutputsName);
+
     declare_registrable<Erf_Op>(m, "ErfOp");
+
     m.def("Erf", &Erf, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index ab1ed9ce20bec01e205cd6478c6a93df9f91a2fb..9e0d61bc3a4d957e98db39577e120da5fe97ebea 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -15,21 +15,36 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
+
+
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, Attributes, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
-  .def("get_inputs_name", &FC_Op::getInputsName)
-  .def("get_outputs_name", &FC_Op::getOutputsName)
-  .def("attributes_name", &FC_Op::staticGetAttrsName);
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
+    .def(py::init<>())
+    .def_static("get_inputs_name", &FC_Op::getInputsName)
+    .def_static("get_outputs_name", &FC_Op::getOutputsName)
+    .def("out_channels", &FC_Op::outChannels)
+    // .def_property_readonly("a", &FC_Op::get_a)
+    // .def_property_readonly("a", [](const FC_Op& self) {
+    //     const AttrDict a = AttrDict(self.get_a());
+    //     return a;
+    // })
+    .def("__repr__", [](FC_Op& b) {
+        return fmt::format("Operator(type='{}')", b.Type);
+    });
+
   declare_registrable<FC_Op>(m, "FCOp");
-  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
+
+  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("no_bias") = false, py::arg("name") = "");
 }
 
 void init_FC(py::module &m) {
   declare_FC(m);
 }
+
 } // namespace Aidge
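
Sketch of the FC changes (`nobias` renamed to `no_bias`, default constructor, `__repr__`); the printed text follows the `Operator(type='...')` format used in the binding:

```python
import aidge_core

fc_node = aidge_core.FC(in_channels=128, out_channels=10, no_bias=False, name="fc0")

fc_op = aidge_core.FCOp()
print(fc_op)   # e.g. Operator(type='FC')
```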
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 8c32acfe2bd7e0118c186be8fa1297ee16fe6f6c..aa831d1cfe92fb720df00bb7d8dd3af7f1c1a668 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -11,6 +11,7 @@
 
 #include <pybind11/pybind11.h>
 #include <string>
+#include <vector>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Gather.hpp"
@@ -20,11 +21,19 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Gather(py::module& m) {
-    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, Attributes, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Gather_Op::getInputsName)
-    .def("get_outputs_name", &Gather_Op::getOutputsName)
-    .def("attributes_name", &Gather_Op::staticGetAttrsName);
+    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
+        .def(py::init<std::int8_t,
+                      const std::vector<int64_t>,
+                      const std::vector<DimSize_t>>(),
+                py::arg("axis"),
+                py::arg("indices"),
+                py::arg("gathered_shape"))
+        .def_static("get_inputs_name", &Gather_Op::getInputsName)
+        .def_static("get_outputs_name", &Gather_Op::getOutputsName);
+
     declare_registrable<Gather_Op>(m, "GatherOp");
-    m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis")= 0, py::arg("name") = "");
+
+    m.def("Gather", &Gather, py::arg("axis") = 0, py::arg("indices") = std::vector<std::int64_t>(), py::arg("gathered_shape") = std::vector<std::size_t>(), py::arg("name") = "");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 897cd359a4b368dc599f37136ade3508b5ec5a76..6af8fef88e411af0a3ecbe5a771bf7af24de411a 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -9,23 +9,34 @@
  *
  ********************************************************************************/
 
+#include <stdio.h>
+
+#include <memory>
+#include <string>
+
+#include <pybind11/functional.h>
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
-#include <pybind11/functional.h>
-#include <stdio.h>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+
 namespace py = pybind11;
 namespace Aidge {
 
 void init_GenericOperator(py::module& m) {
-    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp",
+    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, OperatorTensor>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
-    .def_readonly_static("identity", &GenericOperator_Op::Identity)
-    .def("set_forward_dims", &GenericOperator_Op::setForwardDims, py::arg("computation_function"));
+        .def(py::init<const std::string&, IOIndex_t, IOIndex_t, IOIndex_t>(),
+                py::arg("type"),
+                py::arg("nb_data"),
+                py::arg("nb_param"),
+                py::arg("nb_outputs"))
+        .def_readonly_static("identity", &GenericOperator_Op::Identity)
+        .def("set_forward_dims", &GenericOperator_Op::setForwardDims, py::arg("computation_function"));
 
     // &GenericOperator
     m.def("GenericOperator",
@@ -44,10 +55,11 @@ void init_GenericOperator(py::module& m) {
             );
             if (kwargs){
                 std::shared_ptr<GenericOperator_Op> gop = std::static_pointer_cast<GenericOperator_Op>(genericNode->getOperator());
+                std::shared_ptr<DynamicAttributes> attr = std::dynamic_pointer_cast<DynamicAttributes>(gop->attributes());
                 for (auto item : kwargs) {
                     std::string key = py::cast<std::string>(item.first);
                     py::object value = py::reinterpret_borrow<py::object>(item.second);
-                    gop->setAttrPy(key, std::move(value));
+                    attr->setAttrPy(key, std::move(value));
                 }
             }
             return genericNode;
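
Keyword arguments passed to the GenericOperator factory are now written into the operator's DynamicAttributes object instead of being set on the operator itself. A hedged sketch, assuming the factory keeps its type/nb_data/nb_param/nb_outputs/name parameters and that `Node.get_operator()` is available as in the rest of the API:

```python
import aidge_core

node = aidge_core.GenericOperator("MyCustomOp",
                                  nb_data=1, nb_param=0, nb_outputs=1,
                                  name="custom0",
                                  scale=0.5)   # extra kwargs become attributes
print(node.get_operator().attr)                # DynamicAttributes via Operator.attr
```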
diff --git a/python_binding/operator/pybind_GlobalAveragePooling.cpp b/python_binding/operator/pybind_GlobalAveragePooling.cpp
index 08c8ad6f9c6b742eee9e9b0ad1ac20217d152bda..d4d2a921addaef676913cee2a16991ad36686767 100644
--- a/python_binding/operator/pybind_GlobalAveragePooling.cpp
+++ b/python_binding/operator/pybind_GlobalAveragePooling.cpp
@@ -23,9 +23,13 @@ void init_GlobalAveragePooling(py::module &m) {
   py::class_<GlobalAveragePooling_Op, std::shared_ptr<GlobalAveragePooling_Op>,
              OperatorTensor>(m, pyClassName.c_str(),
                              py::multiple_inheritance())
-      .def("get_inputs_name", &GlobalAveragePooling_Op::getInputsName)
-      .def("get_outputs_name", &GlobalAveragePooling_Op::getOutputsName);
+      .def(py::init<>())
+      .def_static("get_inputs_name", &GlobalAveragePooling_Op::getInputsName)
+      .def_static("get_outputs_name", &GlobalAveragePooling_Op::getOutputsName);
+
   declare_registrable<GlobalAveragePooling_Op>(m, pyClassName);
+
   m.def("globalaveragepooling", &GlobalAveragePooling, py::arg("name") = "");
 }
+
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Identity.cpp b/python_binding/operator/pybind_Identity.cpp
index 4538b72fcb012a35ca0ebf3a15449a4b5cfff7a8..560f2889f20233ef928557aa230e6dab7f0a5d2b 100644
--- a/python_binding/operator/pybind_Identity.cpp
+++ b/python_binding/operator/pybind_Identity.cpp
@@ -19,10 +19,12 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Identity(py::module& m) {
-    py::class_<Identity_Op, std::shared_ptr<Identity_Op>, Operator>(m, "IdentityOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Identity_Op::getInputsName)
-    .def("get_outputs_name", &Identity_Op::getOutputsName);
+    py::class_<Identity_Op, std::shared_ptr<Identity_Op>, OperatorTensor>(m, "IdentityOp", py::multiple_inheritance())
+        .def(py::init<>())
+        .def_static("get_inputs_name", &Identity_Op::getInputsName)
+        .def_static("get_outputs_name", &Identity_Op::getOutputsName);
 
     m.def("Identity", &Identity, py::arg("name") = "");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 9ad47e7a391698ae9b30d35d94f05e8b80138590..f46106fb3fb168631c9681d90bda857183c9bc04 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -19,11 +19,12 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Attributes, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
-    .def("get_inputs_name", &LeakyReLU_Op::getInputsName)
-    .def("get_outputs_name", &LeakyReLU_Op::getOutputsName)
-    .def("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
+        .def(py::init<float>(), py::arg("negative_slope"))
+        .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
+        .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName);
     declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Ln.cpp b/python_binding/operator/pybind_Ln.cpp
new file mode 100755
index 0000000000000000000000000000000000000000..0be710be1dfe1a5a83ceaf085094e8ded3f07ffd
--- /dev/null
+++ b/python_binding/operator/pybind_Ln.cpp
@@ -0,0 +1,29 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Ln.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Ln(py::module& m) {
+    py::class_<Ln_Op, std::shared_ptr<Ln_Op>, OperatorTensor>(m, "LnOp", py::multiple_inheritance())
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Ln_Op::getInputsName)
+    .def_static("get_outputs_name", &Ln_Op::getOutputsName);
+
+    m.def("Ln", &Ln, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index 73bfac04a78ec9b972ec984466dbae582b2c03dc..09e11f89ea579b5a3aa75f177958d981c53f1dce 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -22,8 +22,9 @@ namespace Aidge {
 
 void init_MatMul(py::module &m) {
   py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
-  .def("get_inputs_name", &MatMul_Op::getInputsName)
-  .def("get_outputs_name", &MatMul_Op::getOutputsName);
+    .def(py::init<>())
+    .def_static("get_inputs_name", &MatMul_Op::getInputsName)
+    .def_static("get_outputs_name", &MatMul_Op::getOutputsName);
   declare_registrable<MatMul_Op>(m, "MatMulOp");
   m.def("MatMul", &MatMul, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 91fa0489d8bedd16dd33424e33d7e15eea3e3ecb..2a850cd7bfe5cca21ea1ca54b5e9ad86b880bcc2 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -27,7 +27,7 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
   const std::string pyClassName("MaxPoolingOp" + std::to_string(DIM) + "D");
-  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
@@ -36,9 +36,8 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
         py::arg("ceil_mode"))
-  .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
-  .def("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);
+  .def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
+  .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
   declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index 20cd3f156996c98bb64502a90ab98535f87cc2a3..d021a79c5ff4e337bebf424465458ddabf056a56 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -47,6 +47,22 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("no_bias")= false);
+    m.def(("PaddedConvOp" + std::to_string(DIM) + "D").c_str(), [](
+                                                         const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims)
+    {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+        return PaddedConv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+    }, py::arg("kernel_dims"),
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
 }
 
 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
@@ -71,6 +87,22 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("no_bias") = false);
+  m.def(("PaddedConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(), [](
+                                                         const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims)
+    {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+        return PaddedConvDepthWise_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+    }, py::arg("kernel_dims"),
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
 
 }
 
@@ -89,7 +121,18 @@ template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0));
+  m.def(("PaddedAvgPoolingOp" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &padding_dims)
+    {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
 
+        return PaddedAvgPooling_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()));
+    }, py::arg("kernel_dims"),
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0));
 }
 
 template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
@@ -109,6 +152,20 @@ template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("ceil_mode") = false);
+  m.def(("PaddedMaxPoolingOp" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         bool ceil_mode)
+    {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
+
+        return PaddedMaxPooling_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), ceil_mode);
+    }, py::arg("kernel_dims"),
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
+       py::arg("ceil_mode") = false);
 
 }
 
@@ -118,21 +175,23 @@ void declare_LSTMOp(py::module &m) {
        py::arg("seq_length"),
        py::arg("nobias") = false,
        py::arg("name") = "");
+  m.def("LSTMOp", &LSTM_Op,
+       py::arg("seq_length"));
 }
 
 void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedConvOp<1>(m);
   declare_PaddedConvOp<2>(m);
-  declare_PaddedConvOp<3>(m);
+//   declare_PaddedConvOp<3>(m);
   declare_PaddedConvDepthWiseOp<1>(m);
   declare_PaddedConvDepthWiseOp<2>(m);
-  declare_PaddedConvDepthWiseOp<3>(m);
-  declare_PaddedAvgPoolingOp<1>(m);
+//   declare_PaddedConvDepthWiseOp<3>(m);
+//   declare_PaddedAvgPoolingOp<1>(m);
   declare_PaddedAvgPoolingOp<2>(m);
-  declare_PaddedAvgPoolingOp<3>(m);
-  declare_PaddedMaxPoolingOp<1>(m);
+//   declare_PaddedAvgPoolingOp<3>(m);
+//   declare_PaddedMaxPoolingOp<1>(m);
   declare_PaddedMaxPoolingOp<2>(m);
-  declare_PaddedMaxPoolingOp<3>(m);
+//   declare_PaddedMaxPoolingOp<3>(m);
   declare_LSTMOp(m);
 
   py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperator_Op", py::multiple_inheritance())
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 47c84c0e52f605a5466a63a5a5d0851fecedd2f8..1658b0d959c0882d53e078f6d68b4474b34c739e 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -20,8 +20,9 @@ namespace Aidge {
 
 void init_Mul(py::module& m) {
     py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Mul_Op::getInputsName)
-    .def("get_outputs_name", &Mul_Op::getOutputsName);
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Mul_Op::getInputsName)
+    .def_static("get_outputs_name", &Mul_Op::getOutputsName);
     declare_registrable<Mul_Op>(m, "MulOp");
     m.def("Mul", &Mul, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index e00f70413614a96919c2a068303b3fbc3f6eca8d..2b2f30f14931fd041bfb4ec1a712e5c9419fdf22 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -24,7 +24,18 @@
 namespace py = pybind11;
 namespace Aidge {
 void init_Operator(py::module& m){
+    py::enum_<OperatorType>(m, "OperatorType")
+        .value("Data", OperatorType::Data)
+        .value("Tensor", OperatorType::Tensor);
+
+    py::enum_<InputCategory>(m, "InputCategory")
+        .value("Data", InputCategory::Data)
+        .value("Param", InputCategory::Param)
+        .value("OptionalData", InputCategory::OptionalData)
+        .value("OptionalParam", InputCategory::OptionalParam);
+
     py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
+    .def("__repr__", &Operator::repr)
     .def("backend", &Operator::backend)
     .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput), py::arg("outputIdx"), py::arg("data"))
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
@@ -32,18 +43,25 @@ void init_Operator(py::module& m){
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
     .def("get_raw_input", &Operator::getRawInput, py::arg("inputIdx"))
     .def("nb_inputs", &Operator::nbInputs)
-    .def("nb_data", &Operator::nbData)
-    .def("nb_param", &Operator::nbParam)
     .def("nb_outputs", &Operator::nbOutputs)
+    .def("input_category", &Operator::inputCategory, py::arg("idx"),
+    R"mydelimiter(
+    Category of a specific input (Data or Param, optional or not).
+    Data inputs exclude inputs that expect parameters (weights or biases).
+
+    :rtype: InputCategory
+    )mydelimiter")
     .def("associate_input", &Operator::associateInput, py::arg("inputIdx"), py::arg("data"))
     .def("set_datatype", &Operator::setDataType, py::arg("dataType"))
     .def("set_backend", &Operator::setBackend, py::arg("name"), py::arg("device") = 0)
     .def("forward", &Operator::forward)
     // py::keep_alive forbids Python from garbage collecting the implementation lambda as long as the Operator is not deleted!
     .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>())
+    .def("type", &Operator::type)
     .def("get_impl", &Operator::getImpl)
     .def("get_hook", &Operator::getHook)
     .def("add_hook", &Operator::addHook)
+    .def_property_readonly("attr", &Operator::attributes)
     ;
 }
 }
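
Taken together, the Operator bindings replace `nb_data`/`nb_param` with per-index input categories and expose the attributes object as a read-only `attr` property. A short sketch:

```python
import aidge_core

def summarize(op: aidge_core.Operator) -> None:
    print(op)            # Operator.__repr__
    print(op.type())     # operator type string
    print(op.attr)       # attributes object (read-only property)
    for idx in range(op.nb_inputs()):
        print(idx, op.input_category(idx))
```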
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 1cd9f074fe5241be11da0ea7d0d1ed5a1c5869c2..3df203ed52967e3dbc393769276015a7fe0e016f 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -26,7 +26,7 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
   const std::string pyClassName("PadOp" + std::to_string(DIM) + "D");
-  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
+  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
@@ -35,9 +35,8 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("beginEndTuples"),
         py::arg("borderType") = PadBorderType::Constant,
         py::arg("borderValue") = 0.0)
-    .def("get_inputs_name", &Pad_Op<DIM>::getInputsName)
-    .def("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
-    .def("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
+    .def_static("get_inputs_name", &Pad_Op<DIM>::getInputsName)
+    .def_static("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
     ;
   declare_registrable<Pad_Op<DIM>>(m, pyClassName);
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
@@ -63,6 +62,6 @@ void init_Pad(py::module &m) {
     .export_values();
   declare_PadOp<1>(m);
   declare_PadOp<2>(m);
-  declare_PadOp<3>(m);
+  //declare_PadOp<3>(m);
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index baae552270a4776d292047140e213dbe1566d35e..0c3b3f38803735d2df632496382e86a0c9f2735d 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -19,9 +19,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Pop(py::module& m) {
-    py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor, Attributes>(m, "PopOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Pop_Op::getInputsName)
-    .def("get_outputs_name", &Pop_Op::getOutputsName);
+    py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor>(m, "PopOp", py::multiple_inheritance())
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Pop_Op::getInputsName)
+    .def_static("get_outputs_name", &Pop_Op::getOutputsName);
 
     m.def("Pop", &Pop, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index 9e9ef772cadddb1c7928060b503c388b094ed9f4..e5d67542cd1acc5b2982081e4cf3a91948542147 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -20,8 +20,9 @@ namespace Aidge {
 
 void init_Pow(py::module& m) {
     py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Pow_Op::getInputsName)
-    .def("get_outputs_name", &Pow_Op::getOutputsName);
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Pow_Op::getInputsName)
+    .def_static("get_outputs_name", &Pow_Op::getOutputsName);
     declare_registrable<Pow_Op>(m, "PowOp");
 
     m.def("Pow", &Pow, py::arg("name") = "");
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index eb74515915c252d50a2522cae6d6f4c6832ab3ef..30279dc477a0badbd5dc361ef7b5d071fa7b8cbc 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -31,16 +31,25 @@ void declare_Producer(py::module &m) {
 
 
 void init_Producer(py::module &m) {
-    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, Attributes, OperatorTensor>(
+    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, OperatorTensor>(
         m,
         "ProducerOp",
         py::multiple_inheritance())
-    .def("dims", &Producer_Op::dims)
-    .def("get_inputs_name", &Producer_Op::getInputsName)
-    .def("get_outputs_name", &Producer_Op::getOutputsName)
-    .def("attributes_name", &Producer_Op::staticGetAttrsName);
-    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&, bool)>(&Producer), py::arg("tensor"), py::arg("name") = "", py::arg("constant") = false);
+        .def(py::init<const std::shared_ptr<Tensor>, bool>(), py::arg("tensor"), py::arg("constant"))
+        .def("dims", &Producer_Op::dims)
+        .def_static("get_inputs_name", &Producer_Op::getInputsName)
+        .def_static("get_outputs_name", &Producer_Op::getOutputsName);
+
+    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(
+                                        const std::shared_ptr<Tensor>,
+                                        const std::string&,
+                                        bool)>(&Producer),
+                      py::arg("tensor"),
+                      py::arg("name") = "",
+                      py::arg("constant") = false);
+
     declare_registrable<Producer_Op>(m, "ProducerOp");
+
     declare_Producer<1>(m);
     declare_Producer<2>(m);
     declare_Producer<3>(m);
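
Sketch of the Producer bindings after the change: the operator class is constructible from a tensor, and the Node factory keeps its tensor/name/constant signature:

```python
import aidge_core

def make_weight(tensor: aidge_core.Tensor) -> aidge_core.Node:
    # Wrap an existing Tensor into a constant Producer node.
    return aidge_core.Producer(tensor, name="weights0", constant=True)
```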
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index 57601e25607a40c44c400fe75965d83050a146ed..d611523f15a7007b0e9ab9cce323ed9a57d8ecdf 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -20,8 +20,9 @@ namespace Aidge {
 
 void init_ReLU(py::module& m) {
     py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance())
-    .def("get_inputs_name", &ReLU_Op::getInputsName)
-    .def("get_outputs_name", &ReLU_Op::getOutputsName);
+    .def(py::init<>())
+    .def_static("get_inputs_name", &ReLU_Op::getInputsName)
+    .def_static("get_outputs_name", &ReLU_Op::getOutputsName);
     declare_registrable<ReLU_Op>(m, "ReLUOp");
 
     m.def("ReLU", &ReLU, py::arg("name") = "");
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 599a648a3f2733acd49bbbc293cd30734e8ea2ff..3023c077e2f3695902ca76dfa21831749f0ca82e 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -26,11 +26,11 @@ namespace Aidge {
 
 void declare_ReduceMeanOp(py::module &m) {
   const std::string pyClassName("ReduceMeanOp");
-  py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, Attributes, OperatorTensor>(
+  py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, OperatorTensor>(
     m, pyClassName.c_str(), py::multiple_inheritance())
-    .def("get_inputs_name", &ReduceMean_Op::getInputsName)
-    .def("get_outputs_name", &ReduceMean_Op::getOutputsName)
-    .def("attributes_name", &ReduceMean_Op::staticGetAttrsName)
+    .def(py::init<std::vector<std::int32_t>, DimSize_t>(), py::arg("axes"), py::arg("keep_dims"))
+    .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
+    .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
     ;
   declare_registrable<ReduceMean_Op>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index 0e336db28ddba4629e61d30e026befe4240c40b6..89d93134ac2f590bcb067aa6936081c16fc1e2a3 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -19,10 +19,11 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Reshape(py::module& m) {
-    py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, Attributes, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Reshape_Op::getInputsName)
-    .def("get_outputs_name", &Reshape_Op::getOutputsName);
+    py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
+        .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
+        .def_static("get_inputs_name", &Reshape_Op::getInputsName)
+        .def_static("get_outputs_name", &Reshape_Op::getOutputsName);
     declare_registrable<Reshape_Op>(m, "ReshapeOp");
-    m.def("Reshape", &Reshape, py::arg("shape"), py::arg("name") = "");
+    m.def("Reshape", &Reshape, py::arg("shape") = std::vector<std::int64_t>(), py::arg("allowzero") = false, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a925af8cf357dabc09f4e8e3c39af9519b4ed550
--- /dev/null
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -0,0 +1,29 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Resize.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Resize(py::module& m) {
+    py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
+        .def_static("get_inputs_name", &Resize_Op::getInputsName)
+        .def_static("get_outputs_name", &Resize_Op::getOutputsName);
+
+    declare_registrable<Resize_Op>(m, "ResizeOp");
+
+    m.def("Resize", &Resize, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
index f091ea70f9b5e9927e535bd527cd84cf081d9823..31e6c0b08194fbb8b6ec2270e8127a2f838ba78f 100644
--- a/python_binding/operator/pybind_Scaling.cpp
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -19,12 +19,12 @@ namespace py = pybind11;
 
 namespace Aidge {
 
-void init_Scaling(py::module& m) 
+void init_Scaling(py::module& m)
 {
-    py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, Attributes, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Scaling_Op::getInputsName)
-    .def("get_outputs_name", &Scaling_Op::getOutputsName)
-    .def("attributes_name", &Scaling_Op::staticGetAttrsName);
+    py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance())
+        .def(py::init<float, size_t, bool>(), py::arg("scaling_factor"), py::arg("nb_bits"), py::arg("is_output_unsigned"))
+        .def_static("get_inputs_name", &Scaling_Op::getInputsName)
+        .def_static("get_outputs_name", &Scaling_Op::getOutputsName);
     declare_registrable<Scaling_Op>(m, "ScalingOp");
     m.def("Scaling", &Scaling, py::arg("scaling_factor") = 1.0f, py::arg("nb_bits") = 8, py::arg("is_output_unsigned") = true, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4e1d4203e48f714746587c9f209b4d28bfecb439
--- /dev/null
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -0,0 +1,36 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstdint>  // std::int64_t
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Shape.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Shape(py::module& m) {
+    py::class_<Shape_Op, std::shared_ptr<Shape_Op>, OperatorTensor>(m, "ShapeOp", py::multiple_inheritance())
+        .def(py::init<const std::int64_t,
+                      const std::int64_t>(),
+                py::arg("start"),
+                py::arg("end"))
+        .def_static("get_inputs_name", &Shape_Op::getInputsName)
+        .def_static("get_outputs_name", &Shape_Op::getOutputsName);
+
+    declare_registrable<Shape_Op>(m, "ShapeOp");
+
+    m.def("Shape", &Shape, py::arg("start") = 0, py::arg("end") = -1, py::arg("name") = "");
+}
+
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp
index 8ffa8581593af9dc994baa566475317bcd96d475..0ba94c73fcd1fb435194f8485567771a147ec616 100644
--- a/python_binding/operator/pybind_Sigmoid.cpp
+++ b/python_binding/operator/pybind_Sigmoid.cpp
@@ -20,8 +20,9 @@ namespace Aidge {
 
 void init_Sigmoid(py::module& m) {
     py::class_<Sigmoid_Op, std::shared_ptr<Sigmoid_Op>, OperatorTensor>(m, "SigmoidOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Sigmoid_Op::getInputsName)
-    .def("get_outputs_name", &Sigmoid_Op::getOutputsName);
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Sigmoid_Op::getInputsName)
+    .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName);
 
     m.def("Sigmoid", &Sigmoid, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index 558fc98c172ea1a264ee8ac3ebbc70e09eba826d..b87cc8da4874c666de21a6e798a66e3c7fad9c10 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -10,6 +10,7 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+#include <vector>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Slice.hpp"
@@ -20,9 +21,24 @@ namespace Aidge {
 
 void init_Slice(py::module& m) {
     py::class_<Slice_Op, std::shared_ptr<Slice_Op>, OperatorTensor>(m, "SliceOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Slice_Op::getInputsName)
-    .def("get_outputs_name", &Slice_Op::getOutputsName);
+    .def(py::init<const std::vector<std::int64_t>&,
+                  const std::vector<std::int64_t>&,
+                  const std::vector<std::int8_t>&,
+                  const std::vector<std::int64_t>&>(),
+                  py::arg("starts"),
+                  py::arg("ends"),
+                  py::arg("axes"),
+                  py::arg("steps"))
+    .def_static("get_inputs_name", &Slice_Op::getInputsName)
+    .def_static("get_outputs_name", &Slice_Op::getOutputsName);
     declare_registrable<Slice_Op>(m, "SliceOp");
-    m.def("Slice", &Slice, py::arg("starts"), py::arg("ends"), py::arg("axes"), py::arg("name") = "");
+
+    m.def("Slice",
+          &Slice,
+          py::arg("starts") = std::vector<std::int64_t>(),
+          py::arg("ends") = std::vector<std::int64_t>(),
+          py::arg("axes") = std::vector<std::int8_t>(),
+          py::arg("steps") = std::vector<std::int64_t>(),
+          py::arg("name") = "");
 }
 }  // namespace Aidge
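
The Slice factory now defaults every argument and gains a `steps` parameter; an illustrative call:

```python
import aidge_core

slice_node = aidge_core.Slice(starts=[0], ends=[2], axes=[0], steps=[1], name="slice0")
```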
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 837f3ed2b92aeab5739d07a04b071040806d8a1f..f27e469d84c463ec48d1f9484807a8c93b7a5f4d 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -20,10 +20,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Softmax(py::module& m) {
-    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Attributes, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Softmax_Op::getInputsName)
-    .def("get_outputs_name", &Softmax_Op::getOutputsName)
-    .def("attributes_name", &Softmax_Op::staticGetAttrsName);
+    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
+        .def(py::init<std::size_t>(), py::arg("axis"))
+        .def_static("get_inputs_name", &Softmax_Op::getInputsName)
+        .def_static("get_outputs_name", &Softmax_Op::getOutputsName);
     declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6efc123864f21bf8ea02008b29fe59f31685f17c
--- /dev/null
+++ b/python_binding/operator/pybind_Split.cpp
@@ -0,0 +1,38 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Split.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Split(py::module& m) {
+    py::class_<Split_Op, std::shared_ptr<Split_Op>, Attributes, OperatorTensor>(m, "SplitOp", py::multiple_inheritance())
+        .def(py::init<DimSize_t, std::int8_t, std::vector<DimSize_t>&>(),
+                py::arg("nb_outputs"),
+                py::arg("axis"),
+                py::arg("split"))
+        .def_static("get_inputs_name", &Split_Op::getInputsName)
+        .def_static("get_outputs_name", &Split_Op::getOutputsName)
+        .def_static("attributes_name", &Split_Op::staticGetAttrsName);
+
+    declare_registrable<Split_Op>(m, "SplitOp");
+
+    m.def("Split", &Split, py::arg("nb_outputs"), py::arg("axis") = 0, py::arg("split") = std::vector<DimSize_t>(), py::arg("name") = "");
+}
+
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index 7065b828eb18d77edce49726dd903045c7952977..9425eba06574c73339e8e4628ffded3449a8b4ab 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -20,8 +20,9 @@ namespace Aidge {
 
 void init_Sqrt(py::module& m) {
     py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Sqrt_Op::getInputsName)
-    .def("get_outputs_name", &Sqrt_Op::getOutputsName);
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Sqrt_Op::getInputsName)
+    .def_static("get_outputs_name", &Sqrt_Op::getOutputsName);
     declare_registrable<Sqrt_Op>(m, "SqrtOp");
     m.def("Sqrt", &Sqrt, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index e031040dfe8373c07d1524cbe4f75f3744e2f312..752490a72bc35ec8a0ab08dd8d51a31c887b4dc6 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -20,8 +20,9 @@ namespace Aidge {
 
 void init_Sub(py::module& m) {
     py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Sub_Op::getInputsName)
-    .def("get_outputs_name", &Sub_Op::getOutputsName);
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Sub_Op::getInputsName)
+    .def_static("get_outputs_name", &Sub_Op::getOutputsName);
     declare_registrable<Sub_Op>(m, "SubOp");
     m.def("Sub", &Sub, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Tanh.cpp b/python_binding/operator/pybind_Tanh.cpp
index a5c2f9dd5f2eab17e296f82788726210f976bd0d..74cde8dd3831c8d29ca87e2314afc27276ec025f 100644
--- a/python_binding/operator/pybind_Tanh.cpp
+++ b/python_binding/operator/pybind_Tanh.cpp
@@ -20,8 +20,9 @@ namespace Aidge {
 
 void init_Tanh(py::module& m) {
     py::class_<Tanh_Op, std::shared_ptr<Tanh_Op>, OperatorTensor>(m, "TanhOp", py::multiple_inheritance())
-    .def("get_inputs_name", &Tanh_Op::getInputsName)
-    .def("get_outputs_name", &Tanh_Op::getOutputsName);
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Tanh_Op::getInputsName)
+    .def_static("get_outputs_name", &Tanh_Op::getOutputsName);
 
     m.def("Tanh", &Tanh, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index 63b22608d1737f9a59caffd4517fc0e9cfc4dd91..c0c3ad617bef3eda3e283667944ac423cd10a622 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -27,11 +27,11 @@ namespace Aidge {
 
 void declare_Transpose(py::module &m) {
   const std::string pyClassName("TransposeOp");
-  py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, Attributes, OperatorTensor>(
+  py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, OperatorTensor>(
     m, "TransposeOp", py::multiple_inheritance())
-  .def("get_inputs_name", &Transpose_Op::getInputsName)
-  .def("get_outputs_name", &Transpose_Op::getOutputsName)
-  .def("attributes_name", &Transpose_Op::staticGetAttrsName);
+    .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order"))
+    .def_static("get_inputs_name", &Transpose_Op::getInputsName)
+    .def_static("get_outputs_name", &Transpose_Op::getOutputsName);
   declare_registrable<Transpose_Op>(m, pyClassName);
   m.def("Transpose", &Transpose, py::arg("output_dims_order"), py::arg("name") = "");
 }
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 7b38c2d72d5f4b2eed8d8bbf9f41f47144b51060..9443ed55eaaf6dc04ad9ee4612ed9d491aed54ae 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -51,10 +51,13 @@ void init_Pow(py::module&);
 void init_ReduceMean(py::module&);
 void init_ReLU(py::module&);
 void init_Reshape(py::module&);
+void init_Resize(py::module&);
 void init_Scaling(py::module&);
+void init_Shape(py::module&);
 void init_Sigmoid(py::module&);
 void init_Slice(py::module&);
 void init_Softmax(py::module&);
+void init_Split(py::module&);
 void init_Sqrt(py::module&);
 void init_Sub(py::module&);
 void init_Tanh(py::module&);
@@ -119,10 +122,13 @@ void init_Aidge(py::module& m) {
     init_ReduceMean(m);
     init_ReLU(m);
     init_Reshape(m);
+    init_Resize(m);
     init_Scaling(m);
+    init_Shape(m);
     init_Sigmoid(m);
     init_Slice(m);
     init_Softmax(m);
+    init_Split(m);
     init_Sqrt(m);
     init_Sub(m);
     init_Tanh(m);
diff --git a/python_binding/recipes/pybind_GraphViewHelper.cpp b/python_binding/recipes/pybind_GraphViewHelper.cpp
index e65b790d3eba6072e3e1b112c7d841959d4a5672..ac56fb4b43eb5b0a737157ec9e64c6771a692816 100644
--- a/python_binding/recipes/pybind_GraphViewHelper.cpp
+++ b/python_binding/recipes/pybind_GraphViewHelper.cpp
@@ -24,6 +24,5 @@ namespace py = pybind11;
 namespace Aidge {
 void init_GraphViewHelper(py::module &m) {
     m.def("producers", &producers, py::arg("graphview"));
-    m.def("compile_gradient", &compile_gradient, py::arg("graphview"));
 }
 } // namespace Aidge
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index b85d1c41ed90a5774a9b24062dfda4186c2294d5..c0392287a756b6272a59275b6d12b3a70c1c9420 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -39,11 +39,35 @@ void init_Recipes(py::module &m)
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
 
-  m.def("remove_dropout",static_cast<void(*)(std::shared_ptr<GraphView>)>(removeDropout), py::arg("graph_view"), R"mydelimiter(
-    Recipe to remove a dropout operator.
+  m.def("remove_node", removeNode, py::arg("graph_view"), py::arg("type"), py::arg("incProducers") = false, R"mydelimiter(
+    Recipe to remove operators of a given type.
 
     :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
+    :param type: Type of the operators to remove
+    :type type: str
+    :param incProducers: If true, also remove the attached Producers
+    :type incProducers: bool
+    :return: Number of removed operators.
+    :rtype: int
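+
+    A minimal usage sketch from Python (assuming ``graph`` is an existing :py:class:`aidge_core.GraphView`; the operator type used here is only an example):
+
+    .. code-block:: python
+
+        import aidge_core
+        # remove every Cast operator, together with its attached Producers
+        nb_removed = aidge_core.remove_node(graph, "Cast", incProducers=True)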
+    )mydelimiter");
+
+  m.def("remove_dropout", removeDropout, py::arg("graph_view"), R"mydelimiter(
+    Recipe to remove dropout operators.
+
+    :param graph_view: Graph view on which we want to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    :return: Number of removed operators.
+    :rtype: int
+    )mydelimiter");
+
+  m.def("remove_identity", removeIdentity, py::arg("graph_view"), R"mydelimiter(
+    Recipe to remove identity operators.
+
+    :param graph_view: Graph view on which we want to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    :return: Number of removed operators.
+    :rtype: int
     )mydelimiter");
 
   m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
@@ -84,7 +108,27 @@ void init_Recipes(py::module &m)
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
 
-  m.def("expand_metaops", static_cast<void(*)(std::shared_ptr<GraphView>, bool)>(expandMetaOps), py::arg("graph_view"), py::arg("recursive") = false);
+  m.def("expand_metaops", static_cast<void(*)(std::shared_ptr<GraphView>, bool)>(expandMetaOps), py::arg("graph_view"), py::arg("recursive") = false, R"mydelimiter(
+    Flatten the graph by replacing the meta operators with their micro-graphs.
+
+    :param graph_view: Graph view on which we want to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    :param recursive: If true, recursively replace meta operators until there are no more meta operators in the graph.
+    :type recursive: bool
+    )mydelimiter");
+
+  m.def("fuse_to_metaops", fuseToMetaOps, py::arg("graph_view"), py::arg("query"), py::arg("type") = "", R"mydelimiter(
+    Fuse each sub-graph matching a query into a Meta Operator.
+
+    :param graph_view: Graph view on which we want to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    :param query: Sub-graph matching query
+    :type query: str
+    :param type: Type name of the resulting meta operators
+    :type type: str, optional
+    :return: Number of sub-graphs actually fused into a Meta Operator.
+    :rtype: int
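+
+    A minimal usage sketch from Python (assuming ``graph`` is an existing :py:class:`aidge_core.GraphView` in which Conv nodes are directly followed by ReLU nodes; the query string follows the graph matching grammar):
+
+    .. code-block:: python
+
+        import aidge_core
+        nb_fused = aidge_core.fuse_to_metaops(graph, "Conv->ReLU", "ConvReLU")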
+    )mydelimiter");
 }
 
 } // namespace Aidge
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index b16134da324383a4542965393257288c49dceed0..ac35ce0a62408a69637a4160c9a008aba9dceb66 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -28,13 +28,14 @@ void init_Scheduler(py::module& m){
     .def("resetScheduling", &Scheduler::resetScheduling)
     .def("generate_scheduling", &Scheduler::generateScheduling)
     .def("get_static_scheduling", &Scheduler::getStaticScheduling, py::arg("step") = 0)
+    .def("graph_view", &Scheduler::graphView)
     .def("generate_memory", &Scheduler::generateMemory, py::arg("inc_producers") = false, py::arg("wrap_around_buffer") = false)
     ;
 
     py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>, Scheduler>(m, "SequentialScheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("data")=std::vector<Tensor>())
-    .def("backward", &SequentialScheduler::backward, py::arg("instanciate_grad")=true)
+    .def("backward", &SequentialScheduler::backward)
     ;
 
     py::class_<ParallelScheduler, std::shared_ptr<ParallelScheduler>, Scheduler>(m, "ParallelScheduler")
diff --git a/python_binding/utils/pybind_Attributes.cpp b/python_binding/utils/pybind_Attributes.cpp
index bfce891176822a3b1c07b1ded0c46c9c94a43c0a..7f5dde63c4835eb694d5fd2d571d7c9c1fd5a9ac 100644
--- a/python_binding/utils/pybind_Attributes.cpp
+++ b/python_binding/utils/pybind_Attributes.cpp
@@ -1,31 +1,47 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
+
 DynamicAttributes test_DynamicAttributes_binding() {
     DynamicAttributes attrs;
-    attrs.addAttr<int>("a", 42);
-    attrs.addAttr<std::string>("b", "test");
-    attrs.addAttr<std::vector<bool>>("c", {true, false, true});
+    attrs.addAttr<int>("A", 42);
+    attrs.addAttr<std::string>("B", "test");
+    attrs.addAttr<std::vector<bool>>("C", {true, false, true});
     return attrs;
 }
 
 double test_DynamicAttributes_binding_check(DynamicAttributes& attrs) {
-    return attrs.getAttr<double>("d");
+    return attrs.getAttr<double>("D");
 }
 
 void init_Attributes(py::module& m){
     py::class_<Attributes, std::shared_ptr<Attributes>>(m, "Attributes")
-    .def("has_attr", &Attributes::hasAttr, py::arg("name"))
-    .def("get_attr_type", &Attributes::getAttrType, py::arg("name"))
-    .def("get_attrs_name", &Attributes::getAttrsName)
+    .def("has_attr", &Attributes::hasAttrPy, py::arg("name"))
     .def("get_attr", &Attributes::getAttrPy, py::arg("name"))
     .def("__getattr__", &Attributes::getAttrPy, py::arg("name"))
     .def("set_attr", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
-    .def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value"));
+    .def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
+    .def("dict", &Attributes::dict)
+    .def("__str__", &Attributes::str)
+    .def("__repr__", &Attributes::repr);
+
 
     py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
     .def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value"))
@@ -35,5 +51,4 @@ void init_Attributes(py::module& m){
     m.def("test_DynamicAttributes_binding_check", &test_DynamicAttributes_binding_check, py::arg("attrs"));
 }
 
-}
-
+} // namespace Aidge
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index 2277db2421c36704270b81bdb6c45f19aaa891e4..de200300a99bb33180103608238855b2f5604145 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -27,10 +27,6 @@ Aidge::OperatorImpl::OperatorImpl(const Operator& op, const std::string& backend
 }
 
 Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(mOp.getRawInput(inputIdx),
-        "a valid input is required at index {} for operator type {}",
-        inputIdx, mOp.type());
-
     if (mOp.getRawInput(inputIdx)) {
         const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
         if (!input->empty()) {
@@ -48,10 +44,6 @@ Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inpu
 }
 
 Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(mOp.getRawInput(inputIdx),
-        "a valid input is required at index {} for operator type {}",
-        inputIdx, mOp.type());
-
     if (mOp.getRawInput(inputIdx)) {
         const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
         if (!input->empty()) {
@@ -73,10 +65,6 @@ Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) co
 
 Aidge::Elts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                          const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    AIDGE_ASSERT(mOp.getRawOutput(outputIdx),
-        "a valid output is required at index {} for operator type {}",
-        outputIdx, mOp.type());
-
     if (mOp.getRawOutput(outputIdx)) {
         const auto output = std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx));
         if (!output->empty()) {
@@ -129,9 +117,9 @@ void Aidge::OperatorImpl::resetConsummerProducer(){
 }
 
 void Aidge::OperatorImpl::forward() {
-    AIDGE_THROW_OR_ABORT(std::runtime_error, "forward() not implemented");
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "forward() not implemented yet for operator of type {}", mOp.type());
 }
 
 void Aidge::OperatorImpl::backward() {
-    AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented");
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented yet for operator of type {}", mOp.type());
 }
diff --git a/src/backend/TensorImpl.cpp b/src/backend/TensorImpl.cpp
index ee2f82a9cf847bfc6fe51e8d8b621e53a4c93cf4..335122d0583d355b6aab1649b5e8122c16eef15b 100644
--- a/src/backend/TensorImpl.cpp
+++ b/src/backend/TensorImpl.cpp
@@ -15,7 +15,9 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 void Aidge::TensorImpl::copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbElts_t srcOffset, NbElts_t dstOffset) {
-    if (&srcImpl == this && srcOffset == dstOffset) {
+    // Return early if src and dst are the same
+    // OR if src capacity is 0 (there is no valid data to copy)
+    if ((&srcImpl == this && srcOffset == dstOffset) || srcImpl.capacity() == 0) {
         return;
     }
 
@@ -24,7 +26,7 @@ void Aidge::TensorImpl::copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbE
             // Same backend, but different device
             copyFromDevice(srcImpl.rawPtr(srcOffset), srcImpl.device(), length, dstOffset);
         }
-        else if (srcImpl.hostPtr() != nullptr) {
+        else if (srcImpl.hostPtr() != nullptr) { // capacity() is > 0 so hostPtr() will not assert
             // Different backend, but input is valid on host
             copyFromHost(srcImpl.hostPtr(srcOffset), length, dstOffset);
         }
diff --git a/src/backend/cpu/data/TensorImpl.cpp b/src/backend/cpu/data/TensorImpl.cpp
index da90197e912fbeabc2f28bd3bedd91cc6f29e466..ed3c96f80c1b8bafd70425451d6618428d1888f0 100644
--- a/src/backend/cpu/data/TensorImpl.cpp
+++ b/src/backend/cpu/data/TensorImpl.cpp
@@ -38,12 +38,7 @@ bool Aidge::TensorImpl_cpu<T>::operator==(const Aidge::TensorImpl &other) const
 
 template <typename T>
 void Aidge::TensorImpl_cpu<T>::zeros() {
-    if (mData.empty()) {
-        lazyInit();
-    }
-    for (std::size_t i = 0; i < mData.size(); ++i) {
-        *(mData.data() + i) = T(0);
-    }
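+    // A single memset is enough here: an all-zero byte pattern represents the
+    // value 0 for both integer and IEEE-754 floating-point element types.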
+    std::memset(rawPtr(), T(0), mNbElts * sizeof(T));
 }
 
 template <typename T>
@@ -53,7 +48,7 @@ void Aidge::TensorImpl_cpu<T>::copyCast(const void *src, const Aidge::DataType s
     }
 
     T* dstT = static_cast<T *>(rawPtr(offset));
-    AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
+    AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "TensorImpl_cpu<{}>::copyCast(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
     switch (srcDt)
     {
         case DataType::Float64:
diff --git a/src/data/DataProvider.cpp b/src/data/DataProvider.cpp
index 5c3d1d7ef3b3dd8c779cf9cda737f1a2b2f6e01f..fc6b842edef17c80a4ef80667fc814bf85df25a4 100644
--- a/src/data/DataProvider.cpp
+++ b/src/data/DataProvider.cpp
@@ -42,8 +42,8 @@ Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::si
 
     // Compute the number of bacthes depending on mDropLast boolean
     mNbBatch = (mDropLast) ?
-                static_cast<std::size_t>(std::floor(mNbItems / mBatchSize)) :
-                static_cast<std::size_t>(std::ceil(mNbItems / mBatchSize));
+                (mNbItems / mBatchSize) :
+                static_cast<std::size_t>(std::ceil(mNbItems / static_cast<float>(mBatchSize)));
 }
 
 std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() const
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 677bd0246e145ebf760f210000728bd2d99a3807..28fb90cebf8e387e69f1ec39c46a6a47c8a4d316 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -16,8 +16,117 @@
 
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Div.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/Sub.hpp"
+#include "aidge/operator/Sqrt.hpp"
+#include "aidge/operator/Transpose.hpp"
 #include "aidge/utils/Types.h"
 
+
+/**
+ * @brief Element-wise addition operation for two ``Tensor``s.
+ * @note ``Tensor``s should be stored on the same backend.
+ * @todo If input ``Tensor``s have a different dataType, the output should
+ * have the dataType of the ``Tensor`` with the highest precision.
+ *
+ * @param other
+ * @return Tensor
+ */
+Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
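+    // Implementation note: build a temporary Add_Op on the same backend, data
+    // type and data format as *this, run its forward() once and return a clone
+    // of its output. Each call therefore allocates a new result Tensor. The
+    // other arithmetic operators below follow the same pattern.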
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto add_ = Add_Op(2);
+    add_.associateInput(0, std::make_shared<Tensor>(*this));
+    add_.associateInput(1, std::make_shared<Tensor>(other));
+    add_.setDataType(dataType());
+    add_.setDataFormat(dataFormat());
+    add_.setBackend(mImpl->backend());
+    add_.forward();
+    // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
+    return add_.getOutput(0)->clone();
+}
+
+/**
+ * @brief Element-wise subtraction operation for two ``Tensor``s.
+ * @note ``Tensor``s should be stored on the same backend.
+ * @todo If input ``Tensor``s have a different dataType, the output should
+ * have the dataType of the ``Tensor`` with the highest precision.
+ *
+ * @param other
+ * @return Tensor
+ */
+Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto sub_ = Sub_Op();
+    sub_.associateInput(0, std::make_shared<Tensor>(*this));
+    sub_.associateInput(1, std::make_shared<Tensor>(other));
+    sub_.setDataType(dataType());
+    sub_.setDataFormat(dataFormat());
+    sub_.setBackend(mImpl->backend());
+    sub_.forward();
+    // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
+    return sub_.getOutput(0)->clone();
+}
+
+/**
+ * @brief Element-wise multiplication operation for two ``Tensor``s.
+ * @note ``Tensor``s should be stored on the same backend.
+ * @todo If input ``Tensor``s have a different dataType, the output should
+ * have the dataType of the ``Tensor`` with the highest precision.
+ *
+ * @param other
+ * @return Tensor
+ */
+Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto mul_ = Mul_Op();
+    mul_.associateInput(0, std::make_shared<Tensor>(*this));
+    mul_.associateInput(1, std::make_shared<Tensor>(other));
+    mul_.setDataType(dataType());
+    mul_.setDataFormat(dataFormat());
+    mul_.setBackend(mImpl->backend());
+    mul_.forward();
+    // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
+    return mul_.getOutput(0)->clone();
+}
+
+Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto div_ = Div_Op();
+    div_.associateInput(0, std::make_shared<Tensor>(*this));
+    div_.associateInput(1, std::make_shared<Tensor>(other));
+    div_.setDataType(dataType());
+    div_.setDataFormat(dataFormat());
+    div_.setBackend(mImpl->backend());
+    div_.forward();
+    // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
+    return div_.getOutput(0)->clone();
+}
+
+Aidge::Tensor Aidge::Tensor::sqrt() const {
+    AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
+    auto sqrt_ = Sqrt_Op();
+    sqrt_.associateInput(0, std::make_shared<Tensor>(*this));
+    sqrt_.setDataType(dataType());
+    sqrt_.setDataFormat(dataFormat());
+    sqrt_.setBackend(mImpl->backend());
+    sqrt_.forward();
+    return sqrt_.getOutput(0)->clone();
+}
+
 Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
     if (this == &other) {
         return *this;
@@ -319,6 +428,52 @@ void Aidge::Tensor::copyFrom(const Tensor& src) {
                         mImplOffset);
 }
 
+void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) {
+    std::vector<DimSize_t> newDims;
+    for (std::size_t i = 0; i < src.dims().size(); ++i) {
+        newDims.push_back(src.dims()[transpose[i]]);
+    }
+
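+    // Compute the row-major strides of the transposed output:
+    // newStrides[i] is the product of all dimensions after i.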
+    std::vector<std::size_t> newStrides(newDims.size(), 1);
+    for (size_t i = 0; i < newDims.size(); ++i) {
+        for (size_t j = i + 1; j < newDims.size(); ++j) {
+            newStrides[i] *= newDims[j];
+        }
+    }
+
+    std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, newDims);
+
+    std::vector<size_t> indices(newDims.size(), 0);
+    for (size_t i = 0; i < src.size(); ++i) {
+        size_t idx = 0;
+        // Permute the indices according to the given transpose order
+        for (int j = newDims.size() - 1; j >= 0; --j) {
+            idx += indices[transpose[j]] * newStrides[j];
+        }
+
+        // Copy the value into the output
+        newImpl->copy(src.getImpl()->rawPtr(i), 1, idx);
+
+        // Update indices for the next iteration
+        for (int j = newDims.size() - 1; j >= 0; --j) {
+            if (indices[j] < src.dims()[j] - 1) {
+                indices[j]++;
+                break;
+            }
+            else {
+                indices[j] = 0;
+            }
+        }
+    }
+
+    resize(newDims);
+    setImpl(newImpl);
+}
+
+void Aidge::Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) {
+    copyTranspose(src, std::vector<DimSize_t>(transpose.begin(), transpose.end()));
+}
+
 void Aidge::Tensor::copyCastFrom(const Tensor& src,
                                  std::shared_ptr<Tensor>& movedSrcPtr) {
     if (&src == this) {
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 0bc918995c55a914b29987506578491e2c86fae5..5a11aa20e03bef274f784788dee1ef047cafba42 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -31,6 +31,7 @@
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
+#include "aidge/operator/Memorize.hpp"
 #include "aidge/utils/Directories.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
@@ -45,23 +46,29 @@ const std::shared_ptr<Aidge::Node> Aidge::GraphView::operator[](const std::strin
 ///////////////////////////////////////////////////////
 
 Aidge::Connector Aidge::GraphView::operator()(
-    const std::vector<Aidge::Connector> ctors) {
+    const std::vector<Aidge::Connector> ctors)
+{
   // TODO: allow for multiple inputNodes?
-  assert((inputNodes().size() == 1U) && "Too many input Nodes for the GraphView, undefined behaviour");
+  AIDGE_ASSERT(inputNodes().size() == 1U, "Multiple input Nodes for the GraphView are not supported when using Connectors");
   std::shared_ptr<Node> inNode = *inputNodes().begin();
-  assert((ctors.size() == static_cast<std::size_t>(inNode->nbData())) && "Wrong number of arguments.\n");
-  for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inNode->inputs()) {
-    assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n");
-    (void)input; // avoid unused warning
-  }
 
-  IOIndex_t inID = 0;
-  for (const Connector &ctor : ctors) {
-    assert((ctor.node() != nullptr) &&
-           "Input Connector must be associated with a node");
-    ctor.node()->addChild(shared_from_this(), static_cast<std::size_t>(ctor.index()),
-                          {inNode, inID++});
+  IOIndex_t ctorIdx = 0;
+  const auto& inputs = inNode->inputs();
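+  // Connect each provided Connector, in order, to the next Data (or
+  // OptionalData) input of the graph's single input node; a missing connector
+  // is only tolerated for OptionalData inputs.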
+  for (IOIndex_t idx = 0; idx < inNode->nbInputs(); ++idx) {
+    if (inNode->inputCategory(idx) == InputCategory::Data || inNode->inputCategory(idx) == InputCategory::OptionalData) {
+      if (ctorIdx < ctors.size()) {
+        AIDGE_ASSERT(ctors[ctorIdx].node() != nullptr, "Input Connector #{} must be associated with a node", ctorIdx);
+        AIDGE_ASSERT(inputs[idx].second == gk_IODefaultIndex, "Data input#{} connection is not free.", idx);
+        ctors[ctorIdx].node()->addChild(shared_from_this(), static_cast<std::size_t>(ctors[ctorIdx].index()),
+                              {inNode, idx});
+        ++ctorIdx;
+      }
+      else {
+        AIDGE_ASSERT(inNode->inputCategory(idx) == InputCategory::OptionalData, "Missing an input connector for non-optional Data input#{}", idx);
+      }
+    }
   }
+  AIDGE_ASSERT(ctorIdx == ctors.size(), "Too many input connectors ({}) vs available node inputs ({}).", ctors.size(), ctorIdx);
   return Connector(*(outputNodes().begin()));
 }
 
@@ -73,6 +80,9 @@ bool Aidge::GraphView::inView(const std::shared_ptr<Aidge::Node>& nodePtr) const
     return mNodes.find(nodePtr) != mNodes.cend();
 }
 
+bool Aidge::GraphView::inView(const std::string& nodeName) const {
+  return mNodeRegistry.find(nodeName) != mNodeRegistry.end();
+}
 
 void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProducers) const {
     auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((path + ".mmd").c_str(), "w"), &std::fclose);
@@ -119,9 +129,11 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
           }
         }
 
-        if (node_ptr == mRootNode || node_ptr->type() != "Producer" || showProducers) {
-          fmt::print(fp.get(), "{}_{}({}){}\n", node_ptr->type(), namePtrTable.at(node_ptr),
-                      givenName, nodeCls);
+        if (node_ptr->type() != "Producer" || showProducers) {
+            // if (node_ptr == mRootNode) {
+            fmt::print(fp.get(), "{}_{}({}){}\n", node_ptr->type(), namePtrTable.at(node_ptr),
+                        givenName, nodeCls);
+            // }
         }
     }
 
@@ -392,14 +404,21 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
 }
 
 bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>>& dims, bool allowDataDependency) {
+    // If dims are provided, set the corresponding graph input tensors
+    // (or check that already connected tensors have the expected dims)
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
     if (!dims.empty()){
       AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of given dimensions ({}) and graph inputs ({})", dims.size(), mInputNodes.size());
       for (std::size_t i = 0; i < dims.size(); ++i){
-        auto tensor = std::make_shared<Tensor>(dims[i]);
-        mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+        const auto& currentTensorPtr =
+            std::dynamic_pointer_cast<OperatorTensor>(mInputNodes[i].first->getOperator())->getInput(mInputNodes[i].second);
+        if (currentTensorPtr) { // tensor detected
+            AIDGE_ASSERT(currentTensorPtr->dims() == dims[i], "Tensor of unexpected size provided.");
+        } else {
+            auto tensor = std::make_shared<Tensor>(dims[i]);
+            mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+        }
       }
     }
 
@@ -412,7 +431,7 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                 AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i) == inputI.first->getOperator()->getRawOutput(inputI.second),
                   "Input#{} for node {} ({}) is not properly connected to output#{} of node {} ({}): Data or Tensor mismatch!",
                     i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
-            } else {
+            } else if (nodePtr->inputCategory(i) != InputCategory::OptionalData && nodePtr->inputCategory(i) != InputCategory::OptionalParam) {
                 // Input is missing
                 AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i),
                   "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
@@ -423,22 +442,68 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
         }
     }
 
-    // Compute dimensions of every node
-    std::set<std::shared_ptr<Node>> listNodes = getNodes();
+    // List of nodes that are already dims forwarded
+    std::set<std::shared_ptr<Node>> dimsForwarded;
+    // Establish initial list of dims forwardable nodes:
+    // input nodes and children of Producers
+    std::set<std::shared_ptr<Node>> listNodes = inputNodes();
+    for (const auto& nodePtr : getNodes()) {
+        if (nodePtr->type() == Producer_Op::Type) {
+            // Producers are already dims forwarded!
+            dimsForwarded.insert(nodePtr);
+            // Producers' children are dims forwardable
+            for (const auto& child : nodePtr->getChildren()) {
+                if (inView(child)) {
+                    listNodes.insert(child);
+                }
+            }
+        }
+    }
+
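+    // Worklist propagation: on each pass, forward dims only for nodes whose
+    // in-view parents have all been forwarded already (except for Memorize,
+    // which only needs one forwarded parent); each success enqueues the node's
+    // children for the next pass, until the list is empty or stalls.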
     do {
         std::set<std::shared_ptr<Node>> nextList;
-        for (std::shared_ptr<Node> nodePtr : listNodes) {
+        for (const auto& nodePtr : listNodes) {
             if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
-              const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
-              // Recompute everytime, even if it was already computed in a
-              // previous call of forwardDims(), as the graph may have changed!
-              op->forwardDims(allowDataDependency);
-              if (!op->dimsForwarded()) {
-                  nextList.insert(nodePtr);
-              }
+                const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
+
+                bool anyParent = false;
+                bool parentsForwarded = true;
+                for (const auto& parent : nodePtr->getParents()) {
+                    if (parent != nullptr && inView(parent) && dimsForwarded.find(parent) == dimsForwarded.end()) {
+                        Log::debug("Dimensions not forwarded for parent (node {} (of type {})) of node {} (of type {})",
+                            parent->name(), parent->type(), nodePtr->name(), nodePtr->type());
+                        parentsForwarded = false;
+                    }
+                    else {
+                        anyParent = true;
+                    }
+                }
+
+                // Special rule for Memorize_Op, which only requires one parent
+                // to have its dims forwarded. This avoids circular dependency.
+                if (nodePtr->type() == Memorize_Op::Type && anyParent) {
+                    parentsForwarded = true;
+                }
+
+                if (parentsForwarded && op->forwardDims(allowDataDependency)) {
+                    // Recompute every time, even if it was already computed in a
+                    // previous call of forwardDims(), as the graph may have changed!
+                    dimsForwarded.insert(nodePtr);
+                    for (const auto& child : nodePtr->getChildren()) {
+                        if (inView(child) && dimsForwarded.find(child) == dimsForwarded.end()) {
+                            nextList.insert(child);
+                        }
+                    }
+                }
+                else {
+                    Log::debug("Unable to forward dimensions for node {} (of type {}) yet", nodePtr->name(), nodePtr->type());
+                    nextList.insert(nodePtr);
+                }
             }
         }
 
+        Log::debug("********************");
+
         // Internal check to make sure we won't enter in an infinite loop!
         if (nextList == listNodes) {
             // We are stuck!
@@ -450,7 +515,6 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
             Log::warn("Unable to forward dimensions (circular dependency and/or wrong dimensions and/or data dependent dimension?). Unable to compute output dims for nodes {}.", nodesName);
             return false;
         }
-
         listNodes.swap(nextList);
     }
     while (!listNodes.empty());
@@ -470,6 +534,12 @@ void Aidge::GraphView::setDataType(const Aidge::DataType &datatype) const {
     }
 }
 
+void Aidge::GraphView::setDataFormat(const Aidge::DataFormat &dataformat) const {
+    for (const auto& node : getNodes()) {
+        node->getOperator()->setDataFormat(dataformat);
+    }
+}
+
 std::vector<
     std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>>
 Aidge::GraphView::outputs() const {
@@ -526,15 +596,17 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
 
   // add learnable parameters to the graph
   if (includeLearnableParam) {
-    for (IOIndex_t i = node->nbData(); i < node->nbInputs(); ++i) {
-      std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
-      if (parentNode) {
-          parentNode->addView(shared_from_this());
-          mNodes.insert(parentNode);
-          if (!(parentNode->name()).empty())
-            mNodeRegistry.insert(std::make_pair(parentNode->name(), parentNode));
-          // check if the parentNode is an input/output node
-          updateInputsOutputsNew(parentNode);
+    for (IOIndex_t i = 0; i < node->nbInputs(); ++i) {
+      if (node->inputCategory(i) == InputCategory::Param || node->inputCategory(i) == InputCategory::OptionalParam) {
+        std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
+        if (parentNode) {
+            parentNode->addView(shared_from_this());
+            mNodes.insert(parentNode);
+            if (!(parentNode->name()).empty())
+              mNodeRegistry.insert(std::make_pair(parentNode->name(), parentNode));
+            // check if the parentNode is an input/output node
+            updateInputsOutputsNew(parentNode);
+        }
       }
     }
   }
@@ -604,6 +676,15 @@ bool Aidge::GraphView::add(std::set<std::shared_ptr<Node>> otherNodes, bool incl
   std::set<NodePtr> nodesToAdd;
   std::set_difference(otherNodes.begin(), otherNodes.end(), mNodes.begin(), mNodes.end(), std::inserter(nodesToAdd, nodesToAdd.begin()));
 
+  // Check that no added node name collides with a name already registered in the current Graph
+  for (auto node : nodesToAdd) {
+    if (mNodeRegistry.find(node->name()) != mNodeRegistry.end()){
+      std::string newName = node->createUniqueName(node->name());
+      fmt::print("Warning: node name \"{}\" is a duplicate, renaming to {}.\n", node->name(), newName);
+      node->setName(newName);
+    }
+  }
   // List the nodes to rank, initially all the nodes in the GraphView
   std::set<NodePtr> nodesToRank(mNodes);
   nodesToRank.insert(nodesToAdd.begin(), nodesToAdd.end());
@@ -698,10 +779,10 @@ bool Aidge::GraphView::add(std::pair<NodePtr, std::set<NodePtr>> nodes, bool inc
   return add(nodes.second, includeLearnableParam);
 }
 
-bool Aidge::GraphView::add(std::shared_ptr<GraphView> graph) {
+bool Aidge::GraphView::add(std::shared_ptr<GraphView> graph, bool includeLearnableParam) {
     // set the rootNode to the other graphView rootNode if no rootNode yet
     mRootNode = mRootNode ? mRootNode : graph->rootNode();
-    return add(graph->getNodes(), false);
+    return add(graph->getNodes(), includeLearnableParam);
 }
 
 void Aidge::GraphView::addChild(std::shared_ptr<Node> toOtherNode,
@@ -813,29 +894,31 @@ Aidge::GraphView::getNode(const std::string& nodeName) const {
 void Aidge::GraphView::remove(std::shared_ptr<Node> nodePtr, bool includeLearnableParam) {
   // remove learnable params
   if (includeLearnableParam) {
-    for (IOIndex_t i = nodePtr->nbData(); i < nodePtr->nbInputs(); ++i) {
-      auto inputI = nodePtr->input(i);
-      if (inputI.first != nullptr) {
-        bool removeNode = true;
-        for (const auto& parentOutput : inputI.first->outputs()) {
-          for (const auto& childOfParentOutput : parentOutput) {
-            // only remove the learnable parameter if not related to any other Node in the GraphView
-            if (childOfParentOutput.first != nodePtr) {
-              removeNode = false;
-              break;
+    for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
+      if (nodePtr->inputCategory(i) == InputCategory::Param || nodePtr->inputCategory(i) == InputCategory::OptionalParam) {
+        auto inputI = nodePtr->input(i);
+        if (inputI.first != nullptr) {
+          bool removeNode = true;
+          for (const auto& parentOutput : inputI.first->outputs()) {
+            for (const auto& childOfParentOutput : parentOutput) {
+              // only remove the learnable parameter if not related to any other Node in the GraphView
+              if (childOfParentOutput.first != nodePtr) {
+                removeNode = false;
+                break;
+              }
             }
           }
-        }
-        if (removeNode) {
-          // assert Learnable Parameter in the GraphView scope
-          if (mNodes.find(inputI.first) != mNodes.end()) {
-            mNodes.erase(inputI.first);
-            inputI.first->removeView(shared_from_this());
-          }
-          if (!inputI.first->name().empty()) { mNodeRegistry.erase(inputI.first->name()); }
+          if (removeNode) {
+            // assert Learnable Parameter in the GraphView scope
+            if (mNodes.find(inputI.first) != mNodes.end()) {
+              mNodes.erase(inputI.first);
+              inputI.first->removeView(shared_from_this());
+            }
+            if (!inputI.first->name().empty()) { mNodeRegistry.erase(inputI.first->name()); }
 
-          // check if the node was an input/output node
-          updateInputsOutputsDelete(inputI.first);
+            // check if the node was an input/output node
+            updateInputsOutputsDelete(inputI.first);
+          }
         }
       }
     }
@@ -979,6 +1062,10 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
                       for (const auto& child : outputChildren[i]) {
                         inputParents[i].first -> addChild(child.first, inputParents[i].second, child.second);
                       }
+                    } else {
+                      for (const auto& child : outputChildren[i]) {
+                        child.first->getOperator()->resetInput(child.second);
+                      }
                     }
                 }
             }
@@ -998,7 +1085,10 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
             // Case 2
             if ((oldOIn.size() == 1) && (inputParents[0].first)) {
                 for (std::size_t i = 0; i < newOIn.size(); ++i) {
-                    inputParents[0].first -> addChild(newOIn[i].first, inputParents[0].second, newOIn[i].second);
+                    // Only re-connect the same input category
+                    if (newOIn[i].first->inputCategory(newOIn[i].second) == oldOIn[0].first->inputCategory(oldOIn[0].second)) {
+                      inputParents[0].first -> addChild(newOIn[i].first, inputParents[0].second, newOIn[i].second);
+                    }
                 }
             } else {
                 for (std::size_t i = 0; i < oldOIn.size(); ++i) {
@@ -1284,7 +1374,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
     auto clonedNode = cloneNode(node_ptr);
     if (clonedNode == nullptr) {
       AIDGE_ASSERT(node_ptr->getChildren().size() <= 1, "deleted nodes in GraphView::clone() cannot have multiple children");
-      AIDGE_ASSERT(node_ptr->nbData() <= 1, "deleted nodes in GraphView::clone() cannot have multiple data input parents");
+      AIDGE_ASSERT(node_ptr->dataInputs().size() <= 1, "deleted nodes in GraphView::clone() cannot have multiple data input parents");
     }
     oldToNewNodes[node_ptr] = clonedNode;
   }
@@ -1302,8 +1392,8 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
         while (oldToNewNodes[parent.first] == nullptr) {
           // Find next valid parent in line, going backward in the graph
           AIDGE_INTERNAL_ASSERT(parent.first->getChildren().size() == 1);
-          AIDGE_INTERNAL_ASSERT(parent.first->nbData() <= 1);
           const auto& parents = parent.first->dataInputs();
+          AIDGE_INTERNAL_ASSERT(parents.size() <= 1);
 
           if (!parents.empty() && parents[0].first != nullptr // a valid parent exists
             && oldToNewNodes.find(parents[0].first) != oldToNewNodes.end()) // parent is in the GraphView
@@ -1384,9 +1474,9 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
   for (auto it = newOutputNodes.begin(); it != newOutputNodes.end(); ) {
     // If output node was removed, find previous valid output
     while (oldToNewNodes[it->first] == nullptr) {
-      // Removed node should have only one connected data input, otherwise cloning is invalid
-      AIDGE_INTERNAL_ASSERT(it->first->nbData() <= 1);
       auto parents = it->first->dataInputs();
+      // Removed node should have only one connected data input, otherwise cloning is invalid
+      AIDGE_INTERNAL_ASSERT(parents.size() <= 1);
 
       if (!parents.empty() && parents[0].first != nullptr // a valid parent exists
         && oldToNewNodes.find(parents[0].first) != oldToNewNodes.end()) // parent is in the GraphView
@@ -1412,10 +1502,9 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
 }
 
 std::shared_ptr<Aidge::GraphView> Aidge::getConnectedGraphView(std::shared_ptr<Node> node) {
-  std::vector<NodePtr> foundNodes;
-  foundNodes.push_back(node);
+  std::vector<NodePtr> foundNodes{node};
 
-  for (size_t curNodeIdx = 0; curNodeIdx < foundNodes.size(); ++curNodeIdx) {
+  for (std::size_t curNodeIdx = 0; curNodeIdx < foundNodes.size(); ++curNodeIdx) {
     NodePtr curNode = foundNodes[curNodeIdx];
 
     for (auto childs : curNode->getOrderedChildren()) {
diff --git a/src/graph/Matching.cpp b/src/graph/Matching.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a840b6ab552d71990b796d741d3ca56b07c7c0be
--- /dev/null
+++ b/src/graph/Matching.cpp
@@ -0,0 +1,685 @@
+#include "aidge/graph/Matching.hpp"
+
+#include <fmt/color.h>
+
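+// Entry point of the single-pass matcher: a query is a ';'-separated list of
+// expressions (e.g. "Conv->ReLU;Pad->Conv"), each parsed by the
+// recursive-descent functions matchSequence() / matchNodeOrBlock() below.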
+std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphMatching::match(const std::string& query, bool disjoint) {
+    Context ctx;
+    ctx.query = query;
+    std::set<MatchingResult> matches;
+
+    while (matchSequence(ctx, matches) || matchNodeOrBlock(ctx, matches)) {
+        removeWhiteSpace(ctx.query);
+        if (!ctx.query.empty() && ctx.query[0] == ';') {
+            ctx.query.erase(0, 1);
+        }
+        else {
+            break;
+        }
+    }
+
+    removeWhiteSpace(ctx.query);
+    if (!ctx.query.empty()) {
+        Log::warn("Syntax error, unable to parse remaining query: {}", ctx.query);
+    }
+
+    if (disjoint) {
+        matches = filterLonguestDisjoint(matches);
+    }
+
+    return matches;
+}
+
+std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphMatching::filterLonguestDisjoint(const std::set<MatchingResult>& matches) {
+    // Sort matches by highest number of nodes first, thanks to the CompareMatchingResultSize function
+    std::set<MatchingResult, CompareMatchingResultSize> sortedMatches(matches.begin(), matches.end());
+    // Keep all the nodes that are already in previous (selected) matches
+    std::set<NodePtr> selectedNodes;
+    std::set<MatchingResult> filteredMatches;
+
+    for (const auto& match : sortedMatches) {
+        const auto& nodes = match.graph->getNodes();
+        bool isNonOverlapping = true;
+        for (const auto& node : nodes) {
+            if (selectedNodes.find(node) != selectedNodes.end()) {
+                isNonOverlapping = false;
+                break;
+            }
+        }
+
+        if (isNonOverlapping) {
+            // If no node of the current match is already in a previous match,
+            // the match is disjoint from previous matches and can be kept!
+            filteredMatches.insert(match);
+            selectedNodes.insert(nodes.begin(), nodes.end());
+        }
+    }
+
+    return filteredMatches;
+}
+
+bool Aidge::SinglePassGraphMatching::matchNodeOrBlock(Context& ctx, std::set<MatchingResult>& matches) {
+    auto newCtx = ctx;
+    Log::debug("{}node-or-block", std::string(2*newCtx.depth, ' '));
+    auto newMatches = matches;
+    ++newCtx.depth;
+
+    // (BLOCK | NODE)
+    if (!matchBlock(newCtx, newMatches) && !matchNode(newCtx, newMatches)) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    // QUANTIFIER?
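+    // Supported quantifiers, applied to the preceding node or block:
+    // '?' (0 or 1), '*' (0 or more), '+' (1 or more), '{N}' (N consecutive occurrences)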
+    bool matchMore = false;
+    size_t matchQuantity = 0;
+    removeWhiteSpace(newCtx.query);
+    if (!newCtx.query.empty() && (newCtx.query[0] == '?' || newCtx.query[0] == '*')) {
+        AIDGE_ASSERT(!(ctx.firstSequence && ctx.firstNode),
+            "Ill-formed query; the root node cannot be optional in query at: {}", ctx.query);
+
+        for (const auto& match : matches) {
+            bool found = false;
+            for (const auto& newMatch : newMatches) {
+                if (match.graph->rootNode() == newMatch.graph->rootNode()) {
+                    found = true;
+                }
+            }
+
+            if (!found) {
+                newMatches.insert(match);
+            }
+        }
+
+        if (newCtx.query[0] == '*') {
+            matchMore = true;
+        }
+
+        newCtx.query.erase(0, 1);
+    }
+    else if (!newCtx.query.empty() && newCtx.query[0] == '+') {
+        newCtx.query.erase(0, 1);
+        matchMore = true;
+    }
+    else if (!newCtx.query.empty() && newCtx.query[0] == '{') {
+        newCtx.query.erase(0, 1);
+
+        removeWhiteSpace(newCtx.query);
+        const auto endQuantity = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+            [](char c) { return !isdigit(c); });
+        if (endQuantity != newCtx.query.begin()) {
+            matchQuantity = std::stoi(newCtx.query.substr(0, endQuantity - newCtx.query.begin()));
+            newCtx.query = newCtx.query.substr(endQuantity - newCtx.query.begin());
+        }
+        else {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+
+        if (matchQuantity == 0) {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+
+        removeWhiteSpace(newCtx.query);
+        if (!newCtx.query.empty() && newCtx.query[0] == '}') {
+            newCtx.query.erase(0, 1);
+        }
+        else {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+
+        if (matchQuantity > 1) {
+            matchMore = true;
+        }
+    }
+
+    if (matchMore) {
+        std::set<MatchingResult> additionalMatches;
+
+        do {
+            auto additionalCtx = ctx;
+            additionalCtx.firstNode = newCtx.firstNode;
+            additionalCtx.firstSequence = newCtx.firstSequence;
+            additionalCtx.anchors = newCtx.anchors;
+            ++additionalCtx.depth;
+            additionalMatches = newMatches;
+
+            if (!matchBlock(additionalCtx, additionalMatches) && !matchNode(additionalCtx, additionalMatches)) {
+                Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+                return false;
+            }
+
+            for (const auto& additionalMatch : additionalMatches) {
+                for (auto& match : newMatches) {
+                    if (match.graph->rootNode() == additionalMatch.graph->rootNode()) {
+                        match.graph = std::make_shared<GraphView>(*(additionalMatch.graph.get()));
+                        match.anchors = additionalMatch.anchors;
+                        match.startNode = additionalMatch.startNode;
+                        break;
+                    }
+                }
+            }
+
+            --matchQuantity;
+        }
+        while (!additionalMatches.empty() && matchQuantity > 1);
+    }
+
+    --newCtx.depth;
+    ctx = newCtx;
+    matches = newMatches;
+    return true;
+}
+
+bool Aidge::SinglePassGraphMatching::matchBlock(Context& ctx, std::set<MatchingResult>& matches) {
+    auto newCtx = ctx;
+    Log::debug("{}block", std::string(2*newCtx.depth, ' '));
+    auto newMatches = matches;
+    ++newCtx.depth;
+
+    // '('
+    removeWhiteSpace(newCtx.query);
+    if (!newCtx.query.empty() && newCtx.query[0] == '(') {
+        newCtx.query.erase(0, 1);
+    }
+    else {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    // SEQ | PAR | BLOCK | ALT | NODE
+    if (!matchSequence(newCtx, newMatches)
+        && !matchParallel(newCtx, newMatches)
+        && !matchBlock(newCtx, newMatches)
+        && !matchAlternative(newCtx, newMatches)
+        && !matchNode(newCtx, newMatches))
+    {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    // ')'
+    removeWhiteSpace(newCtx.query);
+    if (!newCtx.query.empty() && newCtx.query[0] == ')') {
+        newCtx.query.erase(0, 1);
+    }
+    else {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    --newCtx.depth;
+    ctx = newCtx;
+    matches = newMatches;
+    return true;
+}
+
+bool Aidge::SinglePassGraphMatching::matchSequence(Context& ctx, std::set<MatchingResult>& matches) {
+    auto newCtx = ctx;
+    Log::debug("{}sequence", std::string(2*newCtx.depth, ' '));
+    auto newMatches = matches;
+    ++newCtx.depth;
+
+    if (!ctx.inSequence) {
+        newCtx.inSequence = true;
+        newCtx.firstNode = true;
+    }
+
+    // NODE_OR_BLOCK
+    if (!matchNodeOrBlock(newCtx, newMatches)) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    newCtx.firstNode = false;
+
+    bool found = false;
+    while (true) {
+        // (EDGE NODE_OR_BLOCK)+
+        //   EDGE
+        if (matchEdge(newCtx, newMatches)) {
+            found = true;
+        }
+        else {
+            break;
+        }
+
+        //   NODE_OR_BLOCK
+        if (!matchNodeOrBlock(newCtx, newMatches)) {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+    }
+
+    if (!found) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    if (!ctx.inSequence) {
+        newCtx.inSequence = false;
+    }
+
+    --newCtx.depth;
+    ctx = newCtx;
+    matches = newMatches;
+    return true;
+}
+
+bool Aidge::SinglePassGraphMatching::matchParallel(Context& ctx, std::set<MatchingResult>& matches) {
+    auto newCtx = ctx;
+    Log::debug("{}parallel", std::string(2*newCtx.depth, ' '));
+    ++newCtx.depth;
+    auto newMatches = matches;
+
+    // NODE_OR_BLOCK
+    auto parCtx = newCtx;
+    if (!matchNodeOrBlock(parCtx, newMatches)) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+    newCtx.query = parCtx.query;
+
+    bool found = false;
+    while (true) {
+        // ('&' NODE_OR_BLOCK)+
+        //   '&'
+        removeWhiteSpace(newCtx.query);
+        if (!newCtx.query.empty() && newCtx.query[0] == '&') {
+            newCtx.query.erase(0, 1);
+            found = true;
+        }
+        else {
+            break;
+        }
+
+        //   NODE_OR_BLOCK
+        // reset the ctx to the beginning
+        parCtx = newCtx;
+        // reset the startNode to the beginning
+        for (auto& newMatch : newMatches) {
+            for (const auto& match : matches) {
+                if (match.graph->rootNode() == newMatch.graph->rootNode()) {
+                    newMatch.startNode = match.startNode;
+                }
+            }
+        }
+
+        if (!matchNodeOrBlock(parCtx, newMatches)) {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+        newCtx.query = parCtx.query;
+    }
+
+    // Keep the last context for further query
+    newCtx = parCtx;
+
+    if (!found) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    --newCtx.depth;
+    ctx = newCtx;
+    matches = newMatches;
+    return true;
+}
+
+bool Aidge::SinglePassGraphMatching::matchAlternative(Context& ctx, std::set<MatchingResult>& matches) {
+    auto newCtx = ctx;
+    Log::debug("{}alternative", std::string(2*newCtx.depth, ' '));
+    ++newCtx.depth;
+    std::set<MatchingResult> newMatches;
+
+    // NODE_OR_BLOCK
+    auto altCtx = newCtx;
+    auto altMatches = matches;
+    if (!matchNodeOrBlock(altCtx, altMatches)) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+    newCtx.query = altCtx.query;
+    newMatches.insert(altMatches.begin(), altMatches.end());
+
+    bool found = false;
+    while (true) {
+        // ('|' NODE_OR_BLOCK)+
+        //    '|'
+        removeWhiteSpace(newCtx.query);
+        if (!newCtx.query.empty() && newCtx.query[0] == '|') {
+            newCtx.query.erase(0, 1);
+            found = true;
+        }
+        else {
+            break;
+        }
+
+        //    NODE_OR_BLOCK
+        altCtx = newCtx;
+        altMatches = matches;
+        if (!matchNodeOrBlock(altCtx, altMatches)) {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+        newCtx.query = altCtx.query;
+        newMatches.insert(altMatches.begin(), altMatches.end());
+    }
+
+    if (!found) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    --newCtx.depth;
+    ctx = newCtx;
+    matches = newMatches;
+    return true;
+}
+
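+// EDGE = ('-' | '~') (IO_INDEX ('-' IO_INDEX)?)? '>'   (forward: match a child)
+//      | '<' (IO_INDEX ('-' IO_INDEX)?)? ('-' | '~')   (backward: match a parent)
+// '-' requires the parent output to have a single child (singleOutput), '~' does not.
+// IO_INDEX is a number or '*' (any index, stored as -1). When two indices are given,
+// the left one refers to the already matched node and the right one to the new node.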
+bool Aidge::SinglePassGraphMatching::matchEdge(Context& ctx, std::set<MatchingResult>& /*matches*/) {
+    auto newCtx = ctx;
+    Log::debug("{}edge", std::string(2*newCtx.depth, ' '));
+
+    // ('-' | '~') or '<'
+    removeWhiteSpace(newCtx.query);
+    if (!newCtx.query.empty() && (newCtx.query[0] == '-' || newCtx.query[0] == '~')) {
+        newCtx.singleOutput = (newCtx.query[0] == '-');
+        newCtx.query.erase(0, 1); // drop '-' or '~'
+        newCtx.lookForChild = true;
+    }
+    else if (!newCtx.query.empty() && newCtx.query[0] == '<') {
+        newCtx.query.erase(0, 1); // drop '<'
+        newCtx.lookForChild = false;
+    }
+    else {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    // optional first IO_INDEX
+    int firstIdx = 0;
+    bool foundFirst = false;
+    const auto endOutputIdx = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+        [](char c) { return !isdigit(c); });
+    if (endOutputIdx != newCtx.query.begin()) {
+        firstIdx = std::stoi(newCtx.query.substr(0, endOutputIdx - newCtx.query.begin()));
+        newCtx.query = newCtx.query.substr(endOutputIdx - newCtx.query.begin());
+        foundFirst = true;
+    }
+    else if (newCtx.query[0] == '*') {
+        newCtx.query.erase(0, 1); // drop '*'
+        firstIdx = -1;
+        foundFirst = true;
+    }
+
+    // optional second IO_INDEX, preceded by '-'
+    int secondIdx = 0;
+    bool foundSecond = false;
+    if (foundFirst && !newCtx.query.empty() && newCtx.query[0] == '-') {
+        auto query = newCtx.query;
+        query.erase(0, 1); // drop '-'
+
+        const auto endInputIdx = std::find_if(query.begin(), query.end(), 
+            [](char c) { return !isdigit(c); });
+        if (endInputIdx != query.begin()) {
+            secondIdx = std::stoi(query.substr(0, endInputIdx - query.begin()));
+            query = query.substr(endInputIdx - query.begin());
+            foundSecond = true;
+        }
+        else if (query[0] == '*') {
+            query.erase(0, 1); // drop '*'
+            secondIdx = -1;
+            foundSecond = true;
+        }
+
+        if (foundSecond) {
+            newCtx.query = query;
+        }
+    }
+
+    // '>' or ('-' | '~')
+    if (newCtx.lookForChild && !newCtx.query.empty() && newCtx.query[0] == '>') {
+        newCtx.query.erase(0, 1); // drop '>'
+    }
+    else if (!newCtx.lookForChild && !newCtx.query.empty() && (newCtx.query[0] == '-' || newCtx.query[0] == '~')) {
+        newCtx.singleOutput = (newCtx.query[0] == '-');
+        newCtx.query.erase(0, 1);
+    }
+    else {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    // Parsing is done, update the remaining context
+    newCtx.edgeLeftIdx = 0;
+    newCtx.edgeRightIdx = 0;
+    if (foundFirst && foundSecond) {
+        newCtx.edgeLeftIdx = firstIdx;
+        newCtx.edgeRightIdx = secondIdx;
+    }
+    else if (foundFirst) {
+        if (newCtx.lookForChild) {
+            newCtx.edgeRightIdx = firstIdx;
+        }
+        else {
+            newCtx.edgeLeftIdx = firstIdx;
+        }
+    }
+
+    if (newCtx.lookForChild) {
+        Log::debug("{}-{}-{}>", std::string(2*newCtx.depth + 2, ' '),
+            newCtx.edgeLeftIdx, newCtx.edgeRightIdx);
+    }
+    else {
+        Log::debug("{}<{}-{}-", std::string(2*newCtx.depth + 2, ' '),
+            newCtx.edgeLeftIdx, newCtx.edgeRightIdx);
+    }
+
+    ctx = newCtx;
+    return true;
+}
+
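+// NODE = (TYPE | '.') ('#' ANCHOR)? ('[' LAMBDA ']')?
+// '.' matches any operator type; '#name' registers an anchor that later sequences can
+// restart from; '[lambda]' filters candidate nodes with a registered lambda (mLambda).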
+bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingResult>& matches) {
+    auto newCtx = ctx;
+    Log::debug("{}node", std::string(2*newCtx.depth, ' '));
+    auto newMatches = matches;
+
+    // (TYPE | '.')
+    removeWhiteSpace(newCtx.query);
+    if (newCtx.query.empty()) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    std::string type;
+    if (newCtx.query[0] == '.') {
+        // '.'
+        newCtx.query.erase(0, 1); // drop '.'
+    }
+    else {
+        // TYPE
+        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+            [](char c) { return (!isalnum(c) && c != '_'); });
+
+        if (endIdentifier == newCtx.query.begin()) {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+
+        type = newCtx.query.substr(0, endIdentifier - newCtx.query.begin());
+        newCtx.query = newCtx.query.substr(endIdentifier - newCtx.query.begin());
+    }
+
+    // ('#' ANCHOR)?
+    std::string anchor = "";
+    if (!newCtx.query.empty() && newCtx.query[0] == '#') {
+        // '#'
+        newCtx.query.erase(0, 1); // drop '#'
+
+        // ANCHOR
+        const auto endAnchor = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+            [](char c) { return (!isalnum(c) && c != '_'); });
+        anchor = "#" + newCtx.query.substr(0, endAnchor - newCtx.query.begin());
+        newCtx.query = newCtx.query.substr(endAnchor - newCtx.query.begin());
+    }
+
+    // ('[' LAMBDA ']')?
+    std::string lambda = "";
+    if (!newCtx.query.empty() && newCtx.query[0] == '[') {
+        // '['
+        newCtx.query.erase(0, 1);
+
+        // LAMBDA
+        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+            [](char c) { return (!isalnum(c) && c != '_'); });
+
+        if (endIdentifier == newCtx.query.begin()) {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+
+        lambda = newCtx.query.substr(0, endIdentifier - newCtx.query.begin());
+        newCtx.query = newCtx.query.substr(endIdentifier - newCtx.query.begin());
+
+        // ']'
+        if (!newCtx.query.empty() && newCtx.query[0] == ']') {
+            newCtx.query.erase(0, 1);
+        }
+        else {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+    }
+
+    // Parsing is done, try to match the node
+    if (newCtx.firstSequence && newCtx.firstNode) {
+        // First node of first sequence = root node
+        for (auto node : mGraph->getNodes()) {
+            if ((type.empty() || node->type() == type)
+                && (lambda.empty() || mLambda.at(lambda)(node)))
+            {
+                MatchingResult result;
+                result.graph->add(node, false);
+                if (!anchor.empty()) {
+                    result.anchors[type][anchor] = node;
+                }
+                result.startNode = node;
+                newMatches.insert(result);
+            }
+        }
+        newCtx.firstSequence = false;
+
+        Log::debug("{}root node {}{}, found: {}", std::string(2*newCtx.depth + 2, ' '), fmt::styled(type.empty() ? "." : type, fmt::fg(fmt::color::yellow)), anchor, newMatches.size());
+    }
+    else if (newCtx.firstNode) {
+        // First node of a (new) sequence: it has to be an existing anchor
+        AIDGE_ASSERT(!anchor.empty(),
+            "Ill-formed query; an anchor is expected in query at: {}", ctx.query);
+        AIDGE_ASSERT(newCtx.anchors.find(type + anchor) != newCtx.anchors.end(),
+            "Ill-formed query; the node anchor {} has to be an existing anchor in query at: {}", type + anchor, ctx.query);
+
+        for (auto it = newMatches.begin(); it != newMatches.end(); ) {
+            const auto anchors = it->anchors[type];
+            const auto anchorNode = anchors.find(anchor);
+            if (anchorNode != anchors.end()) {
+                it->startNode = anchorNode->second;
+                ++it;
+            }
+            else {
+                it = newMatches.erase(it);
+            }
+        }
+
+        Log::debug("{}anchor node {}{}, found: {}", std::string(2*newCtx.depth + 2, ' '), fmt::styled(type.empty() ? "." : type, fmt::fg(fmt::color::yellow)), anchor, newMatches.size());
+    }
+    else {
+        for (auto it = newMatches.begin(); it != newMatches.end(); ) {
+            bool found = false;
+
+            if (newCtx.lookForChild) {
+                const auto outputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
+                    ? std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>(1, std::vector<std::pair<NodePtr, IOIndex_t>>(it->startNode->output(newCtx.edgeLeftIdx)))
+                    : it->startNode->outputs();
+
+                for (const auto& output : outputs) {
+                    if (newCtx.singleOutput && output.size() > 1) {
+                        continue;
+                    }
+
+                    for (const auto& node : output) {
+                        if ((type.empty() || node.first->type() == type)
+                            && (lambda.empty() || mLambda.at(lambda)(node.first))
+                            && (newCtx.edgeRightIdx == gk_IODefaultIndex || node.second == newCtx.edgeRightIdx))
+                        {
+                            if (mGraph->inView(node.first) && !it->graph->inView(node.first)) {
+                                it->graph->add(node.first, false);
+                                if (!anchor.empty()) {
+                                    it->anchors[type][anchor] = node.first;
+                                }
+                                it->startNode = node.first;
+                                found = true;
+                                break;
+                            }
+                            else if (!anchor.empty() && it->anchors[type].find(anchor) != it->anchors[type].end()) {
+                                it->startNode = node.first;
+                                found = true;
+                                break;
+                            }
+                        }
+                    }
+
+                    if (found) {
+                        break;
+                    }
+                }
+            }
+            else {
+                const auto inputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
+                    ? std::vector<std::pair<NodePtr, IOIndex_t>>(1, it->startNode->input(newCtx.edgeLeftIdx))
+                    : it->startNode->inputs();
+
+                for (const auto& input : inputs) {
+                    if ((type.empty() || input.first->type() == type)
+                        && (lambda.empty() || mLambda.at(lambda)(input.first))
+                        && (newCtx.edgeRightIdx == gk_IODefaultIndex || input.second == newCtx.edgeRightIdx))
+                    {
+                        if (newCtx.singleOutput && input.first->getChildren(input.second).size() > 1) {
+                            continue;
+                        }
+
+                        if (mGraph->inView(input.first) && !it->graph->inView(input.first)) {
+                            it->graph->add(input.first, false);
+                            if (!anchor.empty()) {
+                                it->anchors[type][anchor] = input.first;
+                            }
+                            it->startNode = input.first;
+                            found = true;
+                            break;
+                        }
+                        else if (!anchor.empty() && it->anchors[type].find(anchor) != it->anchors[type].end()) {
+                            it->startNode = input.first;
+                            found = true;
+                            break;
+                        }
+                    }
+                }
+            }
+
+            if (found) {
+                ++it;
+            }
+            else {
+                it = newMatches.erase(it);
+            }
+        }
+
+        Log::debug("{}node {}{}, found: {}", std::string(2*newCtx.depth + 2, ' '), fmt::styled(type.empty() ? "." : type, fmt::fg(fmt::color::yellow)), anchor, newMatches.size());
+    }
+
+    newCtx.anchors.insert(type + anchor);
+    ctx = newCtx;
+    matches = newMatches;
+    return true;
+}
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index b08bb4c2056e8c14f5b1dd3aae62fbacf8d8c14e..50b8be13c6faac62a8b2aecf1e767e0b83024d3c 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -38,18 +38,24 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
 ///////////////////////////////////////////////////////
 
 Aidge::Connector Aidge::Node::operator()(const std::vector<Connector>& ctors) {
-    assert((ctors.size() == nbData()) && "Wrong number of arguments.\n");
-    for (std::pair<std::shared_ptr<Node>, IOIndex_t>& input : inputs()) {
-        assert((gk_IODefaultIndex == input.second) &&
-               "At least one input connection is not free.\n");
-        (void)input;  // avoid unused warning
-    }
-    IOIndex_t i = 0;
-    for (const Connector& ctor : ctors) {
+    IOIndex_t idx = 0;
+    for (const auto& ctor : ctors) {
+        // Skip to next possible input idx
+        for (; idx < nbInputs() && (inputCategory(idx) != InputCategory::Data && inputCategory(idx) != InputCategory::OptionalData); ++idx) {}
+
+        AIDGE_ASSERT(idx < nbInputs(), "Too many input connectors ({}) vs available node inputs.", ctors.size());
+        AIDGE_ASSERT(input(idx).second == gk_IODefaultIndex, "Data input#{} connection is not free.", idx);
+
         if (ctor.node() != nullptr) {  // ctor must be associated with a node
-            ctor.node()->addChild(shared_from_this(), ctor.index(), i++);
+            ctor.node()->addChild(shared_from_this(), ctor.index(), idx);
         }
+        ++idx;
     }
+
+    // Skip to next possible input idx
+    for (; idx < nbInputs() && (inputCategory(idx) != InputCategory::Data && inputCategory(idx) != InputCategory::OptionalData); ++idx) {}
+    AIDGE_ASSERT(idx == nbInputs(), "Missing an input connector for Data input#{}", idx);
+
     return Connector(shared_from_this());
 }
 
@@ -62,6 +68,15 @@ void Aidge::Node::setName(const std::string& name) {
     mName = name;
 }
 
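+// Appends '_' to the given name until no GraphView containing this node
+// already holds a node with that name.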
+std::string Aidge::Node::createUniqueName(std::string name){
+    for (auto graphView : views()){
+        if (graphView->inView(name)){
+            return createUniqueName(name.append("_"));
+        }
+    }
+    return name;
+}
+
 ///////////////////////////////////////////////////////
 //        OPERATORS
 ///////////////////////////////////////////////////////
@@ -101,10 +116,11 @@ Aidge::IOIndex_t Aidge::Node::getNbFreeDataInputs() const {
 
 std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> Aidge::Node::dataInputs()
         const {
-    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
-            std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbData());
-    for (std::size_t i = 0; i < static_cast<std::size_t>(nbData()); ++i) {
-        res[i] = std::pair<std::shared_ptr<Node>, IOIndex_t>(mParents[i], mIdOutParents[i]);
+    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res;
+    for (std::size_t i = 0; i < static_cast<std::size_t>(nbInputs()); ++i) {
+        if (inputCategory(i) == InputCategory::Data || inputCategory(i) == InputCategory::OptionalData) {
+            res.push_back(std::pair<std::shared_ptr<Node>, IOIndex_t>(mParents[i], mIdOutParents[i]));
+        }
     }
     return res;
 }
@@ -320,18 +336,19 @@ bool Aidge::Node::removeChild(const std::shared_ptr<Aidge::Node> nodePtr,
 
 void Aidge::Node::resetConnections(bool includeLearnableParam) {
     // remove every parents reference to it
-    IOIndex_t nbRemovedInputs = includeLearnableParam ? nbInputs() : nbData();
-    for (IOIndex_t i = 0; i < nbRemovedInputs; ++i) {
-        std::pair<std::shared_ptr<Node>, IOIndex_t> parent = input(i);
-        if (parent.first) {
-            // number of children linked to the parent's output
-            while (parent.first->removeChild(shared_from_this(), parent.second) == true) {
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (includeLearnableParam || inputCategory(i) == InputCategory::Data || inputCategory(i) == InputCategory::OptionalData) {
+            std::pair<std::shared_ptr<Node>, IOIndex_t> parent = input(i);
+            if (parent.first) {
+                // number of children linked to the parent's output
+                while (parent.first->removeChild(shared_from_this(), parent.second) == true) {
+                }
             }
+            // every reference to this object as child has been removed
+            // removing reference to parents.
+            mParents[i] = nullptr;
+            mIdOutParents[i] = gk_IODefaultIndex;
         }
-        // every reference to this object as child has been removed
-        // removing reference to parents.
-        mParents[i] = nullptr;
-        mIdOutParents[i] = gk_IODefaultIndex;
     }
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
         for (std::pair<std::shared_ptr<Node>, IOIndex_t> child : output(i)) {
diff --git a/src/graph/OpArgs.cpp b/src/graph/OpArgs.cpp
index 124878fc45fe632d4a584e76a0eae6e7acfd53b9..e1a378c3db0d79d7816e9882f790540cdc26cd88 100644
--- a/src/graph/OpArgs.cpp
+++ b/src/graph/OpArgs.cpp
@@ -18,23 +18,37 @@ std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs)
     std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
     for (const OpArgs& elt : inputs) {
         if(elt.node() != nullptr) {
-            // >= to allow incomplete graphViews
-            assert(static_cast<std::size_t>(elt.node()->getNbFreeDataInputs()) >= gv->outputNodes().size());
-            /*
-            *  /!\ mn.view()->outputNodes() is a set, order of Nodes cannot be guaranted.
-            *  Prefer a functional description for detailed inputs
-            */
-            for (const std::shared_ptr<Node>& node_ptr : gv->outputNodes()) {
-                node_ptr -> addChild(elt.node()); // already checks that node_ptr->nbOutput() == 1
+            // Connect the first output (ordered) of each output node (ordered) 
+            // to the next available input of the input node.
+            AIDGE_ASSERT(static_cast<std::size_t>(elt.node()->getNbFreeDataInputs()) >= gv->outputNodes().size(),
+                "Sequential(): not enough free data inputs ({}) for input node {} (of type {}) to connect to all previous output nodes ({})",
+                elt.node()->getNbFreeDataInputs(), elt.node()->name(), elt.node()->type(), gv->outputNodes().size());
+            std::set<NodePtr> connectedOutputs;
+            for (const auto& node_out : gv->getOrderedOutputs()) {
+                if (connectedOutputs.find(node_out.first) == connectedOutputs.end()) {
+                    node_out.first -> addChild(elt.node(), node_out.second); // already checks that node_out->nbOutput() == 1
+                    connectedOutputs.insert(node_out.first);
+                }
             }
             gv->add(elt.node());
         }
         else {
-            for (std::shared_ptr<Node> node_in : elt.view()->inputNodes()) {
-                // >= to allow incomplete graphViews
-                assert(static_cast<std::size_t>(node_in->getNbFreeDataInputs()) >= gv->outputNodes().size());
-                for (std::shared_ptr<Node> node_out : gv->outputNodes()) {
-                    node_out -> addChild(node_in); // assert one output Tensor per output Node
+            // For each input node, connect the first output (ordered) of each 
+            // output node (ordered) to the next available input
+            std::set<NodePtr> connectedInputs;
+            for (const auto& node_in : elt.view()->getOrderedInputs()) {
+                if (connectedInputs.find(node_in.first) == connectedInputs.end()) {
+                    AIDGE_ASSERT(static_cast<std::size_t>(node_in.first->getNbFreeDataInputs()) >= gv->outputNodes().size(),
+                        "Sequential(): not enough free data inputs ({}) for input node {} (of type {}) to connect to all previous output nodes ({})",
+                        node_in.first->getNbFreeDataInputs(), node_in.first->name(), node_in.first->type(), gv->outputNodes().size());
+                    std::set<NodePtr> connectedOutputs;
+                    for (const auto& node_out : gv->getOrderedOutputs()) {
+                        if (connectedOutputs.find(node_out.first) == connectedOutputs.end()) {
+                            node_out.first -> addChild(node_in.first, node_out.second); // assert one output Tensor per output Node
+                            connectedOutputs.insert(node_out.first);
+                        }
+                    }
+                    connectedInputs.insert(node_in.first);
                 }
             }
             gv->add(elt.view());
@@ -58,16 +72,18 @@ std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::vector<OpArgs> inputs) {
 
 std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::vector<OpArgs> inputs) {
     std::shared_ptr<GraphView> gv = Sequential(inputs);
-    assert(gv->outputNodes().size() == 1U && "Zero or more than one output Node for the GraphView, don't know which one to choose from for the residual connection");
+    AIDGE_ASSERT(gv->outputNodes().size() == 1U,
+        "Residual(): Zero or more than one output Node for the GraphView, don't know which one to choose from for the residual connection");
     std::shared_ptr<Node> lastNode = *gv->outputNodes().begin();
-    assert(gv->inputNodes().size() == 2U && "Zero or more than one input Node for the GraphView, don't know which one to choose from for the residual connection");
+    AIDGE_ASSERT(gv->inputNodes().size() == 2U,
+        "Residual(): the GraphView should have exactly two input Nodes, in order to identify the node from which the residual connection starts");
     std::shared_ptr<Node> firstNode = nullptr;
     for (const std::shared_ptr<Node>& node_ptr : gv->inputNodes()) {
         if (node_ptr != lastNode) {
             firstNode = node_ptr;
         }
     }
-    assert(lastNode->getNbFreeDataInputs()>=1);
+    AIDGE_ASSERT(lastNode->getNbFreeDataInputs()>=1, "Residual(): missing a free data input for the output Node in order to connect the residual branch");
     gv->addChild(lastNode, firstNode, 0U, gk_IODefaultIndex);
     return gv;
 }
diff --git a/src/graph/Testing.cpp b/src/graph/Testing.cpp
index f30ad6e25b81e1ce7768fcc201ddf00c2226eebf..774ee8912da2ddaa19583debdac063a95b5aa461 100644
--- a/src/graph/Testing.cpp
+++ b/src/graph/Testing.cpp
@@ -45,7 +45,7 @@ std::pair<Aidge::NodePtr, std::set<Aidge::NodePtr>> Aidge::RandomGraph::gen(std:
     std::vector<NodePtr> nodes(nbNodes, nullptr);
     for (auto idx : nodesSeq) {
         const std::string name = nodesType[idx] + std::to_string(idx);
-        nodes[idx] = GenericOperator(nodesType[idx], nbIOs[idx].first, 0, nbIOs[idx].second, name);
+        nodes[idx] = GenericOperator(nodesType[idx], std::vector<InputCategory>(nbIOs[idx].first, InputCategory::Data), nbIOs[idx].second, name);
     }
 
     for (std::size_t i = 0; i < nbNodes; ++i) {
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 9b77ffcbe0117292ed0aa520309febf709e8dd68..57ece07152613b831675cdecd6526d4ab26af5cb 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -33,15 +33,7 @@ Aidge::Add_Op::Add_Op(const Add_Op& op)
 }
 
 bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = (nbInputs() > 0); // do not compute anything if no input
-    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
-        }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
         for (std::size_t i = 0; i < nbInputs(); i++) {
             inputsDims[i] = getInput(i)->dims();
@@ -70,9 +62,10 @@ bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
             }
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 07123bc88aa1da22bfa98166d6a01af8d66be98d..53ffb93269e79c0ba940f1fb0d3d94cb494ad8ce 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -27,7 +27,10 @@ template <Aidge::DimIdx_t DIM>
 const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling";
 
 template <Aidge::DimIdx_t DIM>
-Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op): OperatorTensor(op), Attributes_(op) {
+Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
     if (op.mImpl) {
         SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend());
     } else {
@@ -37,21 +40,17 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op): Operator
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-    if (!(getInput(0)->empty())) {
+    if (inputsAssociated()) {
         std::array<DimSize_t, DIM + 2> outputDims;
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
         outputDims[0] = inputDims[0];
         outputDims[1] = inputDims[1];
 
-        for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                         std::floor(static_cast<float>(inputDims[dim+2] -
-                                                                this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
-                                        static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
+                                                            mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
+                                        static_cast<float>(mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
         }
         getOutput(0)->resize(outputDims);
         return true;
@@ -89,10 +88,10 @@ Aidge::AvgPooling_Op<DIM>::computeReceptiveField(const std::vector<Aidge::DimSiz
 
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
-            inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
+            inputIdxDims[2+i] *= mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
         }
         std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
         res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index 14bf65763c024ffe28d30654a49c9630737a12fd..98e5c2da20fc35e18d4fd69a79cf1d87ec9d60ca 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -27,7 +27,10 @@ template <Aidge::DimIdx_t DIM>
 const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm";
 
 template <Aidge::DimIdx_t DIM>
-Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op): OperatorTensor(op), Attributes_(op) {
+Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
     if (op.mImpl) {
         SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend());
     } else {
@@ -37,23 +40,19 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op): OperatorTen
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = true;
-    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         const DimSize_t nbFeatures =  getInput(0)->dims()[1];
-        for (std::size_t i = nbData(); i < nbInputs(); ++i) {
-            if(getInput(i)->size() != nbFeatures) {
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            if(inputCategory(i) == InputCategory::Param && getInput(i)->size() != nbFeatures) {
                 // /!\ Input size should be handled BEFORE calling this function
                 // This should raise an error
                 getInput(i)->resize({getInput(0)->dims()[1]});
             }
         }
         mOutputs[0]->resize(getInput(0)->dims());
+        return true;
     }
-    return associated;
+    return false;
 }
 
 template <Aidge::DimIdx_t DIM>
@@ -62,10 +61,33 @@ void Aidge::BatchNorm_Op<DIM>::setBackend(const std::string &name, Aidge::Device
     mOutputs[0]->setBackend(name, device);
 
     // By default, automatically set backend for scale, shift, mean and variance
-    getInput(1)->setBackend(name, device);
-    getInput(2)->setBackend(name, device);
-    getInput(3)->setBackend(name, device);
-    getInput(4)->setBackend(name, device);
+    if (getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    else {
+        Log::notice("BatchNorm_Op::setBackend(): could not set backend for scale input, because input is not connected");
+    }
+
+    if (getInput(2)) {
+        getInput(2)->setBackend(name, device);
+    }
+    else {
+        Log::notice("BatchNorm_Op::setBackend(): could not set backend for shift input, because input is not connected");
+    }
+
+    if (getInput(3)) {
+        getInput(3)->setBackend(name, device);
+    }
+    else {
+        Log::notice("BatchNorm_Op::setBackend(): could not set backend for mean input, because input is not connected");
+    }
+
+    if (getInput(4)) {
+        getInput(4)->setBackend(name, device);
+    }
+    else {
+        Log::notice("BatchNorm_Op::setBackend(): could not set backend for variance input, because input is not connected");
+    }
 }
 
 template class Aidge::BatchNorm_Op<2>;
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index f1c8e25e17c80d58d444a1ddddbaa428b2fc4c41..8df153a67d2214e4435d9fa0aac6e74d53e11b12 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -27,6 +27,16 @@ void Aidge::Cast_OpImpl::forward() {
 
 const std::string Aidge::Cast_Op::Type = "Cast";
 
+Aidge::Cast_Op::Cast_Op(const DataType targetType)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+        attr<CastAttr::TargetType>(targetType)))
+{
+    mImpl = std::make_shared<Cast_OpImpl>(*this);
+    mOutputs[0]->setDataType(targetType);
+}
+
+
 void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Cast_Op>::exists({name})) {
         SET_IMPL_MACRO(Cast_Op, *this, name);
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index ee06ce69b135e11fe3ed5be8fa9f501debb6acd5..bf4bbb85be606fc857bf8d771b9ce211ca8e858e 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -20,7 +20,7 @@
 
 void Aidge::Concat_OpImpl::forward() {
     const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
-    const DimSize_t axis = op.template getAttr<DimSize_t>("Axis");
+    const DimSize_t axis = op.axis();
 
     assert(op.getInput(0) && "missing input in Concat operator");
     DataType datatypeFirstInput = op.getInput(0)->dataType();
@@ -60,36 +60,47 @@ void Aidge::Concat_OpImpl::forward() {
 const std::string Aidge::Concat_Op::Type = "Concat";
 
 bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
-    // Every input is non-empty with the same number of dimensions
-    bool associated = (getInput(0) != nullptr);
-    associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input
-    auto outputDims =  getInput(0)->dims();
-    const auto firstInputNbDims = getInput(0) -> nbDims();
+    if (!inputsAssociated()) {
+        return false;
+    }
+    const std::size_t nbDimsInput0 = getInput(0)->nbDims();
+    if (nbDimsInput0 == 0) {
+        return false;
+    }
+    AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is empty", type());
     for (IOIndex_t i = 1; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        if (getInput(i)->nbDims() == 0) {
+            return false;
         }
+        AIDGE_ASSERT(nbDimsInput0 == getInput(i)->nbDims(),
+            "Input 0 and input {} in {} Operator have different number of dimensions: {} / {}",
+            i, type(), nbDimsInput0, getInput(i)->nbDims());
+    }
+    // Check validity of attributes with inputs
+    // Axis
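+    // A negative axis is counted from the last dimension (e.g. -1 is the last axis)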
+    std::int32_t axis = mAttributes->template getAttr<ConcatAttr::Axis>();
+    axis = (axis < 0) ? axis + static_cast<std::int32_t>(nbDimsInput0) : axis;
+    AIDGE_ASSERT(((axis >= 0) && (axis < static_cast<std::int32_t>(nbDimsInput0))),
+                "'Axis' attribute not compatible with provided inputs.");
+    const std::size_t axis_u64 = static_cast<std::size_t>(axis);
 
-        if (getInput(i)->nbDims() == firstInputNbDims) {
-            for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
-                if (dim == getAttr<ConcatAttr::Axis>()) {
-                    outputDims[dim] += getInput(i)->dims()[dim];
-                }
-                else {
-                    associated &= (getInput(i)->dims()[dim] == outputDims[dim]);
-                }
+    // Check validity of inputs
+    auto outputDims =  getInput(0)->dims();
+    for (IOIndex_t i = 1; i < nbInputs(); ++i) {
+        for (DimSize_t dim = 0; dim < nbDimsInput0; ++dim) {
+            if (dim == axis_u64) {
+                outputDims[axis_u64] += getInput(i)->dims()[axis_u64];
+            }
+            else {
+                AIDGE_ASSERT(getInput(i)->dims()[dim] == outputDims[dim],
+                    "Incompatible dimensions between input 0 {} and input {} {}",
+                    getInput(0)->dims(), i, getInput(i)->dims());
             }
         }
-        else {
-            associated = false;
-            break;
-        }
-    }
-    if (associated) {
-        getOutput(0)->resize(outputDims);
     }
 
-    return associated;
+    getOutput(0)->resize(outputDims);
+    return true;
 }
 
 void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a33af78779971e77da4f4e910b89b9263a1af5d6
--- /dev/null
+++ b/src/operator/Conv.cpp
@@ -0,0 +1,161 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Conv.hpp"
+
+#include <cmath>      // std::floor
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <utility>    // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::Conv_Op<DIM>::Type = "Conv";
+
+template <Aidge::DimIdx_t DIM>
+Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        // first check weight since it defines inChannels and outChannels
+        AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
+                    "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
+        // check data
+        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
+                    (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
+                    "Wrong input size for Conv operator.");
+        // check optional bias
+        if(getInput(2))
+            AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
+                    (getInput(2)->template dims<1>()[0] == outChannels()),
+                    "Wrong bias size for Conv operator.");
+        std::array<DimSize_t, DIM + 2> outputDims{};
+        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+
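+        // Spatial output size: out = floor((in - ((kernel - 1) * dilation + 1)) / stride) + 1
+        // (padding is not handled here; it is the role of the Pad operator)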
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvAttr::DilationDims>()[dim] *
+                                                    (mAttributes->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
+                                            1;
+
+            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                    floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
+                            static_cast<float>(mAttributes->template getAttr<ConvAttr::StrideDims>()[dim])));
+        }
+
+        outputDims[1] = outChannels();
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+
+    return false;
+}
+
+
+template <Aidge::DimIdx_t DIM>
+std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>
+Aidge::Conv_Op<DIM>::computeReceptiveField(
+                          const std::vector<Aidge::DimSize_t>& firstEltDims,
+                          const std::vector<Aidge::DimSize_t>& outputDims,
+                          const Aidge::IOIndex_t outputIdx) const
+{
+    if (outputIdx != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op operator has only one output Tensor.");
+    }
+    if (firstEltDims.size() != outputDims.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
+    }
+    if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
+        // Offset
+        auto inputIdxDims = firstEltDims; // batch idx is the same
+        inputIdxDims[1] = 0; // each channel is used so start with the first one
+
+        for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+            if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
+            }
+        }
+
+        // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
+        // Input
+        // same batch value, every input channel is used
+        std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
+        for (DimIdx_t i = 0; i < DIM; ++i) {
+            inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        + 1
+                        + (mAttributes->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
+        }
+
+        // Weight
+        // same output value, every input channel is used
+        std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
+        for (std::size_t i = 0; i < DIM; ++i) {
+            weightDims.push_back(mAttributes->template getAttr<ConvAttr::KernelDims>()[i]);
+        }
+        std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+        weightIdxDims[0] = firstEltDims[1];
+
+        // Result
+        std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> res;
+        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
+        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
+
+        // Bias
+        if (getInput(2)){
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
+            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
+            res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+        }
+        return res;
+    }
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+
+    // By default, automatically set backend for weight and bias inputs
+    if (getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    else {
+        Log::notice("Conv_Op::setBackend(): could not set backend for weight input, because input is not connected");
+    }
+
+    if (getInput(2)) {
+        // Bias is optional
+        getInput(2)->setBackend(name, device);
+    }
+}
+
+template class Aidge::Conv_Op<1>;
+template class Aidge::Conv_Op<2>;
\ No newline at end of file
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..342fd86195d5c2e85a63d990c4ebbb75e7f50a6b
--- /dev/null
+++ b/src/operator/ConvDepthWise.cpp
@@ -0,0 +1,160 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ConvDepthWise.hpp"
+
+#include <array>
+#include <cmath>      // std::floor
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <utility>    // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
+
+template <Aidge::DimIdx_t DIM>
+Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        // first check weight since it defines nbChannels
+        AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
+                    "Wrong weight Tensor dimension: {} for ConvDepthWise{}D operator.", getInput(1)->nbDims(), DIM);
+        // check data
+        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
+                    (getInput(0)->template dims<DIM+2>()[1] == nbChannels()),
+                    "Wrong input size for ConvDepthWise operator.");
+        // check optional bias
+        if(getInput(2))
+            AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
+                    (getInput(2)->template dims<1>()[0] == nbChannels()),
+                    "Wrong bias size for ConvDepthWise operator.");
+        std::array<DimSize_t, DIM + 2> outputDims = {};
+        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+
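+        // Spatial output size: out = floor((in - ((kernel - 1) * dilation + 1)) / stride) + 1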
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
+                                                    (mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
+                                            1;
+
+            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                    floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
+                            static_cast<float>(mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
+        }
+
+        outputDims[1] = inputDims[1];
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+
+    return false;
+}
+
+
+template <Aidge::DimIdx_t DIM>
+std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>
+Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
+                          const std::vector<Aidge::DimSize_t>& firstEltDims,
+                          const std::vector<Aidge::DimSize_t>& outputDims,
+                          const Aidge::IOIndex_t outputIdx) const
+{
+    if (outputIdx != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "ConvDepthWise_Op operator has only one output Tensor.");
+    }
+    if (firstEltDims.size() != outputDims.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
+    }
+    if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
+        // Offset
+        auto inputIdxDims = firstEltDims; // batch idx is the same
+
+        for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+            if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
+            }
+        }
+
+        // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
+        // Input
+        // same batch value
+        std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
+        for (DimIdx_t i = 0; i < DIM; ++i) {
+            inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        + 1
+                        + (mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
+        }
+
+        // Weight
+        std::vector<DimSize_t> weightDims{outputDims[1], 1};
+        for (std::size_t i = 0; i < DIM; ++i) {
+            weightDims.push_back(mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
+        }
+        std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+        weightIdxDims[0] = firstEltDims[1];
+
+
+        // Result
+        std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> res;
+        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
+        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
+        // Bias
+        if (getInput(2)){
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
+            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
+            res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+        }
+        return res;
+    }
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+
+    // By default, automatically set backend for weight and bias inputs
+    if (getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    else {
+        Log::notice("ConvDepthWise_Op::setBackend(): could not set backend for weight input, because input is not connected");
+    }
+
+    if (getInput(2)) {
+        // Bias is optional
+        getInput(2)->setBackend(name, device);
+    }
+}
+
+template class Aidge::ConvDepthWise_Op<1>;
+template class Aidge::ConvDepthWise_Op<2>;
\ No newline at end of file
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index e6300d08c2c792c8a3eb66b307aca53f9d2acc73..387a9516077a937cca5c20ad091547b7f1c5be6f 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -23,13 +23,7 @@
 const std::string Aidge::Div_Op::Type = "Div";
 
 bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index ba7e29e7b6543a570ceede6158bd306286037c10..44d499bc7e125c757f802e086c22e1e6c72e9216 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -37,19 +37,36 @@ void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::sh
 }
 
 bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
-    bool associated = true;
-    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+    if (inputsAssociated()) {
+        // first check weight since it defines inChannels and outChannels
+        AIDGE_ASSERT((getInput(1)->nbDims() == 2),
+                    "Wrong weight Tensor dimension: {} for FC operator (should have 2 dimensions).", getInput(1)->nbDims());
+        const DimSize_t outChannels = getInput(1)->template dims<2>()[0];
+        const DimSize_t inChannels = getInput(1)->template dims<2>()[1];
+        // check data
+        const std::vector<DimSize_t>& inputDims = getInput(0)->dims();
+        if (getInput(0)->nbDims() == 1) {
+            AIDGE_ASSERT(inputDims[0] == inChannels,
+                "Wrong number of input features for input data ({}), expected {}",
+                inputDims[0], inChannels);
+        } else {
+            AIDGE_ASSERT(getInput(0)->nbDims() > 1, "FC input data must have at least one dimension");
+            const DimSize_t nbInputFeatures = std::accumulate(inputDims.cbegin() + 1, inputDims.cend(), DimSize_t(1), std::multiplies<DimSize_t>());
+            AIDGE_ASSERT(nbInputFeatures == inChannels,
+                    "Wrong number of input features for input data ({}), expected {}",
+                    nbInputFeatures, inChannels);
         }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+        // check optional bias
+        if(getInput(2))
+            AIDGE_ASSERT((getInput(2)->nbDims() == 1) &&
+                    (getInput(2)->template dims<1>()[0] == outChannels),
+                    "Wrong bias size for FC operator.");
         // <batch, OutChannels>
-        mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()});
+        mOutputs[0]->resize({getInput(0)->dims()[0], outChannels});
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
@@ -57,6 +74,15 @@ void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device
     mOutputs[0]->setBackend(name, device);
 
     // By default, automatically set backend for weight and bias inputs
-    getInput(1)->setBackend(name, device);
-    getInput(2)->setBackend(name, device);
+    if (getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    else {
+        Log::notice("FC_Op::setBackend(): could not set backend for weight input, because input is not connected");
+    }
+
+    if (getInput(2)) {
+        // Bias is optional
+        getInput(2)->setBackend(name, device);
+    }
 }
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 7b0945271660be8f309024f46c258e6a7e2193e5..c28a0587a755ef0a910ec5bfdeb9caa2f1edc216 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -9,24 +9,21 @@
  *
  ********************************************************************************/
 
-#include "aidge/operator/Gather.hpp"
-
 #include <cstddef>  // std::size_t
 #include <cstdint>  // std::int64_t
 #include <string>
 #include <vector>
 
+#include "aidge/operator/Gather.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
 
 void Aidge::Gather_OpImpl::forward() {
     const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
-    const auto axis = op.template getAttr<std::int64_t>("Axis");
 
-    const std::size_t axisIdx = axis>=0 ?
-                                axis :
-                                static_cast<std::size_t>(axis) + op.getInput(0)->dims().size();
+    const std::size_t axisIdx = static_cast<std::size_t>(op.axis()) + (op.axis() >= 0 ? 0 : op.getInput(0)->dims().size());
 
     std::size_t postAxisElems = 1;
     for (std::size_t i = axisIdx + 1; i < op.getInput(0)->dims().size(); ++i) {
@@ -37,13 +34,14 @@ void Aidge::Gather_OpImpl::forward() {
         preAxisElems *= op.getInput(0)->dims()[i];
     }
 
-    const auto indices = op.template getAttr<std::vector<std::int64_t>>("Indices");
     std::size_t outputOffset = 0;
     for (std::size_t i=0; i<preAxisElems; ++i)
     {
-        for(std::size_t j=0; j<indices.size(); ++j)
+        for(std::size_t j = 0; j < op.indices().size(); ++j)
         {
-            const std::size_t idx = indices[j] >= 0 ? indices[j] : static_cast<std::size_t>(indices[j]) + op.getInput(0)->dims()[axisIdx];
+            const std::size_t idx = op.indices()[j] >= 0 ?
+                                        static_cast<std::size_t>(op.indices()[j]) :
+                                        static_cast<std::size_t>(op.indices()[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
             op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i * postAxisElems * op.getInput(0)->dims()[axisIdx] + idx * postAxisElems), postAxisElems, outputOffset);
             outputOffset += postAxisElems;
         }
@@ -52,28 +50,53 @@ void Aidge::Gather_OpImpl::forward() {
 
 const std::string Aidge::Gather_Op::Type = "Gather";
 
-bool Aidge::Gather_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
+bool Aidge::Gather_Op::dimsForwarded() const {
+    if (getInput(1) && !getInput(1)->empty()) {
+        // output dims are data dependent
+        return false;
     }
 
-    if (!getInput(0)->empty()) {
+    return OperatorTensor::dimsForwarded();
+}
+
+bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute Indices
+        if (getInput(1)) {
+            if (!this->indices().empty()) {
+                Log::notice("Gather_Op: ignoring non-empty Indices attribute because input#1 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("Gather_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
+
+            std::shared_ptr<Tensor> fallback;
+            this->gatheredShape() = getInput(1)->dims();
+            this->indices().clear(); // If both are provided, the input overrides the attribute
+            this->indices().reserve(getInput(1)->size());
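+            // Cast input#1 to int64 on CPU if needed, then copy its values into the Indices attribute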
+            const auto& indices = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(indices.getImpl()->hostPtr()),
+                        indices.size(),
+                        std::back_inserter(this->indices()));
+        }
+
+        AIDGE_ASSERT(!this->indices().empty(), "Missing input#1 or Indices attribute");
+
+        // Compute output dims
         std::vector<DimSize_t> outDims = getInput(0)->dims();
-        const std::vector<DimSize_t> gatheredShape = this->template getAttr<GatherAttr::GatheredShape>();
-        // TODO: check indices and gatheredShape
 
-        const std::int64_t axisIdx = this->template getAttr<GatherAttr::Axis>() >= 0 ?
-                                        this->template getAttr<GatherAttr::Axis>() :
-                                        this->template getAttr<GatherAttr::Axis>() + outDims.size();
+        std::int8_t axisIdx = (this->axis() >= 0) ?
+                                this->axis() :
+                                this->axis() + outDims.size();
         outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
-        if (!gatheredShape.empty())
+        if (!this->gatheredShape().empty())
         {
-            outDims.insert(outDims.cbegin() + static_cast<std::size_t>(axisIdx),
-                            gatheredShape.cbegin(),
-                            gatheredShape.cend());
+            outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx),
+                            this->gatheredShape().begin(),
+                            this->gatheredShape().end());
         }
-
         mOutputs[0]->resize(outDims);
         return true;
     }
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index fdf3036fe7eeccb2dfd9e21faf834e27854e45f3..d49e1f0838f623bca1546e54ea4f4e470d70e1c5 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -26,9 +26,10 @@ const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Inpu
 }
 
 bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
-    if (mForwardDims) {
+    if (mForwardDims && inputsAssociated(false)) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs(), std::vector<std::size_t>());
         for (std::size_t i = 0; i < nbInputs(); ++i) {
+            // Check for input, as it may be optional
             if (getInput(i)) {
                 inputsDims[i] = getInput(i)->dims();
             }
@@ -47,12 +48,10 @@ bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
     }
 }
 
-bool Aidge::GenericOperator_Op::dimsForwarded() const {
-    if (mForwardDims) {
-        return !(mOutputs[0]->empty());
-    }
-    else {
-        Log::notice("GenericOperator: not output dims forwarded, no ComputeDimsFunc function provided.");
-        return false;
+void Aidge::GenericOperator_Op::setBackend(const std::string & name, DeviceIdx_t device) {
+    Log::warn("GenericOperator::setBackend(): cannot set backend for a generic operator, as no implementation has been provided!");
+
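+    // The backend is nonetheless propagated to the output Tensors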
+    for (std::size_t i = 0; i < nbOutputs(); ++i) {
+        mOutputs[i]->setBackend(name, device);
     }
 }
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index b09426f8f835eda5600b630488ef18c5b08ba32a..1632c8a7677c884194494269e1a8cd93e7ef7822 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -22,26 +22,20 @@
 const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling";
 
 bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
-  // error checking
-  if (!getInput(0)) {
-    AIDGE_THROW_OR_ABORT(std::runtime_error,
-                         "GlobalAveragePooling : The input was not connected");
-  }
-  else if (!getInput(0)->empty()) {
-    AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
-                 "GlobalAveragePooling :  needs at least a 3 dimensions input, "
-                 "number of input dim : {}",
-                 getInput(0)->dims().size());
-    // Global average pooling takes each filter, averages its values and uses
-    // it as an output(Much like a fancier flatten). 1st dim is batch 2nd is
-    // number of filter
-    const std::vector<DimSize_t> out_dims{getInput(0)->dims().at(0),
-                                          getInput(0)->dims().at(1)};
-    mOutputs[0]->resize(out_dims);
-    return true;
-  }
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
+                    "GlobalAveragePooling: needs at least a 3-dimensional input, "
+                    "got {} dimensions",
+                    getInput(0)->dims().size());
+        // Global average pooling takes each filter, averages its values and uses
+        // the result as an output (much like a fancier flatten).
+        // 1st dim is the batch, 2nd is the number of filters
+        mOutputs[0]->resize({getInput(0)->dims().at(0),
+                             getInput(0)->dims().at(1)});
+        return true;
+    }
 
-  return false;
+    return false;
 }
 
 void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Ln.cpp b/src/operator/Ln.cpp
new file mode 100755
index 0000000000000000000000000000000000000000..7e9f9ad01186f53a0f89657acb72f6a544223068
--- /dev/null
+++ b/src/operator/Ln.cpp
@@ -0,0 +1,25 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Ln.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Ln_Op::Type = "Ln";
+
+void Aidge::Ln_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    mImpl = Registrar<Ln_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 8f7548155cde4c7187f7a7fe96a44c4accd2c302..17b4960dfdfc9de199cc25b0119a5cb000bcf48c 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -21,58 +21,57 @@
 const std::string Aidge::MatMul_Op::Type = "MatMul";
 
 bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input. Cannot compute output dimensions for MatMul Operator.");
-    }
-    if (getInput(0)->empty() && getInput(1)->empty()) {
-        // both inputs are scalar
-        mOutputs[0]->resize({});
-        return true;
-    }
-    else if (!getInput(0)->empty() && !getInput(1)->empty())
-    {
-        std::vector<std::size_t> dims0 = getInput(0)->dims();
-        std::vector<std::size_t> dims1 = getInput(1)->dims();
+    if (inputsAssociated(false)) {
+        if (getInput(0)->empty() && getInput(1)->empty()) {
+            // both inputs are scalar
+            mOutputs[0]->resize({});
+            return true;
+        }
+        else if (!getInput(0)->empty() && !getInput(1)->empty())
+        {
+            std::vector<std::size_t> dims0 = getInput(0)->dims();
+            std::vector<std::size_t> dims1 = getInput(1)->dims();
 
-        // keep second-to-last dimension of dims0
-        const bool keepDim0 = dims0.size() > 1;
-        // keep last dimension of dims1
-        const bool keepDim1 = dims1.size() > 1;
+            // keep second-to-last dimension of dims0
+            const bool keepDim0 = dims0.size() > 1;
+            // keep last dimension of dims1
+            const bool keepDim1 = dims1.size() > 1;
 
-        if (dims0.size() == 1) {
-            dims0.insert(dims0.cbegin(), 1);
-        }
-        if (dims1.size() == 1) {
-            dims1.push_back(1);
-        }
-        const std::size_t dims_size = std::max(dims0.size(), dims1.size());
+            if (dims0.size() == 1) {
+                dims0.insert(dims0.cbegin(), 1);
+            }
+            if (dims1.size() == 1) {
+                dims1.push_back(1);
+            }
+            const std::size_t dims_size = std::max(dims0.size(), dims1.size());
 
 
-        if (dims0.size() > dims1.size()) {
-            dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
-        }
-        else if (dims1.size() > dims0.size()) {
-            dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
-        }
+            if (dims0.size() > dims1.size()) {
+                dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
+            }
+            else if (dims1.size() > dims0.size()) {
+                dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
+            }
 
-        AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes.");
+            AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes.");
 
-        std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
-        for (std::size_t i = 0; i < dims_size-2; ++i) {
-            AIDGE_ASSERT((dims0[i] == dims1[i]) || (dims0[i] == 1) || (dims1[i] == 1), "Bad vector dimension.");
-            outDims[i] = std::max(dims0[i], dims1[i]);
-        }
+            std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
+            for (std::size_t i = 0; i < dims_size-2; ++i) {
+                AIDGE_ASSERT((dims0[i] == dims1[i]) || (dims0[i] == 1) || (dims1[i] == 1), "Bad vector dimension.");
+                outDims[i] = std::max(dims0[i], dims1[i]);
+            }
 
-        // use keepDim0 instead of dims0.size() because dims0 has been modified
-        if (keepDim0)
-            outDims.push_back(dims0[dims_size-2]);
-        if (keepDim1)
-            outDims.push_back(dims1[dims_size-1]);
+            // use keepDim0 instead of dims0.size() because dims0 has been modified
+            if (keepDim0)
+                outDims.push_back(dims0[dims_size-2]);
+            if (keepDim1)
+                outDims.push_back(dims1[dims_size-1]);
 
-        mOutputs[0]->resize(outDims);
-        return true;
+            mOutputs[0]->resize(outDims);
+            return true;
+        }
     }
-    
+
     return false;
 }
 
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index e08b5f1054f07a9dcc1722d219ebce022f994d61..adf79b5c69e991ad7979184c313448e4288a8ecb 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -24,14 +24,13 @@ Aidge::Elts_t Aidge::Memorize_OpImpl::getNbRequiredData(
     Aidge::IOIndex_t inputIdx) const
 {
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
 
-    if (scheduleStep == 0 && inputIdx == 0) {
+    if (op.scheduleStep() == 0 && inputIdx == 0) {
         // No data input is required for the initial step.
         // Initialization data is required however.
         return Elts_t::NoneElts();
     }
-    else if (scheduleStep > 0 && inputIdx == 1) {
+    else if (op.scheduleStep() > 0 && inputIdx == 1) {
         // No initialization data is required after the initial step.
         return Elts_t::NoneElts();
     }
@@ -45,10 +44,8 @@ Aidge::Elts_t Aidge::Memorize_OpImpl::getRequiredMemory(const Aidge::IOIndex_t o
     assert(mOp.getRawOutput(outputIdx) && "requires valid output");
 
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
 
-    if (endStep > 0 && outputIdx == 1 && scheduleStep >= endStep) {
+    if ((op.endStep() > 0) && (outputIdx == 1) && (op.scheduleStep() >= op.endStep())) {
         return Elts_t::NoneElts();
     }
     else {
@@ -60,18 +57,15 @@ void Aidge::Memorize_OpImpl::updateConsummerProducer() {
     OperatorImpl::updateConsummerProducer();
 
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
-    AIDGE_ASSERT(endStep == 0 || scheduleStep <= endStep, "cannot update consumer producer anymore, number of cycles exceeded");
+    AIDGE_ASSERT(op.endStep() == 0 || op.scheduleStep() <= op.endStep(), "cannot update consumer producer anymore, number of cycles exceeded");
 }
 
 void Aidge::Memorize_OpImpl::forward() {
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int forwardStep = op.template getAttr<MemorizeAttr::ForwardStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
-    AIDGE_ASSERT(endStep == 0 || forwardStep <= endStep, "cannot forward anymore, number of cycles exceeded");
 
-    if (forwardStep == 0) {
+    AIDGE_ASSERT((op.endStep() == 0) || (op.forwardStep() <= op.endStep()), "cannot forward anymore, number of cycles exceeded");
+
+    if (op.forwardStep() == 0) {
         op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size());
     }
     else {
@@ -83,28 +77,24 @@ const std::string Aidge::Memorize_Op::Type = "Memorize";
 
 void Aidge::Memorize_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
-    ++this->template getAttr<MemorizeAttr::ScheduleStep>();
-    this->template getAttr<MemorizeAttr::ForwardStep>() = 0;
+    ++mAttributes->template getAttr<MemorizeAttr::ScheduleStep>();
+    mAttributes->template getAttr<MemorizeAttr::ForwardStep>() = 0;
 }
 
 bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) {
-    for (size_t i = 0; i < 2; ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+    if (inputsAssociated(false)) {
+        // Only require one of the inputs to have its dims defined
+        // Otherwise, forwardDims() won't converge!
+        if (!(getInput(0)->empty())) {
+            const auto expectedDims =  getInput(0)->dims();
+            mOutputs[0]->resize(expectedDims);
+            return true;
+        }
+        else if (!(getInput(1)->empty())) {
+            const auto expectedDims =  getInput(1)->dims();
+            mOutputs[0]->resize(expectedDims);
+            return true;
         }
-    }
-
-    // Only require one of the input to have dims defined
-    // Otherwise, forwardDims() won't converge!
-    if (!(getInput(0)->empty())) {
-        const auto expectedDims =  getInput(0)->dims();
-        mOutputs[0]->resize(expectedDims);
-        return true;
-    }
-    else if (!(getInput(1)->empty())) {
-        const auto expectedDims =  getInput(1)->dims();
-        mOutputs[0]->resize(expectedDims);
-        return true;
     }
 
     return false;
@@ -132,6 +122,6 @@ void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
 
 void Aidge::Memorize_Op::forward() {
     Operator::forward();
-    ++this->template getAttr<MemorizeAttr::ForwardStep>();
-    this->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
+    ++mAttributes->template getAttr<MemorizeAttr::ForwardStep>();
+    mAttributes->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
 }
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 36ff1854703d015980a1943390eb87d0863d877f..e7c50033797c7c984b6b8da69d30f005bc69e70c 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -20,15 +20,16 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph)
-    : OperatorTensor(type, graph->dataInputs().size(), (graph->getOrderedInputs().size() - graph->dataInputs().size()), graph->getOrderedOutputs().size()),
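+    // Build the input categories of the meta-operator from the operators of its micro-graph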
+    : OperatorTensor(type, [graph]() {
+        std::vector<InputCategory> inputsCategory;
+        for (const auto& in : graph->getOrderedInputs()) {
+            inputsCategory.push_back(in.first->getOperator()->inputCategory(in.second));
+        }
+        return inputsCategory;
+    }(), graph->getOrderedOutputs().size()),
         mGraph(graph)
 {
-    mInputs = std::vector<std::shared_ptr<Tensor>>(mGraph->getOrderedInputs().size());
-    for (std::size_t i = 0; i < mInputs.size(); ++i) {
-        mInputs[i] = std::make_shared<Tensor>();
-    }
     // Associate outputs to micro-graph outputs for custom implementation
-    mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->getOrderedOutputs().size());
     for (size_t outputIdx = 0; outputIdx < mOutputs.size(); ++outputIdx) {
         const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
         if (outputOp.first) {
@@ -58,16 +59,6 @@ void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, const std
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
 }
 
-void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, std::shared_ptr<Data>&& data) {
-    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
-
-    const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
-    inputOp.first->getOperator()->setInput(inputOp.second, std::forward<std::shared_ptr<Data>>(data));
-
-    // Associate inputs for custom implementation
-    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
-}
-
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
     if (mImpl) {
         return mImpl->getNbRequiredData(inputIdx);
@@ -143,6 +134,20 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbProducedData(IOIndex_t outputIdx) con
     }
 }
 
+void Aidge::MetaOperator_Op::resetConsummerProducer() {
+    if (mImpl) {
+        mImpl->resetConsummerProducer();
+    }
+    else {
+        if (!mScheduler) {
+            // Lazy initialization
+            mScheduler = std::make_shared<SequentialScheduler>(mGraph, mUpperNode.lock());
+        }
+
+        mScheduler->resetScheduling();
+    }
+}
+
 void Aidge::MetaOperator_Op::updateConsummerProducer() {
     if (mImpl) {
         mImpl->updateConsummerProducer();
diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..910e7c67aad0068679ca2d240b23312add3e42d7
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/LSTM.cpp
@@ -0,0 +1,229 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include <array>
+#include <memory>
+#include <string>
+
+#include "aidge/operator/Memorize.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/Identity.hpp"
+#include "aidge/operator/Concat.hpp"
+#include "aidge/operator/Tanh.hpp"
+
+namespace Aidge {
+
+std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
+                           const DimSize_t hiddenChannel,
+                           const DimSize_t seqLength,
+                           bool noBias,
+                           const std::string& name)
+{
+    // Construct micro-graph
+    auto input = Identity((!name.empty()) ? name + "_input" : "");
+    auto hiddenState = Memorize(seqLength, (!name.empty()) ? name + "_hidden_state" : "");
+    auto cellState = Memorize(seqLength, (!name.empty()) ? name + "_cell_state" : "");
+    auto add = Add(2, (!name.empty()) ? name + "_add" : "");
+
+    // Forget gate
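+    // forgetGate = sigmoid(FC(x) + FC(h)); it scales the previous cell state element-wise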
+    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateX" : "");
+    input->addChild(forgetGateX, 0, 0);
+    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateH" : "");
+    hiddenState->addChild(forgetGateH, 1, 0);
+    auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : "");
+    forgetGateX->addChild(forgetGate, 0, 0);
+    forgetGateH->addChild(forgetGate, 0, 1);
+    auto forgetGateAct = Sigmoid((!name.empty()) ? name + "_forgetGateAct" : "");
+    auto forgetGateMul = Mul((!name.empty()) ? name + "_forgetGateMul" : "");
+    forgetGate->addChild(forgetGateAct, 0, 0);
+    forgetGateAct->addChild(forgetGateMul, 0, 0);
+    forgetGateMul->addChild(add, 0, 0);
+    cellState->addChild(forgetGateMul, 1, 1);
+
+    // Input gate
+    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_inputGateX" : "");
+    input->addChild(inputGateX, 0, 0);
+    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_inputGateH" : "");
+    hiddenState->addChild(inputGateH, 1, 0);
+    auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : "");
+    inputGateX->addChild(inputGate, 0, 0);
+    inputGateH->addChild(inputGate, 0, 1);
+    auto inputGateAct = Sigmoid((!name.empty()) ? name + "_inputGateAct" : "");
+    auto inputGateMul = Mul((!name.empty()) ? name + "_inputGateMul" : "");
+    inputGate->addChild(inputGateAct, 0, 0);
+    inputGateAct->addChild(inputGateMul, 0, 0);
+    inputGateMul->addChild(add, 0, 1);
+
+    // Candidate for cell update
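+    // cellCandidate = tanh(FC(x) + FC(h)); it is gated by the input gate before being added to the cell state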
+    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateX" : "");
+    input->addChild(cellCandidateX, 0, 0);
+    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateH" : "");
+    hiddenState->addChild(cellCandidateH, 1, 0);
+    auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : "");
+    cellCandidateX->addChild(cellCandidate, 0, 0);
+    cellCandidateH->addChild(cellCandidate, 0, 1);
+    auto cellCandidateAct = Tanh((!name.empty()) ? name + "_cellCandidateAct" : "");
+    cellCandidate->addChild(cellCandidateAct, 0, 0);
+    cellCandidateAct->addChild(inputGateMul, 0, 1);
+
+    // Output gate
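+    // outputGate = sigmoid(FC(x) + FC(h)); it gates tanh(new cell state) to produce the new hidden state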
+    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateX" : "");
+    input->addChild(outputGateX, 0, 0);
+    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateH" : "");
+    hiddenState->addChild(outputGateH, 1, 0);
+    auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : "");
+    outputGateX->addChild(outputGate, 0, 0);
+    outputGateH->addChild(outputGate, 0, 1);
+    auto outputGateAct = Sigmoid((!name.empty()) ? name + "_outputGateAct" : "");
+    auto outputGateMul = Mul((!name.empty()) ? name + "_outputGateMul" : "");
+    outputGate->addChild(outputGateAct, 0, 0);
+    outputGateAct->addChild(outputGateMul, 0, 0);
+
+    // Updated cell state to help determine new hidden state
+    auto cellUpdatedAct = Tanh((!name.empty()) ? name + "_cellUpdatedAct" : "");
+    add->addChild(cellUpdatedAct, 0, 0);
+    cellUpdatedAct->addChild(outputGateMul, 0, 1);
+    outputGateMul->addChild(hiddenState, 0, 0);
+    add->addChild(cellState, 0, 0);
+
+    std::shared_ptr<GraphView> microGraph = std::make_shared<GraphView>();
+    microGraph->add(input);
+    microGraph->add({hiddenState, cellState, add,
+        forgetGateX, forgetGateH, forgetGate, forgetGateAct, forgetGateMul,
+        inputGateX, inputGateH, inputGate, inputGateAct, inputGateMul,
+        cellCandidateX, cellCandidateH, cellCandidate, cellCandidateAct,
+        outputGateX, outputGateH, outputGate, outputGateAct, outputGateMul,
+        cellUpdatedAct}, false);
+
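+    // Ordered inputs: data, 4 input weights (i, o, f, c), 4 recurrent weights, 4 input biases, 4 recurrent biases, initial hidden state, initial cell state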
+    microGraph->setOrderedInputs({{input, 0},
+        {inputGateX, 1}, {outputGateX, 1}, {forgetGateX, 1}, {cellCandidateX, 1},
+        {inputGateH, 1}, {outputGateH, 1}, {forgetGateH, 1}, {cellCandidateH, 1},
+        {inputGateX, 2}, {outputGateX, 2}, {forgetGateX, 2}, {cellCandidateX, 2},
+        {inputGateH, 2}, {outputGateH, 2}, {forgetGateH, 2}, {cellCandidateH, 2},
+        {hiddenState, 1}, {cellState, 1}});
+    microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
+
+    auto metaOp = MetaOperator("LSTM", microGraph, name);
+    addProducer(metaOp, 1, {hiddenChannel, inChannel}, "wi");
+    addProducer(metaOp, 2, {hiddenChannel, inChannel}, "wo");
+    addProducer(metaOp, 3, {hiddenChannel, inChannel}, "wf");
+    addProducer(metaOp, 4, {hiddenChannel, inChannel}, "wc");
+    addProducer(metaOp, 5, {hiddenChannel, hiddenChannel}, "ri");
+    addProducer(metaOp, 6, {hiddenChannel, hiddenChannel}, "ro");
+    addProducer(metaOp, 7, {hiddenChannel, hiddenChannel}, "rf");
+    addProducer(metaOp, 8, {hiddenChannel, hiddenChannel}, "rc");
+    if (!noBias) {
+        addProducer(metaOp, 9, {hiddenChannel}, "wbi");
+        addProducer(metaOp, 10, {hiddenChannel}, "wbo");
+        addProducer(metaOp, 11, {hiddenChannel}, "wbf");
+        addProducer(metaOp, 12, {hiddenChannel}, "wbc");
+        addProducer(metaOp, 13, {hiddenChannel}, "rbi");
+        addProducer(metaOp, 14, {hiddenChannel}, "rbo");
+        addProducer(metaOp, 15, {hiddenChannel}, "rbf");
+        addProducer(metaOp, 16, {hiddenChannel}, "rbc");
+    }
+    return metaOp;
+}
+
+std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
+{
+    // Construct micro-graph
+    auto input = Identity("");
+    auto hiddenState = Memorize(seqLength, "");
+    auto cellState = Memorize(seqLength, "");
+    auto add = Add(2, "");
+
+    // Forget gate
+    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
+    input->addChild(forgetGateX, 0, 0);
+    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
+    hiddenState->addChild(forgetGateH, 1, 0);
+    auto forgetGate = Add(2, "");
+    forgetGateX->addChild(forgetGate, 0, 0);
+    forgetGateH->addChild(forgetGate, 0, 1);
+    auto forgetGateAct = Sigmoid("");
+    auto forgetGateMul = Mul("");
+    forgetGate->addChild(forgetGateAct, 0, 0);
+    forgetGateAct->addChild(forgetGateMul, 0, 0);
+    forgetGateMul->addChild(add, 0, 0);
+    cellState->addChild(forgetGateMul, 1, 1);
+
+    // Input gate
+    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
+    input->addChild(inputGateX, 0, 0);
+    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
+    hiddenState->addChild(inputGateH, 1, 0);
+    auto inputGate = Add(2, "");
+    inputGateX->addChild(inputGate, 0, 0);
+    inputGateH->addChild(inputGate, 0, 1);
+    auto inputGateAct = Sigmoid("");
+    auto inputGateMul = Mul("");
+    inputGate->addChild(inputGateAct, 0, 0);
+    inputGateAct->addChild(inputGateMul, 0, 0);
+    inputGateMul->addChild(add, 0, 1);
+
+    // Candidate for cell update
+    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
+    input->addChild(cellCandidateX, 0, 0);
+    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
+    hiddenState->addChild(cellCandidateH, 1, 0);
+    auto cellCandidate = Add(2, "");
+    cellCandidateX->addChild(cellCandidate, 0, 0);
+    cellCandidateH->addChild(cellCandidate, 0, 1);
+    auto cellCandidateAct = Tanh("");
+    cellCandidate->addChild(cellCandidateAct, 0, 0);
+    cellCandidateAct->addChild(inputGateMul, 0, 1);
+
+    // Output gate
+    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
+    input->addChild(outputGateX, 0, 0);
+    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
+    hiddenState->addChild(outputGateH, 1, 0);
+    auto outputGate = Add(2,"");
+    outputGateX->addChild(outputGate, 0, 0);
+    outputGateH->addChild(outputGate, 0, 1);
+    auto outputGateAct = Sigmoid("");
+    auto outputGateMul = Mul("");
+    outputGate->addChild(outputGateAct, 0, 0);
+    outputGateAct->addChild(outputGateMul, 0, 0);
+
+    // Updated cell state to help determine new hidden state
+    auto cellUpdatedAct = Tanh("");
+    add->addChild(cellUpdatedAct, 0, 0);
+    cellUpdatedAct->addChild(outputGateMul, 0, 1);
+    outputGateMul->addChild(hiddenState, 0, 0);
+    add->addChild(cellState, 0, 0);
+
+    std::shared_ptr<GraphView> microGraph = std::make_shared<GraphView>();
+    microGraph->add(input);
+    microGraph->add({hiddenState, cellState, add,
+        forgetGateX, forgetGateH, forgetGate, forgetGateAct, forgetGateMul,
+        inputGateX, inputGateH, inputGate, inputGateAct, inputGateMul,
+        cellCandidateX, cellCandidateH, cellCandidate, cellCandidateAct,
+        outputGateX, outputGateH, outputGate, outputGateAct, outputGateMul,
+        cellUpdatedAct}, false);
+
+    microGraph->setOrderedInputs({{input, 0},
+        {inputGateX, 1}, {outputGateX, 1}, {forgetGateX, 1}, {cellCandidateX, 1},
+        {inputGateH, 1}, {outputGateH, 1}, {forgetGateH, 1}, {cellCandidateH, 1},
+        {inputGateX, 2}, {outputGateX, 2}, {forgetGateX, 2}, {cellCandidateX, 2},
+        {inputGateH, 2}, {outputGateH, 2}, {forgetGateH, 2}, {cellCandidateH, 2},
+        {hiddenState, 1}, {cellState, 1}});
+    microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
+
+    return std::make_shared<MetaOperator_Op>("LSTM", microGraph);
+}
+
+} // namespace Aidge
diff --git a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ef319ef38ad18de9eaed0a1d4a92c3877ee7cf8e
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
@@ -0,0 +1,84 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include <array>
+#include <memory>
+#include <string>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/AvgPooling.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+//////////////////////////////////
+// Node functions
+//////////////////////////////////
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name,
+                                  const std::array<DimSize_t, DIM> &stride_dims,
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims)
+{
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
+        AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
+    });
+
+    return MetaOperator("PaddedAvgPooling", graph, name);
+}
+
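+// Explicit template instantiations for the 1D and 2D variants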
+template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
+template std::shared_ptr<Node> PaddedAvgPooling<2>(const std::array<DimSize_t,2>&, const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
+
+// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+template <DimSize_t DIM>
+std::shared_ptr<Node> PaddedAvgPooling(const DimSize_t (&kernel_dims)[DIM],
+                                       const std::string& name,
+                                       const std::array<DimSize_t, DIM> &stride_dims,
+                                       const std::array<DimSize_t, 2*DIM> &padding_dims)
+{
+    return PaddedAvgPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+}
+
+template std::shared_ptr<Node> PaddedAvgPooling<1>(const DimSize_t (&kernel_dims)[1], const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
+template std::shared_ptr<Node> PaddedAvgPooling<2>(const DimSize_t (&kernel_dims)[2], const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
+
+
+//////////////////////////////////
+// Operator functions
+//////////////////////////////////
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::array<DimSize_t, DIM> &stride_dims,
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims)
+{
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, ""),
+        AvgPooling(kernel_dims, "", stride_dims)
+    });
+
+    return std::make_shared<MetaOperator_Op>("PaddedAvgPooling", graph);
+}
+
+template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
+template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<2>(const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
+
+} // namespace Aidge
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index 426de388f31391fb5e59446d50e50de94ca5f8a1..ded67a11acd299e5407f0d7e74146f5bcd1bf86a 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -24,13 +24,7 @@
 const std::string Aidge::Mul_Op::Type = "Mul";
 
 bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 2a60f580f3279170a0f1ff417cea96ae7cfa981f..5df90020a43ad6cffebcd2345c075837f11462b1 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -9,7 +9,6 @@
  *
  ********************************************************************************/
 
-#include <cassert>
 #include <memory>
 
 #include "aidge/operator/OperatorTensor.hpp"
@@ -20,11 +19,10 @@
 
 
 Aidge::OperatorTensor::OperatorTensor(const std::string& type,
-                                                            const IOIndex_t nbData,
-                                                            const IOIndex_t nbParam,
+                                      const std::vector<InputCategory>& inputsCategory,
                                                             const IOIndex_t nbOut)
-: Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor),
-        mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)),
+: Operator(type, inputsCategory, nbOut, OperatorType::Tensor),
+        mInputs(std::vector<std::shared_ptr<Tensor>>(inputsCategory.size(), nullptr)),
         mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) {
     for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) {
         mOutputs[i] = std::make_shared<Tensor>();
@@ -51,6 +49,11 @@ void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, cons
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
 }
 
+void Aidge::OperatorTensor::resetInput(const Aidge::IOIndex_t inputIdx) {
+    AIDGE_ASSERT(inputIdx < nbInputs(), "Input idx out of range.");
+    mInputs[inputIdx] = nullptr;
+}
+
 void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
     if (getInput(inputIdx)) {
@@ -62,15 +65,6 @@ void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, const std:
 
 Aidge::OperatorTensor::~OperatorTensor() = default;
 
-void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, std::shared_ptr<Aidge::Data>&& data) {
-    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
-    if (getInput(inputIdx)) {
-        *mInputs[inputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
-    } else {
-        mInputs[inputIdx] = std::make_shared<Tensor>(std::move(*std::dynamic_pointer_cast<Tensor>(data)));
-    }
-}
-
 std::shared_ptr<Aidge::Data> Aidge::OperatorTensor::getRawInput(const Aidge::IOIndex_t inputIdx) const {
     return std::static_pointer_cast<Data>(getInput(inputIdx));
 }
@@ -88,15 +82,6 @@ void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const st
     *mOutputs[outputIdx] = *data_tensor;
 }
 
-void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) {
-    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
-    AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs());
-    auto&& data_tensor = std::dynamic_pointer_cast<Tensor>(data);
-    // if (mImpl)
-    //     AIDGE_ASSERT(data_tensor->getImpl()->backend() == backend(), "Data parameter and Operator have different backends: {} and {}", data_tensor->getImpl()->backend(), backend());
-    *mOutputs[outputIdx] = std::move(*data_tensor);
-}
-
 std::shared_ptr<Aidge::Data> Aidge::OperatorTensor::getRawOutput(const Aidge::IOIndex_t outputIdx) const {
     return std::static_pointer_cast<Data>(getOutput(outputIdx));
 }
@@ -116,9 +101,6 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
     if (outputIdx >= nbOutputs()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range.");
     }
-    if (nbInputs() != nbData()) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overrided function.");
-    }
     if (!dimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
     }
@@ -128,19 +110,28 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
         }
     }
     // return the same Tensor description as given in function parameter for each data input
-    return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims));
+    return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbInputs(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims));
 }
 
-bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = (nbInputs() > 0); // do not compute anything if no input
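+// Returns true when every mandatory input has an associated Tensor (optional inputs may be absent);
+// if checkNonEmpty is true, every associated input must also have its dims defined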
+bool Aidge::OperatorTensor::inputsAssociated(bool checkNonEmpty) const {
+    bool associated = true;
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        if (inputCategory(i) != InputCategory::OptionalData && inputCategory(i) != InputCategory::OptionalParam) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+            }
+        }
+
+        if (checkNonEmpty && getInput(i)) {
+            associated &= !(getInput(i)->empty());
         }
-        associated &= !(getInput(i)->empty());
     }
-    if (associated) {
+
+    return associated;
+}
+
+bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
         const auto expectedDims =  getInput(0)->dims();
         for (std::size_t i = 1; i < nbInputs(); ++i) {
             if (expectedDims != getInput(i)->dims()) {
@@ -150,16 +141,19 @@ bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
             }
         }
         mOutputs[0]->resize(expectedDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 bool Aidge::OperatorTensor::dimsForwarded() const {
     bool forwarded = true;
     // check both inputs and outputs have been filled
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
+        if (inputCategory(i) != InputCategory::OptionalData && inputCategory(i) != InputCategory::OptionalParam) {
+            forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
+        }
     }
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
         // If getOutput(i) is nullptr, ignore this output (it may be a dummy
@@ -174,15 +168,39 @@ void Aidge::OperatorTensor::setDataType(const DataType& dataType) const {
         getOutput(i)->setDataType(dataType);
     }
 
-    for (IOIndex_t i = nbData(); i < nbInputs(); ++i) {
-        AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
-        getInput(i)->setDataType(dataType);
+    // Set the data type for parameter inputs only (weights, biases, ...), which are usually Producers
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (inputCategory(i) == InputCategory::Param) {
+            AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
+            getInput(i)->setDataType(dataType);
+        }
+        else if (inputCategory(i) == InputCategory::OptionalParam && getInput(i) != nullptr) {
+            getInput(i)->setDataType(dataType);
+        }
+    }
+}
+
+void Aidge::OperatorTensor::setDataFormat(const DataFormat& dataFormat) const {
+    for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
+        getOutput(i)->setDataFormat(dataFormat);
+    }
+
+    // Set the data format for parameter inputs only (weights, biases, ...), which are usually Producers
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (inputCategory(i) == InputCategory::Param) {
+            AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
+            getInput(i)->setDataFormat(dataFormat);
+        }
+        else if (inputCategory(i) == InputCategory::OptionalParam && getInput(i) != nullptr) {
+            getInput(i)->setDataFormat(dataFormat);
+        }
     }
 }
 
 void Aidge::OperatorTensor::forward() {
     if (!dimsForwarded()) {
-        forwardDims();
+        // Allow data dependent forwardDims at this point (data is available)
+        forwardDims(true);
     }
 
     Operator::forward();
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c66e6c84af6df299e4786bbbb73767d6ee6374f5
--- /dev/null
+++ b/src/operator/Pad.cpp
@@ -0,0 +1,19 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::Pad_Op<DIM>::Type = "Pad";
+
+template class Aidge::Pad_Op<1>;
+template class Aidge::Pad_Op<2>;
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 18325d80a94f35878ededca839ec809000527c39..2fcc46a460ffd7c7f6746dfcd108acbaafe912de 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -30,19 +30,15 @@ Aidge::Elts_t Aidge::Pop_OpImpl::getNbRequiredData(const Aidge::IOIndex_t inputI
 
 void Aidge::Pop_OpImpl::forward() {
     const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+
     assert(op.getInput(0) && "missing input #0");
-    const unsigned int forwardStep = op.template getAttr<PopAttr::ForwardStep>();
-    *op.getOutput(0) = op.getInput(0)->extract({forwardStep});
+    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()});
 }
 
 const std::string Aidge::Pop_Op::Type = "Pop";
 
 bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-    if (!(getInput(0)->empty())) {
+    if (inputsAssociated()) {
         auto inputDims = getInput(0)->dims();
         inputDims.erase(inputDims.begin());
         getOutput(0)->resize(inputDims);
@@ -54,7 +50,7 @@ bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
 
 void Aidge::Pop_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
-    this->template getAttr<PopAttr::ForwardStep>() = 0;
+    mAttributes->template getAttr<PopAttr::ForwardStep>() = 0;
 }
 
 void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
@@ -69,5 +65,5 @@ void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
 
 void Aidge::Pop_Op::forward() {
     Operator::forward();
-    ++this->template getAttr<PopAttr::ForwardStep>();
+    ++mAttributes->template getAttr<PopAttr::ForwardStep>();
 }
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 135c792345b0caf1166e671a8dad7d5b49b42ee7..2a50f9c7bad1e40cd6e69cfc0a22632439cfe000 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -23,13 +23,7 @@
 const std::string Aidge::Pow_Op::Type = "Pow";
 
 bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 7059ea7e989d789b4cff0ed895fc2c5ec0ad81bc..bdb69452ec54fb635d0cbc299336071295f37ae1 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -28,8 +28,9 @@ const std::string Aidge::Producer_Op::Type = "Producer";
 
 
 Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant)
-    : OperatorTensor(Type, 0, 0, 1),
-      Attributes_(attr<ProdAttr::Constant>(constant))
+    : OperatorTensor(Type, {}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+        attr<ProdAttr::Constant>(constant)))
 {
     mOutputs[0] = tensor; // copy the pointer of the Tensor
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
@@ -47,7 +48,7 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
  */
 Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
 {
     mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0)));
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 28e39b6d3387a0371c0505dc0a7b350e83a2bbaf..96f2f855f46275e167acb1300434f8bcdbdd7d3e 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -27,12 +27,9 @@
 const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 
 bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
-    }
-    if (!getInput(0)->empty()) {
+    if (inputsAssociated()) {
         // make Axes attribute positive
-        std::vector<std::int32_t>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
+        std::vector<std::int32_t>& axes = mAttributes->template getAttr<ReduceMeanAttr::Axes>();
         std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
             if (val < 0)
                 val+=static_cast<std::int32_t>(getInput(0)->nbDims());
@@ -41,7 +38,7 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
 
         // build output dimensions
         std::vector<DimSize_t> outDims = getInput(0)->dims();
-        if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
+        if (mAttributes->template getAttr<ReduceMeanAttr::KeepDims>()) {
             std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
         }
         else {
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index ab53c094dac09879c1bec86509463aab2280ca92..1838c008a6b83548b6a5a80af0363e2cf239b649 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -30,22 +30,48 @@ void Aidge::Reshape_OpImpl::forward() {
 
 const std::string Aidge::Reshape_Op::Type = "Reshape";
 
-bool Aidge::Reshape_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
+bool Aidge::Reshape_Op::dimsForwarded() const {
+    if (getInput(1) && !getInput(1)->empty()) {
+        // output dims are data dependent
+        return false;
     }
 
-    if (!getInput(0)->empty()) {
+    return OperatorTensor::dimsForwarded();
+}
+
+bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute Shape
+        if (getInput(1)) {
+            if (!this->shape().empty()) {
+                Log::notice("Reshape_Op: ignoring non-empty Shape attribute because input#1 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("Reshape_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
+
+            std::shared_ptr<Tensor> fallback;
+            this->shape().clear(); // If both are provided, the input overrides the attribute
+            this->shape().reserve(getInput(1)->size());
+            const auto& shape = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(shape.getImpl()->hostPtr()),
+                        shape.size(),
+                        std::back_inserter(this->shape()));
+        }
+
+        AIDGE_ASSERT(!this->shape().empty(), "Missing input#1 or Shape attribute");
+
+        // Compute output dims
         std::vector<DimSize_t> outDims;
         // variables to handle a negative dimension
         bool foundNegativeDimension = false;
         std::size_t outSize = 1;
         DimIdx_t negativeIndex = 0;
-
-        for(std::size_t i = 0; i < this->template getAttr<ReshapeAttr::Shape>().size(); ++i)
+        for(std::size_t i = 0; i < this->shape().size(); ++i)
         {
-            std::int64_t dimSize = this->template getAttr<ReshapeAttr::Shape>()[i];
+            int64_t dimSize = this->shape()[i];
             if (dimSize < 0) {
                 if (foundNegativeDimension) {
                     AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
@@ -54,8 +80,14 @@ bool Aidge::Reshape_Op::forwardDims(bool /*allowDataDependency*/) {
                 dimSize = 1;
                 negativeIndex = static_cast<DimIdx_t>(i);
             }
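+            // When AllowZero is false, a zero in the target shape means "keep the corresponding input dimension"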
+            else if (dimSize == 0 && !mAttributes->template getAttr<ReshapeAttr::AllowZero>())
+            {
+                dimSize = getInput(0)->dims()[i];
+            }
             outDims.push_back(static_cast<DimSize_t>(dimSize));
-            outSize *= static_cast<DimSize_t>(dimSize);
+            if (dimSize != 0) {
+                outSize *= static_cast<DimSize_t>(dimSize);
+            }
         }
 
         if (foundNegativeDimension) {
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..966e1c3e032e64e75d3606fca022b84f9da8fbaf
--- /dev/null
+++ b/src/operator/Resize.cpp
@@ -0,0 +1,121 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Resize.hpp"
+
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int64_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+#include <fmt/core.h>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Resize_Op::Type = "Resize";
+
+bool Aidge::Resize_Op::dimsForwarded() const {
+    // Output dims are data dependent when any of the optional ROI (#1), Scales (#2) or Sizes (#3) inputs is provided
+    if ((getInput(1) && !getInput(1)->empty())
+        || (getInput(2) && !getInput(2)->empty())
+        || (getInput(3) && !getInput(3)->empty())
+        )
+    {
+        // output dims are data dependent
+        return false;
+    }
+
+    return OperatorTensor::dimsForwarded();
+}
+
+bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(getInput(0)->nbDims() == 4,
+            "input tensor must have dimensions = 4 (batch, channel, height, width).");
+
+        const bool input1ROIPresent           = getInput(1) && !getInput(1)->empty();
+        const bool input2ScalesPresent        = getInput(2) && !getInput(2)->empty();
+        const bool input3SizesPresent         = getInput(3) && !getInput(3)->empty();
+
+        AIDGE_ASSERT(input2ScalesPresent != input3SizesPresent, "Exactly one of the Scales and Sizes inputs must be specified.");
+
+        if (input1ROIPresent) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Input #1 (ROI) is given and it is not supported.");
+        }
+        else if (input2ScalesPresent)  {
+            if (!allowDataDependency) {
+                Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #2");
+                return false;
+            }
+
+            AIDGE_ASSERT(getInput(0)->nbDims() ==  getInput(2)->size(),
+                "input #0 and input #2 (Scales) must have the same dimensions.");
+
+            std::vector<DimSize_t>      outDims = getInput(0)->dims();
+            const std::vector<DimSize_t> inDims = getInput(0)->dims();
+
+            std::shared_ptr<Tensor> fallback;
+            const auto& scales = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+
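+            // Scale each input dimension by the corresponding factor from the Scales input (#2)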
+            for (std::size_t dim=0; dim < getInput(2)->size(); ++dim) {
+                outDims[dim] = inDims[dim]*static_cast<int64_t*>(scales.getImpl()->hostPtr())[dim];
+            }
+
+            mOutputs[0]->resize(outDims);
+            return true;
+        }
+        else if (input3SizesPresent) {
+            if (!allowDataDependency) {
+                Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #3");
+                return false;
+            }
+
+            AIDGE_ASSERT(getInput(0)->nbDims() ==  getInput(3)->size(),
+                "input #0 and input #3 (Sizes) must have the same dimensions.");
+
+            std::vector<DimSize_t> outDims = getInput(0)->dims();
+
+            std::shared_ptr<Tensor> fallback;
+            const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+
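+            // Output dimensions are taken directly from the Sizes input (#3)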
+            for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
+                outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
+            }
+
+            mOutputs[0]->resize(outDims);
+            return true;
+        }
+        else {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Error: Either Input #2 or Input #3 must be present.");
+        }
+    }
+
+    return false; 
+}
+
+void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Resize_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+
+    // By default, automatically set backend for all inputs: roi, scales and sizes 
+    if(getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    if(getInput(2)) {
+        getInput(2)->setBackend(name, device);
+    }
+    if(getInput(3)) {
+        getInput(3)->setBackend(name, device);
+    }
+}
\ No newline at end of file
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8166712e1e5fd967bb9328e95ecf8c5388636ba7
--- /dev/null
+++ b/src/operator/Shape.cpp
@@ -0,0 +1,65 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>  // std::size_t
+#include <cstdint>  // std::int64_t
+#include <string>
+#include <vector>
+
+#include "aidge/operator/Shape.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::Shape_OpImpl::forward() {
+    const Shape_Op& op = dynamic_cast<const Shape_Op&>(mOp);
+    const auto start = op.start();
+    const auto end = op.end();
+
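+    // Copy the input dimensions in the [start, end] range into the output tensor, cast to UInt64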
+    op.getOutput(0)->getImpl()->copyCast(std::next(op.getInput(0)->dims().data(),
+                                                   start),
+                                         DataType::UInt64,
+                                         end - start + 1);
+}
+
+const std::string Aidge::Shape_Op::Type = "Shape";
+
+bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
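+        // Negative Start/End values wrap around: the number of input dimensions is added to them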
+        if (mAttributes->template getAttr<std::int64_t>("Start") < 0)
+            mAttributes->template getAttr<std::int64_t>("Start") += static_cast<std::int64_t>(getInput(0)->nbDims());
+        if (mAttributes->template getAttr<std::int64_t>("End") < 0)
+            mAttributes->template getAttr<std::int64_t>("End") += static_cast<std::int64_t>(getInput(0)->nbDims());
+
+        const auto start = mAttributes->template getAttr<std::int64_t>("Start");
+        const auto end = mAttributes->template getAttr<std::int64_t>("End");
+        const auto nbDims = static_cast<std::int64_t>(getInput(0)->nbDims());
+        const DimSize_t roi = end - start + 1;
+
+        AIDGE_ASSERT(start < nbDims && end < nbDims, "'Start' and 'End' must be < {}", nbDims);
+        AIDGE_ASSERT(roi > 1, "Invalid ROI for Shape");
+
+        mOutputs[0]->resize({roi});
+        return true;
+    }
+
+    return false;
+}
+
+void Aidge::Shape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Shape_Op>::exists({name})) {
+        SET_IMPL_MACRO(Shape_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Shape_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/ShiftGELU.cpp b/src/operator/ShiftGELU.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ede83e291bd1670885192e3ac8f4958e185c28e2
--- /dev/null
+++ b/src/operator/ShiftGELU.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ShiftGELU.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ShiftGELU_Op::Type = "ShiftGELU";
+
+void Aidge::ShiftGELU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(ShiftGELU_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file
diff --git a/src/operator/ShiftMax.cpp b/src/operator/ShiftMax.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eb77ae655354eac03fbdc0f1a84a44391795ee8c
--- /dev/null
+++ b/src/operator/ShiftMax.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ShiftMax.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ShiftMax_Op::Type = "ShiftMax";
+
+void Aidge::ShiftMax_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(ShiftMax_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 97ec0a5171a8f13fee0a93557b6831443f10713a..3cc2de686435a304326e2a4a60dad6c12a50349c 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -8,126 +8,172 @@
  * SPDX-License-Identifier: EPL-2.0
  *
  ********************************************************************************/
+
 #include "aidge/operator/Slice.hpp"
-#include "aidge/utils/Types.h"
-#include "aidge/utils/ErrorHandling.hpp"
 
 #include <cassert>
 #include <cstddef>
+#include <cstdint>
 #include <string>
 #include <utility>
 #include <vector>
 
+#include <fmt/format.h>
+
 #include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-void Aidge::Slice_OpImpl::forward() {
-    const Slice_Op& op = dynamic_cast<const Slice_Op&>(mOp);
-    const auto inputDims = op.getInput(0)->dims();
-    auto slicedDims = op.getInput(0)->dims();
-
-    std::size_t beginning = 0;
-    DimSize_t nbAxes = op.getAttr<SliceAttr::Axes>().size();
-    for (std::size_t i = 0; i < nbAxes; ++i) {
-        // For each slice operation get the params and cast them to size_t
-        const std::int64_t axis_ = op.getAttr<SliceAttr::Axes>()[i];
-        const std::int64_t start_ = op.getAttr<SliceAttr::Starts>()[i];
-        const std::int64_t end_ = op.getAttr<SliceAttr::Ends>()[i];
-        const std::size_t axis = axis_ >= 0 ? axis_ : static_cast<std::size_t>(axis_) + inputDims.size();
-        const std::size_t start = start_ >= 0 ? start_ : start_ + inputDims[axis];
-        const std::size_t end = end_ >= 0 ? end_ : end_ + inputDims[axis];
-        std::size_t stride = 1;
-        for (std::size_t j = inputDims.size() - 1; j > axis; --j) stride *= inputDims[j];
-        beginning += start * stride;
-        const std::size_t sliceLength = end - start + 1;
-        slicedDims[axis] = sliceLength;
+const std::string Aidge::Slice_Op::Type = "Slice";
+
+bool Aidge::Slice_Op::dimsForwarded() const {
+    if ((getInput(1) && !getInput(1)->empty())
+        || (getInput(2) && !getInput(2)->empty())
+        || (getInput(3) && !getInput(3)->empty())
+        || (getInput(4) && !getInput(4)->empty()))
+    {
+        // output dims are data dependent
+        return false;
     }
 
-    const std::size_t nbDims = slicedDims.size();
+    return OperatorTensor::dimsForwarded();
+}
 
-    // for inputDims = {4,5,5,3} & slicedDims = {3,2,2,1}, substractDims = {1,5,5,3}
-    std::vector<std::size_t> substractedDims = std::vector<std::size_t>(nbDims);
-    for (std::size_t i = 0; i < nbDims; ++i) {
-        substractedDims[i] = inputDims[i] - slicedDims[i];
-    }
+bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
+    if (inputsAssociated()) {
+        std::shared_ptr<Tensor> fallback;
+        // Copy optional input #1, if present, to attribute Starts
+        if (getInput(1)) {
+            if (!this->starts().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Starts attribute because input#1 takes precedence");
+            }
 
-    // for slicedDims = {3,2,2,1}, prodSlicedDims = {12,4,2,1}
-    std::vector<std::size_t> prodSlicedDims = std::vector<std::size_t>(nbDims);
-    std::vector<std::size_t> prodInputDims = std::vector<std::size_t>(nbDims + 1);
-    prodSlicedDims[nbDims - 1] = slicedDims[nbDims - 1];
-    prodInputDims[nbDims - 1] = inputDims[nbDims - 1];
-    prodInputDims[nbDims] = 1;
-    for (std::size_t i = 2; i <= nbDims; ++i) {
-        prodSlicedDims[nbDims - i] = prodSlicedDims[nbDims - i + 1] * slicedDims[nbDims - i];
-        prodInputDims[nbDims - i] = prodInputDims[nbDims - i + 1] * inputDims[nbDims - i];
-    }
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
+
+            this->starts().clear(); // If both are provided, the input takes precedence over the attribute
+            this->starts().reserve(getInput(1)->size());
+            const auto& starts = getInput(1)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(starts.getImpl()->hostPtr()),
+                        starts.size(),
+                        std::back_inserter(this->starts()));
+        }
+
+        AIDGE_ASSERT(!this->starts().empty(), "Missing input#1 or Starts attribute");
+
+        // Copy optional input #2, if present, to attribute Ends
+        if (getInput(2)) {
+            if (!this->ends().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Ends attribute because input#2 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#2");
+                return false;
+            }
+
+            this->ends().clear(); // If both are provided, the input takes precedence over the attribute
+            this->ends().reserve(getInput(2)->size());
+            const auto& ends = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(ends.getImpl()->hostPtr()),
+                        ends.size(),
+                        std::back_inserter(this->ends()));
+        }
 
-    std::size_t i = beginning;
-    std::size_t size = 0;
-    std::size_t offset = 0;
-    for (std::size_t j = 0; j < prodSlicedDims[0];) {
-        ++size;
-        ++i;
-        ++j;
-        bool newChunk = false;
-        for (std::size_t idx = nbDims - 1; idx > 0; --idx) {
-            if (j % prodSlicedDims[idx] == 0) {
-                i += substractedDims[idx] * prodInputDims[idx + 1];
-                newChunk = true;
+        AIDGE_ASSERT(!this->ends().empty(), "Missing input#2 or Ends attribute");
+
+        // Copy optional input #3, if present, to attribute Axes
+        if (getInput(3)) {
+            if (!this->axes().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Axes attribute because input#3 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#3");
+                return false;
             }
+
+            this->axes().clear(); // If both are provided, the input takes precedence over the attribute
+            this->axes().reserve(getInput(3)->size());
+            const auto& axes = getInput(3)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+            std::copy_n(static_cast<int8_t*>(axes.getImpl()->hostPtr()),
+                        axes.size(),
+                        std::back_inserter(this->axes()));
         }
 
-        if (newChunk) {
-            op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(beginning), size, offset);
-            beginning = i;
-            offset += size;
-            size = 0;
+        AIDGE_ASSERT(!this->axes().empty(), "Missing input#3 or Axes attribute");
+
+        // Copy optional input #4, if present, to attribute Steps
+        if (getInput(4)) {
+            if (!this->steps().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Steps attribute because input#4 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#4");
+                return false;
+            }
+
+            this->steps().clear(); // If both are provided, the input takes precedence over the attribute
+            this->steps().reserve(getInput(4)->size());
+            const auto& steps = getInput(4)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(steps.getImpl()->hostPtr()),
+                        steps.size(),
+                        std::back_inserter(this->steps()));
         }
-    }
 
-    if (size > 0) {
-        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(beginning), size, offset);
-    }
-}
+        // Fill Steps attr if empty
+        if(this->steps().empty()) {
+            // In case the input Steps is not provided, default value is 1
+            this->steps() = std::vector<std::int64_t>(this->axes().size(), 1);
+        }
 
-const std::string Aidge::Slice_Op::Type = "Slice";
+        // Compute output dims
+        const DimSize_t nbAxes = this->axes().size();
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        for (std::size_t i = 0; i < nbAxes; ++i) {
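+            // Negative axis, start and end values are counted backward from the corresponding dimension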
+            const DimIdx_t axis = this->axes()[i] >= 0 ?
+                            static_cast<DimIdx_t>(this->axes()[i]) :
+                            static_cast<DimIdx_t>(this->axes()[i] + static_cast<DimIdx_t>(getInput(0)->nbDims()));
+            const DimSize_t start = this->starts()[i] >= 0 ?
+                                static_cast<DimSize_t>(this->starts()[i]) :
+                                static_cast<DimSize_t>(this->starts()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
+            const DimSize_t end = this->ends()[i] >= 0 ?
+                            static_cast<DimSize_t>(this->ends()[i]) :
+                            static_cast<DimSize_t>(this->ends()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
+            const std::int64_t step = this->steps()[i];
 
-bool Aidge::Slice_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check input have been associated
-    if (!getInput(0) || (getInput(0)->empty())) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
+            AIDGE_ASSERT(step != 0, "Slice_Op: Steps ({}) must not contain a zero value (axis {})!", this->steps(), axis);
+            if(step * (static_cast<int64_t>(end) - static_cast<int64_t>(start)) < 0) {
+                if(step < 0) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step ({}) is negative, we must have End ({}) < Start ({}) on axis {}", type(), step, end, start, axis);
+                }
+                else {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step ({}) is positive, we must have Start ({}) < End ({}) on axis {}", type(), step, start, end, axis);
+                }
+            }
 
-    const DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
-    for (std::size_t i = 0; i < nbAxes; ++i) {
-        // For each slice operation get the params and cast them to size_t
-        const std::int64_t axis_ = this->template getAttr<SliceAttr::Axes>()[i];
-        const std::int64_t start_ = this->template getAttr<SliceAttr::Starts>()[i];
-        const std::int64_t end_ = this->template getAttr<SliceAttr::Ends>()[i];
-        const std::size_t axis = axis_ >= 0 ? static_cast<std::size_t>(axis_) : static_cast<std::size_t>(axis_) + getInput(0)->nbDims();
-        const std::size_t start = start_ >= 0 ? static_cast<std::size_t>(start_) : static_cast<std::size_t>(start_) + getInput(0)->dims()[axis];
-        const std::size_t end = end_ >= 0 ? static_cast<std::size_t>(end_) : static_cast<std::size_t>(end_) + getInput(0)->dims()[axis];
-
-        const std::size_t sliceLength = end - start + 1;
-        // Check if slice length is valid
-        if (sliceLength > getInput(0)->dims()[axis])
-        {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds");
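+            // Number of elements kept on this axis: ceil((end - start) / step), valid for both positive and negative steps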
+            const std::size_t sliceLength = static_cast<std::size_t>(std::ceil((static_cast<float>(end) - static_cast<float>(start)) / static_cast<float>(step)));
+            // Check if slice length is valid
+            if (sliceLength > getInput(0)->dims()[axis])
+            {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Slice_Op: ROI ({}) of Slice operator out of bounds ({}) on axis {}, with (Start, End, Step) = ({}, {}, {})",
+                    sliceLength, getInput(0)->dims()[axis], axis, start, end, step);
+            }
+            outDims[axis] = sliceLength;
         }
-        outDims[axis] = sliceLength;
+        mOutputs[0]->resize(outDims);
+        return true;
     }
-    mOutputs[0]->resize(outDims);
-    return true;
+
+    return false;
 }
 
 void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Slice_Op>::exists({name})){
-        SET_IMPL_MACRO(Slice_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Slice_OpImpl>(*this);
-    }
+    SET_IMPL_MACRO(Slice_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a0cb049b19e9411daf65bbe2a10319c62b32c1b8
--- /dev/null
+++ b/src/operator/Split.cpp
@@ -0,0 +1,138 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Split.hpp"
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <fmt/format.h>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::Split_OpImpl::forward() {
+    const Split_Op& op = dynamic_cast<const Split_Op&>(mOp);
+    const auto axis = op.template getAttr<std::int8_t>("Axis");
+    const auto splits = op.template getAttr<std::vector<DimSize_t>>("Split");
+    const auto dims = op.getInput(0)->dims();
+
+    // Compute pre/post axis strides
+    const std::size_t stride_pre = std::accumulate(dims.cbegin(), dims.cbegin() + axis, 1, std::multiplies<std::size_t>());
+    const std::size_t stride_post = std::accumulate(dims.crbegin(), dims.crbegin() + dims.size() -1 - axis, 1, std::multiplies<std::size_t>());
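+    // For each output, copy its chunks along the split axis from the input tensor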
+    for (auto i = 0; i < op.nbOutputs(); ++i)
+    {
+        DimIdx_t chunkIdxOnAxis = std::accumulate(splits.cbegin(), splits.cbegin() + i, 0) * stride_post;
+        DimIdx_t offset = 0;
+        for (std::size_t j = 0; j < stride_pre; ++j)
+        {
+            // Compute chunk position in input tensor
+            DimIdx_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
+            // Copy chunk into output
+            op.getOutput(i)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(idx),
+                                             splits[i] * stride_post, offset);
+            offset += splits[i] * stride_post;
+        }
+
+    }
+}
+
+const std::string Aidge::Split_Op::Type = "Split";
+
+bool Aidge::Split_Op::dimsForwarded() const {
+    if ((getInput(1) && !getInput(1)->empty()))
+    {
+        // output dims are data dependent
+        return false;
+    }
+
+    return OperatorTensor::dimsForwarded();
+}
+
+bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute Split
+        if (getInput(1)) {
+            if (!this->template getAttr<SplitAttr::Split>().empty()) {
+                Log::notice("Split_Op: ignoring non-empty Split attribute because input#1 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("Split_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
+
+            std::shared_ptr<Tensor> fallback;
+            this->template getAttr<SplitAttr::Split>().clear(); // If both are provided, the input takes precedence over the attribute
+            this->template getAttr<SplitAttr::Split>().reserve(getInput(1)->size());
+            const auto& splits = getInput(1)->refCastFrom(fallback, NativeType<DimSize_t>::type, "cpu");
+            std::copy_n(static_cast<DimSize_t*>(splits.getImpl()->hostPtr()),
+                        splits.size(),
+                        std::back_inserter(this->template getAttr<SplitAttr::Split>()));
+        }
+
+        // Compute output dims
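+        // A negative Axis wraps around: the number of input dimensions is added to it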
+        if (this->template getAttr<std::int8_t>("Axis") < 0)
+            this->template getAttr<std::int8_t>("Axis") += static_cast<std::int8_t>(getInput(0)->nbDims());
+
+        DimSize_t dimToSplit = getInput(0)->dims()[this->template getAttr<std::int8_t>("Axis")];
+        DimSize_t nbOutput = this->nbOutputs();
+        // Fill Split attr if empty
+        if(this->template getAttr<SplitAttr::Split>().empty()) {
+            // In case the input Split is not provided, divide the dimension of Axis into equal slices
+            AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: the number of outputs {} must not exceed the split dimension size {}.", nbOutput, dimToSplit);
+            DimSize_t baseSliceSize = dimToSplit / nbOutput;
+
+            DimSize_t remainder = dimToSplit % nbOutput;
+
+            for (DimSize_t i = 0; i < static_cast<DimSize_t>(nbOutput - 1); ++i) {
+                this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize);
+            }
+            this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize + remainder);
+        }
+
+        const auto splits = this->template getAttr<SplitAttr::Split>();
+        AIDGE_ASSERT(splits.size() == nbOutput, "Split_Op: the Split sizes {} must provide one size per output ({} outputs)", splits, nbOutput);
+        DimSize_t totalSplitSize = std::accumulate(splits.cbegin(), splits.cend(), 0);
+        AIDGE_ASSERT(totalSplitSize == dimToSplit, "Split_Op: Total chunks size {} is different from dimension size {}.", totalSplitSize, dimToSplit);
+
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        for (std::size_t i = 0; i < nbOutput; ++i)
+        {
+            outDims[this->template getAttr<std::int8_t>("Axis")] = this->template getAttr<SplitAttr::Split>()[i];
+            mOutputs[i]->resize(outDims);
+        }
+
+        return true;
+    }
+    
+    return false;
+}
+
+void Aidge::Split_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Split_Op>::exists({name})) {
+        SET_IMPL_MACRO(Split_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Split_OpImpl>(*this);
+    }
+    for (std::size_t i = 0; i < this->nbOutputs(); i++)
+    {
+        mOutputs[i]->setBackend(name, device);
+    }
+    
+}
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index b977f4ee7ccce32d7f7929cbee99140aea36cd2f..858b32beaf9e23e8e9e7f52cfe7176afe399843c 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -25,13 +25,7 @@
 const std::string Aidge::Sub_Op::Type = "Sub";
 
 bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 08c4770e3fb43fe819a924dd963356401c3ce801..69820a924105acc8bea817aecb90e0aa278fce06 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -23,54 +23,18 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-void Aidge::Transpose_OpImpl::forward() {
+void Aidge::TransposeImpl::forward() {
     const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
-    const auto inputDims = op.getInput(0)->dims();
-    const auto outputDims = op.getOutput(0)->dims();
-
-    std::vector<std::size_t> outStrides(outputDims.size(), 1);
-    for (size_t i = 0; i < outputDims.size(); ++i) {
-        for (size_t j = i+1; j < outputDims.size(); ++j)
-        {
-            outStrides[i] *= outputDims[j];
-        }
-    }
-
-    std::vector<size_t> indices(outputDims.size(), 0);
-    for (size_t i = 0; i < op.getInput(0)->size(); ++i) {
-        size_t idx = 0;
-        // Permute indices based on OutputDimsOrder attr
-        for (int j = outputDims.size() -1; j >=0; --j) {
-            idx += indices[op.getAttr<std::vector<DimSize_t>>(0)[j]] * outStrides[j];
-        }
-        // Copy the value in output
-        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i), 1, idx);
-
-        // Update indices for the next iteration
-        for (int j = outputDims.size() - 1; j >= 0; --j) {
-            if (indices[j] < inputDims[j] - 1) {
-                indices[j]++;
-                break;
-            } else {
-                indices[j] = 0;
-            }
-        }
-    }
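+    // Permute the input data into the output according to the OutputDimsOrder attribute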
+    op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.outputDimsOrder());
 }
 
 const std::string Aidge::Transpose_Op::Type = "Transpose";
 
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
-    }
-
-    if (!getInput(0)->empty()) {
-        const auto& outDimsOrder = getAttr<std::vector<DimSize_t>>(0);
+    if (inputsAssociated()) {
         std::vector<DimSize_t> outputDims;
-        for (std::size_t i = 0; i < outDimsOrder.size(); ++i) {
-            outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
+        for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) {
+            outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]);
         }
         mOutputs[0]->resize(outputDims);
         return true;
@@ -83,7 +47,7 @@ void Aidge::Transpose_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
         SET_IMPL_MACRO(Transpose_Op, *this, name);
     }
     else {
-        mImpl = std::make_shared<Transpose_OpImpl>(*this);
+        mImpl = std::make_shared<TransposeImpl>(*this);
     }
     mOutputs[0]->setBackend(name, device);
 }
diff --git a/src/recipes/ConstantFolding.cpp b/src/recipes/ConstantFolding.cpp
index 42fb45224614ca2655165a69b974cfe229e27f90..40b0bda766ab243805349b13e93391c5a60df63a 100644
--- a/src/recipes/ConstantFolding.cpp
+++ b/src/recipes/ConstantFolding.cpp
@@ -44,7 +44,7 @@ void Aidge::constantFolding(std::shared_ptr<GraphView> graph) {
                     }
 
                     const auto& producer = std::static_pointer_cast<Producer_Op>(input.first->getOperator());
-                    if (!producer->getAttr<bool>("Constant")) {
+                    if (!producer->constant()) {
                         Log::info("Node {} (of type {}) not foldable because Producer input {} not Constant",
                             node->name(), node->type(), input.first->name());
                         foldable = false;
diff --git a/src/recipes/ExplicitCastMove.cpp b/src/recipes/ExplicitCastMove.cpp
index 7d836c3acc835c5ed3fe014db6787029dc318afd..c860b9e8a0e1fcbf467eb13e1366f371d731a47d 100644
--- a/src/recipes/ExplicitCastMove.cpp
+++ b/src/recipes/ExplicitCastMove.cpp
@@ -73,7 +73,7 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
 
         IOIndex_t inputIdx = 0;
         for (auto parent : node->inputs()) {
-            // TODO: possible optimization: currently, a Cast/Move Operator may 
+            // TODO: possible optimization: currently, a Cast/Move Operator may
             // be added several time to the same output, if it has multiple childs,
             // even if it is the same conversion each time.
             if (parent.first != nullptr) {
@@ -91,8 +91,8 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
 
                 if (node->type() != Cast_Op::Type && input->dataType() != output->dataType()) {
                     // Change of date type => a Cast operator is required
-                    castOp = Cast();
-                    castOp->getOperator()->setDataType(output->dataType());
+                    castOp = Cast(output->dataType());
+                    // castOp->getOperator()->setDataType(output->dataType());
                     castOp->getOperator()->setBackend(device.first, device.second);
 
                     if (moveOp == nullptr) {
diff --git a/src/recipes/ExplicitTranspose.cpp b/src/recipes/ExplicitTranspose.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7ff971b7e436219d5dfbb7cbadbaf780d3f1aeda
--- /dev/null
+++ b/src/recipes/ExplicitTranspose.cpp
@@ -0,0 +1,125 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/recipes/Recipes.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Transpose.hpp"
+
+void Aidge::explicitTranspose(std::shared_ptr<GraphView> graph) {
+    // First, remove existing Transpose operators, if not needed anymore
+    auto nodes = graph->getNodes();
+    for (auto node : nodes) {
+        AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+        const auto& output = std::static_pointer_cast<OperatorTensor>(node->getOperator())->getOutput(0);
+
+        if (node->type() == Transpose_Op::Type) {
+            // Remove existing Transpose operators, if not needed anymore
+            AIDGE_INTERNAL_ASSERT(node->inputs().size() == 1);
+            const auto parent = node->inputs()[0];
+            // Check parent is not nullptr, as this Operator may be an entry point of the graph without parent
+            if (parent.first != nullptr) {
+                AIDGE_ASSERT(parent.first->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+                const auto& input = std::static_pointer_cast<OperatorTensor>(parent.first->getOperator())->getOutput(parent.second);
+
+                if (input->dataFormat() != DataFormat::Default
+                    && output->dataFormat() != DataFormat::Default
+                    && input->dataFormat() == output->dataFormat())
+                {
+                    // Add direct connection bypassing Transpose node
+                    const auto childs = node->outputs()[0];
+                    for (const auto& child : childs) {
+                        parent.first->addChild(child.first, parent.second, child.second);
+                    }
+
+                    // Remove all node connections
+                    node->resetConnections();
+                    // Remove node from view
+                    graph->remove(node);
+                }
+            }
+        }
+    }
+
+    // Second, insert Transpose operator between node inputs and parent output, if needed
+    nodes = graph->getNodes();
+    for (auto node : nodes) {
+        // TODO: currently, Operator data type is only reflected in its output tensor data type.
+        // But an Operator might have multiple outputs of different data type(?)
+        const auto& output = std::static_pointer_cast<OperatorTensor>(node->getOperator())->getOutput(0);
+
+        IOIndex_t inputIdx = 0;
+        for (auto parent : node->inputs()) {
+            // TODO: possible optimization: currently, a Transpose Operator may
+            // be added several times to the same output, if it has multiple children,
+            // even if it is the same conversion each time.
+            if (parent.first != nullptr) {
+                const auto& input = std::static_pointer_cast<OperatorTensor>(parent.first->getOperator())->getOutput(parent.second);
+
+                if ((node->type() != Transpose_Op::Type
+                    && input->dataFormat() != DataFormat::Default
+                    && output->dataFormat() != DataFormat::Default
+                    && input->dataFormat() != output->dataFormat()))
+                {
+                    // Allow Transpose fuse only if data format is fully specified before and after in this recipe
+                    bool fuseTranspose = false;
+                    if (parent.first->type() == Transpose_Op::Type && parent.first->getChildren().size() == 1) {
+                        const auto& parentInput = std::static_pointer_cast<OperatorTensor>(parent.first->getOperator())->getInput(0);
+                        if (parentInput->dataFormat() != DataFormat::Default) {
+                            fuseTranspose = true;
+                        }
+                    }
+
+                    if (fuseTranspose) {
+                        // Do not insert a new Transpose if the only parent is already a Transpose!
+                        const auto& parentInput = std::static_pointer_cast<OperatorTensor>(parent.first->getOperator())->getInput(0);
+                        if (parentInput->dataFormat() == output->dataFormat()) {
+                            // Case 1: same data format than before parent Transpose
+                            // => remove Transpose altogether
+                            const auto parentParent = parent.first->inputs()[0];
+                            // Add direct connection bypassing Transpose node
+                            parentParent.first->addChild(node, parentParent.second, 0);
+                            // Remove all node connections
+                            parent.first->resetConnections();
+                            // Remove node from view
+                            graph->remove(parent.first);
+                        }
+                        else {
+                            // Case 2: change of format
+                            // => compute the new permutation array
+                            const auto transpose = getDataFormatTranspose(parentInput->dataFormat(), output->dataFormat());
+                            auto transposeOp = std::static_pointer_cast<Transpose_Op>(parent.first->getOperator());
+                            transposeOp->setDataFormat(output->dataFormat());
+                            transposeOp->outputDimsOrder() = std::vector<DimSize_t>(transpose.begin(), transpose.end());
+                        }
+                    }
+                    else {
+                        const auto transpose = getDataFormatTranspose(input->dataFormat(), output->dataFormat());
+                        auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
+                        transposeOp->getOperator()->setDataFormat(output->dataFormat());
+                        transposeOp->getOperator()->setDataType(output->dataType());
+                        if (output->getImpl()) {
+                            const auto& device = output->getImpl()->device();
+                            transposeOp->getOperator()->setBackend(device.first, device.second);
+                        }
+                        transposeOp->addChild(node, 0, inputIdx);
+                        parent.first->addChild(transposeOp, parent.second, 0);
+
+                        graph->add(transposeOp);
+                        graph->add(parent.first);
+                        graph->add(node);
+                    }
+                }
+            }
+
+            ++inputIdx;
+        }
+    }
+}
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 76c15a0627ee65ed23c2dc385d9cd3787f9f0979..aa20a056ad789975c5b4d493a1ce48dcd7592946 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -32,10 +32,12 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
                           std::shared_ptr<Aidge::Node> batchnormNode) {
     // Case: convNode is a MetaOperator ending with a Convolution
     // eg. PaddedConv
+    std::shared_ptr<Node> metaNode;
     if (!(convNode -> getOperator() -> isAtomic())) {
-        const std::shared_ptr<MetaOperator_Op> metaNode = std::static_pointer_cast<MetaOperator_Op>(convNode -> getOperator());
-        const std::shared_ptr<GraphView>  metanodeGraph = metaNode -> getMicroGraph();
-        const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> outputNodes = metanodeGraph -> getOrderedOutputs();
+        metaNode = convNode;
+        const auto metaOp = std::static_pointer_cast<MetaOperator_Op>(convNode -> getOperator());
+        const std::shared_ptr<GraphView>  metaOpGraph = metaOp -> getMicroGraph();
+        const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> outputNodes = metaOpGraph -> getOrderedOutputs();
         if (outputNodes.size() != 1) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Bad MetaOperator argument for fuseBatchNorm recipie.");
         }
@@ -58,16 +60,17 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
     if (convNode->type() == Conv_Op<2>::Type) {
         const std::shared_ptr<Conv_Op<2>> convOpPtr =
             std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
-        convNbOutChannels = convOpPtr->getAttr<DimSize_t>("OutChannels");
-        channelsSize = convOpPtr->getAttr<DimSize_t>("InChannels");
-        kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+        convNbOutChannels = convOpPtr->outChannels();
+        channelsSize = convOpPtr->inChannels();
+        kernelDims = convOpPtr->kernelDims();
     }
     else if (convNode->type() == ConvDepthWise_Op<2>::Type) {
         const std::shared_ptr<ConvDepthWise_Op<2>> convOpPtr =
             std::static_pointer_cast<ConvDepthWise_Op<2>>(convNode->getOperator());
-        convNbOutChannels = convOpPtr->getAttr<DimSize_t>("Channels");
-        kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+        convNbOutChannels = convOpPtr->nbChannels();
+        kernelDims = convOpPtr->kernelDims();
     }
+    AIDGE_ASSERT(kernelDims.size() == 2, "fuseBatchNorm(): only 2D convolutions are supported");
 
     std::shared_ptr<Tensor> scaleBuf, shiftBuf, b_meanBuf, b_varBuf;
     const Tensor& scale = batchOp->getInput(1)->refCastFrom(scaleBuf, DataType::Float32, "cpu");
@@ -75,7 +78,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
     const Tensor& b_mean = batchOp->getInput(3)->refCastFrom(b_meanBuf, DataType::Float32, "cpu");
     const Tensor& b_var = batchOp->getInput(4)->refCastFrom(b_varBuf, DataType::Float32, "cpu");
 
-    const float epsilon = batchOp->getAttr<float>("Epsilon");
+    const float epsilon = batchOp->epsilon();
 
 
     assert(epsilon > 0.0);
@@ -98,6 +101,51 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
         fmt::print("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
     }
 
+    // Add a bias if none exists, since the fused operator will always have one
+    if (!convOp->getInput(2)) {
+        if (metaNode) {
+            // The Conv is inside a meta-operator, so the bias is added outside of it
+            // Find the correct input index of the meta-operator corresponding
+            // to the bias:
+            const auto metaOp = std::static_pointer_cast<MetaOperator_Op>(metaNode->getOperator());
+            const auto metaOpGraph = metaOp->getMicroGraph();
+            IOIndex_t inputIdx = 0;
+            for (auto input : metaOpGraph->getOrderedInputs()) {
+                if (input.first == convNode && input.second == 2) {
+                    break;
+                }
+                ++inputIdx;
+            }
+
+            auto prod = addProducer(metaNode, inputIdx, {convNbOutChannels}, "b");
+            // Add the new bias node to the same views as the meta node
+            for (auto g : metaNode->views()) {
+                g->add(prod);
+            }
+        }
+        else {
+            auto prod = addProducer(convNode, 2, {convNbOutChannels}, "b");
+            if (convNode->input(1).first) {
+                // Add the new bias node to the same views as the weights node
+                // if possible
+                for (auto g : convNode->input(1).first->views()) {
+                    g->add(prod);
+                }
+            }
+            else {
+                for (auto g : convNode->views()) {
+                    g->add(prod);
+                }
+            }
+        }
+
+        AIDGE_INTERNAL_ASSERT(convOp->getInput(2) != nullptr);
+
+        // Use the same backend for the bias as for the weights
+        convOp->getInput(2)->setBackend(convOp->getInput(1)->backend());
+        convOp->getInput(2)->zeros();
+    }
+
     std::shared_ptr<Tensor> weightBuf, biasBuf;
     Tensor& weight = convOp->getInput(1)->refCastFrom(weightBuf, DataType::Float32, "cpu");
     Tensor& bias = convOp->getInput(2)->refCastFrom(biasBuf, DataType::Float32, "cpu");
@@ -112,7 +160,6 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
                         ? b_var.get<float>(outChId) : meanVariance));
         // Weights adjustments
         for (std::size_t channel = 0; channel < channelsSize; ++channel) {
-            // TODO : Suppose kerneldims = 2
             for (std::size_t k0 = 0; k0 < kernelDims[0]; ++k0) {
                 for (std::size_t k1 = 0; k1 < kernelDims[1]; ++k1) {
                     std::vector<DimSize_t> currentIdx = {outChId, channel, k0, k1};
@@ -122,7 +169,6 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
             }
         }
 
-        // TODO : check if noBias==true is set, then set biasValue to 0
         float biasValue = bias.get<float>(outChId);
 
         biasValue = shift.get<float>(outChId) + (biasValue - b_mean.get<float>(outChId)) * factor;
diff --git a/src/recipes/FuseMulAdd.cpp b/src/recipes/FuseMulAdd.cpp
index 9a89f8af583fa8567f62ed7c8f42d160d80a1e99..6112fc47ece6bb361ebad626be7b5a6b1c2189bd 100644
--- a/src/recipes/FuseMulAdd.cpp
+++ b/src/recipes/FuseMulAdd.cpp
@@ -38,11 +38,11 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     // Fetch the output dimension throught the bias size
     std::shared_ptr<Node> bias = nullptr;
     if (addNode->getParent(0) == matmulNode) {
-        AIDGE_ASSERT(matmulNode->getParent(1), "No bias detected to produce the fuseMulAdd recipe.");
+        AIDGE_ASSERT(addNode->getParent(1), "No bias detected to produce the fuseMulAdd recipe.");
         bias = addNode->getParent(1);
     }
     else if (addNode->getParent(1) == matmulNode) {
-        AIDGE_ASSERT(matmulNode->getParent(0), "No bias detected to produce the fuseMulAdd recipe.");
+        AIDGE_ASSERT(addNode->getParent(0), "No bias detected to produce the fuseMulAdd recipe.");
         bias = addNode->getParent(0);
     }
 
@@ -52,6 +52,12 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
             && matmulNode->getParent(0) && matmulNode->getParent(0)->getOperator()->type() != Producer_Op::Type))
     {
         weight = matmulNode->getParent(1);
+        // Transpose weights because the weight Tensor is in the first input
+        auto weightOpTensor = std::static_pointer_cast<OperatorTensor>(weight->getOperator());
+        const std::shared_ptr<Aidge::Tensor>& weightTensor = weightOpTensor->getOutput(0);
+        std::vector<DimSize_t> shape =  weightTensor->dims();
+        std::reverse(shape.begin(), shape.end());
+        weightTensor->copyTranspose(*weightTensor, std::vector<Aidge::DimSize_t>({1ul, 0ul}));
     }
     else if ((matmulNode->getParent(0) && !matmulNode->getParent(1))
         || (matmulNode->getParent(0) && matmulNode->getParent(0)->getOperator()->type() == Producer_Op::Type
@@ -82,7 +88,7 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
             break;
         }
     }
-    AIDGE_ASSERT(outSize, "Couldnt get output number of channels for FC operator.");
+    AIDGE_ASSERT(outSize, "Could not get output number of channels for FC operator.");
 
     // Instanciate FC
     std::string fcName = matmulNode->name();
@@ -90,7 +96,7 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
         fcName += "_" + addNode->name();
     }
 
-    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(outSize, bias ? false : true), fcName);
+    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(), fcName);
 
     // Step 2 : Branch existing producers & create the others
     // link weights & bias
@@ -138,4 +144,4 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::GraphView> graphView){
 
 
     }
-}
\ No newline at end of file
+}
diff --git a/src/recipes/FuseToMetaOps.cpp b/src/recipes/FuseToMetaOps.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e7748936c00a20ec235ea7853f4d17e2c10261fb
--- /dev/null
+++ b/src/recipes/FuseToMetaOps.cpp
@@ -0,0 +1,41 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Matching.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+size_t Aidge::fuseToMetaOps(std::shared_ptr<GraphView> graphView, const std::string& query, const std::string& type) {
+    const auto metaType = (!type.empty()) ? type : query;
+    const auto matches = SinglePassGraphMatching(graphView).match(query);
+
+    size_t nbReplaced = 0;
+    for (const auto& match : matches) {
+        auto metaOp = MetaOperator(metaType.c_str(), match.graph->clone());
+        auto metaOpGraph = std::make_shared<GraphView>();
+        metaOpGraph->add(metaOp, false);
+        const auto success = GraphView::replace(match.graph, metaOpGraph);
+
+        if (!success) {
+            Log::notice("Could not replace sub-graph with meta operator");
+        }
+        else {
+            ++nbReplaced;
+        }
+    }
+
+    Log::info("Replaced {} (out of {}) matching sub-graph with meta operators", nbReplaced, matches.size());
+    return nbReplaced;
+}
diff --git a/src/recipes/GraphViewHelper.cpp b/src/recipes/GraphViewHelper.cpp
index b0c99bffb895dc64b20d76991911ae5f4b604c85..9522c0fe7346e78875a08d3ebf19a04dea2909e1 100644
--- a/src/recipes/GraphViewHelper.cpp
+++ b/src/recipes/GraphViewHelper.cpp
@@ -44,14 +44,3 @@ std::set<std::shared_ptr<Aidge::Tensor>> Aidge::parameters(std::shared_ptr<Aidge
     }
     return res;
 }
-
-void Aidge::compile_gradient(std::shared_ptr<Aidge::GraphView> gv) {
-    for (const auto& node : gv->getNodes()) {
-        // TODO: check that each node is an OperatorTensor
-        AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Cannot instanciate gradient of an Operator ({}) that doesn't use Tensor.", node->getOperator()->type());
-        const std::shared_ptr<OperatorTensor> op = std::dynamic_pointer_cast<OperatorTensor>(node -> getOperator());
-        for (std::size_t o = 0; o < node -> nbOutputs(); ++o) {
-            op->getOutput(o)->initGrad();
-        }
-    }
-}
diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
index 7959e1b70acab617b9c6f92160c6d501712f5945..88691c26d5d7013874c13000535ec2a3842d47d3 100644
--- a/src/recipes/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -23,6 +23,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/operator/Producer.hpp"
 
 #include "aidge/operator/Add.hpp"
 #include "aidge/operator/Concat.hpp"
@@ -33,32 +34,35 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
                                                             const Aidge::DimIdx_t axis,
                                                             const std::size_t nbSlices)
 {
+    // for now, Tiling works only with Conv Operators
     if (node->getOperator()->type() != "Conv") {
         AIDGE_INTERNAL_ASSERT("Operator should be a Convolution.");
     }
-    AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+    // TODO: back when tiling works with other Operators
+    // AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
     const auto& op = std::static_pointer_cast<OperatorTensor>(node->getOperator());
-    if (op->nbOutputs() != 1 || op->nbData() > 1) {
-        AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now.");
-    }
+    // TODO: back when tiling works with other Operators
+    // if (op->nbOutputs() != 1 || op->nbData() > 1) {
+    //     AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now.");
+    // }
     if (!op->dimsForwarded()) {
         AIDGE_INTERNAL_ASSERT("Dimensions must be forwarded before any tiling");
     }
+
+    const std::shared_ptr<Tensor>& outTensor = op->getOutput(0);
+    std::vector<DimSize_t> outputDims = outTensor->dims();
+
     // start by doing a tiling with strict dimensions division
-    const auto& outTensor = op->getOutput(0);
-    if (op->getOutput(0)->dims()[axis] % nbSlices != 0) {
+    if (outputDims[axis] % nbSlices != 0) {
         AIDGE_INTERNAL_ASSERT("axis should be a multiple of nbSlices");
     }
 
     // dimensions of a Slice
-    std::vector<DimSize_t> outputDims = outTensor->dims();
     outputDims[axis] /= nbSlices;
 
-    std::vector<DimSize_t> currentFirstDims = std::vector<DimSize_t>(outTensor->nbDims(), 0);
 
-    std::set<std::shared_ptr<Aidge::Node>> res;
     auto concat = Concat(nbSlices, axis);
-    res.insert(concat);
+    std::set<std::shared_ptr<Aidge::Node>> tiledOperator{concat};
 
     // check slice sizes
     // const auto inputDims = op->computeReceptiveField(currentFirstDims[axis], outputDims, 0);
@@ -70,36 +74,81 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
     // }
 
     std::vector<std::shared_ptr<Node>> clonedInputs = std::vector<std::shared_ptr<Node>>(node->nbInputs(), nullptr);
-    for (std::size_t i = node->nbData(); i < node ->nbInputs(); ++i) {
-        clonedInputs[i] = node -> getParent(i) -> cloneSharedOperators();
-        clonedInputs[i] -> setName(node -> name() + "_0");
-        res.insert(clonedInputs[i]);
+    for (std::size_t i = 0; i < node ->nbInputs(); ++i) {
+        if (node->inputCategory(i) == InputCategory::Param || node->inputCategory(i) == InputCategory::OptionalParam) {
+            clonedInputs[i] = node -> getParent(i) -> cloneSharedOperators();
+            clonedInputs[i] -> setName(node -> getParent(i) -> name() + "_0");
+            tiledOperator.insert(clonedInputs[i]);
+        }
     }
 
+    const std::vector<std::string> sliceInputsNames = Slice_Op::getInputsName();
+    // coordinates of the first value of the current output slice
+    std::vector<DimSize_t> currentFirstDims = std::vector<DimSize_t>(outTensor->nbDims(), 0);
     for (IOIndex_t i = 0; currentFirstDims[axis] < outTensor->dims()[axis]; currentFirstDims[axis] += outputDims[axis], ++i) {
         const auto inputDims = op->computeReceptiveField(currentFirstDims, outputDims, 0);
         auto newNode = node -> clone(); // no input associated to clones
         newNode -> setName(node->name() + "_" + std::to_string(currentFirstDims[axis]));
         clonedInputs[1] -> addChild(newNode, 0, 1);
         clonedInputs[2] -> addChild(newNode, 0, 2);
-        // Slice for input and each parameter
-        std::vector<std::int64_t> inputDimsEnd(inputDims[0].first.size());
-        for (std::size_t dim = 0; dim < inputDimsEnd.size(); ++dim) {
-            inputDimsEnd[dim] = static_cast<std::int64_t>(inputDims[0].first[dim] + inputDims[0].second[dim]) - 1;
-        }
+
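+        // The Slice bounds (starts, ends, axes, steps) are tensor inputs of the Slice
+        // operator, so each of them is fed by a dedicated Producer node created below.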
+        auto slice = Slice();
+        auto backend = outTensor->getImpl()->backend();
+
+        // Create Slice's Starts producer node
         std::vector<std::int64_t> inputDimsStart(inputDims[0].first.size());
         for (std::size_t dim = 0; dim < inputDimsStart.size(); ++dim) {
             inputDimsStart[dim] = static_cast<std::int64_t>(inputDims[0].first[dim]);
         }
-        std::vector<std::int64_t> usedDims(inputDimsEnd.size());
-        std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int64_t>(0));
-        auto slice = Slice(inputDimsStart, inputDimsEnd, usedDims, "Slice_" + std::to_string(currentFirstDims[axis]));
+        const std::shared_ptr<Tensor> starts = std::make_shared<Tensor>();
+        starts -> setDataType(DataType::Int64);
+        starts -> setBackend(backend);
+        starts -> resize(std::vector<std::size_t>({inputDimsStart.size()}));
+        starts -> getImpl() -> copyFromHost(inputDimsStart.data(), inputDimsStart.size());
+        auto startsNode = Producer(starts, slice->name() + sliceInputsNames[1]);
+        startsNode -> addChild(slice, 0, 1);
+
+        // Create Slice's Ends producer node
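+        // ends are computed as start + size of the receptive field in each dimension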
+        std::vector<std::int64_t> inputDimsEnd(inputDims[0].first.size());
+        for (std::size_t dim = 0; dim < inputDimsEnd.size(); ++dim) {
+            inputDimsEnd[dim] = static_cast<std::int64_t>(inputDims[0].first[dim] + inputDims[0].second[dim]);
+        }
+        const std::shared_ptr<Tensor> ends = std::make_shared<Tensor>();
+        ends -> setDataType(DataType::Int64);
+        ends -> setBackend(backend);
+        ends -> resize(std::vector<std::size_t>({inputDimsEnd.size()}));
+        ends -> getImpl() -> copyFromHost(inputDimsEnd.data(), inputDimsEnd.size());
+        auto endsNode = Producer(ends, slice->name() + sliceInputsNames[2]);
+        endsNode -> addChild(slice, 0, 2);
+
+        // Create Slice's Axes producer node
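+        // the slice applies to every dimension: axes = [0, 1, ..., N-1]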
+        std::vector<std::int8_t> usedDims(inputDimsEnd.size());
+        std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int8_t>(0));
+        const std::shared_ptr<Tensor> axes = std::make_shared<Tensor>();
+        axes -> setDataType(DataType::Int8);
+        axes -> setBackend(backend);
+        axes -> resize(std::vector<std::size_t>({usedDims.size()}));
+        axes -> getImpl() -> copyFromHost(usedDims.data(), usedDims.size());
+        auto axesNode = Producer(axes, slice->name() + sliceInputsNames[3]);
+        axesNode -> addChild(slice, 0, 3);
+
+        // Create Slice's Steps producer node
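+        // all steps set to 1: every element within the slice bounds is taken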
+        std::vector<std::int64_t> inputDimsSteps(inputDimsEnd.size(), static_cast<std::int64_t>(1));
+        const std::shared_ptr<Tensor> steps = std::make_shared<Tensor>();
+        steps -> setDataType(DataType::Int64);
+        steps -> setBackend(backend);
+        steps -> resize(std::vector<std::size_t>({inputDimsSteps.size()}));
+        steps -> getImpl() -> copyFromHost(inputDimsSteps.data(), inputDimsSteps.size());
+        auto stepsNode = Producer(steps, slice->name() + sliceInputsNames[4]);
+        stepsNode -> addChild(slice, 0, 4);
+
         slice -> addChild(newNode, 0, 0);
         newNode -> addChild(concat, 0, i);
 
-        res.insert(slice);
-        res.insert(newNode);
+        tiledOperator.insert({slice, newNode, startsNode, endsNode, axesNode, stepsNode});
     }
 
-    return res;
+    return tiledOperator;
 }
\ No newline at end of file
diff --git a/src/recipes/LabelGraph.cpp b/src/recipes/LabelGraph.cpp
index ac0e6bfe197460c8c422a6c1f3b3240518ee1f29..75bcd36bf61f7c23645038bedb060cd13bdce2c5 100644
--- a/src/recipes/LabelGraph.cpp
+++ b/src/recipes/LabelGraph.cpp
@@ -22,7 +22,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == Conv_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<Conv_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvAttr::KernelDims>(), op->template getAttr<ConvAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -30,7 +30,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == ConvDepthWise_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<ConvDepthWise_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvDepthWiseAttr::KernelDims>(), op->template getAttr<ConvDepthWiseAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -38,7 +38,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == AvgPooling_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<AvgPooling_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<AvgPoolingAttr::KernelDims>(), op->template getAttr<AvgPoolingAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }
 
diff --git a/src/recipes/RemoveDropout.cpp b/src/recipes/RemoveDropout.cpp
deleted file mode 100644
index 4f8805845bd1f46fd187cba3564b031c55c4655a..0000000000000000000000000000000000000000
--- a/src/recipes/RemoveDropout.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <memory>
-
-#include "aidge/graph/Node.hpp"
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/recipes/Recipes.hpp"
-
-//Graph Regex
-#include "aidge/graphRegex/GraphRegex.hpp"
-
-
-namespace Aidge {
-    void removeDropout(std::shared_ptr<Node> dropout) {
-
-        std::set<NodePtr> nodesToRemove;
-        for (auto nodePtr: dropout->getParents())
-        {
-            if(nodePtr->type() == "Producer")
-            {
-                nodesToRemove.insert(nodePtr);
-            }
-        }
-        nodesToRemove.insert(dropout);
-        GraphView::replace(nodesToRemove, {});
-    }
-
-    void removeDropout(std::shared_ptr<MatchSolution> solution){
-
-        assert(solution->at("Dropout").size() == 1 && "Wrong number of nodes Dropout to replace\n");
-
-        for (const auto& dropout : solution->at("Dropout")) {
-
-            removeDropout(dropout);
-        }
-    }
-
-    void removeDropout(std::shared_ptr<GraphView> graphView){
-        std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
-        regex->setNodeKey("Dropout","getType($) =='Dropout'");
-        regex->addQuery("Dropout#");
-
-        for (const auto& solution : regex->match(graphView)) {
-            removeDropout(solution);
-        }
-    }
-}
diff --git a/src/recipes/RemoveNode.cpp b/src/recipes/RemoveNode.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a09c67991409dfe491d46b4ad739f9ddf5b72aef
--- /dev/null
+++ b/src/recipes/RemoveNode.cpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+
+//Graph Regex
+#include "aidge/graphRegex/GraphRegex.hpp"
+
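+// Generic removal recipe: match every node of the given type with a GraphRegex query
+// and remove it from the graph (optionally together with its Producer parents).
+// Returns the number of removed matches.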
+size_t Aidge::removeNode(std::shared_ptr<GraphView> graphView, const std::string& type, bool incProducers) {
+    std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
+    regex->setNodeKey(type, "getType($) =='" + type + "'");
+    regex->addQuery(type + "#");
+
+    const auto matches = regex->match(graphView);
+    for (const auto& solution : matches) {
+        assert(solution->at(type).size() == 1 && "Wrong number of nodes to replace\n");
+
+        std::set<NodePtr> nodesToRemove = solution->at(type);
+        if (incProducers) {
+            for (const auto& nodePtr: (*solution->at(type).begin())->getParents()) {
+                if (nodePtr != nullptr && nodePtr->type() == "Producer") {
+                    nodesToRemove.insert(nodePtr);
+                }
+            }
+        }
+        GraphView::replace(nodesToRemove, {});
+    }
+
+    return matches.size();
+}
+
+size_t Aidge::removeDropout(std::shared_ptr<GraphView> graphView) {
+    return removeNode(graphView, "Dropout", true);
+}
+
+size_t Aidge::removeIdentity(std::shared_ptr<GraphView> graphView) {
+    return removeNode(graphView, "Identity");
+}
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index af10e3dcd3ead044f8619c40570936f53039d9a2..d63c93deb1ba2d7974ffc6e5b8ccd1e9c57dc76c 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -197,18 +197,20 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S
             bool isStillConsumer = false;
             // Only look for data inputs. If no data is available on data input,
             // by definition, no parameter can be consumed on parameter inputs.
-            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbData(); ++inputIdx) {
-                AIDGE_LOG_CONTEXT("Consumer node {} input #{}", namePtrTable.at(consumer), inputIdx);
-
-                if (consumer->getOperator()->getNbConsumedData(inputIdx) <
-                            getNbAvailableData(consumer, inputIdx)) {
-                    Log::debug("  still consumer: C{} < P{} for input #{}",
-                        consumer->getOperator()->getNbConsumedData(inputIdx),
-                        getNbAvailableData(consumer, inputIdx), inputIdx);
-
-                    // there is still data to consume
-                    isStillConsumer = true;
-                    break;
+            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) {
+                if (consumer->inputCategory(inputIdx) == InputCategory::Data) {
+                    AIDGE_LOG_CONTEXT("Consumer node {} input #{}", namePtrTable.at(consumer), inputIdx);
+
+                    if (consumer->getOperator()->getNbConsumedData(inputIdx) <
+                                getNbAvailableData(consumer, inputIdx)) {
+                        Log::debug("  still consumer: C{} < P{} for input #{}",
+                            consumer->getOperator()->getNbConsumedData(inputIdx),
+                            getNbAvailableData(consumer, inputIdx), inputIdx);
+
+                        // there is still data to consume
+                        isStillConsumer = true;
+                        break;
+                    }
                 }
             }
 
@@ -638,32 +640,29 @@ Aidge::Elts_t Aidge::Scheduler::getNbAvailableData(const std::shared_ptr<Node>&
         // We are inside an upper operator (for instance a MetaOperator)
         // We need to connect the "local" producer-consumer model to the upper
         // one, by mapping local node inputs to the upper node inputs.
-        IOIndex_t nodeInputIdx = 0;
+        IOIndex_t upperInputIdx = 0;
         for (const auto& input : mGraphView->getOrderedInputs()) {
-            if (input.first == node) {
+            if (input.first == node && input.second == inputIdx) {
                 // Current node is an input
-                const auto upperInput = upperNode->inputs()[nodeInputIdx];
+                const auto upperInput = upperNode->inputs()[upperInputIdx];
                 if (upperInput.first) {
                     return upperInput.first->getOperator()->getNbProducedData(upperInput.second);
                 }
             }
-            ++nodeInputIdx;
+            ++upperInputIdx;
         }
     }
 
-    // Otherwise, two cases:
+    // Otherwise, it means that the input is not connected. Two cases:
+    // - No data is available: the input is assumed to be optional
+    // - A valid tensor exists:
     if (node->getOperator()->getRawInput(inputIdx)) {
-        // Input is not connected but a valid tensor exists
         // => This means data was fed manually to the input, without a Producer
         // In this case, we assume a single-use data (unlike a Producer, which
         // keep producing the data each time it is needed).
         fmt::print("No producer node attached to input#{} for node {} ({})\n", inputIdx, node->name(), node->type());
         return Elts_t::DataElts(std::static_pointer_cast<Tensor>(node->getOperator()->getRawInput(inputIdx))->size());
     }
-    else {
-        // Input is not connected, this is an error
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input#{} for node {} ({})\n", inputIdx, node->name(), node->type());
-    }
 
     return Elts_t::NoneElts();
 }
diff --git a/src/scheduler/SequentialScheduler.cpp b/src/scheduler/SequentialScheduler.cpp
index 74b1b3f0c6e9be164792460669821744661c15b3..88b5e98bc62456bd59dc235c3112396daaeddd24 100644
--- a/src/scheduler/SequentialScheduler.cpp
+++ b/src/scheduler/SequentialScheduler.cpp
@@ -73,10 +73,7 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, const std::vector<std
     }
 }
 
-void Aidge::SequentialScheduler::backward(bool instanciateGrad) {
-    // create ad set Grad values
-    if (instanciateGrad) { compile_gradient(mGraphView); }
-
+void Aidge::SequentialScheduler::backward() {
     // TODO: Check output grad are not empty
 
     // Generate scheduling *only if empty*
diff --git a/src/utils/Attributes.cpp b/src/utils/Attributes.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e79db53a60a955e3502e070cda5818d3d7b6c922
--- /dev/null
+++ b/src/utils/Attributes.cpp
@@ -0,0 +1,96 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/utils/Attributes.hpp"
+
+#include <cctype>  // std::isdigit, std::islower, std::isupper, std::tolower,
+                   // std::toupper
+#include <string>
+
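+// Convert a snake_case identifier to PascalCase, e.g. "kernel_dims" -> "KernelDims".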
+std::string Aidge::Attributes::snakeToPascal(const std::string& snakeCase) {
+    std::string result;
+    bool to_upper = true; // Start with uppercase for PascalCase
+
+    for (char ch : snakeCase) {
+        if (ch == '_') {
+            to_upper = true; // Next character should be uppercase
+        } else {
+            if (to_upper) {
+                result += std::toupper(ch);
+                to_upper = false; // Reset flag after making a character uppercase
+            } else {
+                result += ch;
+            }
+        }
+    }
+    return result;
+}
+
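+// Convert a PascalCase identifier to snake_case, e.g. "KernelDims" -> "kernel_dims".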
+std::string Aidge::Attributes::pascalToSnake(const std::string& pascalCase) {
+    std::string result;
+
+    for (char ch : pascalCase) {
+        if (std::isupper(ch)) {
+            if (!result.empty()) {
+                result += '_';
+            }
+            result += std::tolower(ch);
+        } else {
+            result += ch;
+        }
+    }
+    return result;
+}
+
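+// A string is considered PascalCase if it starts with an uppercase letter, contains
+// only letters and digits, and never has two consecutive uppercase letters.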
+bool Aidge::Attributes::isPascalCase(const std::string& str) {
+    if (str.empty() || !std::isupper(str[0])) {
+        return false;
+    }
+
+    bool expectUpper = false;
+    for (size_t i = 1; i < str.size(); ++i) {
+        if (str[i] == '_') {
+            return false;
+        }
+        if (std::isupper(str[i])) {
+            if (!expectUpper) {
+                return false;
+            }
+            expectUpper = false;
+        } else if (std::islower(str[i]) || std::isdigit(str[i])) {
+            expectUpper = true;
+        } else {
+            return false;
+        }
+    }
+    return true;
+}
+
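+// A string is considered snake_case if it contains only lowercase letters, digits
+// and underscores, with no two consecutive underscores.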
+bool Aidge::Attributes::isSnakeCase(const std::string& str) {
+    if (str.empty()) {
+        return false;
+    }
+
+    bool lastCharWasUnderscore = false;
+    for (char ch : str) {
+        if (ch == '_') {
+            if (lastCharWasUnderscore) {
+                return false;
+            }
+            lastCharWasUnderscore = true;
+        } else if (!std::islower(ch) && !std::isdigit(ch)) {
+            return false;
+        } else {
+            lastCharWasUnderscore = false;
+        }
+    }
+    return true;
+}
diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
index 655fd725e9d7d913d24c6552571ae3b91e3605b4..62e90dcbd7c20548019afae1a04f84b3e1d4484a 100644
--- a/unit_tests/data/Test_Tensor.cpp
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -40,7 +40,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T_default.dims() == std::vector<DimSize_t>({})) &&
             (T_default.strides() == std::vector<DimSize_t>({1})) &&
             (T_default.getImpl() == nullptr) &&
-            (T_default.grad() == nullptr) &&
+            (T_default.grad() != nullptr) &&
             (T_default.isContiguous() == true)
         ));
     }
@@ -53,7 +53,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({})) &&
             (T.strides() == std::vector<DimSize_t>({1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
     }
@@ -67,7 +67,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == Tdims) &&
             (T.strides() == std::vector<DimSize_t>({5040,2520,840,210,42,7,1})) &&
             (T.getImpl() == nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
     }
@@ -83,7 +83,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({2})) &&
             (T.strides() == std::vector<DimSize_t>({1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
 
@@ -97,7 +97,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({2,2,2})) &&
             (T.strides() == std::vector<DimSize_t>({4,2,1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
         REQUIRE_NOTHROW(T = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
@@ -113,7 +113,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({2,2,2,2})) &&
             (T.strides() == std::vector<DimSize_t>({8,4,2,1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
     }
@@ -157,7 +157,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
                 (T.dims() == Tclone.dims()) &&
                 (T.strides() == Tclone.strides()) &&
                 (T.getImpl() != Tclone.getImpl()) &&
-                (Tclone.grad() == nullptr) &&
+                (Tclone.grad() != nullptr) &&
                 (Tclone.isContiguous() == true)
             ));
             REQUIRE(Tclone == T);
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 8403686d16da15e7e8ad4616029a241d6197d450..8e9f5a27e275a5ce56ddf57fa092ec96cec84711 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -399,9 +399,7 @@ TEST_CASE("[core/graph] GraphView(resetConnections)") {
         conv1->resetConnections(false);
 
         REQUIRE(conv->output(0).size() == 0);
-        for (std::size_t i = 0; i < conv1->nbData(); ++i) {
-        REQUIRE((conv1->input(i) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
-        }
+        REQUIRE((conv1->input(0) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
         REQUIRE((conv1->input(1) == std::pair<std::shared_ptr<Node>, IOIndex_t>(prod1, 0)));
         REQUIRE((conv1->input(2) == std::pair<std::shared_ptr<Node>, IOIndex_t>(prod2, 0)));
         REQUIRE((conv2->input(0) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
@@ -554,6 +552,69 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myConv, other2}));
     }
 
+    SECTION("replace same input category 1") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", {}, 1, "other_input");
+        auto other1 = GenericOperator("Other", {InputCategory::Data}, 1, "other1");
+        auto myOld = GenericOperator("myOld", {InputCategory::Data}, 1, "old");
+        auto other2 = GenericOperator("Other", {InputCategory::Data}, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myOld);
+        myOld->addChild(other2);
+        g->add({other1, myOld, other2});
+
+        auto myNew =  GenericOperator("myNew", {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData}, 1, "new");
+
+        GraphView::replace({myOld}, {myNew});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myNew, other2}));
+        REQUIRE(myNew->input(0).first == other1);
+        REQUIRE(myNew->input(1).first == nullptr);
+        REQUIRE(myNew->input(2).first == nullptr);
+    }
+
+    SECTION("replace same input category 2") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", {}, 1, "other_input");
+        auto other1 = GenericOperator("Other", {InputCategory::Data}, 1, "other1");
+        auto myOld = GenericOperator("myOld", {InputCategory::Param}, 1, "old");
+        auto other2 = GenericOperator("Other", {InputCategory::Data}, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myOld, 0, 0);
+        myOld->addChild(other2);
+        g->add({other1, myOld, other2});
+
+        auto myNew =  GenericOperator("myNew", {InputCategory::Data, InputCategory::Param, InputCategory::Data}, 1, "new");
+
+        GraphView::replace({myOld}, {myNew});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myNew, other2}));
+        REQUIRE(myNew->input(0).first == nullptr);
+        REQUIRE(myNew->input(1).first == other1);
+        REQUIRE(myNew->input(2).first == nullptr);
+    }
+
+    SECTION("replace same input category 3") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", {}, 1, "other_input");
+        auto other1 = GenericOperator("Other", {InputCategory::Data}, 1, "other1");
+        auto myOld = GenericOperator("myOld", {InputCategory::Data}, 1, "old");
+        auto other2 = GenericOperator("Other", {InputCategory::Data}, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myOld);
+        myOld->addChild(other2);
+        g->add({other1, myOld, other2});
+
+        auto myNew =  GenericOperator("myNew", {InputCategory::Data, InputCategory::Data, InputCategory::Data}, 1, "new");
+
+        GraphView::replace({myOld}, {myNew});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myNew, other2}));
+        REQUIRE(myNew->input(0).first == other1);
+        REQUIRE(myNew->input(1).first == other1);
+        REQUIRE(myNew->input(2).first == other1);
+    }
+
     SECTION("Change every Nodes in a GraphView") {
         auto matmulWeight0 = GenericOperator("Producer", 0, 0, 1, "matmul_w0");
         auto addBias0 = GenericOperator("Producer", 0, 0, 1, "add_b0");
diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6abb4d37114d0952feb13c6cfbee66bd65dc5748
--- /dev/null
+++ b/unit_tests/graph/Test_Matching.cpp
@@ -0,0 +1,383 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include <fmt/chrono.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Testing.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/MetaOperatorDefs.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/graph/Matching.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+using namespace Aidge;
+
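+// Helper: check that each matching result, identified by the name of its root node,
+// contains exactly the expected set of node names.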
+void checkMatches(const std::set<SinglePassGraphMatching::MatchingResult>& results, const std::map<std::string, std::set<std::string>>& expected) {
+    REQUIRE(results.size() == expected.size());
+
+    for (const auto& result : results) {
+        const auto found = nodePtrTo(result.graph->getNodes(), nodePtrToName);
+        fmt::print("Found: {}\n", found);
+
+        const auto rootNode = result.graph->rootNode()->name();
+        const auto expectedSet = expected.at(rootNode);
+        REQUIRE(found == expectedSet);
+    }
+}
+
+TEST_CASE("[core/graph] Matching") {
+    auto g1 = Sequential({
+        Producer({16, 3, 512, 512}, "dataProvider"),
+        Conv(3, 4, {5, 5}, "conv1"),
+        ReLU("relu1"),
+        PaddedConv(4, 8, {5, 5}, "conv2", {1, 1}, {2, 2, 2, 2}),
+        ReLU("relu2"),
+        PaddedConv(8, 16, {3, 3}, "conv3", {1, 1}, {2, 2, 2, 2}),
+        ReLU("relu3"),
+        PaddedConv(8, 16, {5, 5}, "conv4", {1, 1}, {2, 2, 2, 2}),
+        Add(2, "add"),
+        PaddedConv(8, 16, {5, 5}, "conv5", {1, 1}, {2, 2, 2, 2}),
+        ReLU("relu5"),
+        Add(2, "add2")
+    });
+
+    g1->getNode("relu3")->addChild(g1->getNode("add"), 0, 1);
+    g1->getNode("conv5")->addChild(g1->getNode("add2"), 0, 1);
+    g1->updateInputsOutputs();
+
+    g1->save("Test_examples_before_expand", true);
+    expandMetaOps(g1);
+    g1->save("Test_examples", true);
+
+    SECTION("Conv->(ReLU->Pad->Conv)*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU->Pad->Conv)*");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "relu1", "relu2"}},
+            {"conv2_conv", {"conv2_conv", "conv3_conv", "conv3_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv"}},
+            {"conv4_conv", {"conv4_conv"}},
+            {"conv5_conv", {"conv5_conv"}}
+        });
+    }
+
+    SECTION("Conv->ReLU;ReLU->Pad") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv->ReLU;ReLU->Pad"));
+    }
+
+    SECTION("Conv->ReLU#1;ReLU#2->Pad") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv->ReLU#1;ReLU#2->Pad"));
+    }
+
+    SECTION("Conv?->ReLU") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv?->ReLU"));
+    }
+
+    SECTION("(Add#<*~.)*") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("(Add#<*~.)*"));
+    }
+
+    SECTION("Conv->(ReLU~>Pad->Conv)*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU~>Pad->Conv)*");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}},
+            {"conv2_conv", {"conv2_conv", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu2", "relu3"}},
+            {"conv3_conv", {"conv3_conv", "conv4_conv", "conv4_pad", "relu3"}},
+            {"conv4_conv", {"conv4_conv"}},
+            {"conv5_conv", {"conv5_conv"}}
+        });
+    }
+
+    SECTION("Conv->(ReLU~>Pad->Conv)* [disjoint]") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU~>Pad->Conv)*", true);
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}},
+            {"conv5_conv", {"conv5_conv"}}
+        });
+    }
+
+    SECTION("Conv~>(ReLU~>Pad->Conv)*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv~>(ReLU~>Pad->Conv)*");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}},
+            {"conv2_conv", {"conv2_conv", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu2", "relu3"}},
+            {"conv3_conv", {"conv3_conv", "conv4_conv", "conv4_pad", "relu3"}},
+            {"conv4_conv", {"conv4_conv"}},
+            {"conv5_conv", {"conv5_conv"}}
+        });
+    }
+
+    SECTION("Pad->Conv#->ReLU;Conv#<1-Producer;Conv#<2-Producer") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;Conv#<1-Producer;Conv#<2-Producer");
+
+        checkMatches(results, {
+            {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
+            {"conv3_pad", {"conv3_b", "conv3_conv", "conv3_pad", "conv3_w", "relu3"}}
+        });
+    }
+
+    SECTION("Pad->Conv#~>ReLU;Conv#<1-Producer;Conv#<2-Producer") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;Conv#<1-Producer;Conv#<2-Producer");
+
+        checkMatches(results, {
+            {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
+            {"conv3_pad", {"conv3_b", "conv3_conv", "conv3_pad", "conv3_w", "relu3"}},
+            {"conv5_pad", {"conv5_b", "conv5_conv", "conv5_pad", "conv5_w", "relu5"}}
+        });
+    }
+
+    SECTION("Pad->Conv#~>ReLU;(Conv#<*-Producer){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;(Conv#<*-Producer){2}");
+
+        checkMatches(results, {
+            {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
+            {"conv3_pad", {"conv3_b", "conv3_conv", "conv3_pad", "conv3_w", "relu3"}},
+            {"conv5_pad", {"conv5_b", "conv5_conv", "conv5_pad", "conv5_w", "relu5"}}
+        });
+    }
+
+    SECTION("Pad->Conv#->ReLU;(Conv#<*-Producer){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;(Conv#<*-Producer){2}");
+
+        checkMatches(results, {
+            {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
+            {"conv3_pad", {"conv3_b", "conv3_conv", "conv3_pad", "conv3_w", "relu3"}}
+        });
+    }
+
+    SECTION("Pad->Conv#~>ReLU;(Conv#<*-.){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;(Conv#<*-.){2}");
+
+        checkMatches(results, {
+            {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
+            {"conv3_pad", {"conv3_b", "conv3_conv", "conv3_pad", "conv3_w", "relu3"}},
+            {"conv5_pad", {"conv5_b", "conv5_conv", "conv5_pad", "conv5_w", "relu5"}}
+        });
+    }
+
+    SECTION("Pad->Conv#->ReLU;(Conv#<*-.){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;(Conv#<*-.){2}");
+
+        checkMatches(results, {
+            {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
+            {"conv3_pad", {"conv3_b", "conv3_conv", "conv3_pad", "conv3_w", "relu3"}}
+        });
+    }
+
+    SECTION("Conv#~>ReLU*;Conv#<-Pad*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU*;Conv#<-Pad*");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv", "conv3_pad", "relu3"}},
+            {"conv4_conv", {"conv4_conv", "conv4_pad"}},
+            {"conv5_conv", {"conv5_conv", "conv5_pad", "relu5"}}
+        });
+    }
+
+    SECTION("Conv#->ReLU*;Conv#<-Pad*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU*;Conv#<-Pad*");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv", "conv3_pad", "relu3"}},
+            {"conv4_conv", {"conv4_conv", "conv4_pad"}},
+            {"conv5_conv", {"conv5_conv", "conv5_pad"}}
+        });
+    }
+
+    SECTION("Conv#->ReLU?-*>Add#1?->ReLU?;Conv#<-Pad?;(Add#1<*-.)?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU?-*>Add#1?->ReLU?;Conv#<-Pad?;(Add#1<*-.)?");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv", "conv3_pad", "relu3"}},
+            {"conv4_conv", {"add", "conv4_conv", "conv4_pad"}},
+            {"conv5_conv", {"conv5_conv", "conv5_pad"}}
+        });
+    }
+
+    SECTION("Conv#~>ReLU?-*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*-.)?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?-*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*-.)?");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv", "conv3_pad", "relu3"}},
+            {"conv4_conv", {"add", "conv4_conv", "conv4_pad"}},
+            {"conv5_conv", {"add2", "conv5_conv", "conv5_pad", "relu5"}}
+        });
+    }
+
+    SECTION("Conv#~>ReLU?~*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*~.)?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?~*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*~.)?");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"add", "conv3_conv", "conv3_pad", "conv4_conv", "relu3"}},
+            {"conv4_conv", {"add", "conv4_conv", "conv4_pad", "relu3"}},
+            {"conv5_conv", {"add2", "conv5_conv", "conv5_pad", "relu5"}}
+        });
+    }
+
+    SECTION("Conv#->ReLU?;Conv#<-Pad?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU?;Conv#<-Pad?");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv", "conv3_pad", "relu3"}},
+            {"conv4_conv", {"conv4_conv", "conv4_pad"}},
+            {"conv5_conv", {"conv5_conv", "conv5_pad"}}
+        });
+    }
+
+    SECTION("Conv#~>ReLU?;Conv#<-Pad?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?;Conv#<-Pad?");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv", "conv3_pad", "relu3"}},
+            {"conv4_conv", {"conv4_conv", "conv4_pad"}},
+            {"conv5_conv", {"conv5_conv", "conv5_pad", "relu5"}}
+        });
+    }
+
+    SECTION("(Conv|ReLU)->Add") {
+        const auto results = SinglePassGraphMatching(g1).match("(Conv|ReLU)->Add");
+
+        checkMatches(results, {
+            {"conv4_conv", {"add", "conv4_conv"}},
+            {"relu5", {"add2", "relu5"}}
+        });
+    }
+
+    SECTION("Add<*-.") {
+        const auto results = SinglePassGraphMatching(g1).match("Add<*-.");
+
+        checkMatches(results, {
+            {"add", {"add", "conv4_conv"}},
+            {"add2", {"add2", "relu5"}}
+        });
+    }
+
+    SECTION("(Add#<*~.)+") {
+        const auto results = SinglePassGraphMatching(g1).match("(Add#<*~.)+");
+
+        checkMatches(results, {
+            {"add", {"add", "conv4_conv", "relu3"}},
+            {"add2", {"add2", "conv5_conv", "relu5"}}
+        });
+    }
+
+    SECTION("Conv~*>(ReLU&Add)") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv~*>(ReLU&Add)");
+
+        checkMatches(results, {
+            {"conv5_conv", {"add2", "conv5_conv", "relu5"}}
+        });
+    }
+
+    SECTION("Conv~>(ReLU&Add)") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv~>(ReLU&Add)");
+        REQUIRE(results.size() == 0);
+    }
+
+    SECTION("ReLU~*>((Pad->Conv-*>Add#)&Add#)") {
+        const auto results = SinglePassGraphMatching(g1).match("ReLU~*>((Pad->Conv-*>Add#)&Add#)");
+
+        checkMatches(results, {
+            {"relu3", {"add", "conv4_conv", "conv4_pad", "relu3"}}
+        });
+    }
+
+    SECTION("ReLU-*>((Pad->Conv-*>Add)&Add)") {
+        const auto results = SinglePassGraphMatching(g1).match("ReLU-*>((Pad->Conv-*>Add)&Add)");
+        REQUIRE(results.size() == 0);
+    }
+
+    SECTION("Pad->Conv[3x3]->ReLU") {
+        auto gm = SinglePassGraphMatching(g1);
+        gm.addNodeLambda("3x3", [](const NodePtr& node) {
+            const std::shared_ptr<Conv_Op<2>> op =
+                std::static_pointer_cast<Conv_Op<2>>(node->getOperator());
+            return (op->kernelDims() == std::array<DimSize_t, 2>({3, 3}));
+        });
+
+        const auto results = gm.match("Pad->Conv[3x3]->ReLU");
+
+        checkMatches(results, {
+            {"conv3_pad", {"conv3_conv", "conv3_pad", "relu3"}}
+        });
+    }
+
+    SECTION(".[test]->Pad") {
+        auto gm = SinglePassGraphMatching(g1);
+        gm.addNodeLambda("test", [](const NodePtr& node) {
+            return (node->type() == "Add" || (node->type() == "ReLU" && node->name() == "relu1"));
+        });
+
+        const auto results = gm.match(".[test]->Pad");
+
+        checkMatches(results, {
+            {"add", {"add", "conv5_pad"}},
+            {"relu1", {"relu1", "conv2_pad"}}
+        });
+    }
+
+    SECTION("Conv->ReLU [perf]") {
+        const size_t nbTests = 3;
+        std::mt19937::result_type seed(1);
+
+        for (std::size_t test = 0; test < nbTests; ++test) {
+            RandomGraph randGraph;
+            randGraph.types = {"Conv", "ReLU", "Dummy"};
+            randGraph.typesWeights = {0.4, 0.4, 0.2};
+            randGraph.avgIn = 1;
+            randGraph.maxIn = 1;
+            randGraph.maxOut = 1;
+            randGraph.avgOut = 1;
+            randGraph.density = 0.9;
+            randGraph.acyclic = true;
+            const auto g1 = std::make_shared<GraphView>("g1");
+
+            Log::setConsoleLevel(Log::Warn);
+            g1->add(randGraph.gen(seed, 100));
+            g1->save("graph_single_pass");
+
+            auto gm = SinglePassGraphMatching(g1);
+
+            const auto start = std::chrono::system_clock::now();
+            const auto results = gm.match("Conv->ReLU#;ReLU#->Dummy");
+            const auto end = std::chrono::system_clock::now();
+            const auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
+
+            REQUIRE(results.size() > 0);
+            ++seed;
+
+            fmt::print("Found: {} - duration: {}\n", results.size(), duration);
+        }
+    }
+}
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
index a62b9a8602b494f26fb47061b899eaba41129a1f..fbbc3f766857f15af0da8004c35078993d71e973 100644
--- a/unit_tests/graphRegex/Test_GraphRegex.cpp
+++ b/unit_tests/graphRegex/Test_GraphRegex.cpp
@@ -31,11 +31,11 @@ TEST_CASE("GraphRegexUser") {
         g1->addChild(fc, "c");
         g1->addChild(conv2, "c1");
         g1->addChild(fc2, "c2");
-        
+
         ///
         std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
         sut->setNodeKey("C",+[](NodePtr NodeOp){return NodeOp->type() == "FC";});
-        
+
         sut->setNodeKey("A","C($)==True");
         sut->addQuery("A");
         auto match = sut->match(g1);
@@ -163,14 +163,14 @@ TEST_CASE("GraphRegexUser") {
         auto w1 = Producer({5,5},"W1");
         auto input = Producer({2,5}, "input");
 
-        input->addChild(matmul0, 0, 0);
-        w0->addChild(matmul0, 0, 1);
+        input->addChild(matmul0, 0, 1);
+        w0->addChild(matmul0, 0, 0);
 
         matmul0->addChild(add0, 0, 0);
         b0->addChild(add0, 0, 1);
 
-        add0->addChild(matmul1, 0, 0);
-        w1->addChild(matmul1, 0, 1);
+        add0->addChild(matmul1, 0, 1);
+        w1->addChild(matmul1, 0, 0);
 
         matmul1->addChild(add1, 0, 0);
         b1->addChild(add1, 0, 1);
@@ -201,4 +201,4 @@ TEST_CASE("GraphRegexUser") {
 
     }
 
-}
\ No newline at end of file
+}
diff --git a/unit_tests/operator/Test_GatherImpl.cpp b/unit_tests/operator/Test_GatherImpl.cpp
index 2995963a35cda5b0c5794b1d15e4064438b58ece..02e8e74890918726212e09fdd9f969ce0863af83 100644
--- a/unit_tests/operator/Test_GatherImpl.cpp
+++ b/unit_tests/operator/Test_GatherImpl.cpp
@@ -14,12 +14,13 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Gather.hpp"
 
+#include <cstdint>
 #include <memory>
 
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Gather(forward)") {
+TEST_CASE("[cpu/operator] Gather(forward)", "[Gather][CPU]") {
     SECTION("2D Tensor axis 0") {
         std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> {
             {
@@ -42,10 +43,10 @@ TEST_CASE("[cpu/operator] Gather(forward)") {
             }
         });
 
-        std::shared_ptr<Node> myGather = Gather({1, 2}, {1, 2}, 0);
+        std::shared_ptr<Node> myGather = Gather(std::int8_t(0));
         auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
         op->associateInput(0,input);
-        // op->associateInput(1,indexes);
+        op->associateInput(1,indexes);
         op->setDataType(DataType::Int32);
         op->setBackend("cpu");
         myGather->forward();
@@ -82,10 +83,42 @@ TEST_CASE("[cpu/operator] Gather(forward)") {
             }
         });
 
-        std::shared_ptr<Node> myGather = Gather({0, 2}, {1, 2}, 1);
+        std::shared_ptr<Node> myGather = Gather(1);
+        auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
+        op->associateInput(0,input);
+        op->associateInput(1,indexes);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myGather->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+
+    }
+    SECTION("Init with attributes") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> {
+            {
+                {1, 2, 3},
+                {4, 5, 6},
+                {7, 8, 9}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,3,1,2> {
+            {
+                {
+                    {1, 3}
+                },
+                {
+                    {4, 6}
+                },
+                {
+                    {7, 9}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> myGather = Gather(1, {0, 2}, {1, 2});
         auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
         op->associateInput(0,input);
-        // op->associateInput(1,indexes);
         op->setDataType(DataType::Int32);
         op->setBackend("cpu");
         myGather->forward();
diff --git a/unit_tests/operator/Test_GenericOperator.cpp b/unit_tests/operator/Test_GenericOperator.cpp
index 8d634cc3a105c423b54b6003f41204aeb1fc5335..41bad69749fd82f892c6faa625739d0493396c73 100644
--- a/unit_tests/operator/Test_GenericOperator.cpp
+++ b/unit_tests/operator/Test_GenericOperator.cpp
@@ -20,7 +20,7 @@ using namespace Aidge;
 TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("INT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        const char* key = "intAttr";
+        const char* key = "IntAttr";
         Testop.addAttr(key, int(5));
         int registeredVal = Testop.getAttr<int>(key);
         REQUIRE(registeredVal == 5);
@@ -28,21 +28,21 @@ TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("LONG") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         long value = 3;
-        const char* key = "longAttr";
+        const char* key = "LongAttr";
         Testop.addAttr(key, value);
         REQUIRE(Testop.getAttr<long>(key) == value);
     }
     SECTION("FLOAT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         float value = 2.0;
-        const char* key = "floatAttr";
+        const char* key = "FloatAttr";
         Testop.addAttr(key, value);
         REQUIRE(Testop.getAttr<float>(key) == value);
     }
      SECTION("VECTOR<BOOL>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<bool> value = {true, false, false, true, true};
-        const char* key = "vect";
+        const char* key = "Vect";
         Testop.addAttr(key, value);
 
         REQUIRE(Testop.getAttr<std::vector<bool>>(key).size() == value.size());
@@ -53,7 +53,7 @@ TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("VECTOR<INT>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<int> value = {1, 2, 3, 4, 5, 6, 7, 8, 9};
-        const char* key = "vect";
+        const char* key = "Vect";
         Testop.addAttr(key, value);
 
         REQUIRE(Testop.getAttr<std::vector<int>>(key).size() == value.size());
@@ -66,23 +66,23 @@ TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
         Goal : Test that the offsets are well done by adding different attributes with different size.
         */
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addAttr<long>("longAttr", 3);
-        Testop.addAttr<float>("floatAttr", 2.0);
-        Testop.addAttr<uint8_t>("uint8Attr", 5);
-        Testop.addAttr<long long>("llAttr", 10);
-        REQUIRE(Testop.getAttr<long>("longAttr") == 3);
-        REQUIRE(Testop.getAttr<float>("floatAttr") == 2.0);
-        REQUIRE(Testop.getAttr<uint8_t>("uint8Attr") == 5);
-        REQUIRE(Testop.getAttr<long long>("llAttr") == 10);
+        Testop.addAttr<long>("LongAttr", 3);
+        Testop.addAttr<float>("FloatAttr", 2.0);
+        Testop.addAttr<uint8_t>("Uint8Attr", 5);
+        Testop.addAttr<long long>("LlAttr", 10);
+        REQUIRE(Testop.getAttr<long>("LongAttr") == 3);
+        REQUIRE(Testop.getAttr<float>("FloatAttr") == 2.0);
+        REQUIRE(Testop.getAttr<uint8_t>("Uint8Attr") == 5);
+        REQUIRE(Testop.getAttr<long long>("LlAttr") == 10);
     }
 }
 
 TEST_CASE("[core/operator] GenericOp(type check)", "[Operator]") {
     SECTION("WRONG TYPE FOR GETTER") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addAttr<long>("longAttr", 3);
+        Testop.addAttr<long>("LongAttr", 3);
 
         // This line should raise a failled assert
-        REQUIRE_THROWS(Testop.getAttr<int>("longAttribute"));
+        REQUIRE_THROWS(Testop.getAttr<int>("LongAttribute"));
     }
 }
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index ed4afafe39a367ecabb25ff949eb3d03999d1ea9..d1b4e2e31e8c57e2c3eebd42019ba9f42c4d39e0 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -39,7 +39,9 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         REQUIRE(microGraph->outputNodes().size() == 1);
         REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv");
         REQUIRE(op->nbInputs() == 3);
-        REQUIRE(op->nbData() == 1);
+        REQUIRE(op->inputCategory(0) == InputCategory::Data);
+        REQUIRE(op->inputCategory(1) == InputCategory::Param);
+        REQUIRE(op->inputCategory(2) == InputCategory::OptionalParam);
         REQUIRE(op->nbOutputs() == 1);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(std::vector<std::size_t>({2,1,5,5}));
@@ -66,7 +68,13 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         microGraph->save("lstm", false, false);
 
         REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
-        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->inputCategory(0) == InputCategory::Data);
+        for (size_t i = 1; i < 9; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::Param);
+        }
+        for (size_t i = 9; i < 17; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::OptionalParam);
+        }
         REQUIRE(myLSTM->nbOutputs() == 2);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>();
@@ -94,7 +102,13 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         auto op = std::static_pointer_cast<OperatorTensor>(myLSTM->getOperator());
 
         REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
-        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->inputCategory(0) == InputCategory::Data);
+        for (size_t i = 1; i < 9; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::Param);
+        }
+        for (size_t i = 9; i < 17; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::OptionalParam);
+        }
         REQUIRE(myLSTM->nbOutputs() == 2);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
@@ -129,6 +143,6 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         REQUIRE(g->rootNode() == pop);
         g->save("lstm_expanded", true, true);
 
-        REQUIRE(g->getNodes().size() == 41);
+        REQUIRE(g->getNodes().size() == 33);
     }
 }
diff --git a/unit_tests/operator/Test_ReshapeImpl.cpp b/unit_tests/operator/Test_ReshapeImpl.cpp
index 5d28005eb40534742aae495948e5269373b81ad1..2685518b5c49f13ff4aee4202163e3aa3267aa5f 100644
--- a/unit_tests/operator/Test_ReshapeImpl.cpp
+++ b/unit_tests/operator/Test_ReshapeImpl.cpp
@@ -20,48 +20,105 @@ using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Reshape(forward)") {
     SECTION("1D Tensor") {
-        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array1D<float,6> {
-            {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,3> {
-            {
-                {1.0, 2.0, 3.0},
-                {4.0, 5.0, 6.0}
-            }
-        });
+        SECTION("Shape As Input") {
+            std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array1D<float,6> {
+                {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}
+            });
+            std::shared_ptr<Tensor> shape = std::make_shared<Tensor>(Array1D<float,2> {
+                {2, 3}
+            });
+            std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,3> {
+                {
+                    {1.0, 2.0, 3.0},
+                    {4.0, 5.0, 6.0}
+                }
+            });
 
-        std::shared_ptr<Node> myReshape = Reshape({2, 3});
-        auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
-        op->associateInput(0, input);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        myReshape->forward();
+            std::shared_ptr<Node> myReshape = Reshape();
+            auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+            op->associateInput(0, input);
+            op->associateInput(1, shape);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            myReshape->forward();
 
-        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+            REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        }
+        SECTION("Shape As Input") {
+            std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array1D<float,6> {
+                {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}
+            });
+            std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,3> {
+                {
+                    {1.0, 2.0, 3.0},
+                    {4.0, 5.0, 6.0}
+                }
+            });
+
+            std::shared_ptr<Node> myReshape = Reshape({2, 3});
+            auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+            op->associateInput(0, input);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            myReshape->forward();
+
+            REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        }
     }
-    SECTION("2D Tensor") {
-        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,3> {
-            {
-                {1.0, 2.0, 3.0},
-                {4.0, 5.0, 6.0}
-            }
+    SECTION("2D Tensor - Shape Input") {
+        SECTION("Shape As Input") {
+            std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,3> {
+                {
+                    {1.0, 2.0, 3.0},
+                    {4.0, 5.0, 6.0}
+                }
+
+            });
+            std::shared_ptr<Tensor> shape = std::make_shared<Tensor>(Array1D<float,2> {
+                {3, 2}
+            });
+            std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
+                {
+                    {1.0, 2.0},
+                    {3.0, 4.0},
+                    {5.0, 6.0}
+                }
+            });
+
+            std::shared_ptr<Node> myReshape = Reshape();
+            auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+            op->associateInput(0, input);
+            op->associateInput(1, shape);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            myReshape->forward();
+
+            REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        }
+        SECTION("Shape As Attribute") {
+            std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,3> {
+                {
+                    {1.0, 2.0, 3.0},
+                    {4.0, 5.0, 6.0}
+                }
 
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
-            {
-                {1.0, 2.0},
-                {3.0, 4.0},
-                {5.0, 6.0}
-            }
-        });
+            });
+            std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
+                {
+                    {1.0, 2.0},
+                    {3.0, 4.0},
+                    {5.0, 6.0}
+                }
+            });
 
-        std::shared_ptr<Node> myReshape = Reshape({3, 2});
-        auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
-        op->associateInput(0, input);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        myReshape->forward();
+            std::shared_ptr<Node> myReshape = Reshape({3, 2});
+            auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+            op->associateInput(0, input);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            myReshape->forward();
 
-        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+            REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        }
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_ShapeImpl.cpp b/unit_tests/operator/Test_ShapeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..45df89df061148fbfb892112e3c6d01edf27ffb4
--- /dev/null
+++ b/unit_tests/operator/Test_ShapeImpl.cpp
@@ -0,0 +1,86 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Shape.hpp"
+
+#include <cstdint>
+#include <memory>
+
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Shape(forward)", "[Shape][CPU]") {
+    SECTION("Default attributes") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,1,2,3,5> {
+            {
+                {
+                    {
+                        { 1,  2,  3,  4,  5},
+                        { 6,  7,  8,  9, 10},
+                        {11, 12, 13, 14, 15}
+                    },
+                    {
+                        {16, 17, 18, 19, 20},
+                        {21, 22, 23, 24, 25},
+                        {26, 27, 28, 29, 30}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,4> {
+            {1, 2, 3, 5}
+        });
+
+        std::shared_ptr<Node> myShape = Shape();
+        auto op = std::static_pointer_cast<OperatorTensor>(myShape -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myShape->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+
+    }
+    SECTION("Using attributes") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,1,2,3,5> {
+            {
+                {
+                    {
+                        { 1,  2,  3,  4,  5},
+                        { 6,  7,  8,  9, 10},
+                        {11, 12, 13, 14, 15}
+                    },
+                    {
+                        {16, 17, 18, 19, 20},
+                        {21, 22, 23, 24, 25},
+                        {26, 27, 28, 29, 30}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,2> {
+            {2, 3}
+        });
+
+        std::shared_ptr<Node> myShape = Shape(1, 2);
+        auto op = std::static_pointer_cast<OperatorTensor>(myShape -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myShape->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_SliceImpl.cpp b/unit_tests/operator/Test_SliceImpl.cpp
deleted file mode 100644
index 91ae92848b552a6038a4cb5f8dd3848b20ac2168..0000000000000000000000000000000000000000
--- a/unit_tests/operator/Test_SliceImpl.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <catch2/catch_test_macros.hpp>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/operator/Slice.hpp"
-
-using namespace Aidge;
-
-TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
-    SECTION("1D Tensor") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
-            {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,4> {
-            {0, 1, 2,-3}
-        });
-
-        std::shared_ptr<Node> mySlice = Slice({0}, {3}, {0});
-        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
-        mySlice->getOperator()->associateInput(0,input0);
-        mySlice->getOperator()->setDataType(DataType::Int32);
-        mySlice->getOperator()->setBackend("cpu");
-        mySlice->forward();
-
-        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
-        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
-        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
-    }
-
-    SECTION("2D Tensor") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array2D<int,2,10> {
-            {
-                { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<int,2,3> {
-            {
-                {-5,-6, 7},
-                {-5,-6, 7}
-            }
-        });
-
-        std::shared_ptr<Node> mySlice = Slice({0,5}, {1,7}, {0,1});
-        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
-        mySlice->getOperator()->associateInput(0,input0);
-        mySlice->getOperator()->setDataType(DataType::Int32);
-        mySlice->getOperator()->setBackend("cpu");
-        mySlice->forward();
-        // mySlice->getOperator()->output(0).print();
-        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
-        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
-        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
-    }
-
-    SECTION("3D Tensor") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<int,2,2,10> {
-            {
-                {
-                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                },
-                {
-                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,1,1,3> {
-            {
-                {
-                    { 4,-5,-6}
-                }
-            }
-        });
-
-        std::shared_ptr<Node> mySlice = Slice({0,1,4}, {0,1,6}, {0,1,2});
-        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
-        mySlice->getOperator()->associateInput(0,input0);
-        mySlice->getOperator()->setDataType(DataType::Int32);
-        mySlice->getOperator()->setBackend("cpu");
-        mySlice->forward();
-        // mySlice->getOperator()->output(0).print();
-        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
-        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
-        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
-    }
-
-    SECTION("4D Tensor") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
-            {
-                {
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    },
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    }
-                },
-                {
-                    {
-                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    },
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
-                    }
-                }
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
-            {
-                {
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    },
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    }
-                },
-                {
-                    {
-                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    },
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
-                    }
-                }
-            }
-        });
-
-        std::shared_ptr<Node> mySlice = Slice({0,0,0,0}, {1,1,1,9}, {0,1,2,3});
-        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
-        mySlice->getOperator()->associateInput(0,input0);
-        mySlice->getOperator()->setDataType(DataType::Int32);
-        mySlice->getOperator()->setBackend("cpu");
-        mySlice->forward();
-        // mySlice->getOperator()->output(0).print();
-        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
-        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
-        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
-    }
-}
diff --git a/unit_tests/operator/Test_SplitImpl.cpp b/unit_tests/operator/Test_SplitImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5f6f191fccbc0ee5331a9ccaf83563e169eb6abe
--- /dev/null
+++ b/unit_tests/operator/Test_SplitImpl.cpp
@@ -0,0 +1,119 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Split.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Split(forward)", "[Split][CPU]") {
+    std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,1,3,7,2> {
+        {
+            {
+                {{ 1,  2},{ 3,  4},{ 5,  6},{ 7,  8},{ 9, 10},{11, 12},{13, 14}},
+                {{15, 16},{17, 18},{19, 20},{21, 22},{23, 24},{25, 26},{27, 28}},
+                {{30, 31},{32, 33},{34, 35},{36, 37},{38, 39},{40, 41},{42, 43}}
+            }
+        }
+    });
+    SECTION("Default split") {
+        std::shared_ptr<Tensor> expectedOutput0 = std::make_shared<Tensor>(Array4D<int,1,3,2,2> {
+            {
+                {
+                    {{ 1,  2},{ 3,  4}},
+                    {{15, 16},{17, 18}},
+                    {{30, 31},{32, 33}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Array4D<int,1,3,2,2> {
+            {
+                {
+                    {{ 5,  6},{ 7,  8}},
+                    {{19, 20},{21, 22}},
+                    {{34, 35},{36, 37}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput2 = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
+            {
+                {
+                    {{ 9, 10},{11, 12},{13, 14}},
+                    {{23, 24},{25, 26},{27, 28}},
+                    {{38, 39},{40, 41},{42, 43}}
+                }
+            }
+        });
+        auto mySplit = Split(DimSize_t(3), int8_t(2)); // Split on axis 2 into 3 outputs
+        mySplit->getOperator()->associateInput(0, input);
+        mySplit->getOperator()->setBackend("cpu");
+        mySplit->getOperator()->setDataType(DataType::Int32);
+        mySplit->forward();
+
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(mySplit->getOperator())->getOutput(0) == *expectedOutput0);
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(mySplit->getOperator())->getOutput(1) == *expectedOutput1);
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(mySplit->getOperator())->getOutput(2) == *expectedOutput2);
+    }
+    SECTION("Split with different chunk size") {
+        std::shared_ptr<Tensor> expectedOutput0 = std::make_shared<Tensor>(Array4D<int,1,3,4,2> {
+            {
+                {
+                    {{ 1,  2},{ 3,  4},{ 5,  6},{ 7,  8}},
+                    {{15, 16},{17, 18},{19, 20},{21, 22}},
+                    {{30, 31},{32, 33},{34, 35},{36, 37}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Array4D<int,1,3,1,2> {
+            {
+                {
+                    {{ 9, 10}},
+                    {{23, 24}},
+                    {{38, 39}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput2 = std::make_shared<Tensor>(Array4D<int,1,3,2,2> {
+            {
+                {
+                    {{11, 12},{13, 14}},
+                    {{25, 26},{27, 28}},
+                    {{40, 41},{42, 43}}
+                }
+            }
+        });
+        auto mySplit = Split(DimSize_t(3), int8_t(2), {DimSize_t(4), DimSize_t(1), DimSize_t(2)});
+        mySplit->getOperator()->associateInput(0, input);
+        mySplit->getOperator()->setBackend("cpu");
+        mySplit->getOperator()->setDataType(DataType::Int32);
+        mySplit->forward();
+
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(mySplit->getOperator())->getOutput(0) == *expectedOutput0);
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(mySplit->getOperator())->getOutput(1) == *expectedOutput1);
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(mySplit->getOperator())->getOutput(2) == *expectedOutput2);
+    }
+    SECTION("Split with bad split attribute") {
+        auto mySplit = Split(DimSize_t(3), int8_t(2), {DimSize_t(4), DimSize_t(1), DimSize_t(3)});
+        mySplit->getOperator()->associateInput(0, input);
+        mySplit->getOperator()->setBackend("cpu");
+        mySplit->getOperator()->setDataType(DataType::Int32);
+        REQUIRE_THROWS(mySplit->forward());
+    }
+    SECTION("Split with bad outNumber") {
+        auto mySplit = Split(DimSize_t(8), int8_t(2));
+        mySplit->getOperator()->associateInput(0, input);
+        mySplit->getOperator()->setBackend("cpu");
+        mySplit->getOperator()->setDataType(DataType::Int32);
+        REQUIRE_THROWS(mySplit->forward());
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/recipes/Test_ExplicitTranspose.cpp b/unit_tests/recipes/Test_ExplicitTranspose.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0c0a46710d69606508a22e7b01dac708db9b8f34
--- /dev/null
+++ b/unit_tests/recipes/Test_ExplicitTranspose.cpp
@@ -0,0 +1,54 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/recipes/Recipes.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include <cstddef>
+
+using namespace Aidge;
+
+TEST_CASE("[ExplicitTranspose] conv") {
+    auto conv1 = Conv(3, 32, {3, 3}, "conv1");
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2");
+    auto conv3 = Conv(64, 10, {1, 1}, "conv3", {2, 2});
+
+    auto g1 = Sequential({
+        Producer({16, 3, 224, 224}, "dataProvider"),
+        conv1,
+        conv2,
+        conv3
+    });
+
+    g1->setDataFormat(DataFormat::NCHW);
+    conv2->getOperator()->setDataFormat(DataFormat::NHWC);
+
+    g1->save("explicitTranspose_before");
+    REQUIRE(g1->getNodes().size() == 10);
+    const auto initialNodes = g1->getNodes();
+
+    g1->forwardDims();
+    explicitTranspose(g1);
+
+    // Check that Transpose nodes were inserted
+    g1->save("explicitTranspose_after");
+    REQUIRE(g1->getNodes().size() == 12);
+
+    // Check that Transpose nodes are removed
+    conv2->getOperator()->setDataFormat(DataFormat::NCHW);
+    explicitTranspose(g1);
+
+    REQUIRE(g1->getNodes().size() == 10);
+    REQUIRE(g1->getNodes() == initialNodes);
+}
diff --git a/unit_tests/recipes/Test_FuseMulAdd.cpp b/unit_tests/recipes/Test_FuseMulAdd.cpp
index 4c6e3f9d563d2e74958e68f8876a49a8323f4403..9ea151039f07e5c688572d61b746d8fc26f1c3fe 100644
--- a/unit_tests/recipes/Test_FuseMulAdd.cpp
+++ b/unit_tests/recipes/Test_FuseMulAdd.cpp
@@ -42,8 +42,8 @@ TEST_CASE("[cpu/recipes] FuseMulAdd", "[FuseMulAdd][recipes]") {
     matmul0->addChild(add0, 0, 0);
     b0->addChild(add0, 0, 1);
 
-    add0->addChild(matmul1, 0, 0);
-    w1->addChild(matmul1, 0, 1);
+    add0->addChild(matmul1, 0, 1);
+    w1->addChild(matmul1, 0, 0);
 
     matmul1->addChild(add1, 0, 0);
     b1->addChild(add1, 0, 1);
@@ -56,14 +56,14 @@ TEST_CASE("[cpu/recipes] FuseMulAdd", "[FuseMulAdd][recipes]") {
             std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
     REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
     REQUIRE(((add0->getParent(0) == matmul0) && (add0->getParent(1) == b0)));
-    REQUIRE(((matmul1->getParent(0) == add0) && (matmul1->getParent(1) == w1)));
+    REQUIRE(((matmul1->getParent(1) == add0) && (matmul1->getParent(0) == w1)));
     REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
 
 	// Transform GraphView inplace
     fuseMulAdd(g);
 
 	// Check new GraphView
-	 std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
+	std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
 	REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
 	REQUIRE(newNodes.size() == 6);
 	for (const auto& node : newNodes) {
@@ -71,4 +71,4 @@ TEST_CASE("[cpu/recipes] FuseMulAdd", "[FuseMulAdd][recipes]") {
 	}
 }
 
-}  // namespace Aidge
\ No newline at end of file
+}  // namespace Aidge
diff --git a/unit_tests/recipes/Test_FuseToMetaOps.cpp b/unit_tests/recipes/Test_FuseToMetaOps.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9fceedf2feef0a3ed79b83a8494a1a2b49f77291
--- /dev/null
+++ b/unit_tests/recipes/Test_FuseToMetaOps.cpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <set>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+namespace Aidge {
+
+
+TEST_CASE("[cpu/recipes] FuseToMetaOps", "[FuseToMetaOps][recipes]") {
+    auto g1 = Sequential({
+        Producer({16, 3, 224, 224}, "dataProvider"),
+        Conv(3, 32, {3, 3}, "conv1"),
+        ReLU("relu1"),
+        Conv(32, 64, {3, 3}, "conv2"),
+        ReLU("relu2"),
+        Conv(64, 10, {1, 1}, "conv3")
+    });
+    g1->save("FuseToMetaOps_before");
+
+    // FIXME: GraphRegex also matches the Conv Producers, which are not in the query!
+    const auto nbFused = fuseToMetaOps(g1, "Conv->ReLU", "ConvReLU");
+    g1->save("FuseToMetaOps_after", true);
+
+    REQUIRE(nbFused == 2);
+}
+
+}  // namespace Aidge
diff --git a/unit_tests/recipes/Test_removeFlatten.cpp b/unit_tests/recipes/Test_removeFlatten.cpp
index 84099ac0b77a633893af6a7550464e539c95d806..24f5aa2e231b5204add1c8f87cdeb7a71175ea05 100644
--- a/unit_tests/recipes/Test_removeFlatten.cpp
+++ b/unit_tests/recipes/Test_removeFlatten.cpp
@@ -27,8 +27,8 @@ namespace Aidge {
 TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
   std::shared_ptr<Node> flatten =
       GenericOperator("Flatten", 1, 0, 1, "myFlatten");
-  std::shared_ptr<Node> fc0 = FC(10, 10, "FC_1");
-  std::shared_ptr<Node> fc1 = FC(10, 10, "FC_2");
+  std::shared_ptr<Node> fc0 = FC(10, 10, false, "FC_1");
+  std::shared_ptr<Node> fc1 = FC(10, 10, false, "FC_2");
   std::shared_ptr<Node> prod = Producer(std::array<DimSize_t, 10>(), "myProd");
 
   SECTION("flatten last layer : nothing removed because pattern searched is "
diff --git a/version.txt b/version.txt
index 0c62199f16ac1e2d7f7ae75b420c1231325dff4e..ee1372d33a29e27945406f0527f8af8e6ee119c9 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.2.1
+0.2.2