diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000000000000000000000000000000000000..7a01a2d6c0caf880738df8393567fb169a07be7e
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,6 @@
+include MANIFEST.in
+include LICENSE
+include README.md
+recursive-include aidge_core *
+include setup.py
+include version.txt
diff --git a/aidge_core/__init__.py b/aidge_core/__init__.py
index 4b5c448355a17fd4274ba45f5cd98afa70b1ae53..4234747b94b25b35a6836a36ad6331c1c8a4bc66 100644
--- a/aidge_core/__init__.py
+++ b/aidge_core/__init__.py
@@ -8,5 +8,6 @@ http://www.eclipse.org/legal/epl-2.0.
 SPDX-License-Identifier: EPL-2.0
 """
 from aidge_core.aidge_core import * # import so generated by PyBind
-from aidge_core.export import ExportNode, generate_file, generate_str
+from aidge_core.export_utils import ExportNode, generate_file, generate_str
 import aidge_core.utils
+from aidge_core.aidge_export_aidge import *
diff --git a/aidge_core/aidge_export_aidge/__init__.py b/aidge_core/aidge_export_aidge/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5d6f96b3300c0e86147baac30d7cae8a3a0b798
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/__init__.py
@@ -0,0 +1,8 @@
+from pathlib import Path
+
+# Constants
+FILE = Path(__file__).resolve()
+ROOT_EXPORT = FILE.parents[0]
+
+from .operator_export import *
+from .export import export
diff --git a/aidge_core/aidge_export_aidge/export.py b/aidge_core/aidge_export_aidge/export.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0e859b71e77bb03b95acb6ca75dcf09a8af0722
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/export.py
@@ -0,0 +1,104 @@
+import aidge_core
+import shutil
+import os
+from pathlib import Path
+from .utils import supported_operators, OPERATORS_REGISTRY
+from . import ROOT_EXPORT
+
+
+from aidge_core import ExportNode, generate_file
+
+
+
+def export(export_folder: str,
+           graph_view: aidge_core.GraphView,
+           enable_python_binding: bool = True,
+           ):
+    export_folder_path = Path(export_folder)
+    export_name = export_folder_path.name
+
+    ### Creating export folder ###
+    # Create export directory
+    os.makedirs(export_folder, exist_ok=True)
+
+    ### Copy static files ###
+    shutil.copytree(ROOT_EXPORT / "static/include",
+                    export_folder_path / "include", dirs_exist_ok=True)
+    shutil.copytree(ROOT_EXPORT / "static/cmake",
+                    export_folder_path / "cmake", dirs_exist_ok=True)
+    shutil.copyfile(ROOT_EXPORT / "static/CMakeLists.txt",
+                    export_folder_path / "CMakeLists.txt")
+    shutil.copyfile(ROOT_EXPORT / "static/version.txt",
+                    export_folder_path / "version.txt")
+    shutil.copyfile(ROOT_EXPORT / "static/README.md",
+                    export_folder_path / "README.md")
+    shutil.copyfile(ROOT_EXPORT / "static/main.cpp",
+                    export_folder_path / "main.cpp")
+    shutil.copyfile(ROOT_EXPORT / "static/export-config.cmake.in",
+                    export_folder_path / f"{export_name}-config.cmake.in")
+
+    # Create project_name file
+    with open(export_folder_path / "project_name.txt", "w") as f:
+        f.write(export_name)
+
+    # Add files related to python binding if enabled
+    if enable_python_binding:
+        os.makedirs(export_folder_path / "python_binding", exist_ok=True)
+        generate_file(
+            export_folder_path / "python_binding/pybind.cpp",
+            ROOT_EXPORT / "templates/pybind.jinja",
+            name=export_name,
+        )
+        # TODO: Add a main.py file ?
+
+    ### Generating an export for each node and dnn file ###
+    list_configs = []  # List of headers to include in dnn.cpp to access attributes and parameters
+    list_actions = []  # List of string to construct graph
+    set_operator = set()
+    # Queue of Aidge nodes to explore, guarantees a topological exploration of the graph
+    open_nodes = list(graph_view.get_input_nodes())
+    # List of Aidge nodes already explored
+    closed_nodes = []
+
+    while open_nodes:
+        node = open_nodes.pop(0)
+        if node in closed_nodes:
+            continue  # Node already converted, moving on ...
+        parents_not_converted = False
+        # Check all parents have been converted
+        for parent in node.get_parents():
+            if parent is not None and \
+                    parent not in closed_nodes:
+                # If parents have not been converted, push back current node
+                if not parents_not_converted:
+                    open_nodes.insert(0, node)
+                    parents_not_converted = True
+                # Add to the stack the not converted parent as next node to convert
+                open_nodes.insert(0, parent)
+        if parents_not_converted:
+
+            continue
+        # Next nodes to treat are children of current node
+        open_nodes += list(node.get_children())
+
+        if node.type() in supported_operators():
+            set_operator.add(node.type())
+            op = OPERATORS_REGISTRY[node.type()](node)
+
+            # TODO: list_configs and list_actions don't need to be passed by argument
+            # Export the configuration
+            list_configs = op.export(export_folder_path, list_configs)
+
+            # Add forward kernel
+            list_actions = op.forward(list_actions)
+        else:
+            raise RuntimeError(f"Operator: {node.type()} is not supported")
+        closed_nodes.append(node)
+    # Generate full dnn.cpp
+    aidge_core.generate_file(
+        export_folder_path / "src/dnn.cpp",
+        ROOT_EXPORT / "templates/dnn.jinja",
+        headers=list_configs,
+        operators=set_operator,
+        actions=list_actions,
+    )
diff --git a/aidge_core/aidge_export_aidge/operator_export/__init__.py b/aidge_core/aidge_export_aidge/operator_export/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..37d674ac84f72d643ba1a628a86fbcde9780f4a4
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/__init__.py
@@ -0,0 +1,14 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+from pathlib import Path
+
+DIR_PATH = Path(__file__).parent
+modules = [Path(module).stem for module in DIR_PATH.glob("*.py")]
+__all__ = [ f for f in modules if f != "__init__"]
diff --git a/aidge_core/aidge_export_aidge/operator_export/conv.py b/aidge_core/aidge_export_aidge/operator_export/conv.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb7092fb18982a3cc3f11a1ca47394ce2f77d0b6
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/conv.py
@@ -0,0 +1,31 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import ExportNode, generate_file, generate_str
+from pathlib import Path
+
+@operator_register("Conv")
+class Conv(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        include_path = f"attributes/{self.name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/attributes/conv.jinja",
+            name=self.name,
+            **self.attributes
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT /"templates/graph_ctor/conv.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/fc.py b/aidge_core/aidge_export_aidge/operator_export/fc.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcd528528707dc6eec917790b46e509c2984fa66
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/fc.py
@@ -0,0 +1,37 @@
+from aidge_core.aidge_export_aidge.utils import operator_register,parse_node_input
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import ExportNode, generate_file, generate_str
+from pathlib import Path
+
+@operator_register("FC")
+class FC(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+
+    def export(self, export_folder:Path, list_configs:list):
+
+
+        include_path = f"attributes/{self.name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/attributes/fc.jinja",
+            name=self.name,
+            InChannels=self.inputs_dims[1][1],
+            OutChannels=self.operator.out_channels(),
+            **self.attributes
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/fc.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/maxpooling.py b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c63e71b423b90f62536cafd25c61101e76e0562
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
@@ -0,0 +1,32 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import ExportNode, generate_file, generate_str
+from pathlib import Path
+
+@operator_register("MaxPooling")
+class MaxPooling(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+
+    def export(self, export_folder:Path, list_configs:list):
+        include_path = f"attributes/{self.name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/attributes/maxpooling.jinja",
+            name=self.name,
+            **self.attributes
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/maxpooling.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/producer.py b/aidge_core/aidge_export_aidge/operator_export/producer.py
new file mode 100644
index 0000000000000000000000000000000000000000..c04019043a6cd7d1a0de95a0ba32f2bb7a3a4bec
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/producer.py
@@ -0,0 +1,65 @@
+from aidge_core.aidge_export_aidge.utils import operator_register
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import DataType, ExportNode, generate_file, generate_str
+import numpy as np
+from pathlib import Path
+
+# Convert aidge datatype to C++ type
+datatype_converter = {
+    DataType.Float64 : "double",
+    DataType.Float32 : "float",
+    DataType.Float16 : "half_float::half",
+    DataType.Int8    : "int8_t",
+    DataType.Int16   : "int16_t",
+    DataType.Int32   : "int32_t",
+    DataType.Int64   : "int64_t",
+    DataType.UInt8   : "uint8_t",
+    DataType.UInt16  : "uint16_t",
+    DataType.UInt32  : "uint32_t",
+    DataType.UInt64  : "uint64_t"
+}
+
+
+@operator_register("Producer")
+class Producer(ExportNode):
+    """
+    If there is a standardization of the export operators
+    then this class should be just an inheritance of ProducerCPP
+    """
+    def __init__(self, node):
+        super().__init__(node)
+        child, in_idx = self.node.output(0)[0]
+        self.tensor_name = f"{child.name()}_{in_idx}"
+        self.values = np.array(self.operator.get_output(0))
+
+    def export(self, export_folder:Path, list_configs:list):
+        assert(len(self.node.output(0)) == 1)
+
+        include_path = f"parameters/{self.tensor_name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        aidge_tensor = self.operator.get_output(0)
+        aidge_type = aidge_tensor.dtype()
+        if aidge_type in datatype_converter:
+            datatype = datatype_converter[aidge_type]
+        else:
+            raise RuntimeError(f"No conversion found for data type {aidge_type}.")
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/parameter.jinja",
+            dims = aidge_tensor.dims(),
+            data_t = datatype, # TODO : get data from producer
+            name = self.tensor_name,
+            values = str(aidge_tensor)
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/producer.jinja",
+            name=self.name,
+            tensor_name=self.tensor_name,
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/relu.py b/aidge_core/aidge_export_aidge/operator_export/relu.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0f4f6afdc35737a8967f51c1859bda0c9773f88
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/relu.py
@@ -0,0 +1,21 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core import ExportNode, generate_str
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from pathlib import Path
+
+@operator_register("ReLU")
+class ReLU(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/relu.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/sub.py b/aidge_core/aidge_export_aidge/operator_export/sub.py
new file mode 100644
index 0000000000000000000000000000000000000000..efcdd0924fbcf6944b0fb95a967e1a3e16ccc3c5
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/sub.py
@@ -0,0 +1,21 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core import ExportNode, generate_str
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from pathlib import Path
+
+@operator_register("Sub")
+class Sub(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/sub.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/static/CMakeLists.txt b/aidge_core/aidge_export_aidge/static/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c52e09e82699b1f2c0f8e10ed9f4ab83fbb0f10f
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/CMakeLists.txt
@@ -0,0 +1,148 @@
+cmake_minimum_required(VERSION 3.15)
+
+
+file(STRINGS "${CMAKE_SOURCE_DIR}/version.txt" version)
+file(STRINGS "${CMAKE_SOURCE_DIR}/project_name.txt" project)
+
+message(STATUS "Project name: ${project}")
+message(STATUS "Project version: ${version}")
+
+# Note : project name is {project} and python module name is also {project}
+set(module_name _${project}) # target name
+
+project(${project})
+set(CMAKE_CXX_STANDARD 14)
+
+##############################################
+# Define options
+option(PYBIND "python binding" ON)
+option(WERROR "Warning as error" OFF)
+option(TEST "Enable tests" ON)
+option(COVERAGE "Enable coverage" OFF)
+option(ENABLE_ASAN "Enable ASan (AddressSanitizer) for runtime analysis of memory use (over/underflow, memory leak, ...)" OFF)
+
+##############################################
+# Import utils CMakeLists
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
+include(PybindModuleCreation)
+
+if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
+    Include(CodeCoverage)
+endif()
+
+##############################################
+# Find system dependencies
+find_package(aidge_core REQUIRED)
+# Example of adding dependency to backend CPU
+# find_package(aidge_backend_cpu REQUIRED)
+
+##############################################
+# Create target and set properties
+file(GLOB_RECURSE src_files "src/*.cpp")
+file(GLOB_RECURSE inc_files "include/*.hpp")
+
+add_library(${module_name} ${src_files} ${inc_files})
+target_link_libraries(${module_name}
+    PUBLIC
+        _aidge_core # _ is added because we link the target not the project
+        # _aidge_backend_cpu  # Example of adding dependency to backend CPUs
+)
+
+#Set target properties
+set_property(TARGET ${module_name} PROPERTY POSITION_INDEPENDENT_CODE ON)
+
+if( ${ENABLE_ASAN} )
+    message("Building ${module_name} with ASAN.")
+    set(SANITIZE_FLAGS -fsanitize=address -fno-omit-frame-pointer)
+    target_link_libraries(${module_name}
+        PUBLIC
+            -fsanitize=address
+    )
+    target_compile_options(${module_name}
+        PRIVATE
+            ${SANITIZE_FLAGS}
+    )
+endif()
+
+target_include_directories(${module_name}
+    PUBLIC
+        $<INSTALL_INTERFACE:include>
+        $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+    PRIVATE
+        ${CMAKE_CURRENT_SOURCE_DIR}/src
+)
+
+# PYTHON BINDING
+if (PYBIND)
+    generate_python_binding(${project} ${module_name})
+
+    # Handles Python + pybind11 headers dependencies
+    target_link_libraries(${module_name}
+        PUBLIC
+            pybind11::pybind11
+        )
+endif()
+
+target_compile_features(${module_name} PRIVATE cxx_std_14)
+
+target_compile_options(${module_name} PRIVATE
+    $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
+    -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror>>)
+target_compile_options(${module_name} PRIVATE
+    $<$<CXX_COMPILER_ID:MSVC>:
+    /W4>)
+
+if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
+    append_coverage_compiler_flags()
+endif()
+
+##############################################
+# Installation instructions
+
+include(GNUInstallDirs)
+set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR}/cmake/${project})
+
+install(TARGETS ${module_name} EXPORT ${project}-targets
+  LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+  ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+  RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+)
+
+install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+
+#Export the targets to a script
+
+install(EXPORT ${project}-targets
+ FILE "${project}-targets.cmake"
+ DESTINATION ${INSTALL_CONFIGDIR}
+ COMPONENT ${module_name}
+)
+
+#Create a ConfigVersion.cmake file
+include(CMakePackageConfigHelpers)
+write_basic_package_version_file(
+    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config-version.cmake"
+    VERSION ${version}
+    COMPATIBILITY AnyNewerVersion
+)
+
+configure_package_config_file("${project}-config.cmake.in"
+    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config.cmake"
+    INSTALL_DESTINATION ${INSTALL_CONFIGDIR}
+)
+
+#Install the config, configversion and custom find modules
+install(FILES
+    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config.cmake"
+    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config-version.cmake"
+    DESTINATION ${INSTALL_CONFIGDIR}
+)
+
+##############################################
+## Exporting from the build tree
+export(EXPORT ${project}-targets
+    FILE "${CMAKE_CURRENT_BINARY_DIR}/${project}-targets.cmake")
+
+# Compile executable
+add_executable(main main.cpp)
+target_link_libraries(main PUBLIC _aidge_core ${module_name})
diff --git a/aidge_core/aidge_export_aidge/static/README.md b/aidge_core/aidge_export_aidge/static/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1ce48d5274cbc2f007bde7c7be01e41e617cb19a
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/README.md
@@ -0,0 +1,5 @@
+To compile:
+
+> mkdir build && cd build
+> cmake -DCMAKE_INSTALL_PREFIX:PATH=<path/to/install/folder> ..
+> make all install
diff --git a/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake b/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..87e70fc38c9e4ec4ddb44cbe5d7fb2a31c2e94d6
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
@@ -0,0 +1,21 @@
+function(generate_python_binding name target_to_bind) 
+    add_definitions(-DPYBIND)
+    Include(FetchContent)
+
+    FetchContent_Declare(
+    PyBind11
+    GIT_REPOSITORY https://github.com/pybind/pybind11.git
+    GIT_TAG        v2.10.4 # or a later release
+    )
+
+    # Use the new FindPython mode, recommended. Requires CMake 3.15+
+    find_package(Python COMPONENTS Interpreter Development)
+    FetchContent_MakeAvailable(PyBind11)
+
+    message(STATUS "Creating binding for module ${name}")
+    file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
+
+    pybind11_add_module(${name} MODULE ${pybind_src_files} NO_EXTRAS) # NO_EXTRAS required for pip install
+    target_include_directories(${name} PUBLIC "python_binding")
+    target_link_libraries(${name} PUBLIC ${target_to_bind})
+endfunction()
diff --git a/aidge_core/aidge_export_aidge/static/export-config.cmake.in b/aidge_core/aidge_export_aidge/static/export-config.cmake.in
new file mode 100644
index 0000000000000000000000000000000000000000..f3604be11c27d86caf1ad8a48b333b9bd8f30625
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/export-config.cmake.in
@@ -0,0 +1,5 @@
+@PACKAGE_INIT@
+
+include(${CMAKE_CURRENT_LIST_DIR}/@project@-config-version.cmake)
+
+include(${CMAKE_CURRENT_LIST_DIR}/@project@-targets.cmake)
diff --git a/aidge_core/aidge_export_aidge/static/include/dnn.hpp b/aidge_core/aidge_export_aidge/static/include/dnn.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3a4d5c02eceee1054c93b8ad635a71d3d04ac2fc
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/include/dnn.hpp
@@ -0,0 +1,17 @@
+#ifndef DNN_HPP
+#define DNN_HPP
+#include <aidge/graph/GraphView.hpp>
+/**
+ * @brief This file contains all of what is related to the construction of the
+ * neural network
+ *
+ */
+
+/**
+ * @brief This function generates the exported Aidge::GraphView.
+ *
+ * @return std::shared_ptr<Aidge::GraphView>
+ */
+std::shared_ptr<Aidge::GraphView> generateModel();
+
+#endif /* DNN_HPP */
diff --git a/aidge_core/aidge_export_aidge/static/main.cpp b/aidge_core/aidge_export_aidge/static/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ab8bac1851b6d2dae4bf97bd3af10e19e0b71c1e
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/main.cpp
@@ -0,0 +1,16 @@
+#include <iostream>
+#include <aidge/backend/cpu.hpp>
+
+#include "include/dnn.hpp"
+
+int main()
+{
+
+    std::cout << "BEGIN" << std::endl;
+
+    std::shared_ptr<Aidge::GraphView> graph = generateModel();
+
+    std::cout << "END" << std::endl;
+
+    return 0;
+}
diff --git a/aidge_core/aidge_export_aidge/static/project_name.txt b/aidge_core/aidge_export_aidge/static/project_name.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d5637593fe045bb602bd181bf0f242f0943fb9fd
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/project_name.txt
@@ -0,0 +1 @@
+export
diff --git a/aidge_core/aidge_export_aidge/static/python_binding/pybind.cpp b/aidge_core/aidge_export_aidge/static/python_binding/pybind.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..072fc4cd6012996a4fcda1d18b8244209c69797a
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/python_binding/pybind.cpp
@@ -0,0 +1,14 @@
+#include <pybind11/pybind11.h>
+
+#include "dnn.hpp"
+
+namespace py = pybind11;
+
+
+void init_export(py::module& m){
+    m.def("generate_model", generateModel);
+}
+
+PYBIND11_MODULE(export, m) {
+    init_export(m);
+}
diff --git a/aidge_core/aidge_export_aidge/static/version.txt b/aidge_core/aidge_export_aidge/static/version.txt
new file mode 100644
index 0000000000000000000000000000000000000000..77d6f4ca23711533e724789a0a0045eab28c5ea6
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/version.txt
@@ -0,0 +1 @@
+0.0.0
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..48d07e8db8d5fb116148e9d41100fffa01fcf622
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
@@ -0,0 +1,17 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+#define _{{name|upper}}_IN_CHANNELS  {{InChannels}}
+#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}}
+
+{% for i in range(KernelDims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}}
+{%- endfor %}
+{% for i in range(StrideDims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}}
+{%- endfor %}
+{% for i in range(DilationDims|length) %}
+#define _{{name|upper}}_DILATION_{{i}} {{DilationDims[i]}}
+{%- endfor %}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..e292f9b611978877c47b15e91f926f30d27a1cc5
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
@@ -0,0 +1,7 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+#define _{{name|upper}}_IN_CHANNELS  {{InChannels}}
+#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..d258f580e6ff9c523a87b834fdccf2f3b14fb133
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
@@ -0,0 +1,13 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+{% for i in range(KernelDims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}}
+{%- endfor %}
+{% for i in range(StrideDims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}}
+{%- endfor %}
+
+#define _{{name|upper}}_CEIL_MODE {{CeilMode|int}}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/dnn.jinja b/aidge_core/aidge_export_aidge/templates/dnn.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..5da46b2d8a439a359dfb1c7ec8ebc18e8d516767
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/dnn.jinja
@@ -0,0 +1,36 @@
+/********************************************************************************
+ * This file has been generated by the Aidge export.
+ ********************************************************************************/
+
+/*** STD INCLUDES ***/
+#include <memory>  // std::shared_ptr
+
+/*** AIDGE INCLUDES ***/
+#include <aidge/graph/GraphView.hpp>  // Aidge::GraphView
+#include <aidge/graph/Node.hpp>       // Aidge::Node
+#include <aidge/graph/OpArgs.hpp>     // Aidge::Sequential
+
+/*** AIDGE OPERATORS ***/
+{%- for operator in operators %}
+#include <aidge/operator/{{operator}}.hpp>
+{%- endfor %}
+
+/*** OPERATOR ATTRIBUTES & PARAMETERS ***/
+{%- for header in headers %}
+#include "{{ header }}"
+{%- endfor %}
+
+/*** HEADER ***/
+#include "dnn.hpp"
+
+
+std::shared_ptr<Aidge::GraphView> generateModel() {
+    /*** BUILDING GRAPH ***/
+    std::shared_ptr<Aidge::GraphView> graph = std::make_shared<Aidge::GraphView>();
+
+    {%- for action in actions %}
+    {{ action }}
+    {%- endfor %}
+
+    return graph;
+}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8e841ea2a10c71b884736dcbd7cfd03b52c5ad4f
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
@@ -0,0 +1,7 @@
+{# NOTE: Trying a shorter notation like {%- for input in inputs if input[0] %}
+would mess up loop.index, since inputs that are None would no longer be counted! #}
+{%- for input in inputs %}
+{%- if input[0] %}
+{{input[0]}}->addChild({{name}}, {{input[1]}}, {{loop.index - 1}}); {# NOTE: loop.index begin at 1 #}
+{%- endif %}
+{%- endfor %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..a805f8065e87244bf0546ca42d294b86f144a26d
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
@@ -0,0 +1,26 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Conv(
+            _{{name|upper}}_IN_CHANNELS,
+            _{{name|upper}}_OUT_CHANNELS,
+            {
+            {%- for i in range(KernelDims|length) -%}
+                _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            "{{name}}",
+            {
+            {%- for i in range(StrideDims|length) -%}
+                _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            {
+            {%- for i in range(DilationDims|length) -%}
+                _{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            }
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..df6dbc83492174fc49348b8073deb47a5deca313
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja
@@ -0,0 +1,11 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::FC(
+            _{{name|upper}}_IN_CHANNELS,
+            _{{name|upper}}_OUT_CHANNELS,
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..c6587c128509712e1a8e903e7484476548e9347d
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
@@ -0,0 +1,20 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::MaxPooling(
+            {
+            {%- for i in range(KernelDims|length) -%}
+                _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            "{{name}}",
+            {
+            {%- for i in range(StrideDims|length) -%}
+                _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            _{{name|upper}}_CEIL_MODE
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/producer.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/producer.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8e0a465a044ebfcc249206f9f5886c7a38fc3252
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/producer.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Producer(
+            {{tensor_name}},
+            "{{name}}"
+        );
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/relu.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/relu.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8fd58a30bddd39647dd3b25b2982e67174220381
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/relu.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::ReLU(
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/sub.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/sub.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..d9417751e4117cb296ec874d946b93c2c64df614
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/sub.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Sub(
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/parameter.jinja b/aidge_core/aidge_export_aidge/templates/parameter.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..11a407cc89f72f24167871a594decc6d90ab489d
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/parameter.jinja
@@ -0,0 +1,11 @@
+#ifndef EXPORT_PARAMETERS_{{name|upper}}_H
+#define EXPORT_PARAMETERS_{{name|upper}}_H
+
+#include <aidge/data/Tensor.hpp>
+#include <memory>
+
+std::shared_ptr<Aidge::Tensor> {{name}} = std::make_shared<Aidge::Tensor>(Aidge::Array{{dims|length}}D<{{data_t}}, {{ dims|join(", ") }}> {
+{{ values }}
+});
+
+#endif /* EXPORT_PARAMETERS_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/pybind.jinja b/aidge_core/aidge_export_aidge/templates/pybind.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..9b48cc97f242813cc33b77bfc028e85c08b0cec7
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/pybind.jinja
@@ -0,0 +1,14 @@
+#include <pybind11/pybind11.h>
+
+#include "dnn.hpp"
+
+namespace py = pybind11;
+
+
+void init_{{name}}(py::module& m){
+    m.def("generate_model", generateModel);
+}
+
+PYBIND11_MODULE({{name}}, m) {
+    init_{{name}}(m);
+}
diff --git a/aidge_core/aidge_export_aidge/utils/__init__.py b/aidge_core/aidge_export_aidge/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecdf2aec2692a48e108d5f4ad05ed05803319525
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/utils/__init__.py
@@ -0,0 +1,11 @@
+from .operator_registry import *
+
+def parse_node_input(node_inputs: list) -> list:
+    """Parse node inputs in order to adapt the list for Jinja.
+
+    :param node_inputs: return of node.inputs()
+    :type node_inputs: list of tuple of aidge_core.Node, output idx.
+    :return: list of tuple of node name, output idx.
+    :rtype: list
+    """
+    return [None if parent_node is None else (parent_node.name(), outId) for parent_node, outId in node_inputs]
diff --git a/aidge_core/aidge_export_aidge/utils/operator_registry.py b/aidge_core/aidge_export_aidge/utils/operator_registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd6fbaaceeba9c2125b38354eca9cc116acd29b1
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/utils/operator_registry.py
@@ -0,0 +1,18 @@
+OPERATORS_REGISTRY = {}
+
+def operator_register(*args):
+
+    key_list = [arg for arg in args]
+
+    def decorator(operator):
+        def wrapper(*args, **kwargs):
+            return operator(*args, **kwargs)
+
+        for key in key_list:
+            OPERATORS_REGISTRY[key] = operator
+
+        return wrapper
+    return decorator
+
+def supported_operators():
+    return list(OPERATORS_REGISTRY.keys())
diff --git a/aidge_core/export/code_generation.py b/aidge_core/export/code_generation.py
deleted file mode 100644
index b18b5476f8e083bcbe3d4f6c4a57132ebe7b780f..0000000000000000000000000000000000000000
--- a/aidge_core/export/code_generation.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-from jinja2 import Environment, FileSystemLoader
-
-
-def generate_file(file_path: str, template_path: str, **kwargs) -> None:
-    """Generate a file at `file_path` using the jinja template located at `file_path`.
-
-    kwargs are used to fill the template.
-
-    :param file_path: path where to generate the file
-    :type file_path: str
-    :param template_path: Path to the template to use for code generation
-    :type template_path: str
-    """
-    # Get directory name of the file
-    dirname = os.path.dirname(file_path)
-
-    # If directory doesn't exist, create it
-    if not os.path.exists(dirname):
-        os.makedirs(dirname)
-
-    # Get directory name and name of the template
-    template_dir = os.path.dirname(template_path)
-    template_name = os.path.basename(template_path)
-
-    # Select template
-    template = Environment(loader=FileSystemLoader(
-        template_dir)).get_template(template_name)
-
-    # Generate file
-    content = template.render(kwargs)
-    with open(file_path, mode="w", encoding="utf-8") as message:
-        message.write(content)
-
-def generate_str(template_path:str, **kwargs) -> str:
-    """Generate a string using the jinja template located at `file_path`.
-    kwargs are used to fill the template.
-
-    :param template_path: Path to the template to use for code generation
-    :type template_path: str
-    :return: A string of the interpreted template
-    :rtype: str
-    """
-    dirname = os.path.dirname(template_path)
-    filename = os.path.basename(template_path)
-    template = Environment(loader=FileSystemLoader(dirname)).get_template(filename)
-    return template.render(kwargs)
diff --git a/aidge_core/export/__init__.py b/aidge_core/export_utils/__init__.py
similarity index 100%
rename from aidge_core/export/__init__.py
rename to aidge_core/export_utils/__init__.py
diff --git a/aidge_core/export_utils/code_generation.py b/aidge_core/export_utils/code_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..a02fc0966702cec7a2cbe33f8411bb71e3035e90
--- /dev/null
+++ b/aidge_core/export_utils/code_generation.py
@@ -0,0 +1,46 @@
+from pathlib import Path
+from jinja2 import Environment, FileSystemLoader
+from typing import Union
+
+
+def generate_file(file_path: Union[Path, str], template_path: Union[Path, str], **kwargs) -> None:
+    """Generate a file at `file_path` using the jinja template located at `template_path`.
+
+    kwargs are used to fill the template.
+
+    :param file_path: path where to generate the file
+    :type file_path: pathlib.Path or str
+    :param template_path: Path to the template to use for code generation
+    :type template_path: pathlib.Path or str
+    """
+    # Convert str -> Path for compatibility !
+    if isinstance(file_path, str):
+        file_path = Path(file_path)
+    if isinstance(template_path, str):
+        template_path = Path(template_path)
+    # Make dir
+    file_path.parent.mkdir(parents=True, exist_ok=True)
+
+    # Select template
+    template = Environment(loader=FileSystemLoader(
+        template_path.parent)).get_template(template_path.name)
+
+    # Generate file
+    with open(file_path, mode="w", encoding="utf-8") as file:
+        file.write(template.render(kwargs))
+
+
+def generate_str(template_path: Union[Path, str], **kwargs) -> str:
+    """Generate a string using the jinja template located at `template_path`.
+    kwargs are used to fill the template.
+
+    :param template_path: Path to the template to use for code generation
+    :type template_path: pathlib.Path or str
+    :return: A string of the interpreted template
+    :rtype: str
+    """
+    # Convert str -> Path for compatibility !
+    if isinstance(template_path, str):
+        template_path = Path(template_path)
+    return Environment(loader=FileSystemLoader(
+        template_path.parent)).get_template(template_path.name).render(kwargs)
diff --git a/aidge_core/export/node_export.py b/aidge_core/export_utils/node_export.py
similarity index 100%
rename from aidge_core/export/node_export.py
rename to aidge_core/export_utils/node_export.py
diff --git a/aidge_core/unit_tests/static/main.cpp b/aidge_core/unit_tests/static/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..06171e2a036a18b0dea3dca40de34c296d99222d
--- /dev/null
+++ b/aidge_core/unit_tests/static/main.cpp
@@ -0,0 +1,19 @@
+/*
+Example main.cpp used to test aidge export.
+This file is copied in the test export.
+*/
+#include <iostream>
+
+#include "include/dnn.hpp"
+
+int main()
+{
+
+    std::cout << "BEGIN" << std::endl;
+
+    std::shared_ptr<Aidge::GraphView> graph = generateModel();
+
+    std::cout << "END" << std::endl;
+
+    return 0;
+}
diff --git a/aidge_core/unit_tests/test_export.py b/aidge_core/unit_tests/test_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..3061d7940603b8eeca2c07368bc1bbd6756fa1f3
--- /dev/null
+++ b/aidge_core/unit_tests/test_export.py
@@ -0,0 +1,83 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+from functools import reduce
+import pathlib
+import os
+import sys
+import subprocess
+import shutil
+import numpy as np
+
+def initFiller(model):
+    # Initialize parameters (weights and biases)
+    for node in model.get_nodes():
+        if node.type() == "Producer":
+            prod_op = node.get_operator()
+            value = prod_op.get_output(0)
+            value.set_backend("cpu")
+            tuple_out = node.output(0)[0]
+            # The current test network has no Conv layers; these branches are kept so the filler also works on conv models.
+            if tuple_out[0].type() == "Conv" and tuple_out[1]==1:
+                # Conv weight
+                aidge_core.xavier_uniform_filler(value)
+            elif tuple_out[0].type() == "Conv" and tuple_out[1]==2:
+                # Conv bias
+                aidge_core.constant_filler(value, 0.01)
+            elif tuple_out[0].type() == "FC" and tuple_out[1]==1:
+                # FC weight
+                aidge_core.normal_filler(value)
+            elif tuple_out[0].type() == "FC" and tuple_out[1]==2:
+                # FC bias
+                aidge_core.constant_filler(value, 0.01)
+            else:
+                pass
+
+
+class test_export(unittest.TestCase):
+    """Test aidge export
+    """
+    def setUp(self):
+        self.EXPORT_PATH = pathlib.Path("myexport")
+    def tearDown(self):
+        pass
+
+    def test_generate_export(self):
+        # Create model
+
+        model = aidge_core.sequential([
+            aidge_core.FC(in_channels=32*32*3, out_channels=512, name="InputNode"),
+            aidge_core.ReLU(name="Relu0"),
+            aidge_core.FC(in_channels=512, out_channels=256, name="FC1"),
+            aidge_core.ReLU(name="Relu1"),
+            aidge_core.FC(in_channels=256, out_channels=128, name="FC2"),
+            aidge_core.ReLU(name="Relu2"),
+            aidge_core.FC(in_channels=128, out_channels=10, name="OutputNode"),
+        ])
+
+        initFiller(model)
+
+        # Export model
+        aidge_core.export(self.EXPORT_PATH, model)
+        self.assertTrue(self.EXPORT_PATH.is_dir(), "Export folder has not been generated")
+        os.makedirs(self.EXPORT_PATH / "build", exist_ok=True)
+
+        # Test compilation of export
+        install_path = os.path.join(sys.prefix, "lib", "libAidge")  if "AIDGE_INSTALL" not in os.environ else os.environ["AIDGE_INSTALL"]
+
+        shutil.copyfile(pathlib.Path(__file__).parent / "static/main.cpp", self.EXPORT_PATH / "main.cpp")
+
+        subprocess.check_call(['cmake', str(self.EXPORT_PATH.absolute()), f'-DCMAKE_INSTALL_PREFIX:PATH={install_path}'], cwd=str(self.EXPORT_PATH / "build"))
+        subprocess.check_call(['make', 'all', 'install'], cwd=str(self.EXPORT_PATH / "build"))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py
index 6e0c1f9b9a0828e266ef3bf19ee75df3e275b282..26d60f2fbaf0f3903baf191cf0a2ad5550fb3275 100644
--- a/aidge_core/unit_tests/test_impl.py
+++ b/aidge_core/unit_tests/test_impl.py
@@ -41,6 +41,7 @@ class test_OperatorImpl(unittest.TestCase):
         generic_matmul_op = matmul.get_operator()
         generic_matmul_op.set_forward_dims(lambda x: x)
         generic_matmul_op.set_impl(testImpl(generic_matmul_op))
+        generic_matmul_op.set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
         generic_matmul_op.forward()
         self.assertEqual(GLOBAL_CPT, 1)
 
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index 787385a803e9bd1d983155cfdecffca5542b8345..7c3bc0f6f68506c02af1723b263455a9c72b1f3a 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -34,10 +34,8 @@ class test_attributes(unittest.TestCase):
     def test_fc(self):
         in_channels = 4
         out_channels = 8
-        nb_bias = True
-        fc_op = aidge_core.FC(in_channels, out_channels, nb_bias).get_operator()
+        fc_op = aidge_core.FC(in_channels, out_channels).get_operator()
         self.assertEqual(fc_op.out_channels(), out_channels)
-        self.assertEqual(fc_op.attr.get_attr("no_bias"), nb_bias)
 
     def test_producer_1D(self):
         dims = [5]
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 940440bad52e367fe04872a308c99e4c802fa242..651a5de69596ee867a97b06ba683f49b05a41303 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -59,6 +59,7 @@
 #include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Reshape.hpp"
+#include "aidge/operator/Resize.hpp"
 #include "aidge/operator/Shape.hpp"
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/operator/Slice.hpp"
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 68e058183d0a59544c28f2ae3a7d519266fbb95e..80f8408a01be5a9b1f485251af0b13b8069404c5 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -283,6 +283,11 @@ class Tensor : public Data,
      */
     Tensor operator/(const Tensor& other) const;
 
+    /**
+     * @brief Element-wise sqrt operation for Tensor.
+     * @return Tensor
+     */
+    Tensor sqrt() const;
 
     ~Tensor() noexcept;
 
@@ -545,23 +550,16 @@ public:
 
     inline void print() const { fmt::print("{}\n", toString()); }
 
-    std::shared_ptr<Tensor> grad() {
-        return mGrad;
-    }
-    void setGrad(std::shared_ptr<Tensor> newGrad) {
-        mGrad = newGrad;
-    }
-
     /**
-     * @brief Associate the gradient with a Tensor instance and set its implementation
-     * if none was previously set.
+     * @brief Get the gradient Tensor. If not initialized, set a Tensor instance
+     * and set its implementation if none was previously set.
      * @note Dimensions for the Tensor instance are copied from the original current Tensor.
      * @note If a Tensor instance was already associated, only the implementation is created
      * with values set to 0.
      * @note If Tensor instance and implementation already existed for the gradient
      * nothing is done.
      */
-    void initGrad() {
+    std::shared_ptr<Tensor> grad() {
         if (!mGrad) {
             mGrad = std::make_shared<Tensor>(mDims);
         }
@@ -571,6 +569,11 @@ public:
             mGrad->setBackend(hasImpl() ? mImpl->backend() : "cpu");
             mGrad->zeros();
         }
+        return mGrad;
+    }
+
+    void setGrad(std::shared_ptr<Tensor> newGrad) {
+        mGrad = newGrad;
     }
 
     /**
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 9bae78fdb9657db32f16512f0fdb5ccb431dee02..f694a1234b6037a0ae75a89380af9747765e290c 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -193,9 +193,14 @@ public:
    */
   inline IOIndex_t getFirstFreeDataInput() const {
     IOIndex_t i = 0;
-    for (; (i < nbData()) && (input(i).second != gk_IODefaultIndex); ++i) {}
-    // assert((i<nbData()) && "No free data input for Node");
-    return (i < nbData()) ? i : gk_IODefaultIndex;
+    for (; i < nbInputs(); ++i) {
+      if ((inputCategory(i) == InputCategory::Data || inputCategory(i) == InputCategory::OptionalData)
+        && input(i).second == gk_IODefaultIndex)
+      {
+        break;
+      }
+    }
+    return (i < nbInputs()) ? i : gk_IODefaultIndex;
   }
 
 
@@ -227,13 +232,12 @@ public:
   inline IOIndex_t nbInputs() const noexcept { return getOperator()->nbInputs(); }
 
   /**
-   * @brief Number of input specifically for data.
+   * @brief Category of a specific input (Data or Param, optional or not).
    * Data inputs exclude inputs expecting parameters (weights or bias).
-   * @details [data, data, weight, bias] => 2
-   * @return IOIndex_t
+   * @return InputCategory
    */
-  inline IOIndex_t nbData() const noexcept {
-    return getOperator()->nbData();
+  inline InputCategory inputCategory(IOIndex_t idx) const {
+    return getOperator()->inputCategory(idx);
   }
 
   /**
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 4ac14bdaecd16e90586d14699f3b6f1bd6d88cab..0e709afe9f175443a28947be7f4c3f5b01f5e362 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -29,7 +29,7 @@ public:
     static const std::string Type;
 
     Add_Op(const IOIndex_t nbIn)
-        : OperatorTensor(Type, nbIn, 0, 1)
+        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
     {
         if (nbIn == 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 2e77ba871382442bcb9c5c020ce59cbba8578360..06ee4327e2f2d4df32c2decd73841bdf5f79a739 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -48,7 +48,7 @@ public:
 
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, 1, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
                         attr<AvgPoolingAttr::StrideDims>(stride_dims),
                         attr<AvgPoolingAttr::KernelDims>(kernel_dims)))
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 3b4f74ba2891e9fae8f7566c04d9b6a7ea92166b..b5b64eb428d709e804dd9f6711530b348e0be747 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -43,7 +43,13 @@ public:
     BatchNorm_Op() = delete;
 
     constexpr BatchNorm_Op(float epsilon, float momentum)
-        : OperatorTensor(Type, 1, 4, 1),
+        : OperatorTensor(Type,
+                            {InputCategory::Data,
+                                InputCategory::Param,
+                                InputCategory::Param,
+                                InputCategory::Param,
+                                InputCategory::Param},
+                            1),
           mAttributes(std::make_shared<Attributes_>(
             attr<BatchNormAttr::Epsilon>(epsilon),
             attr<BatchNormAttr::Momentum>(momentum))) {}
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index e86f092c6a8cc6b7ca913735f5af3717777e94ba..8341a93fe66d260ae3687170629b8759d0305a9c 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -51,7 +51,7 @@ public:
     Concat_Op() = delete;
 
     Concat_Op(const IOIndex_t nbIn, const std::int32_t axis)
-        : OperatorTensor(Type, nbIn, 0, 1),
+        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ConcatAttr::Axis>(axis)))
     {
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index ea46f37c710883a21d8332ae2f5c030de0fa27af..87ff5854b310ca472994bd6b68fd6ae58d31e806 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -30,7 +30,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, KernelDims, NoBias };
+enum class ConvAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
@@ -41,10 +41,9 @@ public:
 
 private:
     using Attributes_ = StaticAttributes<ConvAttr,
-                                            std::array<DimSize_t, DIM>,
-                                            std::array<DimSize_t, DIM>,
-                                            std::array<DimSize_t, DIM>,
-                                            bool>;
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
@@ -54,14 +53,12 @@ public:
 
     constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                      bool noBias = false)
-        : OperatorTensor(Type, 1, 2, 1),
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ConvAttr::StrideDims>(strideDims),
             attr<ConvAttr::DilationDims>(dilationDims),
-            attr<ConvAttr::KernelDims>(kernelDims),
-            attr<ConvAttr::NoBias>(noBias)))
+            attr<ConvAttr::KernelDims>(kernelDims)))
     {}
 
     /**
@@ -119,7 +116,6 @@ public:
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvAttr::StrideDims>(); }
     inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvAttr::DilationDims>(); }
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvAttr::KernelDims>(); }
-    inline bool& noBias() const { return mAttributes->template getAttr<ConvAttr::NoBias>(); }
 
 
     static const std::vector<std::string> getInputsName(){
@@ -153,10 +149,11 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   bool noBias = false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
-
+    if (!noBias) {
+        addProducer(conv, 2, {outChannels}, "b"); // already sets bias dims
+    }
     return conv;
 }
 
@@ -175,6 +172,7 @@ inline std::shared_ptr<Node> Conv(
 }
 }  // namespace Aidge
 
+extern template class Aidge::Conv_Op<1>;
 extern template class Aidge::Conv_Op<2>;
 
 namespace {
@@ -182,8 +180,7 @@ template <>
 const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
     "StrideDims",
     "DilationDims",
-    "KernelDims",
-    "NoBias"
+    "KernelDims"
 };
 }
 
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index b2234f4cb80356b9ed64193f9f42ed8b8849e12b..c8a83ff7de62a61e8125eac29d61c3938115cd09 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -29,7 +29,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims, NoBias };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
@@ -39,10 +39,9 @@ public:
 
 private:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
-                                            std::array<DimSize_t, DIM>,
-                                            std::array<DimSize_t, DIM>,
-                                            std::array<DimSize_t, DIM>,
-                                            bool>;
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, DIM>>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
@@ -53,14 +52,12 @@ public:
 
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                               bool no_bias=false)
-        : OperatorTensor(Type, 1, 2, 1),
+                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
             attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
-            attr<ConvDepthWiseAttr::NoBias>(no_bias)))
+            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)))
     {}
 
     /**
@@ -98,7 +95,6 @@ public:
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>(); }
     inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>(); }
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>(); }
-    inline bool& noBias() const { return mAttributes->template getAttr<ConvDepthWiseAttr::NoBias>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
@@ -117,9 +113,11 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            bool noBias=false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
+    if (!noBias) {
+        addProducer(convDW, 2, {nbChannels}, "b");
+    }
     return convDW;
 }
 
@@ -137,12 +135,13 @@ inline std::shared_ptr<Node> ConvDepthWise(
 }
 }  // namespace Aidge
 
+extern template class Aidge::ConvDepthWise_Op<1>;
 extern template class Aidge::ConvDepthWise_Op<2>;
 
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims",
-                                                          "KernelDims", "NoBias"};
+                                                          "KernelDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 566f4a6ae69b090b3a035b034406d463eeb77317..3edb4a28851cffe060886a4660d6b524eb9b814a 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -30,7 +30,7 @@ class Div_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Div_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Div_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index 5ec10522e889bb1188b2304940fd892c0928b414..f615fedeef6fea59d2177cf886e8d910f064f5c2 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -29,7 +29,7 @@ class Erf_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Erf_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Erf_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 37cebdd0c8542ac0de3f51ebd0a992959fe8b64f..01da37a05414c5994ace767770e7c26fc8cd4646 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -24,8 +24,6 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class FCAttr { NoBias };
-
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
@@ -33,18 +31,8 @@ class FC_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-private:
-    template <FCAttr e>
-    using attr = typename StaticAttributes<FCAttr, bool>::template attr<e>;
-    const std::shared_ptr<StaticAttributes<FCAttr, bool>> mAttributes;
-
-public:
-    FC_Op() = delete;
-
-    FC_Op(bool noBias)
-    : OperatorTensor(Type, 1, 2, 1),
-      mAttributes(std::make_shared<StaticAttributes<FCAttr, bool>>(
-        attr<FCAttr::NoBias>(noBias)))
+    FC_Op()
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1)
     {}
 
     /**
@@ -52,8 +40,7 @@ public:
      * @param op Operator to copy.
      */
     FC_Op(const FC_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
+        : OperatorTensor(op)
     {
         if (op.mImpl) {
             SET_IMPL_MACRO(FC_Op, *this, op.backend());
@@ -82,8 +69,6 @@ public:
         }
         return getInput(1)->template dims<2>()[0];
     }
-    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline bool& noBias() const { return mAttributes -> getAttr<FCAttr::NoBias>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "weight", "bias"};
@@ -95,16 +80,13 @@ public:
 
 inline std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), name);
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), name);
     addProducer(fc, 1, {outChannels, inChannels}, "w");
-    addProducer(fc, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
+    if (!noBias) {
+        addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
+    }
     return fc;
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::FCAttr>::data[] = {"NoBias"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index a7ec3cf221c7b679d1b4d152b9410a5cdf374c52..3e9b780732fa9144f2e58bef854d1b42d063d0bf 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -56,7 +56,7 @@ public:
     Gather_Op(std::int8_t axis,
               const std::vector<int64_t>& indices,
               const std::vector<DimSize_t>& gatheredShape)
-        : OperatorTensor(Type, 2, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
         mAttributes(std::make_shared<Attributes_>(
             attr<GatherAttr::Axis>(axis),
             attr<GatherAttr::Indices>(indices),
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 25fb0523aac4dad2dd95bb48dd931d674ca8329d..8196c4268e669001d99f25ed2cead546e1141aa7 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -35,8 +35,18 @@ private:
     const std::shared_ptr<DynamicAttributes> mAttributes;
 
 public:
+    GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut)
+        : OperatorTensor(type, inputsCategory, nbOut)
+    {
+        mImpl = std::make_shared<OperatorImpl>(*this);
+    }
+
     GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
-        : OperatorTensor(type, nbData, nbParam, nbOut),
+        : OperatorTensor(type, [nbData, nbParam]() {
+                                std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
+                                inputsCategory.resize(nbData + nbParam, InputCategory::Param);
+                                return inputsCategory;
+                            }(), nbOut),
           mAttributes(std::make_shared<DynamicAttributes>())
     {
         mImpl = std::make_shared<OperatorImpl>(*this);
@@ -92,6 +102,20 @@ public:
     }
 };
 
+/**
+ * @brief Fictive custom operator not associated with any implementation.
+ * Allows to import unknown operators and simulate new ones.
+ * @param type Type of the fictive operator.
+ * @param inputCategory List of inputs with their respective category.
+ * @param nbOut Number of output data.
+ * @param name (optional) name of the Operator.
+ * @return std::shared_ptr<Node> Node associated with the Generic Operator.
+ */
+inline std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector<InputCategory>& inputCategory, IOIndex_t nbOut,
+                                             const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, inputCategory, nbOut), name);
+}
+
 /**
  * @brief Fictive custom operator not associated with any implementation.
  * Allows to import unknown operators and simulate new ones.
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 74529a0ba9481bf6280df8d3ce496f67635a5aef..8bb738e8b57598e4256d3850fc791976e73c834c 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -37,7 +37,7 @@ class GlobalAveragePooling_Op
 public:
   static const std::string Type;
 
-  GlobalAveragePooling_Op() : OperatorTensor(Type, 1, 0, 1) {}
+  GlobalAveragePooling_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
   GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op)
       : OperatorTensor(op) {
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index bcbe1c6c69e0a666d7a976558d558f101c5b8fca..393798da2fc26b3ef3f5e4cfe54f69fd82174a5f 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -42,7 +42,7 @@ public:
     static const std::string Type;
 
     Identity_Op()
-        : OperatorTensor(Type, 1, 0, 1)
+        : OperatorTensor(Type, {InputCategory::Data}, 1)
     {
         mImpl = std::make_shared<OperatorImpl>(*this);
     }
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 2556a395b28be980dcd0d9e91669fb2fe78e6549..294e7ebb009ff184c9150d2aa18067a15deeba22 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -44,7 +44,7 @@ public:
     LeakyReLU_Op() = delete;
 
     LeakyReLU_Op(float negativeSlope)
-        : OperatorTensor(Type, 1, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(
             std::make_shared<Attributes_>(
                 attr<LeakyReLUAttr::NegativeSlope>(negativeSlope)))
diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Ln.hpp
index 09353e8a696c275e6a5dbadcfd8a254f69ad7f97..d4010471c9af853556dbe1d60c8585d12f8fc638 100755
--- a/include/aidge/operator/Ln.hpp
+++ b/include/aidge/operator/Ln.hpp
@@ -30,7 +30,7 @@ class Ln_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Ln_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Ln_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 580d720e617e5b20c0acc7ce5e7f200fe5b25606..be460ee88bd79592e29581f6acd64813ecc39bec 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -30,7 +30,7 @@ class MatMul_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    MatMul_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    MatMul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 6f6dd8b38fbe92170481f2bfea867352e51e1161..082aa26bbdf1d55dcae29d1ffb2b9810db8b17d0 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -54,7 +54,7 @@ public:
     constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                             bool ceil_mode = false)
-        : OperatorTensor(Type, 1, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<MaxPoolingAttr::StrideDims>(stride_dims),
             attr<MaxPoolingAttr::KernelDims>(kernel_dims),
@@ -86,10 +86,7 @@ public:
 
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        if (!getInput(0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-        }
-        if (!(getInput(0)->empty())) {
+        if (inputsAssociated()) {
             std::array<DimSize_t, DIM + 2> outputDims{};
             const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 714f629d71d4a032360eccc00562bd9af32aa95d..d6af56f2faad18b9e39c793ea68e39eac4dd2f01 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -51,7 +51,7 @@ public:
     Memorize_Op() = delete;
 
     Memorize_Op(const std::uint32_t endStep)
-        : OperatorTensor(Type, 1, 1, 2),
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 2),
           mAttributes(std::make_shared<Attributes_>(
                         attr<MemorizeAttr::ScheduleStep>(0),
                         attr<MemorizeAttr::ForwardStep>(0),
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index fb8c73af33dd081664c82427ea8aa6876117d695..73a5c6a99fc06640df4f984dd8b8a291c3b3d783 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -74,14 +74,7 @@ public:
     void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
     bool forwardDims(bool allowDataDependency = false) override final {
-        // Check first that all required inputs are available, otherwise
-        // mGraph->forwardDims() will fail!
-        bool forwarded = true;
-        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-            forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
-        }
-
-        if (forwarded) {
+        if (inputsAssociated()) {
             // Forward dims of micro-graph
             return mGraph->forwardDims({}, allowDataDependency);
         }
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index eb57761cc5927cb4eedfb6cb12b1d49a0ee50b9c..51681629cbae215fd529b6e7bb568d07264dd63e 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -44,11 +44,13 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
 
     auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
-    addProducer(metaOp, 2, {out_channels}, "b");
+    if (!no_bias) {
+        addProducer(metaOp, 2, {out_channels}, "b");
+    }
     return metaOp;
 }
 
@@ -57,11 +59,10 @@ inline std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
 {
     auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), "");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
 
     return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
 }
@@ -94,11 +95,13 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
 
     auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w");
-    addProducer(metaOp, 2, {nb_channels}, "b");
+    if (!no_bias) {
+        addProducer(metaOp, 2, {nb_channels}, "b");
+    }
     return metaOp;
 }
 
@@ -107,11 +110,10 @@ inline std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
 {
     auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
 
     return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
 }
@@ -203,8 +205,7 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels,
                            bool noBias = false,
                            const std::string& name = "");
 
-std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length,
-                                         bool noBias = false);
+std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length);
 
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index e9bcaa871619828a50dcd407d39744e7983fe2c4..cf5a3f188424fc52849eab580cce624ff714c729 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -35,7 +35,7 @@ class Move_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Move_Op() : OperatorTensor(Type, 1, 0, 1) {
+    Move_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {
         mImpl = std::make_shared<Move_OpImpl>(*this);
     }
 
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index f53a38a82a6771e416435222137e72366f5f69f3..e61393b28fc45bf46487ac2277753dec1b297b81 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -32,7 +32,7 @@ class Mul_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Mul_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Mul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index d09c440d9b9ba1750f2452245d109a0c674f304a..adec17d07f39727a0c75d32fa24bcc624aa66e1a 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -40,6 +40,13 @@ enum class OperatorType {
     Tensor
 };
 
+enum class InputCategory {
+    Data,
+    Param,
+    OptionalData,
+    OptionalParam
+};
+
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
     std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
@@ -48,17 +55,15 @@ protected:
 private:
     std::string mType;
     const OperatorType mOperatorType;
-    const IOIndex_t mNbData;
-    const IOIndex_t mNbParam;
+    const std::vector<InputCategory> mInputsCategory;
     const IOIndex_t mNbOut;
 
 public:
     Operator() = delete;
-    Operator(const std::string& type, const IOIndex_t nbData, const IOIndex_t nbParam, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
+    Operator(const std::string& type, const std::vector<InputCategory>& inputsCategory, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
     : mType(type),
       mOperatorType(operatorType),
-      mNbData(nbData),
-      mNbParam(nbParam),
+      mInputsCategory(inputsCategory),
       mNbOut(nbOut)
     {
         // ctor
@@ -67,8 +72,7 @@ public:
     Operator(const Operator& op):
         std::enable_shared_from_this<Operator>(),
         mOperatorType(op.mOperatorType),
-        mNbData(op.mNbData),
-        mNbParam(op.mNbParam),
+        mInputsCategory(op.mInputsCategory),
         mNbOut(op.mNbOut)
     {
         mType = op.mType;
@@ -191,11 +195,14 @@ public:
         return mOperatorType;
     }
 
+    inline InputCategory inputCategory(IOIndex_t idx) const {
+        AIDGE_ASSERT(idx < mInputsCategory.size(), "Input #{} out of range (number of inputs is {})", idx, mInputsCategory.size());
+        return mInputsCategory[idx];
+    }
+
     virtual inline bool isAtomic() const noexcept { return true; }
 
-    inline IOIndex_t nbInputs() const noexcept { return mNbData+mNbParam; };
-    inline IOIndex_t nbData() const noexcept { return mNbData; };
-    inline IOIndex_t nbParam() const noexcept { return mNbParam; };
+    inline IOIndex_t nbInputs() const noexcept { return mInputsCategory.size(); };
     inline IOIndex_t nbOutputs() const noexcept { return mNbOut; };
 
     static const std::vector<std::string> getInputsName() {
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 2737a6d93274b77025668782afc644d28e047682..657a6d8ab6124b8919a3ac8fea5b6bfa6c4254b9 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -40,7 +40,7 @@ protected:
 public:
     OperatorTensor() = delete;
 
-    OperatorTensor(const std::string& type, const IOIndex_t nbData, const IOIndex_t nbParam,
+    OperatorTensor(const std::string& type, const std::vector<InputCategory>& inputsCategory,
                    const IOIndex_t nbOut);
 
     OperatorTensor(const OperatorTensor& other);
@@ -87,6 +87,9 @@ public:
     virtual void setDataFormat(const DataFormat& dataFormat) const override;
 
     virtual void forward() override;
+
+protected:
+    bool inputsAssociated(bool checkNonEmpty = true) const;
 };
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 79a3489a289df6857d85ff1e22dd5669a42e85ff..5fd0f93986206e6cd958a85055159783eeb8bc8f 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -51,7 +51,7 @@ public:
     constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                      const PadBorderType &borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
-        : OperatorTensor(Type, 1, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<PadAttr::BeginEndBorders>(beginEndTuples),
             attr<PadAttr::BorderType>(borderType),
@@ -76,14 +76,7 @@ public:
 
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        bool associated = true;
-        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
+        if (inputsAssociated()) {
             std::array<DimSize_t, DIM + 2> outputDims{};
             const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
 
@@ -95,9 +88,10 @@ public:
             outputDims[1] = inputDims[1];
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
+            return true;
         }
 
-        return associated;
+        return false;
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
@@ -118,9 +112,6 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string Pad_Op<DIM>::Type = "Pad";
-
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                                            const std::string& name = "",
@@ -143,6 +134,9 @@ inline std::shared_ptr<Node> Pad(
 }
 }  // namespace Aidge
 
+extern template class Aidge::Pad_Op<1>;
+extern template class Aidge::Pad_Op<2>;
+
 namespace {
 template <>
 const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndBorders", "BorderType", "BorderValue"};
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 93493cd8e095116db7e8c1dcb94f3bc76b1d25c9..575d56b455940ea98571110dbaa9a83de09fef37 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -45,7 +45,7 @@ private:
 
 public:
     Pop_Op()
-        : OperatorTensor(Type, 1, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0)))
     {
         mImpl = std::make_shared<Pop_OpImpl>(*this);
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index 08c4de2a254dd267eda4040b54108f93a0c2d922..ee5c01c2121d68a7988dc686c4dbb4bbf7331c84 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -29,7 +29,7 @@ class Pow_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Pow_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Pow_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 1b9505c883cae62d3ba44bd6ff07932014607d60..9e3bdd1ba2f601da27dea3a6a01131a0c8191eb4 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -46,7 +46,7 @@ public:
     template <std::size_t DIM>
     Producer_Op(const std::array<DimSize_t, DIM>& dims,
                 bool constant = false)
-        : OperatorTensor(Type, 0, 0, 1),
+        : OperatorTensor(Type, {}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ProdAttr::Constant>(constant)))
     {
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 963de31c49f48784e92434b2b563d6c008e2d4fd..40b5d581d53521e6086d24c5ecc53f725dd9f252 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -30,7 +30,7 @@ class ReLU_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    ReLU_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    ReLU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index fdbc3a5c132e0c9599481370bc39f351df8883bc..3fcf19ffd13645fb28b6efcfefaf8e347b148c89 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -46,7 +46,7 @@ public:
     ReduceMean_Op() = delete;
 
     ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims)
-        : OperatorTensor(Type, 1, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ReduceMeanAttr::Axes>(axes),
             attr<ReduceMeanAttr::KeepDims>(keep_dims)))
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 23494a4d1f45106c6946cb4e5fcc7384b6afd720..4ea0cca30089555ff7979f141f94e5c84f04ffa1 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -48,7 +48,7 @@ public:
     Reshape_Op() = delete;
 
     Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
-        : OperatorTensor(Type, 2, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ReshapeAttr::Shape>(shape),
             attr<ReshapeAttr::AllowZero>(allowzero)))
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..565affc57ae8e7b1838466733b0f5d8fa8e1a6d6
--- /dev/null
+++ b/include/aidge/operator/Resize.hpp
@@ -0,0 +1,88 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_Resize_H_
+#define AIDGE_CORE_OPERATOR_Resize_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Resize_Op : public OperatorTensor,
+                  public Registrable<Resize_Op, std::string, std::shared_ptr<OperatorImpl>(const Resize_Op&)>{
+
+public:
+    static const std::string Type;
+
+    Resize_Op()
+        : OperatorTensor(Type,
+            {InputCategory::Data,
+                InputCategory::OptionalData,
+                InputCategory::OptionalData,
+                InputCategory::OptionalData},
+            1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+
+    Resize_Op(const Resize_Op& op)
+        : OperatorTensor(op)
+    {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Resize_Op, *this, op.backend());
+        }
+        else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Resize_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Resize_Op>(*this);
+    }
+
+    bool dimsForwarded() const override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    static const std::vector<std::string> getInputsName(){
+        // roi, scales and sizes are inputs even though they act as const parameters
+        return {"data_input", "roi", "scales", "sizes"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Resize(const std::string &name = "") {
+
+    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
+}
+
+}  // namespace Aidge
+
+
+#endif /* AIDGE_CORE_OPERATOR_Resize_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 725ec171b93ed301a377449b6253dff99a49d114..7d8e11b31546cd87a8d6b2d36e2929c9ef6df7a2 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -43,7 +43,7 @@ public:
     Scaling_Op() = delete;
 
     Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
-        : OperatorTensor(Type, 1, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ScalingAttr::ScalingFactor>(scalingFactor),
             attr<ScalingAttr::QuantizedNbBits>(nbBits),
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 3132e4ab7adcc331772d627147cc31c25597570a..6d2d1b5e7c212fafa5ad6457d9e0a260e96b1c90 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -36,20 +36,24 @@ enum class ShapeAttr { Start, End };
 class Shape_Op : public OperatorTensor,
                 public Registrable<Shape_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Shape_Op&)>,
-                public StaticAttributes<ShapeAttr, std::int64_t, std::int64_t> {
+                                   std::shared_ptr<OperatorImpl>(const Shape_Op&)> {
 
 public:
     static const std::string Type;
 
-    Shape_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ShapeAttr, std::int64_t, std::int64_t>;
     template <ShapeAttr e> using attr = typename Attributes_::template attr<e>;
-    Shape_Op(std::int64_t start, std::int64_t end)
-            : OperatorTensor(Type, 1, 0, 1),
-            Attributes_(attr<ShapeAttr::Start>(start),
-                        attr<ShapeAttr::End>(end))
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Shape_Op() = delete;
+
+    Shape_Op(const std::int64_t start, const std::int64_t end)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ShapeAttr::Start>(start),
+            attr<ShapeAttr::End>(end)))
     {
         mImpl = std::make_shared<Shape_OpImpl>(*this);
     }
@@ -60,7 +64,7 @@ public:
      */
     Shape_Op(const Shape_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Shape_Op, *this, op.backend());
@@ -82,6 +86,10 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int64_t& start() const noexcept { return mAttributes->getAttr<ShapeAttr::Start>(); }
+    inline std::int64_t& end() const noexcept { return mAttributes->getAttr<ShapeAttr::End>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -90,7 +98,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Shape(std::int64_t start = 0, std::int64_t end = -1, const std::string& name = "") {
+inline std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..879edcac6a7ed9a78a2db8d82994071a6cf09635
--- /dev/null
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -0,0 +1,75 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SHIFTGELU_H_
+#define AIDGE_CORE_OPERATOR_SHIFTGELU_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class ShiftGELU_Op : public OperatorTensor,
+    public Registrable<ShiftGELU_Op, std::string, std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)> {
+public:
+    static const std::string Type;
+
+    ShiftGELU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ShiftGELU_Op(const ShiftGELU_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ShiftGELU_Op, *this, op.backend());
+        }else{
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ShiftGELU_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ShiftGELU_Op>(*this);
+    }
+
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> ShiftGELU(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<ShiftGELU_Op>(), name);
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_SHIFTGELU_H_ */
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f171130213b2e51ca8fc9905d93944198f849ce7
--- /dev/null
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -0,0 +1,75 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SHIFTMAX_H_
+#define AIDGE_CORE_OPERATOR_SHIFTMAX_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class ShiftMax_Op : public OperatorTensor,
+    public Registrable<ShiftMax_Op, std::string, std::shared_ptr<OperatorImpl>(const ShiftMax_Op&)> {
+public:
+    static const std::string Type;
+
+    ShiftMax_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ShiftMax_Op(const ShiftMax_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ShiftMax_Op, *this, op.backend());
+        }else{
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ShiftMax_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ShiftMax_Op>(*this);
+    }
+
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> ShiftMax(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<ShiftMax_Op>(), name);
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_SHIFTMAX_H_ */
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index bea9fc45eaa7f17f71963106b5bd3e1340a48a92..ae82d4a3a2d29755bba22b9a4194284310ac4f84 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -30,7 +30,7 @@ class Sigmoid_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Sigmoid_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Sigmoid_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 71887ec6cba010f0208c449f6054d3d6e9be72c4..7d425a0f3589e74b54ee0834fdc4291ea7f49bad 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -46,7 +46,7 @@ public:
     Slice_Op() = delete;
 
     Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>& ends, const std::vector<std::int8_t>& axes, const std::vector<std::int64_t>& steps)
-        : OperatorTensor(Type, 5, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<SliceAttr::Starts>(starts),
             attr<SliceAttr::Ends>(ends),
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 4bb4c1d0caf74e7c86d2685eaf885d2e9d642ba0..70f3a561ae5c9ba4720de8419bcd5aaf32a51e47 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -43,7 +43,7 @@ public:
     Softmax_Op() = delete;
 
     Softmax_Op(std::int32_t axis)
-        : OperatorTensor(Type, 1, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
                 attr<SoftmaxAttr::Axis>(axis)))
     {}
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index ff50a6aa7b8de971431515a09ca4e684dcc51865..42baf66e6722c6f9a0d3f40f12d4f4685fcc6980 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -45,7 +45,7 @@ public:
     using Attributes_ = StaticAttributes<SplitAttr,  std::int8_t, std::vector<DimSize_t>>;
     template <SplitAttr e> using attr = typename Attributes_::template attr<e>;
     Split_Op( std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split)
-        : OperatorTensor(Type, 2, 0, nbOutputs),
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
           Attributes_(attr<SplitAttr::Axis>(axis),
                       attr<SplitAttr::Split>(split))
     {
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index f5ffa431192d73a703c1ce973cb485dadb31420d..05b20286bc3f576d4e43fbece26ae270b3e583e6 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -33,7 +33,7 @@ public:
 public:
     static const std::string Type;
 
-    Sqrt_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Sqrt_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index e5d8442851c35e9232fdd77d862fb48b71c76f1f..fc30e51c9a6daed56a2e0e665be645739961aa6b 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -33,7 +33,7 @@ public:
 public:
     static const std::string Type;
 
-    Sub_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Sub_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index 3fd5377d30cfff864743dcab2da9e690e26e5263..b5f183a90aeeb4ef424c318e8942a818b568b44a 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -28,7 +28,7 @@ class Tanh_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Tanh_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Tanh_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 2f5c43693a562b39de7a159a0050745085d6204b..72096448ebf0e00d73e33bdab094ca7f0b7d0633 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -51,7 +51,7 @@ public:
     Transpose_Op() = delete;
 
     Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder)
-        : OperatorTensor(Type, 1, 0, 1),
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder)))
     {
diff --git a/include/aidge/recipes/GraphViewHelper.hpp b/include/aidge/recipes/GraphViewHelper.hpp
index a2c571bf4ed164729f7c3416c814b913b4d07e6f..3b8ba7627362c945a6bfbe587ec952fdda013e98 100644
--- a/include/aidge/recipes/GraphViewHelper.hpp
+++ b/include/aidge/recipes/GraphViewHelper.hpp
@@ -39,8 +39,6 @@ std::set<std::shared_ptr<Tensor>> producers(std::shared_ptr<GraphView> graphview
  */
 std::set<std::shared_ptr<Tensor>> parameters(std::shared_ptr<GraphView> graphview);
 
-void compile_gradient(std::shared_ptr<Aidge::GraphView> gv);
-
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_GRAPHVIEWHELPER_H_ */
diff --git a/include/aidge/scheduler/SequentialScheduler.hpp b/include/aidge/scheduler/SequentialScheduler.hpp
index a7929fde8a2affdd562d70d11a7c809aaf3357d0..35dafead6dc424550df7d83d54f5ec998c3b4d86 100644
--- a/include/aidge/scheduler/SequentialScheduler.hpp
+++ b/include/aidge/scheduler/SequentialScheduler.hpp
@@ -54,7 +54,7 @@ public:
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    void backward(bool instantiateGrad = true);
+    void backward();
 
 private:
     SchedulingPolicy mSchedulingPolicy;
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index d733f2e364f441be8a45f866ceb77fea15202be1..83bb4afeacdd6de181fd6738edad2229736854c8 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -77,13 +77,13 @@ void init_Tensor(py::module& m){
     .def(py::self - py::self)
     .def(py::self * py::self)
     .def(py::self / py::self)
+    .def("sqrt", &Tensor::sqrt)
     .def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true)
     .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
     .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
     .def("grad", &Tensor::grad)
     .def("set_grad", &Tensor::setGrad)
     .def("dtype", &Tensor::dataType)
-    .def("init_grad", &Tensor::initGrad)
     .def("size", &Tensor::size)
     .def("capacity", &Tensor::capacity)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize)
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index a04ed283bb0dbd4a6962848e9b80ea2140c2c269..06c171214d5df261e5df832179a0fa69420aab7d 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -134,11 +134,12 @@ void init_Node(py::module& m) {
     :rtype: int
     )mydelimiter")
 
-    .def("get_nb_data", &Node::nbData,
+    .def("input_category", &Node::inputCategory, py::arg("idx"),
     R"mydelimiter(
-    Number of data inputs.
+    Category of a specific input (Data or Param, optional or not).
+    Data inputs exclude inputs expecting parameters (weights or bias).
 
-    :rtype: int
+    :rtype: InputCategory
     )mydelimiter")
 
     .def("get_nb_outputs", &Node::nbOutputs,
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 7550b4ee8708ddf446ea4cb0b4837f3b6a6c8f9e..61fb37e788021757fa6c3aced9a5f4c30fb60548 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -32,17 +32,15 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
     py::multiple_inheritance())
         .def(py::init([](const std::vector<DimSize_t>& kernel_dims,
                          const std::vector<DimSize_t> &stride_dims,
-                         const std::vector<DimSize_t> &dilation_dims,
-                         bool no_bias) {
+                         const std::vector<DimSize_t> &dilation_dims) {
             AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
             AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
             AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-            return new Conv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+            return new Conv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
         }), py::arg("kernel_dims"),
             py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-            py::arg("no_bias") = false)
+            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
         .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
         .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
         .def("in_channels", &Conv_Op<DIM>::inChannels)
@@ -74,7 +72,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
 
 
 void init_Conv(py::module &m) {
-//   declare_ConvOp<1>(m);
+  declare_ConvOp<1>(m);
   declare_ConvOp<2>(m);
 //   declare_ConvOp<3>(m);
 }
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index fdc4b62bb4ea39b95591bd2e817210550cdf8f2a..080df1832bf92a9db9d26e1fa18b652dc70c2a42 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -33,12 +33,10 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                bool>(),
+                const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("dilation_dims"),
-        py::arg("no_bias"))
+        py::arg("dilation_dims"))
   .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
   .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels);
@@ -66,7 +64,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
 
 
 void init_ConvDepthWise(py::module &m) {
-//   declare_ConvDepthWiseOp<1>(m);
+  declare_ConvDepthWiseOp<1>(m);
   declare_ConvDepthWiseOp<2>(m);
 //   declare_ConvDepthWiseOp<3>(m);
 
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 7681add7c8ae747fe20e1477f57a648fc2cfea5b..9e0d61bc3a4d957e98db39577e120da5fe97ebea 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -25,7 +25,7 @@ namespace Aidge {
 
 void declare_FC(py::module &m) {
   py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
-    .def(py::init<bool>(), py::arg("no_bias"))
+    .def(py::init<>())
     .def_static("get_inputs_name", &FC_Op::getInputsName)
     .def_static("get_outputs_name", &FC_Op::getOutputsName)
     .def("out_channels", &FC_Op::outChannels)
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index ee3f85b6578054512df7b0087d1a972176cd50a3..d021a79c5ff4e337bebf424465458ddabf056a56 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -51,20 +51,18 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
-                                                         const std::vector<DimSize_t> &dilation_dims,
-                                                         bool no_bias)
+                                                         const std::vector<DimSize_t> &dilation_dims)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-        return PaddedConv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+        return PaddedConv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("kernel_dims"),
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("no_bias")= false);
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
 }
 
 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
@@ -93,20 +91,18 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
-                                                         const std::vector<DimSize_t> &dilation_dims,
-                                                         bool no_bias)
+                                                         const std::vector<DimSize_t> &dilation_dims)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-        return PaddedConvDepthWise_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+        return PaddedConvDepthWise_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("kernel_dims"),
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("no_bias") = false);
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
 
 }
 
@@ -180,15 +176,14 @@ void declare_LSTMOp(py::module &m) {
        py::arg("nobias") = false,
        py::arg("name") = "");
   m.def("LSTMOp", &LSTM_Op,
-       py::arg("seq_length"),
-       py::arg("nobias") = false);
+       py::arg("seq_length"));
 }
 
 void init_MetaOperatorDefs(py::module &m) {
-//   declare_PaddedConvOp<1>(m);
+  declare_PaddedConvOp<1>(m);
   declare_PaddedConvOp<2>(m);
 //   declare_PaddedConvOp<3>(m);
-//   declare_PaddedConvDepthWiseOp<1>(m);
+  declare_PaddedConvDepthWiseOp<1>(m);
   declare_PaddedConvDepthWiseOp<2>(m);
 //   declare_PaddedConvDepthWiseOp<3>(m);
 //   declare_PaddedAvgPoolingOp<1>(m);
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index 10bf629c701641658ef37a88dd1b32935b3c6c21..2b2f30f14931fd041bfb4ec1a712e5c9419fdf22 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -24,6 +24,16 @@
 namespace py = pybind11;
 namespace Aidge {
 void init_Operator(py::module& m){
+    py::enum_<OperatorType>(m, "OperatorType")
+        .value("Data", OperatorType::Data)
+        .value("Tensor", OperatorType::Tensor);
+
+    py::enum_<InputCategory>(m, "InputCategory")
+        .value("Data", InputCategory::Data)
+        .value("Param", InputCategory::Param)
+        .value("OptionalData", InputCategory::OptionalData)
+        .value("OptionalParam", InputCategory::OptionalParam);
+
     py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
     .def("__repr__", &Operator::repr)
     .def("backend", &Operator::backend)
@@ -33,9 +43,14 @@ void init_Operator(py::module& m){
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
     .def("get_raw_input", &Operator::getRawInput, py::arg("inputIdx"))
     .def("nb_inputs", &Operator::nbInputs)
-    .def("nb_data", &Operator::nbData)
-    .def("nb_param", &Operator::nbParam)
     .def("nb_outputs", &Operator::nbOutputs)
+    .def("input_category", &Operator::inputCategory, py::arg("idx"),
+    R"mydelimiter(
+    Category of a specific input (Data or Param, optional or not).
+    Data inputs exclude inputs expecting parameters (weights or bias).
+
+    :rtype: InputCategory
+    )mydelimiter")
     .def("associate_input", &Operator::associateInput, py::arg("inputIdx"), py::arg("data"))
     .def("set_datatype", &Operator::setDataType, py::arg("dataType"))
     .def("set_backend", &Operator::setBackend, py::arg("name"), py::arg("device") = 0)
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 792a9318b63e21c7f74b70fff9b2afdabd4b8470..3df203ed52967e3dbc393769276015a7fe0e016f 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -62,6 +62,6 @@ void init_Pad(py::module &m) {
     .export_values();
   declare_PadOp<1>(m);
   declare_PadOp<2>(m);
-  declare_PadOp<3>(m);
+  // declare_PadOp<3>(m);
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a925af8cf357dabc09f4e8e3c39af9519b4ed550
--- /dev/null
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -0,0 +1,29 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Resize.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Resize(py::module& m) {
+    py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
+        .def_static("get_inputs_name", &Resize_Op::getInputsName)
+        .def_static("get_outputs_name", &Resize_Op::getOutputsName);
+
+    declare_registrable<Resize_Op>(m, "ResizeOp");
+
+    m.def("Resize", &Resize, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 42e29fd43324d12ea4cac2c16c88a056903b7c54..9443ed55eaaf6dc04ad9ee4612ed9d491aed54ae 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -51,6 +51,7 @@ void init_Pow(py::module&);
 void init_ReduceMean(py::module&);
 void init_ReLU(py::module&);
 void init_Reshape(py::module&);
+void init_Resize(py::module&);
 void init_Scaling(py::module&);
 void init_Shape(py::module&);
 void init_Sigmoid(py::module&);
@@ -121,6 +122,7 @@ void init_Aidge(py::module& m) {
     init_ReduceMean(m);
     init_ReLU(m);
     init_Reshape(m);
+    init_Resize(m);
     init_Scaling(m);
     init_Shape(m);
     init_Sigmoid(m);
diff --git a/python_binding/recipes/pybind_GraphViewHelper.cpp b/python_binding/recipes/pybind_GraphViewHelper.cpp
index e65b790d3eba6072e3e1b112c7d841959d4a5672..ac56fb4b43eb5b0a737157ec9e64c6771a692816 100644
--- a/python_binding/recipes/pybind_GraphViewHelper.cpp
+++ b/python_binding/recipes/pybind_GraphViewHelper.cpp
@@ -24,6 +24,5 @@ namespace py = pybind11;
 namespace Aidge {
 void init_GraphViewHelper(py::module &m) {
     m.def("producers", &producers, py::arg("graphview"));
-    m.def("compile_gradient", &compile_gradient, py::arg("graphview"));
 }
 } // namespace Aidge
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index b16134da324383a4542965393257288c49dceed0..ac35ce0a62408a69637a4160c9a008aba9dceb66 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -28,13 +28,14 @@ void init_Scheduler(py::module& m){
     .def("resetScheduling", &Scheduler::resetScheduling)
     .def("generate_scheduling", &Scheduler::generateScheduling)
     .def("get_static_scheduling", &Scheduler::getStaticScheduling, py::arg("step") = 0)
+    .def("graph_view", &Scheduler::graphView)
     .def("generate_memory", &Scheduler::generateMemory, py::arg("inc_producers") = false, py::arg("wrap_around_buffer") = false)
     ;
 
     py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>, Scheduler>(m, "SequentialScheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("data")=std::vector<Tensor>())
-    .def("backward", &SequentialScheduler::backward, py::arg("instanciate_grad")=true)
+    .def("backward", &SequentialScheduler::backward)
     ;
 
     py::class_<ParallelScheduler, std::shared_ptr<ParallelScheduler>, Scheduler>(m, "ParallelScheduler")
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index 8a5b40e44308111c5778c5260155b644234103c8..de200300a99bb33180103608238855b2f5604145 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -27,10 +27,6 @@ Aidge::OperatorImpl::OperatorImpl(const Operator& op, const std::string& backend
 }
 
 Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(mOp.getRawInput(inputIdx),
-        "a valid input is required at index {} for operator type {}",
-        inputIdx, mOp.type());
-
     if (mOp.getRawInput(inputIdx)) {
         const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
         if (!input->empty()) {
@@ -48,10 +44,6 @@ Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inpu
 }
 
 Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(mOp.getRawInput(inputIdx),
-        "a valid input is required at index {} for operator type {}",
-        inputIdx, mOp.type());
-
     if (mOp.getRawInput(inputIdx)) {
         const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
         if (!input->empty()) {
@@ -73,10 +65,6 @@ Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) co
 
 Aidge::Elts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                          const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    AIDGE_ASSERT(mOp.getRawOutput(outputIdx),
-        "a valid output is required at index {} for operator type {}",
-        outputIdx, mOp.type());
-
     if (mOp.getRawOutput(outputIdx)) {
         const auto output = std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx));
         if (!output->empty()) {
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index de9a4bd0ac7848052ba34068c7f5fa25b865f53c..28fb90cebf8e387e69f1ec39c46a6a47c8a4d316 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -20,6 +20,7 @@
 #include "aidge/operator/Div.hpp"
 #include "aidge/operator/Mul.hpp"
 #include "aidge/operator/Sub.hpp"
+#include "aidge/operator/Sqrt.hpp"
 #include "aidge/operator/Transpose.hpp"
 #include "aidge/utils/Types.h"
 
@@ -115,6 +116,17 @@ Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
     return div_.getOutput(0)->clone();
 }
 
+Aidge::Tensor Aidge::Tensor::sqrt() const {
+    AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
+    auto sqrt_ = Sqrt_Op();
+    sqrt_.associateInput(0, std::make_shared<Tensor>(*this));
+    sqrt_.setDataType(dataType());
+    sqrt_.setDataFormat(dataFormat());
+    sqrt_.setBackend(mImpl->backend());
+    sqrt_.forward();
+    return sqrt_.getOutput(0)->clone();
+}
+
 Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
     if (this == &other) {
         return *this;
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 33d31636ccc22e26d6529cdf1a57aff6d0acff9e..5a11aa20e03bef274f784788dee1ef047cafba42 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -46,23 +46,29 @@ const std::shared_ptr<Aidge::Node> Aidge::GraphView::operator[](const std::strin
 ///////////////////////////////////////////////////////
 
 Aidge::Connector Aidge::GraphView::operator()(
-    const std::vector<Aidge::Connector> ctors) {
+    const std::vector<Aidge::Connector> ctors)
+{
   // TODO: allow for multiple inputNodes?
-  assert((inputNodes().size() == 1U) && "Too many input Nodes for the GraphView, undefined behaviour");
+  AIDGE_ASSERT(inputNodes().size() == 1U, "Multiple input Nodes for the GraphView is not supported for Connectors");
   std::shared_ptr<Node> inNode = *inputNodes().begin();
-  assert((ctors.size() == static_cast<std::size_t>(inNode->nbData())) && "Wrong number of arguments.\n");
-  for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inNode->inputs()) {
-    assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n");
-    (void)input; // avoid unused warning
-  }
 
-  IOIndex_t inID = 0;
-  for (const Connector &ctor : ctors) {
-    assert((ctor.node() != nullptr) &&
-           "Input Connector must be associated with a node");
-    ctor.node()->addChild(shared_from_this(), static_cast<std::size_t>(ctor.index()),
-                          {inNode, inID++});
+  IOIndex_t ctorIdx = 0;
+  const auto& inputs = inNode->inputs();
+  for (IOIndex_t idx = 0; idx < inNode->nbInputs(); ++idx) {
+    if (inNode->inputCategory(idx) == InputCategory::Data || inNode->inputCategory(idx) == InputCategory::OptionalData) {
+      if (ctorIdx < ctors.size()) {
+        AIDGE_ASSERT(ctors[ctorIdx].node() != nullptr, "Input Connector #{} must be associated with a node", ctorIdx);
+        AIDGE_ASSERT(inputs[idx].second == gk_IODefaultIndex, "Data input#{} connection is not free.", idx);
+        ctors[ctorIdx].node()->addChild(shared_from_this(), static_cast<std::size_t>(ctors[ctorIdx].index()),
+                              {inNode, idx});
+        ++ctorIdx;
+      }
+      else {
+        AIDGE_ASSERT(inNode->inputCategory(idx) == InputCategory::OptionalData, "Missing an input connector for non-optional Data input#{}", idx);
+      }
+    }
   }
+  AIDGE_ASSERT(ctorIdx == ctors.size(), "Too many input connectors ({}) vs available node inputs ({}).", ctors.size(), ctorIdx);
   return Connector(*(outputNodes().begin()));
 }
 
@@ -425,7 +431,7 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                 AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i) == inputI.first->getOperator()->getRawOutput(inputI.second),
                   "Input#{} for node {} ({}) is not properly connected to output#{} of node {} ({}): Data or Tensor mismatch!",
                     i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
-            } else {
+            } else if (nodePtr->inputCategory(i) != InputCategory::OptionalData && nodePtr->inputCategory(i) != InputCategory::OptionalParam) {
                 // Input is missing
                 AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i),
                   "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
@@ -590,15 +596,17 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
 
   // add learnable parameters to the graph
   if (includeLearnableParam) {
-    for (IOIndex_t i = node->nbData(); i < node->nbInputs(); ++i) {
-      std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
-      if (parentNode) {
-          parentNode->addView(shared_from_this());
-          mNodes.insert(parentNode);
-          if (!(parentNode->name()).empty())
-            mNodeRegistry.insert(std::make_pair(parentNode->name(), parentNode));
-          // check if the parentNode is an input/output node
-          updateInputsOutputsNew(parentNode);
+    for (IOIndex_t i = 0; i < node->nbInputs(); ++i) {
+      if (node->inputCategory(i) == InputCategory::Param || node->inputCategory(i) == InputCategory::OptionalParam) {
+        std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
+        if (parentNode) {
+            parentNode->addView(shared_from_this());
+            mNodes.insert(parentNode);
+            if (!(parentNode->name()).empty())
+              mNodeRegistry.insert(std::make_pair(parentNode->name(), parentNode));
+            // check if the parentNode is an input/output node
+            updateInputsOutputsNew(parentNode);
+        }
       }
     }
   }
@@ -886,29 +894,31 @@ Aidge::GraphView::getNode(const std::string& nodeName) const {
 void Aidge::GraphView::remove(std::shared_ptr<Node> nodePtr, bool includeLearnableParam) {
   // remove learnable params
   if (includeLearnableParam) {
-    for (IOIndex_t i = nodePtr->nbData(); i < nodePtr->nbInputs(); ++i) {
-      auto inputI = nodePtr->input(i);
-      if (inputI.first != nullptr) {
-        bool removeNode = true;
-        for (const auto& parentOutput : inputI.first->outputs()) {
-          for (const auto& childOfParentOutput : parentOutput) {
-            // only remove the learnable parameter if not related to any other Node in the GraphView
-            if (childOfParentOutput.first != nodePtr) {
-              removeNode = false;
-              break;
+    for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
+      if (nodePtr->inputCategory(i) == InputCategory::Param || nodePtr->inputCategory(i) == InputCategory::OptionalParam) {
+        auto inputI = nodePtr->input(i);
+        if (inputI.first != nullptr) {
+          bool removeNode = true;
+          for (const auto& parentOutput : inputI.first->outputs()) {
+            for (const auto& childOfParentOutput : parentOutput) {
+              // only remove the learnable parameter if not related to any other Node in the GraphView
+              if (childOfParentOutput.first != nodePtr) {
+                removeNode = false;
+                break;
+              }
             }
           }
-        }
-        if (removeNode) {
-          // assert Learnable Parameter in the GraphView scope
-          if (mNodes.find(inputI.first) != mNodes.end()) {
-            mNodes.erase(inputI.first);
-            inputI.first->removeView(shared_from_this());
-          }
-          if (!inputI.first->name().empty()) { mNodeRegistry.erase(inputI.first->name()); }
+          if (removeNode) {
+            // assert Learnable Parameter in the GraphView scope
+            if (mNodes.find(inputI.first) != mNodes.end()) {
+              mNodes.erase(inputI.first);
+              inputI.first->removeView(shared_from_this());
+            }
+            if (!inputI.first->name().empty()) { mNodeRegistry.erase(inputI.first->name()); }
 
-          // check if the node was an input/output node
-          updateInputsOutputsDelete(inputI.first);
+            // check if the node was an input/output node
+            updateInputsOutputsDelete(inputI.first);
+          }
         }
       }
     }
@@ -1075,7 +1085,10 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
             // Case 2
             if ((oldOIn.size() == 1) && (inputParents[0].first)) {
                 for (std::size_t i = 0; i < newOIn.size(); ++i) {
-                    inputParents[0].first -> addChild(newOIn[i].first, inputParents[0].second, newOIn[i].second);
+                    // Only re-connect the same input category
+                    if (newOIn[i].first->inputCategory(newOIn[i].second) == oldOIn[0].first->inputCategory(oldOIn[0].second)) {
+                      inputParents[0].first -> addChild(newOIn[i].first, inputParents[0].second, newOIn[i].second);
+                    }
                 }
             } else {
                 for (std::size_t i = 0; i < oldOIn.size(); ++i) {
@@ -1361,7 +1374,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
     auto clonedNode = cloneNode(node_ptr);
     if (clonedNode == nullptr) {
       AIDGE_ASSERT(node_ptr->getChildren().size() <= 1, "deleted nodes in GraphView::clone() cannot have multiple children");
-      AIDGE_ASSERT(node_ptr->nbData() <= 1, "deleted nodes in GraphView::clone() cannot have multiple data input parents");
+      AIDGE_ASSERT(node_ptr->dataInputs().size() <= 1, "deleted nodes in GraphView::clone() cannot have multiple data input parents");
     }
     oldToNewNodes[node_ptr] = clonedNode;
   }
@@ -1379,8 +1392,8 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
         while (oldToNewNodes[parent.first] == nullptr) {
           // Find next valid parent in line, going backward in the graph
           AIDGE_INTERNAL_ASSERT(parent.first->getChildren().size() == 1);
-          AIDGE_INTERNAL_ASSERT(parent.first->nbData() <= 1);
           const auto& parents = parent.first->dataInputs();
+          AIDGE_INTERNAL_ASSERT(parents.size() <= 1);
 
           if (!parents.empty() && parents[0].first != nullptr // a valid parent exists
             && oldToNewNodes.find(parents[0].first) != oldToNewNodes.end()) // parent is in the GraphView
@@ -1461,9 +1474,9 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
   for (auto it = newOutputNodes.begin(); it != newOutputNodes.end(); ) {
     // If output node was removed, find previous valid output
     while (oldToNewNodes[it->first] == nullptr) {
-      // Removed node should have only one connected data input, otherwise cloning is invalid
-      AIDGE_INTERNAL_ASSERT(it->first->nbData() <= 1);
       auto parents = it->first->dataInputs();
+      // Removed node should have only one connected data input, otherwise cloning is invalid
+      AIDGE_INTERNAL_ASSERT(parents.size() <= 1);
 
       if (!parents.empty() && parents[0].first != nullptr // a valid parent exists
         && oldToNewNodes.find(parents[0].first) != oldToNewNodes.end()) // parent is in the GraphView
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 4fad845242979de97ca1348d9dfb9e2f73714f88..50b8be13c6faac62a8b2aecf1e767e0b83024d3c 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -38,17 +38,24 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
 ///////////////////////////////////////////////////////
 
 Aidge::Connector Aidge::Node::operator()(const std::vector<Connector>& ctors) {
-    assert((ctors.size() == nbData()) && "Wrong number of arguments.\n");
-    for (std::size_t i = 0; i < nbData(); i++) {
-        assert((gk_IODefaultIndex == input(i).second) &&
-               "At least one input connection is not free.\n");
-    }
-    IOIndex_t i = 0;
-    for (const Connector& ctor : ctors) {
+    IOIndex_t idx = 0;
+    for (const auto& ctor : ctors) {
+        // Skip to next possible input idx
+        for (; idx < nbInputs() && (inputCategory(idx) != InputCategory::Data && inputCategory(idx) != InputCategory::OptionalData); ++idx) {}
+
+        AIDGE_ASSERT(idx < nbInputs(), "Too many input connectors ({}) vs available node inputs.", ctors.size());
+        AIDGE_ASSERT(input(idx).second == gk_IODefaultIndex, "Data input#{} connection is not free.", idx);
+
         if (ctor.node() != nullptr) {  // ctor must be associated with a node
-            ctor.node()->addChild(shared_from_this(), ctor.index(), i++);
+            ctor.node()->addChild(shared_from_this(), ctor.index(), idx);
         }
+        ++idx;
     }
+
+    // Skip to next possible input idx
+    for (; idx < nbInputs() && (inputCategory(idx) != InputCategory::Data && inputCategory(idx) != InputCategory::OptionalData); ++idx) {}
+    AIDGE_ASSERT(idx == nbInputs(), "Missing an input connector for Data input#{}", idx);
+
     return Connector(shared_from_this());
 }
 
@@ -109,10 +116,11 @@ Aidge::IOIndex_t Aidge::Node::getNbFreeDataInputs() const {
 
 std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> Aidge::Node::dataInputs()
         const {
-    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
-            std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbData());
-    for (std::size_t i = 0; i < static_cast<std::size_t>(nbData()); ++i) {
-        res[i] = std::pair<std::shared_ptr<Node>, IOIndex_t>(mParents[i], mIdOutParents[i]);
+    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res;
+    for (std::size_t i = 0; i < static_cast<std::size_t>(nbInputs()); ++i) {
+        if (inputCategory(i) == InputCategory::Data || inputCategory(i) == InputCategory::OptionalData) {
+            res.push_back(std::pair<std::shared_ptr<Node>, IOIndex_t>(mParents[i], mIdOutParents[i]));
+        }
     }
     return res;
 }
@@ -328,18 +336,19 @@ bool Aidge::Node::removeChild(const std::shared_ptr<Aidge::Node> nodePtr,
 
 void Aidge::Node::resetConnections(bool includeLearnableParam) {
     // remove every parents reference to it
-    IOIndex_t nbRemovedInputs = includeLearnableParam ? nbInputs() : nbData();
-    for (IOIndex_t i = 0; i < nbRemovedInputs; ++i) {
-        std::pair<std::shared_ptr<Node>, IOIndex_t> parent = input(i);
-        if (parent.first) {
-            // number of children linked to the parent's output
-            while (parent.first->removeChild(shared_from_this(), parent.second) == true) {
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (includeLearnableParam || inputCategory(i) == InputCategory::Data || inputCategory(i) == InputCategory::OptionalData) {
+            std::pair<std::shared_ptr<Node>, IOIndex_t> parent = input(i);
+            if (parent.first) {
+                // number of children linked to the parent's output
+                while (parent.first->removeChild(shared_from_this(), parent.second) == true) {
+                }
             }
+            // every reference to this object as child has been removed
+            // removing reference to parents.
+            mParents[i] = nullptr;
+            mIdOutParents[i] = gk_IODefaultIndex;
         }
-        // every reference to this object as child has been removed
-        // removing reference to parents.
-        mParents[i] = nullptr;
-        mIdOutParents[i] = gk_IODefaultIndex;
     }
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
         for (std::pair<std::shared_ptr<Node>, IOIndex_t> child : output(i)) {
diff --git a/src/graph/Testing.cpp b/src/graph/Testing.cpp
index f30ad6e25b81e1ce7768fcc201ddf00c2226eebf..774ee8912da2ddaa19583debdac063a95b5aa461 100644
--- a/src/graph/Testing.cpp
+++ b/src/graph/Testing.cpp
@@ -45,7 +45,7 @@ std::pair<Aidge::NodePtr, std::set<Aidge::NodePtr>> Aidge::RandomGraph::gen(std:
     std::vector<NodePtr> nodes(nbNodes, nullptr);
     for (auto idx : nodesSeq) {
         const std::string name = nodesType[idx] + std::to_string(idx);
-        nodes[idx] = GenericOperator(nodesType[idx], nbIOs[idx].first, 0, nbIOs[idx].second, name);
+        nodes[idx] = GenericOperator(nodesType[idx], std::vector<InputCategory>(nbIOs[idx].first, InputCategory::Data), nbIOs[idx].second, name);
     }
 
     for (std::size_t i = 0; i < nbNodes; ++i) {
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 9b77ffcbe0117292ed0aa520309febf709e8dd68..57ece07152613b831675cdecd6526d4ab26af5cb 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -33,15 +33,7 @@ Aidge::Add_Op::Add_Op(const Add_Op& op)
 }
 
 bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = (nbInputs() > 0); // do not compute anything if no input
-    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
-        }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
         for (std::size_t i = 0; i < nbInputs(); i++) {
             inputsDims[i] = getInput(i)->dims();
@@ -70,9 +62,10 @@ bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
             }
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 8d4148bb49ff6092b8ba695e727bdc0750e58cb2..53ffb93269e79c0ba940f1fb0d3d94cb494ad8ce 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -40,11 +40,7 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-    if (!(getInput(0)->empty())) {
+    if (inputsAssociated()) {
         std::array<DimSize_t, DIM + 2> outputDims;
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
         outputDims[0] = inputDims[0];
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index e5fc634ebd6db047554abed0da1af4c0c3bf1ec2..98e5c2da20fc35e18d4fd69a79cf1d87ec9d60ca 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -40,23 +40,19 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = true;
-    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         const DimSize_t nbFeatures =  getInput(0)->dims()[1];
-        for (std::size_t i = nbData(); i < nbInputs(); ++i) {
-            if(getInput(i)->size() != nbFeatures) {
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            if(inputCategory(i) == InputCategory::Param && getInput(i)->size() != nbFeatures) {
                 // /!\ Input size should be handled BEFORE calling this function
                 // This should raise an error
                 getInput(i)->resize({getInput(0)->dims()[1]});
             }
         }
         mOutputs[0]->resize(getInput(0)->dims());
+        return true;
     }
-    return associated;
+    return false;
 }
 
 template <Aidge::DimIdx_t DIM>
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index 1a24f6f74b7f652a4263a2f7b44d733f01e6742f..8df153a67d2214e4435d9fa0aac6e74d53e11b12 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -28,7 +28,7 @@ void Aidge::Cast_OpImpl::forward() {
 const std::string Aidge::Cast_Op::Type = "Cast";
 
 Aidge::Cast_Op::Cast_Op(const DataType targetType)
-    : OperatorTensor(Type, 1, 0, 1),
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
       mAttributes(std::make_shared<Attributes_>(
         attr<CastAttr::TargetType>(targetType)))
 {
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index af20b8e5ce878dcc3f1853b8de8d95972a9327f9..739e69fbab6ea27c68d19205e858b7f97cc1ecab 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -60,48 +60,31 @@ void Aidge::Concat_OpImpl::forward() {
 const std::string Aidge::Concat_Op::Type = "Concat";
 
 bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
-    // Every input is non-empty with the same number of dimensions
-    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-        }
-    }
-    const std::size_t nbDimsInput0 = getInput(0)->nbDims();
-    if (nbDimsInput0 == 0) {
-        return false;
-    }
-    AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is empty", type());
-    for (IOIndex_t i = 1; i < nbInputs(); ++i) {
-        if (getInput(i)->nbDims() == 0) {
-            return false;
-        }
-        AIDGE_ASSERT(nbDimsInput0 == getInput(i)->nbDims(), "Input 0 and input {} in {} Operator have different number of dimensions: {} / {}", i, type(), nbDimsInput0, getInput(i)->nbDims());
-    }
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(axis() < getInput(0)->nbDims(), "Concat: Axis ({}) out of range ({})",
+            axis(), getInput(0)->nbDims());
 
-    // Check validity of attributes with inputs
-    // Axis
-    std::int32_t axis = mAttributes->template getAttr<ConcatAttr::Axis>();
-    axis = (axis < 0) ? axis + static_cast<std::int32_t>(nbDimsInput0) : axis;
-    AIDGE_ASSERT(((axis >= 0) && (axis < static_cast<std::int32_t>(nbDimsInput0))),
-                "'Axis' attribute not compatible with provided inputs.")
-    const std::size_t axis_u64 = static_cast<std::size_t>(axis);
-
-    // Check validity of inputs
-    auto outputDims =  getInput(0)->dims();
-    for (IOIndex_t i = 1; i < nbInputs(); ++i) {
-        for (DimSize_t dim = 0; dim < nbDimsInput0; ++dim) {
-            if (dim == axis_u64) {
-                outputDims[axis_u64] += getInput(i)->dims()[axis_u64];
-            }
-            else {
-                AIDGE_ASSERT(getInput(i)->dims()[dim] == outputDims[dim], "Incomatible dimensions between input 0 {} and input {} {}", getInput(0)->dims(), i, getInput(i)->dims());
+        auto outputDims =  getInput(0)->dims();
+        const auto firstInputNbDims = getInput(0) -> nbDims();
+        for (IOIndex_t i = 1; i < nbInputs(); ++i) {
+            if (getInput(i)->nbDims() == firstInputNbDims) {
+                for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
+                    if (dim == axis()) {
+                        outputDims[dim] += getInput(i)->dims()[dim];
+                    }
+                    else {
+                        AIDGE_ASSERT(getInput(i)->dims()[dim] == outputDims[dim], "Concat: input #{} dim #{} ({}) must match value {}",
+                            i, dim, getInput(i)->dims()[dim], outputDims[dim]);
+                    }
+                }
             }
         }
-    }
 
-    getOutput(0)->resize(outputDims);
+        getOutput(0)->resize(outputDims);
+        return true;
+    }
 
-    return true;
+    return false;
 }
 
 void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 9ca30b65c4e4d729d36bb790d692159ae4936b28..a33af78779971e77da4f4e910b89b9263a1af5d6 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -40,15 +40,7 @@ Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = true;
-    for (IOIndex_t i = 0; i < 3; ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-        }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         // first check weight since it defines inChannels and outChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
                     "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
@@ -57,7 +49,7 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
                     (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
                     "Wrong input size for Conv operator.");
         // check optional bias
-        if(!mAttributes->template getAttr<ConvAttr::NoBias>())
+        if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == outChannels()),
                     "Wrong bias size for Conv operator.");
@@ -77,9 +69,10 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
         outputDims[1] = outChannels();
         outputDims[0] = inputDims[0];
         mOutputs[0]->resize(outputDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 
@@ -135,7 +128,7 @@ Aidge::Conv_Op<DIM>::computeReceptiveField(
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
 
         // Bias
-        if (!mAttributes->template getAttr<ConvAttr::NoBias>()){
+        if (getInput(2)){
             const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
             const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
             res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
@@ -164,4 +157,5 @@ void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
     }
 }
 
+template class Aidge::Conv_Op<1>;
 template class Aidge::Conv_Op<2>;
\ No newline at end of file
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index 4f19b87e4dcff092d8e40f4ea5271febdd5ac6d8..342fd86195d5c2e85a63d990c4ebbb75e7f50a6b 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -41,16 +41,7 @@ Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    // TODO : add a check of inputs dimensions ?
-    bool associated = true;
-    for (IOIndex_t i = 0; i < 3; ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-        }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         // first check weight since it defines nbChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
                     "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
@@ -59,7 +50,7 @@ bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
                     (getInput(0)->template dims<DIM+2>()[1] == nbChannels()),
                     "Wrong input size for Conv operator.");
         // check optional bias
-        if(!mAttributes->template getAttr<ConvDepthWiseAttr::NoBias>())
+        if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == nbChannels()),
                     "Wrong bias size for Conv operator.");
@@ -79,9 +70,10 @@ bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
         outputDims[1] = inputDims[1];
         outputDims[0] = inputDims[0];
         mOutputs[0]->resize(outputDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 
@@ -135,7 +127,7 @@ Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
         // Bias
-        if (! mAttributes->template getAttr<ConvDepthWiseAttr::NoBias>()){
+        if (getInput(2)){
             const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
             const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
             res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
@@ -164,4 +156,5 @@ void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::De
     }
 }
 
+template class Aidge::ConvDepthWise_Op<1>;
 template class Aidge::ConvDepthWise_Op<2>;
\ No newline at end of file
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index e6300d08c2c792c8a3eb66b307aca53f9d2acc73..387a9516077a937cca5c20ad091547b7f1c5be6f 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -23,13 +23,7 @@
 const std::string Aidge::Div_Op::Type = "Div";
 
 bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 2a044c607cc8634d24b66af6ae7fa10acfe6e1c7..44d499bc7e125c757f802e086c22e1e6c72e9216 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -37,14 +37,7 @@ void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::sh
 }
 
 bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
-    bool associated = true;
-    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-        }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         // first check weight since it defines inChannels and outChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == 2),
                     "Wrong weight Tensor dimension: {} for FC operator (should have 2 dimensions).", getInput(1)->nbDims());
@@ -64,15 +57,16 @@ bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
                     nbInputFeatures, inChannels);
         }
         // check optional bias
-        if(!mAttributes->template getAttr<FCAttr::NoBias>())
+        if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == 1) &&
                     (getInput(2)->template dims<1>()[0] == outChannels),
                     "Wrong bias size for FC operator.");
         // <batch, OutChannels>
         mOutputs[0]->resize({getInput(0)->dims()[0], outChannels});
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 4b7a6232cad7c1b9d9af1bd1bc710871d107a746..c28a0587a755ef0a910ec5bfdeb9caa2f1edc216 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -60,51 +60,48 @@ bool Aidge::Gather_Op::dimsForwarded() const {
 }
 
 bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
-    // check data input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute Indices
+        if (getInput(1)) {
+            if (!this->indices().empty()) {
+                Log::notice("Gather_Op: ignoring non-empty Indices attribute because input#1 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("Gather_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
+
+            std::shared_ptr<Tensor> fallback;
+            this->gatheredShape() = getInput(1)->dims();
+            this->indices().clear(); // If both are provided input would override attrs
+            this->indices().reserve(getInput(1)->size());
+            const auto& indices = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(indices.getImpl()->hostPtr()),
+                        indices.size(),
+                        std::back_inserter(this->indices()));
+        }
 
-    if (getInput(0)->empty()) {
-        return false;
-    }
+        AIDGE_ASSERT(!this->indices().empty(), "Missing input#1 or Indices attribute");
 
-    if (getInput(1) && !getInput(1)->empty()) {
-        if (!mAttributes->template getAttr<GatherAttr::Indices>().empty()) {
-            Log::notice("Gather_Op: ignoring non-empty Indices attribute because input#1 takes precedence");
-        }
+        // Compute output dims
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
 
-        if (!allowDataDependency) {
-            Log::warn("Gather_Op: unable to forwardDims() because output dims are data dependent on input#1");
-            return false;
+        std::int8_t axisIdx = this->axis()>=0?
+                                this->axis():
+                                this->axis()+outDims.size();
+        outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
+        if( !this->gatheredShape().empty())
+        {
+            outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx),
+                            this->gatheredShape().begin(),
+                            this->gatheredShape().end());
         }
-
-        std::shared_ptr<Tensor> fallback;
-        mAttributes->template getAttr<GatherAttr::GatheredShape>() = getInput(1)->dims();
-        mAttributes->template getAttr<GatherAttr::Indices>().clear(); // If both are provided input would override attrs
-        mAttributes->template getAttr<GatherAttr::Indices>().reserve(getInput(1)->size());
-        const auto& indices = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(indices.getImpl()->hostPtr()),
-                    indices.size(),
-                    std::back_inserter(mAttributes->template getAttr<GatherAttr::Indices>()));
+        mOutputs[0]->resize(outDims);
+        return true;
     }
 
-    AIDGE_ASSERT(!mAttributes->template getAttr<GatherAttr::Indices>().empty(), "Missing input#1 or Indices attribute");
-
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
-
-    std::int8_t axisIdx = mAttributes->template getAttr<GatherAttr::Axis>()>=0?
-                            mAttributes->template getAttr<GatherAttr::Axis>():
-                            mAttributes->template getAttr<GatherAttr::Axis>()+outDims.size();
-    outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
-    if( !mAttributes->template getAttr<GatherAttr::GatheredShape>().empty())
-    {
-        outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx),
-                        mAttributes->template getAttr<GatherAttr::GatheredShape>().begin(),
-                        mAttributes->template getAttr<GatherAttr::GatheredShape>().end());
-    }
-    mOutputs[0]->resize(outDims);
-    return true;
+    return false;
 }
 
 void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index a770e1602b7fc33fc47a65c51c2dcf05d5840ba4..d49e1f0838f623bca1546e54ea4f4e470d70e1c5 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -26,9 +26,10 @@ const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Inpu
 }
 
 bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
-    if (mForwardDims) {
+    if (mForwardDims && inputsAssociated(false)) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs(), std::vector<std::size_t>());
         for (std::size_t i = 0; i < nbInputs(); ++i) {
+            // Check for input, as it may be optional
             if (getInput(i)) {
                 inputsDims[i] = getInput(i)->dims();
             }
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index b09426f8f835eda5600b630488ef18c5b08ba32a..1632c8a7677c884194494269e1a8cd93e7ef7822 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -22,26 +22,20 @@
 const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling";
 
 bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
-  // error checking
-  if (!getInput(0)) {
-    AIDGE_THROW_OR_ABORT(std::runtime_error,
-                         "GlobalAveragePooling : The input was not connected");
-  }
-  else if (!getInput(0)->empty()) {
-    AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
-                 "GlobalAveragePooling :  needs at least a 3 dimensions input, "
-                 "number of input dim : {}",
-                 getInput(0)->dims().size());
-    // Global average pooling takes each filter, averages its values and uses
-    // it as an output(Much like a fancier flatten). 1st dim is batch 2nd is
-    // number of filter
-    const std::vector<DimSize_t> out_dims{getInput(0)->dims().at(0),
-                                          getInput(0)->dims().at(1)};
-    mOutputs[0]->resize(out_dims);
-    return true;
-  }
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
+                    "GlobalAveragePooling :  needs at least a 3 dimensions input, "
+                    "number of input dim : {}",
+                    getInput(0)->dims().size());
+        // Global average pooling takes each filter, averages its values and uses
+        // it as an output(Much like a fancier flatten). 1st dim is batch 2nd is
+        // number of filter
+        mOutputs[0]->resize({getInput(0)->dims().at(0),
+                             getInput(0)->dims().at(1)});
+        return true;
+    }
 
-  return false;
+    return false;
 }
 
 void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 8f7548155cde4c7187f7a7fe96a44c4accd2c302..17b4960dfdfc9de199cc25b0119a5cb000bcf48c 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -21,58 +21,57 @@
 const std::string Aidge::MatMul_Op::Type = "MatMul";
 
 bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input. Cannot compute output dimensions for MatMul Operator.");
-    }
-    if (getInput(0)->empty() && getInput(1)->empty()) {
-        // both inputs are scalar
-        mOutputs[0]->resize({});
-        return true;
-    }
-    else if (!getInput(0)->empty() && !getInput(1)->empty())
-    {
-        std::vector<std::size_t> dims0 = getInput(0)->dims();
-        std::vector<std::size_t> dims1 = getInput(1)->dims();
+    if (inputsAssociated(false)) {
+        if (getInput(0)->empty() && getInput(1)->empty()) {
+            // both inputs are scalar
+            mOutputs[0]->resize({});
+            return true;
+        }
+        else if (!getInput(0)->empty() && !getInput(1)->empty())
+        {
+            std::vector<std::size_t> dims0 = getInput(0)->dims();
+            std::vector<std::size_t> dims1 = getInput(1)->dims();
 
-        // keep second-to-last dimension of dims0
-        const bool keepDim0 = dims0.size() > 1;
-        // keep last dimension of dims1
-        const bool keepDim1 = dims1.size() > 1;
+            // keep second-to-last dimension of dims0
+            const bool keepDim0 = dims0.size() > 1;
+            // keep last dimension of dims1
+            const bool keepDim1 = dims1.size() > 1;
 
-        if (dims0.size() == 1) {
-            dims0.insert(dims0.cbegin(), 1);
-        }
-        if (dims1.size() == 1) {
-            dims1.push_back(1);
-        }
-        const std::size_t dims_size = std::max(dims0.size(), dims1.size());
+            if (dims0.size() == 1) {
+                dims0.insert(dims0.cbegin(), 1);
+            }
+            if (dims1.size() == 1) {
+                dims1.push_back(1);
+            }
+            const std::size_t dims_size = std::max(dims0.size(), dims1.size());
 
 
-        if (dims0.size() > dims1.size()) {
-            dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
-        }
-        else if (dims1.size() > dims0.size()) {
-            dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
-        }
+            if (dims0.size() > dims1.size()) {
+                dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
+            }
+            else if (dims1.size() > dims0.size()) {
+                dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
+            }
 
-        AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes.");
+            AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes.");
 
-        std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
-        for (std::size_t i = 0; i < dims_size-2; ++i) {
-            AIDGE_ASSERT((dims0[i] == dims1[i]) || (dims0[i] == 1) || (dims1[i] == 1), "Bad vector dimension.");
-            outDims[i] = std::max(dims0[i], dims1[i]);
-        }
+            std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
+            for (std::size_t i = 0; i < dims_size-2; ++i) {
+                AIDGE_ASSERT((dims0[i] == dims1[i]) || (dims0[i] == 1) || (dims1[i] == 1), "Bad vector dimension.");
+                outDims[i] = std::max(dims0[i], dims1[i]);
+            }
 
-        // use keepDim0 instead of dims0.size() because dims0 has been modified
-        if (keepDim0)
-            outDims.push_back(dims0[dims_size-2]);
-        if (keepDim1)
-            outDims.push_back(dims1[dims_size-1]);
+            // use keepDim0 instead of dims0.size() because dims0 has been modified
+            if (keepDim0)
+                outDims.push_back(dims0[dims_size-2]);
+            if (keepDim1)
+                outDims.push_back(dims1[dims_size-1]);
 
-        mOutputs[0]->resize(outDims);
-        return true;
+            mOutputs[0]->resize(outDims);
+            return true;
+        }
     }
-    
+
     return false;
 }
 
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index a055da29a2c2e603d28e64adb0e222fe569fbcf9..adf79b5c69e991ad7979184c313448e4288a8ecb 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -82,23 +82,19 @@ void Aidge::Memorize_Op::updateConsummerProducer() {
 }
 
 bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) {
-    for (size_t i = 0; i < 2; ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+    if (inputsAssociated(false)) {
+        // Only require one of the input to have dims defined
+        // Otherwise, forwardDims() won't converge!
+        if (!(getInput(0)->empty())) {
+            const auto expectedDims =  getInput(0)->dims();
+            mOutputs[0]->resize(expectedDims);
+            return true;
+        }
+        else if (!(getInput(1)->empty())) {
+            const auto expectedDims =  getInput(1)->dims();
+            mOutputs[0]->resize(expectedDims);
+            return true;
         }
-    }
-
-    // Only require one of the input to have dims defined
-    // Otherwise, forwardDims() won't converge!
-    if (!(getInput(0)->empty())) {
-        const auto expectedDims =  getInput(0)->dims();
-        mOutputs[0]->resize(expectedDims);
-        return true;
-    }
-    else if (!(getInput(1)->empty())) {
-        const auto expectedDims =  getInput(1)->dims();
-        mOutputs[0]->resize(expectedDims);
-        return true;
     }
 
     return false;
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 1397b69b9c126c0e2d0ec84bf900a320b95f0d80..7362f67fcce97cc6861edd2b334758801d060ade 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -20,15 +20,16 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph)
-    : OperatorTensor(type, graph->dataInputs().size(), (graph->getOrderedInputs().size() - graph->dataInputs().size()), graph->getOrderedOutputs().size()),
+    : OperatorTensor(type, [graph]() {
+        std::vector<InputCategory> inputsCategory;
+        for (const auto& in : graph->getOrderedInputs()) {
+            inputsCategory.push_back(in.first->getOperator()->inputCategory(in.second));
+        }
+        return inputsCategory;
+    }(), graph->getOrderedOutputs().size()),
         mGraph(graph)
 {
-    mInputs = std::vector<std::shared_ptr<Tensor>>(mGraph->getOrderedInputs().size());
-    for (std::size_t i = 0; i < mInputs.size(); ++i) {
-        mInputs[i] = std::make_shared<Tensor>();
-    }
     // Associate outputs to micro-graph outputs for custom implementation
-    mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->getOrderedOutputs().size());
     for (size_t outputIdx = 0; outputIdx < mOutputs.size(); ++outputIdx) {
         const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
         if (outputOp.first) {
diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp
index cd993f9e5cd127a005101284b78c416150b3c99a..910e7c67aad0068679ca2d240b23312add3e42d7 100644
--- a/src/operator/MetaOperatorDefs/LSTM.cpp
+++ b/src/operator/MetaOperatorDefs/LSTM.cpp
@@ -38,9 +38,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     auto add = Add(2, (!name.empty()) ? name + "_add" : "");
 
     // Forget gate
-    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_forgetGateX" : "");
+    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateX" : "");
     input->addChild(forgetGateX, 0, 0);
-    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_forgetGateH" : "");
+    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateH" : "");
     hiddenState->addChild(forgetGateH, 1, 0);
     auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : "");
     forgetGateX->addChild(forgetGate, 0, 0);
@@ -53,9 +53,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     cellState->addChild(forgetGateMul, 1, 1);
 
     // Input gate
-    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_inputGateX" : "");
+    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_inputGateX" : "");
     input->addChild(inputGateX, 0, 0);
-    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_inputGateH" : "");
+    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_inputGateH" : "");
     hiddenState->addChild(inputGateH, 1, 0);
     auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : "");
     inputGateX->addChild(inputGate, 0, 0);
@@ -67,9 +67,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     inputGateMul->addChild(add, 0, 1);
 
     // Candidate for cell update
-    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_cellCandidateX" : "");
+    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateX" : "");
     input->addChild(cellCandidateX, 0, 0);
-    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_cellCandidateH" : "");
+    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateH" : "");
     hiddenState->addChild(cellCandidateH, 1, 0);
     auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : "");
     cellCandidateX->addChild(cellCandidate, 0, 0);
@@ -79,9 +79,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     cellCandidateAct->addChild(inputGateMul, 0, 1);
 
     // Output gate
-    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_outputGateX" : "");
+    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateX" : "");
     input->addChild(outputGateX, 0, 0);
-    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_outputGateH" : "");
+    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateH" : "");
     hiddenState->addChild(outputGateH, 1, 0);
     auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : "");
     outputGateX->addChild(outputGate, 0, 0);
@@ -124,19 +124,20 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     addProducer(metaOp, 6, {hiddenChannel, hiddenChannel}, "ro");
     addProducer(metaOp, 7, {hiddenChannel, hiddenChannel}, "rf");
     addProducer(metaOp, 8, {hiddenChannel, hiddenChannel}, "rc");
-    addProducer(metaOp, 9, {(noBias ? 0 : hiddenChannel)}, "wbi");
-    addProducer(metaOp, 10, {(noBias ? 0 : hiddenChannel)}, "wbo");
-    addProducer(metaOp, 11, {(noBias ? 0 : hiddenChannel)}, "wbf");
-    addProducer(metaOp, 12, {(noBias ? 0 : hiddenChannel)}, "wbc");
-    addProducer(metaOp, 13, {(noBias ? 0 : hiddenChannel)}, "rbi");
-    addProducer(metaOp, 14, {(noBias ? 0 : hiddenChannel)}, "rbo");
-    addProducer(metaOp, 15, {(noBias ? 0 : hiddenChannel)}, "rbf");
-    addProducer(metaOp, 16, {(noBias ? 0 : hiddenChannel)}, "rbc");
+    if (!noBias) {
+        addProducer(metaOp, 9, {hiddenChannel}, "wbi");
+        addProducer(metaOp, 10, {hiddenChannel}, "wbo");
+        addProducer(metaOp, 11, {hiddenChannel}, "wbf");
+        addProducer(metaOp, 12, {hiddenChannel}, "wbc");
+        addProducer(metaOp, 13, {hiddenChannel}, "rbi");
+        addProducer(metaOp, 14, {hiddenChannel}, "rbo");
+        addProducer(metaOp, 15, {hiddenChannel}, "rbf");
+        addProducer(metaOp, 16, {hiddenChannel}, "rbc");
+    }
     return metaOp;
 }
 
-std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
-                                         bool noBias)
+std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
 {
     // Construct micro-graph
     auto input = Identity("");
@@ -145,9 +146,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     auto add = Add(2, "");
 
     // Forget gate
-    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(forgetGateX, 0, 0);
-    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(forgetGateH, 1, 0);
     auto forgetGate = Add(2, "");
     forgetGateX->addChild(forgetGate, 0, 0);
@@ -160,9 +161,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     cellState->addChild(forgetGateMul, 1, 1);
 
     // Input gate
-    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(inputGateX, 0, 0);
-    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(inputGateH, 1, 0);
     auto inputGate = Add(2, "");
     inputGateX->addChild(inputGate, 0, 0);
@@ -174,9 +175,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     inputGateMul->addChild(add, 0, 1);
 
     // Candidate for cell update
-    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(cellCandidateX, 0, 0);
-    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(cellCandidateH, 1, 0);
     auto cellCandidate = Add(2, "");
     cellCandidateX->addChild(cellCandidate, 0, 0);
@@ -186,9 +187,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     cellCandidateAct->addChild(inputGateMul, 0, 1);
 
     // Output gate
-    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(outputGateX, 0, 0);
-    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(outputGateH, 1, 0);
     auto outputGate = Add(2,"");
     outputGateX->addChild(outputGate, 0, 0);
diff --git a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
index ad300cd4f98b84d5ac5834370db53017958efaf6..ef319ef38ad18de9eaed0a1d4a92c3877ee7cf8e 100644
--- a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
@@ -46,8 +46,6 @@ std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_
 
 template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
 template std::shared_ptr<Node> PaddedAvgPooling<2>(const std::array<DimSize_t,2>&, const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
-template std::shared_ptr<Node> PaddedAvgPooling<3>(const std::array<DimSize_t,3>&, const std::string&, const std::array<DimSize_t,3>&, const std::array<DimSize_t,6>&);
-template std::shared_ptr<Node> PaddedAvgPooling<4>(const std::array<DimSize_t,4>&, const std::string&, const std::array<DimSize_t,4>&, const std::array<DimSize_t,8>&);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
@@ -61,8 +59,6 @@ std::shared_ptr<Node> PaddedAvgPooling(const DimSize_t (&kernel_dims)[DIM],
 
 template std::shared_ptr<Node> PaddedAvgPooling<1>(const DimSize_t (&kernel_dims)[1], const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
 template std::shared_ptr<Node> PaddedAvgPooling<2>(const DimSize_t (&kernel_dims)[2], const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
-template std::shared_ptr<Node> PaddedAvgPooling<3>(const DimSize_t (&kernel_dims)[3], const std::string&, const std::array<DimSize_t,3>&, const std::array<DimSize_t,6>&);
-template std::shared_ptr<Node> PaddedAvgPooling<4>(const DimSize_t (&kernel_dims)[4], const std::string&, const std::array<DimSize_t,4>&, const std::array<DimSize_t,8>&);
 
 
 //////////////////////////////////
@@ -84,8 +80,5 @@ inline std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<Dim
 
 template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
 template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<2>(const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
-template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<3>(const std::array<DimSize_t,3>&, const std::array<DimSize_t,3>&, const std::array<DimSize_t,6>&);
-template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<4>(const std::array<DimSize_t,4>&, const std::array<DimSize_t,4>&, const std::array<DimSize_t,8>&);
-
 
 } // namespace Aidge
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index 426de388f31391fb5e59446d50e50de94ca5f8a1..ded67a11acd299e5407f0d7e74146f5bcd1bf86a 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -24,13 +24,7 @@
 const std::string Aidge::Mul_Op::Type = "Mul";
 
 bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 5da4503111bed78384438d35818dc5d2b7d5dfb7..5df90020a43ad6cffebcd2345c075837f11462b1 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -19,11 +19,10 @@
 
 
 Aidge::OperatorTensor::OperatorTensor(const std::string& type,
-                                                            const IOIndex_t nbData,
-                                                            const IOIndex_t nbParam,
+                                      const std::vector<InputCategory>& inputsCategory,
                                                             const IOIndex_t nbOut)
-: Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor),
-        mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)),
+: Operator(type, inputsCategory, nbOut, OperatorType::Tensor),
+        mInputs(std::vector<std::shared_ptr<Tensor>>(inputsCategory.size(), nullptr)),
         mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) {
     for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) {
         mOutputs[i] = std::make_shared<Tensor>();
@@ -102,9 +101,6 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
     if (outputIdx >= nbOutputs()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range.");
     }
-    if (nbInputs() != nbData()) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overrided function.");
-    }
     if (!dimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
     }
@@ -114,19 +110,28 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
         }
     }
     // return the same Tensor description as given in function parameter for each data input
-    return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims));
+    return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbInputs(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims));
 }
 
-bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = (nbInputs() > 0); // do not compute anything if no input
+bool Aidge::OperatorTensor::inputsAssociated(bool checkNonEmpty) const {
+    bool associated = true;
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        if (inputCategory(i) != InputCategory::OptionalData && inputCategory(i) != InputCategory::OptionalParam) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+            }
+        }
+
+        if (checkNonEmpty && getInput(i)) {
+            associated &= !(getInput(i)->empty());
         }
-        associated &= !(getInput(i)->empty());
     }
-    if (associated) {
+
+    return associated;
+}
+
+bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
         const auto expectedDims =  getInput(0)->dims();
         for (std::size_t i = 1; i < nbInputs(); ++i) {
             if (expectedDims != getInput(i)->dims()) {
@@ -136,16 +141,19 @@ bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
             }
         }
         mOutputs[0]->resize(expectedDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 bool Aidge::OperatorTensor::dimsForwarded() const {
     bool forwarded = true;
     // check both inputs and outputs have been filled
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
+        if (inputCategory(i) != InputCategory::OptionalData && inputCategory(i) != InputCategory::OptionalParam) {
+            forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
+        }
     }
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
         // If getOutput(i) is nullptr, ignore this output (it may be a dummy
@@ -161,11 +169,14 @@ void Aidge::OperatorTensor::setDataType(const DataType& dataType) const {
     }
 
     // Set data type for parameters inputs only (weights, bias...), which are usually Producers
-    // TODO: Fix -> if there is no parameter input connected (e.g optional bias) then this function will fail.
-    // This behaviour should be decided in its own dedicated issue.
-    for (IOIndex_t i = nbData(); i < nbInputs(); ++i) {
-        if (getInput(i))
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (inputCategory(i) == InputCategory::Param) {
+            AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
+            getInput(i)->setDataType(dataType);
+        }
+        else if (inputCategory(i) == InputCategory::OptionalParam && getInput(i) != nullptr) {
             getInput(i)->setDataType(dataType);
+        }
     }
 }
 
@@ -175,9 +186,14 @@ void Aidge::OperatorTensor::setDataFormat(const DataFormat& dataFormat) const {
     }
 
     // Set data format for parameters inputs only (weights, bias...), which are usually Producers
-    for (IOIndex_t i = nbData(); i < nbInputs(); ++i) {
-        AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
-        getInput(i)->setDataFormat(dataFormat);
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (inputCategory(i) == InputCategory::Param) {
+            AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
+            getInput(i)->setDataFormat(dataFormat);
+        }
+        else if (inputCategory(i) == InputCategory::OptionalParam && getInput(i) != nullptr) {
+            getInput(i)->setDataFormat(dataFormat);
+        }
     }
 }
 
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c66e6c84af6df299e4786bbbb73767d6ee6374f5
--- /dev/null
+++ b/src/operator/Pad.cpp
@@ -0,0 +1,19 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::Pad_Op<DIM>::Type = "Pad";  // type identifier shared by all Pad_Op<DIM> specializations
+
+template class Aidge::Pad_Op<1>;  // explicit instantiation: 1-D padding
+template class Aidge::Pad_Op<2>;  // explicit instantiation: 2-D padding
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index b4c4c7df82306240fcde6e0ae33271eca1420b4a..2fcc46a460ffd7c7f6746dfcd108acbaafe912de 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -38,11 +38,7 @@ void Aidge::Pop_OpImpl::forward() {
 const std::string Aidge::Pop_Op::Type = "Pop";
 
 bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-    if (!(getInput(0)->empty())) {
+    if (inputsAssociated()) {
         auto inputDims = getInput(0)->dims();
         inputDims.erase(inputDims.begin());
         getOutput(0)->resize(inputDims);
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 135c792345b0caf1166e671a8dad7d5b49b42ee7..2a50f9c7bad1e40cd6e69cfc0a22632439cfe000 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -23,13 +23,7 @@
 const std::string Aidge::Pow_Op::Type = "Pow";
 
 bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 2ac10993736fb679785a5bcb9b03189297e4feff..bdb69452ec54fb635d0cbc299336071295f37ae1 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -28,7 +28,7 @@ const std::string Aidge::Producer_Op::Type = "Producer";
 
 
 Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant)
-    : OperatorTensor(Type, 0, 0, 1),
+    : OperatorTensor(Type, {}, 1),
       mAttributes(std::make_shared<Attributes_>(
         attr<ProdAttr::Constant>(constant)))
 {
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index f8a8243b6d00bf14f8a48b1b897ec9d15dd2c9f7..96f2f855f46275e167acb1300434f8bcdbdd7d3e 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -27,10 +27,7 @@
 const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 
 bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
-    }
-    if (!getInput(0)->empty()) {
+    if (inputsAssociated()) {
         // make Axes attribute positive
         std::vector<std::int32_t>& axes = mAttributes->template getAttr<ReduceMeanAttr::Axes>();
         std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index cfdec080353b95e844fe38ecabadce8e6c290ec4..1838c008a6b83548b6a5a80af0363e2cf239b649 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -40,68 +40,65 @@ bool Aidge::Reshape_Op::dimsForwarded() const {
 }
 
 bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
-    // check input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-
-    if (getInput(0)->empty()) {
-        return false;
-    }
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute Shape
+        if (getInput(1)) {
+            if (!this->shape().empty()) {
+                Log::notice("Reshape_Op: ignoring non-empty Shape attribute because input#1 takes precedence");
+            }
 
-    if (getInput(1) && !getInput(1)->empty()) {
-        if (!mAttributes->template getAttr<ReshapeAttr::Shape>().empty()) {
-            Log::notice("Reshape_Op: ignoring non-empty Shape attribute because input#1 takes precedence");
-        }
+            if (!allowDataDependency) {
+                Log::warn("Reshape_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
 
-        if (!allowDataDependency) {
-            Log::warn("Reshape_Op: unable to forwardDims() because output dims are data dependent on input#1");
-            return false;
+            std::shared_ptr<Tensor> fallback;
+            this->shape().clear(); // If both are provided input would override attrs
+            this->shape().reserve(getInput(1)->size());
+            const auto& shape = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(shape.getImpl()->hostPtr()),
+                        shape.size(),
+                        std::back_inserter(this->shape()));
         }
 
-        std::shared_ptr<Tensor> fallback;
-        mAttributes->template getAttr<ReshapeAttr::Shape>().clear(); // If both are provided input would override attrs
-        mAttributes->template getAttr<ReshapeAttr::Shape>().reserve(getInput(1)->size());
-        const auto& shape = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(shape.getImpl()->hostPtr()),
-                    shape.size(),
-                    std::back_inserter(mAttributes->template getAttr<ReshapeAttr::Shape>()));
-    }
+        AIDGE_ASSERT(!this->shape().empty(), "Missing input#1 or Shape attribute");
 
-    AIDGE_ASSERT(!mAttributes->template getAttr<ReshapeAttr::Shape>().empty(), "Missing input#1 or Shape attribute");
-
-    std::vector<DimSize_t> outDims;
-    // variables to handle a negative dimension
-    bool foundNegativeDimension = false;
-    std::size_t outSize = 1;
-    DimIdx_t negativeIndex = 0;
-    for(std::size_t i = 0; i < mAttributes->template getAttr<ReshapeAttr::Shape>().size(); ++i)
-    {
-        int64_t dimSize = mAttributes->template getAttr<ReshapeAttr::Shape>()[i];
-        if (dimSize < 0) {
-            if (foundNegativeDimension) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
-            }
-            foundNegativeDimension = true;
-            dimSize = 1;
-            negativeIndex = static_cast<DimIdx_t>(i);
-        }
-        else if (dimSize == 0 && !mAttributes->template getAttr<ReshapeAttr::AllowZero>())
+        // Compute output dims
+        std::vector<DimSize_t> outDims;
+        // variables to handle a negative dimension
+        bool foundNegativeDimension = false;
+        std::size_t outSize = 1;
+        DimIdx_t negativeIndex = 0;
+        for(std::size_t i = 0; i < this->shape().size(); ++i)
         {
-            dimSize = getInput(0) -> dims()[i];
+            int64_t dimSize = this->shape()[i];
+            if (dimSize < 0) {
+                if (foundNegativeDimension) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
+                }
+                foundNegativeDimension = true;
+                dimSize = 1;
+                negativeIndex = static_cast<DimIdx_t>(i);
+            }
+            else if (dimSize == 0 && !mAttributes->template getAttr<ReshapeAttr::AllowZero>())
+            {
+                dimSize = getInput(0) -> dims()[i];
+            }
+            outDims.push_back(static_cast<DimSize_t>(dimSize));
+            if (dimSize != 0) {
+                outSize *= static_cast<DimSize_t>(dimSize);
+            }
         }
-        outDims.push_back(static_cast<DimSize_t>(dimSize));
-        if (dimSize != 0) {
-            outSize *= static_cast<DimSize_t>(dimSize);
+
+        if (foundNegativeDimension) {
+            outDims[negativeIndex] = (getInput(0) -> size()) / outSize;
         }
-    }
 
-    if (foundNegativeDimension) {
-        outDims[negativeIndex] = (getInput(0) -> size()) / outSize;
+        mOutputs[0]->resize(outDims);
+        return true;
     }
 
-    mOutputs[0]->resize(outDims);
-    return true;
+    return false;
 }
 
 void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..966e1c3e032e64e75d3606fca022b84f9da8fbaf
--- /dev/null
+++ b/src/operator/Resize.cpp
@@ -0,0 +1,123 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Resize.hpp"
+
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int64_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+#include <fmt/core.h>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Resize_Op::Type = "Resize";
+
+bool Aidge::Resize_Op::dimsForwarded() const {
+    // Output dims are data dependent if any of the ROI/Scales/Sizes inputs is filled
+    if ((getInput(1) && !getInput(1)->empty())
+        || (getInput(2) && !getInput(2)->empty())
+        || (getInput(3) && !getInput(3)->empty())
+        )
+    {
+        // output dims are data dependent
+        return false;
+    }
+
+    return OperatorTensor::dimsForwarded();
+}
+
+bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(getInput(0)->nbDims() == 4,
+            "input tensor must have dimensions = 4 (batch, channel, height, width).");
+
+        const bool input1ROIPresent           = getInput(1) && !getInput(1)->empty();
+        const bool input2ScalesPresent        = getInput(2) && !getInput(2)->empty();
+        const bool input3SizesPresent         = getInput(3) && !getInput(3)->empty();
+
+        AIDGE_ASSERT(input2ScalesPresent != input3SizesPresent, "Only one of scales and sizes can be specified.");
+
+        if (input1ROIPresent) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Input #1 (ROI) is given and it is not supported.");
+        }
+        else if (input2ScalesPresent) {
+            if (!allowDataDependency) {
+                Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #2");
+                return false;
+            }
+
+            AIDGE_ASSERT(getInput(0)->nbDims() ==  getInput(2)->size(),
+                "input #0 and input #2 (Scales) must have the same dimensions.");
+
+            std::vector<DimSize_t>      outDims = getInput(0)->dims();
+            const std::vector<DimSize_t> inDims = getInput(0)->dims();
+
+            std::shared_ptr<Tensor> fallback;
+            // Scales are fractional factors (ONNX Resize: tensor(float)); reading them
+            // back as int64 would truncate e.g. 0.5 to 0, so cast to float instead.
+            const auto& scales = getInput(2)->refCastFrom(fallback, NativeType<float>::type, "cpu");
+
+            for (std::size_t dim=0; dim < getInput(2)->size(); ++dim) {
+                outDims[dim] = static_cast<DimSize_t>(inDims[dim] * static_cast<float*>(scales.getImpl()->hostPtr())[dim]);
+            }
+
+            mOutputs[0]->resize(outDims);
+            return true;
+        }
+        else if (input3SizesPresent) {
+            if (!allowDataDependency) {
+                Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #3");
+                return false;
+            }
+
+            AIDGE_ASSERT(getInput(0)->nbDims() ==  getInput(3)->size(),
+                "input #0 and input #3 (Sizes) must have the same dimensions.");
+
+            std::vector<DimSize_t> outDims = getInput(0)->dims();
+
+            std::shared_ptr<Tensor> fallback;
+            const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+
+            for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
+                outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
+            }
+
+            mOutputs[0]->resize(outDims);
+            return true;
+        }
+        else {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Error: Either Input #2 or Input #3 must be present.");
+        }
+    }
+
+    return false;
+}
+
+void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Resize_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+
+    // By default, automatically set backend for all inputs: roi, scales and sizes
+    if(getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    if(getInput(2)) {
+        getInput(2)->setBackend(name, device);
+    }
+    if(getInput(3)) {
+        getInput(3)->setBackend(name, device);
+    }
+}
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index d11cf39e1cd301d49f21863dcb1f250e96c6e502..8166712e1e5fd967bb9328e95ecf8c5388636ba7 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -21,10 +21,10 @@
 
 void Aidge::Shape_OpImpl::forward() {
     const Shape_Op& op = dynamic_cast<const Shape_Op&>(mOp);
-    const auto start = op.template getAttr<std::int64_t>("Start");
-    const auto end = op.template getAttr<std::int64_t>("End");
+    const auto start = op.start();
+    const auto end = op.end();
 
-    op.getOutput(0)->getImpl()->copyCast(std::next(op.getInput(0)->dims().data(), 
+    op.getOutput(0)->getImpl()->copyCast(std::next(op.getInput(0)->dims().data(),
                                                    start),
                                          DataType::UInt64,
                                          end - start + 1);
@@ -33,30 +33,25 @@
 const std::string Aidge::Shape_Op::Type = "Shape";
 
 bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check data input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-
-    if (getInput(0)->empty()) {
-        return false;
-    }
+    if (inputsAssociated()) {
+        if (mAttributes->template getAttr<std::int64_t>("Start") < 0)
+            mAttributes->template getAttr<std::int64_t>("Start") += static_cast<std::int64_t>(getInput(0)->nbDims());
+        if (mAttributes->template getAttr<std::int64_t>("End") < 0)
+            mAttributes->template getAttr<std::int64_t>("End") += static_cast<std::int64_t>(getInput(0)->nbDims());
 
-    if (this->template getAttr<std::int64_t>("Start") < 0)
-        this->template getAttr<std::int64_t>("Start") += static_cast<std::int64_t>(getInput(0)->nbDims());
-    if (this->template getAttr<std::int64_t>("End") < 0)
-        this->template getAttr<std::int64_t>("End") += static_cast<std::int64_t>(getInput(0)->nbDims());
+        const auto start = mAttributes->template getAttr<std::int64_t>("Start");
+        const auto end = mAttributes->template getAttr<std::int64_t>("End");
+        const auto nbDims = static_cast<std::int64_t>(getInput(0)->nbDims());
+        const DimSize_t roi = end - start + 1;
 
-    const auto start = this->template getAttr<std::int64_t>("Start");
-    const auto end = this->template getAttr<std::int64_t>("End");
-    const auto nbDims = static_cast<std::int64_t>(getInput(0)->nbDims());
-    const DimSize_t roi = end - start + 1;
+        AIDGE_ASSERT(start < nbDims && end < nbDims, "'Start' and 'End' must be < {}", nbDims);
+        AIDGE_ASSERT(roi > 1, "Invalid ROI for Shape");
 
-    AIDGE_ASSERT(start < nbDims && end < nbDims, "'Start' and 'End' must be < {}", nbDims);
-    AIDGE_ASSERT(roi> 1, "Unvalid ROI for Shape");
+        mOutputs[0]->resize({roi});
+        return true;
+    }
 
-    mOutputs[0]->resize({roi});
-    return true;
+    return false;
 }
 
 void Aidge::Shape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/ShiftGELU.cpp b/src/operator/ShiftGELU.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ede83e291bd1670885192e3ac8f4958e185c28e2
--- /dev/null
+++ b/src/operator/ShiftGELU.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ShiftGELU.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ShiftGELU_Op::Type = "ShiftGELU";
+
+void Aidge::ShiftGELU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(ShiftGELU_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/ShiftMax.cpp b/src/operator/ShiftMax.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eb77ae655354eac03fbdc0f1a84a44391795ee8c
--- /dev/null
+++ b/src/operator/ShiftMax.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ShiftMax.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ShiftMax_Op::Type = "ShiftMax";
+
+void Aidge::ShiftMax_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(ShiftMax_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 3204a4b9960187e8add87491ad60c720ca08c2de..4c42280cad77f93a706d731894326ff8bb411a27 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -42,134 +42,134 @@ bool Aidge::Slice_Op::dimsForwarded() const {
 }
 
 bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-
-    if (getInput(0)->empty()) {
-        return false;
-    }
+    if (inputsAssociated()) {
+        std::shared_ptr<Tensor> fallback;
+        // Copy optional input #1, if present, to attribute Starts
+        if (getInput(1)) {
+            if (!this->starts().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Starts attribute because input#1 takes precedence");
+            }
 
-   std::shared_ptr<Tensor> fallback;
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
 
-    if (getInput(1) && !getInput(1)->empty()) {
-        if (!mAttributes->template getAttr<SliceAttr::Starts>().empty()) {
-            Log::notice("Slice_Op: ignoring non-empty Starts attribute because input#1 takes precedence");
+            this->starts().clear(); // If both are provided input would override attrs
+            this->starts().reserve(getInput(1)->size());
+            const auto& starts = getInput(1)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(starts.getImpl()->hostPtr()),
+                        starts.size(),
+                        std::back_inserter(this->starts()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#1");
-            return false;
-        }
+        AIDGE_ASSERT(!this->starts().empty(), "Missing input#1 or Starts attribute");
 
-        mAttributes->template getAttr<SliceAttr::Starts>().clear(); // If both are provided input would override attrs
-        mAttributes->template getAttr<SliceAttr::Starts>().reserve(getInput(1)->size());
-        const auto& starts = getInput(1)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(starts.getImpl()->hostPtr()),
-                    starts.size(),
-                    std::back_inserter(mAttributes->template getAttr<SliceAttr::Starts>()));
-    }
+        // Copy optional input #2, if present, to attribute Ends
+        if (getInput(2)) {
+            if (!this->ends().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Ends attribute because input#2 takes precedence");
+            }
 
-    AIDGE_ASSERT(!mAttributes->template getAttr<SliceAttr::Starts>().empty(), "Missing input#1 or Starts attribute");
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#2");
+                return false;
+            }
 
-    if (getInput(2) && !getInput(2)->empty()) {
-        if (!mAttributes->template getAttr<SliceAttr::Ends>().empty()) {
-            Log::notice("Slice_Op: ignoring non-empty Ends attribute because input#2 takes precedence");
+            this->ends().clear(); // If both are provided input would override attrs
+            this->ends().reserve(getInput(2)->size());
+            const auto& ends = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(ends.getImpl()->hostPtr()),
+                        ends.size(),
+                        std::back_inserter(this->ends()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#2");
-            return false;
-        }
+        AIDGE_ASSERT(!this->ends().empty(), "Missing input#2 or Ends attribute");
 
-        mAttributes->template getAttr<SliceAttr::Ends>().clear(); // If both are provided input would override attrs
-        mAttributes->template getAttr<SliceAttr::Ends>().reserve(getInput(2)->size());
-        const auto& ends = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(ends.getImpl()->hostPtr()),
-                    ends.size(),
-                    std::back_inserter(mAttributes->template getAttr<SliceAttr::Ends>()));
-    }
+        // Copy optional input #3, if present, to attribute Axes
+        if (getInput(3)) {
+            if (!this->axes().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Axes attribute because input#3 takes precedence");
+            }
 
-    AIDGE_ASSERT(!mAttributes->template getAttr<SliceAttr::Ends>().empty(), "Missing input#2 or Ends attribute");
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#3");
+                return false;
+            }
 
-    if (getInput(3) && !getInput(3)->empty()) {
-        if (!mAttributes->template getAttr<SliceAttr::Axes>().empty()) {
-            Log::notice("Slice_Op: ignoring non-empty Axes attribute because input#3 takes precedence");
+            this->axes().clear(); // If both are provided input would override attrs
+            this->axes().reserve(getInput(3)->size());
+            const auto& axes = getInput(3)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+            std::copy_n(static_cast<int8_t*>(axes.getImpl()->hostPtr()),
+                        axes.size(),
+                        std::back_inserter(this->axes()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#3");
-            return false;
-        }
+        AIDGE_ASSERT(!this->axes().empty(), "Missing input#3 or Axes attribute");
 
-        mAttributes->template getAttr<SliceAttr::Axes>().clear(); // If both are provided input would override attrs
-        mAttributes->template getAttr<SliceAttr::Axes>().reserve(getInput(3)->size());
-        const auto& axes = getInput(3)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
-        std::copy_n(static_cast<int8_t*>(axes.getImpl()->hostPtr()),
-                    axes.size(),
-                    std::back_inserter(mAttributes->template getAttr<SliceAttr::Axes>()));
-    }
+        // Copy optional input #4, if present, to attribute Steps
+        if (getInput(4)) {
+            if (!this->steps().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Steps attribute because input#4 takes precedence");
+            }
 
-    AIDGE_ASSERT(!mAttributes->template getAttr<SliceAttr::Axes>().empty(), "Missing input#3 or Axes attribute");
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#4");
+                return false;
+            }
 
-    if (getInput(4) && !getInput(4)->empty()) {
-        if (!mAttributes->template getAttr<SliceAttr::Steps>().empty()) {
-            Log::notice("Slice_Op: ignoring non-empty Steps attribute because input#4 takes precedence");
+            this->steps().clear(); // If both are provided input would override attrs
+            this->steps().reserve(getInput(4)->size());
+            const auto& steps = getInput(4)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(steps.getImpl()->hostPtr()),
+                        steps.size(),
+                        std::back_inserter(this->steps()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#4");
-            return false;
+        // Fill Steps attr if empty
+        if(this->steps().empty()) {
+            // In case the input Steps is not provided, default value is 1
+            this->steps() = std::vector<std::int64_t>(this->axes().size(), 1);
         }
 
-        mAttributes->template getAttr<SliceAttr::Steps>().clear(); // If both are provided input would override attrs
-        mAttributes->template getAttr<SliceAttr::Steps>().reserve(getInput(4)->size());
-        const auto& steps = getInput(4)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(steps.getImpl()->hostPtr()),
-                    steps.size(),
-                    std::back_inserter(mAttributes->template getAttr<SliceAttr::Steps>()));
-    }
-    // Fill Steps attr if empty
-    if(mAttributes->template getAttr<SliceAttr::Steps>().empty()) {
-        // In case the input Steps is not provided, default value is 1
-        mAttributes->template getAttr<SliceAttr::Steps>() = std::vector<std::int64_t>(mAttributes->template getAttr<SliceAttr::Axes>().size(), 1);
-    }
-
-    const DimSize_t nbAxes = mAttributes->template getAttr<SliceAttr::Axes>().size();
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
-    for (std::size_t i = 0; i < nbAxes; ++i) {
-        const DimIdx_t axis = mAttributes->template getAttr<SliceAttr::Axes>()[i] >= 0 ?
-                        static_cast<DimIdx_t>(mAttributes->template getAttr<SliceAttr::Axes>()[i]) :
-                        static_cast<DimIdx_t>(mAttributes->template getAttr<SliceAttr::Axes>()[i] + static_cast<DimIdx_t>(getInput(0)->nbDims()));
-        const DimSize_t start = mAttributes->template getAttr<SliceAttr::Starts>()[i] >= 0 ?
-                            static_cast<DimSize_t>(mAttributes->template getAttr<SliceAttr::Starts>()[i]) :
-                            static_cast<DimSize_t>(mAttributes->template getAttr<SliceAttr::Starts>()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
-        const DimSize_t end = mAttributes->template getAttr<SliceAttr::Ends>()[i] >= 0 ?
-                        static_cast<DimSize_t>(mAttributes->template getAttr<SliceAttr::Ends>()[i]) :
-                        static_cast<DimSize_t>(mAttributes->template getAttr<SliceAttr::Ends>()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
-        const std::int64_t step = mAttributes->template getAttr<SliceAttr::Steps>()[i];
-
-        AIDGE_ASSERT(step != 0, "Slice_Op: Step must be a non-zero value!");
-        if(step * (static_cast<int64_t>(end) - static_cast<int64_t>(start)) < 0) {
-            if(step < 0) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step is negative we must have End < Start", type());
-            }
-            else {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step is positive we must have Start < End", type());
+        // Compute output dims
+        const DimSize_t nbAxes = this->axes().size();
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        for (std::size_t i = 0; i < nbAxes; ++i) {
+            const DimIdx_t axis = this->axes()[i] >= 0 ?
+                            static_cast<DimIdx_t>(this->axes()[i]) :
+                            static_cast<DimIdx_t>(this->axes()[i] + static_cast<DimIdx_t>(getInput(0)->nbDims()));
+            const DimSize_t start = this->starts()[i] >= 0 ?
+                                static_cast<DimSize_t>(this->starts()[i]) :
+                                static_cast<DimSize_t>(this->starts()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
+            const DimSize_t end = this->ends()[i] >= 0 ?
+                            static_cast<DimSize_t>(this->ends()[i]) :
+                            static_cast<DimSize_t>(this->ends()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
+            const std::int64_t step = this->steps()[i];
+
+            AIDGE_ASSERT(step != 0, "Slice_Op: Step must be a non-zero value!");
+            if(step * (static_cast<int64_t>(end) - static_cast<int64_t>(start)) < 0) {
+                if(step < 0) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step is negative we must have End < Start", type());
+                }
+                else {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step is positive we must have Start < End", type());
+                }
             }
-        }
 
-        const std::size_t sliceLength = static_cast<std::size_t>(std::ceil((static_cast<float>(end) - static_cast<float>(start)) / static_cast<float>(step)));
-        // Check if slice length is valid
-        if (sliceLength > getInput(0)->dims()[axis])
-        {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Slice_Op: ROI of Slice operator out of bounds");
+            const std::size_t sliceLength = static_cast<std::size_t>(std::ceil((static_cast<float>(end) - static_cast<float>(start)) / static_cast<float>(step)));
+            // Check if slice length is valid
+            if (sliceLength > getInput(0)->dims()[axis])
+            {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Slice_Op: ROI of Slice operator out of bounds");
+            }
+            outDims[axis] = sliceLength;
         }
-        outDims[axis] = sliceLength;
+        mOutputs[0]->resize(outDims);
+        return true;
     }
-    mOutputs[0]->resize(outDims);
-    return true;
+
+    return false;
 }
 
 void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index 5d0493ea4da0b80bf572a33fa4ee466804d0d270..a0cb049b19e9411daf65bbe2a10319c62b32c1b8 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -65,66 +65,63 @@
 }
 
 bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-
-    if (getInput(0)->empty()) {
-        return false;
-    }
-
-    std::shared_ptr<Tensor> fallback;
-
-    if (getInput(1) && !getInput(1)->empty()) { // Split is given, replace
-        if (!this->template getAttr<SplitAttr::Split>().empty()) {
-            Log::notice("Split_Op: ignoring non-empty Split attribute because input#1 takes precedence");
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute Split
+        if (getInput(1)) {
+            if (!this->template getAttr<SplitAttr::Split>().empty()) {
+                Log::notice("Split_Op: ignoring non-empty Split attribute because input#1 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("Split_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
+
+            std::shared_ptr<Tensor> fallback;
+            this->template getAttr<SplitAttr::Split>().clear(); // If both are provided input would override attrs
+            this->template getAttr<SplitAttr::Split>().reserve(getInput(1)->size());
+            const auto& splits = getInput(1)->refCastFrom(fallback, NativeType<DimSize_t>::type, "cpu");
+            std::copy_n(static_cast<DimSize_t*>(splits.getImpl()->hostPtr()),
+                        splits.size(),
+                        std::back_inserter(this->template getAttr<SplitAttr::Split>()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Split_Op: unable to forwardDims() because output dims are data dependent on input#1");
-            return false;
-        }
+        // Compute output dims
+        if (this->template getAttr<std::int8_t>("Axis") < 0)
+            this->template getAttr<std::int8_t>("Axis") += static_cast<std::int8_t>(getInput(0)->nbDims());
 
-        this->template getAttr<SplitAttr::Split>().reserve(getInput(1)->size());
-        const auto& splits = getInput(1)->refCastFrom(fallback, NativeType<DimSize_t>::type, "cpu");
-        std::copy_n(static_cast<DimSize_t*>(splits.getImpl()->hostPtr()),
-                    splits.size(),
-                    std::back_inserter(this->template getAttr<SplitAttr::Split>()));
-    }
+        DimSize_t dimToSplit = getInput(0)->dims()[this->template getAttr<std::int8_t>("Axis")];
+        DimSize_t nbOutput = this->nbOutputs();
+        // Fill Split attr if empty
+        if(this->template getAttr<SplitAttr::Split>().empty()) {
+            // In case the input Split is not provided, divide the dimension of Axis into equal slices
+            AIDGE_ASSERT(dimToSplit >= nbOutput, "Split_Op: Output number {} mustn't be bigger than dimension {}.", nbOutput, dimToSplit);
+            DimSize_t baseSliceSize = dimToSplit / nbOutput;
 
-    if (this->template getAttr<std::int8_t>("Axis") < 0)
-        this->template getAttr<std::int8_t>("Axis") += static_cast<std::int8_t>(getInput(0)->nbDims());
+            DimSize_t remainder = dimToSplit % nbOutput;
 
-    DimSize_t dimToSplit = getInput(0)->dims()[this->template getAttr<std::int8_t>("Axis")];
-    DimSize_t nbOutput = this->nbOutputs();
-    // Fill Split attr if empty
-    if(this->template getAttr<SplitAttr::Split>().empty()) {
-        // In case the input Split is not provided, divide the dimension of Axis into equal slices
-        AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: Output number {} musn't be bigger than dimension {}.", nbOutput, dimToSplit);
-        DimSize_t baseSliceSize = dimToSplit / nbOutput;
+            for (DimSize_t i = 0; i < static_cast<DimSize_t>(nbOutput -1); ++i) {
+                    this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize);
+            }
+            this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize + remainder);
+        }
 
-        DimSize_t remainder = dimToSplit % nbOutput;
+        const auto& splits = this->template getAttr<SplitAttr::Split>();
+        AIDGE_ASSERT(splits.size() == nbOutput, "Split_Op: number of slices {} must be equal to number of outputs {}", splits.size(), nbOutput);
+        DimSize_t totalSplitSize = std::accumulate(splits.cbegin(), splits.cend(), static_cast<DimSize_t>(0));
+        AIDGE_ASSERT(totalSplitSize == dimToSplit, "Split_Op: Total chunks size {} is different from dimension size {}.", totalSplitSize, dimToSplit);
 
-        for (DimSize_t i = 0; i < static_cast<DimSize_t>(nbOutput -1); ++i) {
-                this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize);
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        for (std::size_t i = 0; i < nbOutput; ++i)
+        {
+            outDims[this->template getAttr<std::int8_t>("Axis")] = this->template getAttr<SplitAttr::Split>()[i];
+            mOutputs[i]->resize(outDims);
         }
-        this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize + remainder);
-    }
-
-    const auto splits = this->template getAttr<SplitAttr::Split>();
-    AIDGE_ASSERT(splits.size() == nbOutput, "Split_Op: number of slices {} must be equal to number of outputs {}", splits, nbOutput);
-    DimSize_t totalSplitSize = std::accumulate(splits.cbegin(), splits.cend(), 0);
-    AIDGE_ASSERT(totalSplitSize == dimToSplit, "Split_Op: Total chunks size {} is different from dimension size {}.", totalSplitSize, dimToSplit);
 
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
-    for (std::size_t i = 0; i < nbOutput; ++i)
-    {
-        outDims[this->template getAttr<std::int8_t>("Axis")] = this->template getAttr<SplitAttr::Split>()[i];
-        mOutputs[i]->resize(outDims);
+        return true;
     }
-
-    return true;
+
+    return false;
 }
 
 void Aidge::Split_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index b977f4ee7ccce32d7f7929cbee99140aea36cd2f..858b32beaf9e23e8e9e7f52cfe7176afe399843c 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -25,13 +25,7 @@
 const std::string Aidge::Sub_Op::Type = "Sub";
 
 bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 0b00c9b600b86764631090297ef1fe25f9212578..69820a924105acc8bea817aecb90e0aa278fce06 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -31,16 +31,10 @@ void Aidge::TransposeImpl::forward() {
 const std::string Aidge::Transpose_Op::Type = "Transpose";
 
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
-    }
-
-    if (!getInput(0)->empty()) {
-        const auto& outDimsOrder = mAttributes->template getAttr<std::vector<DimSize_t>>(0);
+    if (inputsAssociated()) {
         std::vector<DimSize_t> outputDims;
-        for (std::size_t i = 0; i < outDimsOrder.size(); ++i) {
-            outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
+        for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) {
+            outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]);
         }
         mOutputs[0]->resize(outputDims);
         return true;
diff --git a/src/recipes/FuseMulAdd.cpp b/src/recipes/FuseMulAdd.cpp
index 08e817c8ef4272f24e53a7870ead2c22ad46c186..6112fc47ece6bb361ebad626be7b5a6b1c2189bd 100644
--- a/src/recipes/FuseMulAdd.cpp
+++ b/src/recipes/FuseMulAdd.cpp
@@ -96,7 +96,7 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
         fcName += "_" + addNode->name();
     }
 
-    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(bias ? false : true), fcName);
+    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(), fcName);
 
     // Step 2 : Branch existing producers & create the others
     // link weights & bias
diff --git a/src/recipes/GraphViewHelper.cpp b/src/recipes/GraphViewHelper.cpp
index b0c99bffb895dc64b20d76991911ae5f4b604c85..9522c0fe7346e78875a08d3ebf19a04dea2909e1 100644
--- a/src/recipes/GraphViewHelper.cpp
+++ b/src/recipes/GraphViewHelper.cpp
@@ -44,14 +44,3 @@ std::set<std::shared_ptr<Aidge::Tensor>> Aidge::parameters(std::shared_ptr<Aidge
     }
     return res;
 }
-
-void Aidge::compile_gradient(std::shared_ptr<Aidge::GraphView> gv) {
-    for (const auto& node : gv->getNodes()) {
-        // TODO: check that each node is an OperatorTensor
-        AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Cannot instanciate gradient of an Operator ({}) that doesn't use Tensor.", node->getOperator()->type());
-        const std::shared_ptr<OperatorTensor> op = std::dynamic_pointer_cast<OperatorTensor>(node -> getOperator());
-        for (std::size_t o = 0; o < node -> nbOutputs(); ++o) {
-            op->getOutput(o)->initGrad();
-        }
-    }
-}
diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
index 3226235189d778f27d922824a62b95ba6c6c170c..88691c26d5d7013874c13000535ec2a3842d47d3 100644
--- a/src/recipes/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -74,10 +74,12 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
     // }
 
     std::vector<std::shared_ptr<Node>> clonedInputs = std::vector<std::shared_ptr<Node>>(node->nbInputs(), nullptr);
-    for (std::size_t i = node->nbData(); i < node ->nbInputs(); ++i) {
-        clonedInputs[i] = node -> getParent(i) -> cloneSharedOperators();
-        clonedInputs[i] -> setName(node -> getParent(i) -> name() + "_0");
-        tiledOperator.insert(clonedInputs[i]);
+    for (std::size_t i = 0; i < node ->nbInputs(); ++i) {
+        if (node->inputCategory(i) == InputCategory::Param || node->inputCategory(i) == InputCategory::OptionalParam) {
+            clonedInputs[i] = node -> getParent(i) -> cloneSharedOperators();
+            clonedInputs[i] -> setName(node -> getParent(i) -> name() + "_0");
+            tiledOperator.insert(clonedInputs[i]);
+        }
     }
 
     const std::vector<std::string> sliceInputsNames = Slice_Op::getInputsName();
diff --git a/src/recipes/RemoveNode.cpp b/src/recipes/RemoveNode.cpp
index 317db6f87b2d3c4a6879a2f176afeaf06b36f733..a09c67991409dfe491d46b4ad739f9ddf5b72aef 100644
--- a/src/recipes/RemoveNode.cpp
+++ b/src/recipes/RemoveNode.cpp
@@ -31,7 +31,7 @@ size_t Aidge::removeNode(std::shared_ptr<GraphView> graphView, const std::string
         std::set<NodePtr> nodesToRemove = solution->at(type);
         if (incProducers) {
             for (const auto& nodePtr: (*solution->at(type).begin())->getParents()) {
-                if (nodePtr->type() == "Producer") {
+                if (nodePtr != nullptr && nodePtr->type() == "Producer") {
                     nodesToRemove.insert(nodePtr);
                 }
             }
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 0d59e6a4220385df90cf36d4e978087ac570876c..d63c93deb1ba2d7974ffc6e5b8ccd1e9c57dc76c 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -197,18 +197,20 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S
             bool isStillConsumer = false;
             // Only look for data inputs. If no data is available on data input,
             // by definition, no parameter can be consumed on parameter inputs.
-            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbData(); ++inputIdx) {
-                AIDGE_LOG_CONTEXT("Consumer node {} input #{}", namePtrTable.at(consumer), inputIdx);
-
-                if (consumer->getOperator()->getNbConsumedData(inputIdx) <
-                            getNbAvailableData(consumer, inputIdx)) {
-                    Log::debug("  still consumer: C{} < P{} for input #{}",
-                        consumer->getOperator()->getNbConsumedData(inputIdx),
-                        getNbAvailableData(consumer, inputIdx), inputIdx);
-
-                    // there is still data to consume
-                    isStillConsumer = true;
-                    break;
+            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) {
+                if (consumer->inputCategory(inputIdx) == InputCategory::Data) {
+                    AIDGE_LOG_CONTEXT("Consumer node {} input #{}", namePtrTable.at(consumer), inputIdx);
+
+                    if (consumer->getOperator()->getNbConsumedData(inputIdx) <
+                                getNbAvailableData(consumer, inputIdx)) {
+                        Log::debug("  still consumer: C{} < P{} for input #{}",
+                            consumer->getOperator()->getNbConsumedData(inputIdx),
+                            getNbAvailableData(consumer, inputIdx), inputIdx);
+
+                        // there is still data to consume
+                        isStillConsumer = true;
+                        break;
+                    }
                 }
             }
 
@@ -651,19 +653,16 @@ Aidge::Elts_t Aidge::Scheduler::getNbAvailableData(const std::shared_ptr<Node>&
         }
     }
 
-    // Otherwise, two cases:
+    // Otherwise, it means that the input is not connected. Two cases:
+    // - There is no data, it is assumed to be an optional input
+    // - A valid tensor exists:
     if (node->getOperator()->getRawInput(inputIdx)) {
-        // Input is not connected but a valid tensor exists
         // => This means data was fed manually to the input, without a Producer
         // In this case, we assume a single-use data (unlike a Producer, which
         // keep producing the data each time it is needed).
         fmt::print("No producer node attached to input#{} for node {} ({})\n", inputIdx, node->name(), node->type());
         return Elts_t::DataElts(std::static_pointer_cast<Tensor>(node->getOperator()->getRawInput(inputIdx))->size());
     }
-    else {
-        // Input is not connected, this is an error
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input#{} for node {} ({})\n", inputIdx, node->name(), node->type());
-    }
 
     return Elts_t::NoneElts();
 }
diff --git a/src/scheduler/SequentialScheduler.cpp b/src/scheduler/SequentialScheduler.cpp
index 74b1b3f0c6e9be164792460669821744661c15b3..88b5e98bc62456bd59dc235c3112396daaeddd24 100644
--- a/src/scheduler/SequentialScheduler.cpp
+++ b/src/scheduler/SequentialScheduler.cpp
@@ -73,10 +73,7 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, const std::vector<std
     }
 }
 
-void Aidge::SequentialScheduler::backward(bool instanciateGrad) {
-    // create ad set Grad values
-    if (instanciateGrad) { compile_gradient(mGraphView); }
-
+void Aidge::SequentialScheduler::backward() {
     // TODO: Check output grad are not empty
 
     // Generate scheduling *only if empty*
diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
index 655fd725e9d7d913d24c6552571ae3b91e3605b4..62e90dcbd7c20548019afae1a04f84b3e1d4484a 100644
--- a/unit_tests/data/Test_Tensor.cpp
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -40,7 +40,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T_default.dims() == std::vector<DimSize_t>({})) &&
             (T_default.strides() == std::vector<DimSize_t>({1})) &&
             (T_default.getImpl() == nullptr) &&
-            (T_default.grad() == nullptr) &&
+            (T_default.grad() != nullptr) &&
             (T_default.isContiguous() == true)
         ));
     }
@@ -53,7 +53,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({})) &&
             (T.strides() == std::vector<DimSize_t>({1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
     }
@@ -67,7 +67,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == Tdims) &&
             (T.strides() == std::vector<DimSize_t>({5040,2520,840,210,42,7,1})) &&
             (T.getImpl() == nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
     }
@@ -83,7 +83,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({2})) &&
             (T.strides() == std::vector<DimSize_t>({1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
 
@@ -97,7 +97,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({2,2,2})) &&
             (T.strides() == std::vector<DimSize_t>({4,2,1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
         REQUIRE_NOTHROW(T = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
@@ -113,7 +113,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({2,2,2,2})) &&
             (T.strides() == std::vector<DimSize_t>({8,4,2,1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
     }
@@ -157,7 +157,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
                 (T.dims() == Tclone.dims()) &&
                 (T.strides() == Tclone.strides()) &&
                 (T.getImpl() != Tclone.getImpl()) &&
-                (Tclone.grad() == nullptr) &&
+                (Tclone.grad() != nullptr) &&
                 (Tclone.isContiguous() == true)
             ));
             REQUIRE(Tclone == T);
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 8403686d16da15e7e8ad4616029a241d6197d450..8e9f5a27e275a5ce56ddf57fa092ec96cec84711 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -399,9 +399,7 @@ TEST_CASE("[core/graph] GraphView(resetConnections)") {
         conv1->resetConnections(false);
 
         REQUIRE(conv->output(0).size() == 0);
-        for (std::size_t i = 0; i < conv1->nbData(); ++i) {
-        REQUIRE((conv1->input(i) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
-        }
+        REQUIRE((conv1->input(0) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
         REQUIRE((conv1->input(1) == std::pair<std::shared_ptr<Node>, IOIndex_t>(prod1, 0)));
         REQUIRE((conv1->input(2) == std::pair<std::shared_ptr<Node>, IOIndex_t>(prod2, 0)));
         REQUIRE((conv2->input(0) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
@@ -554,6 +552,69 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myConv, other2}));
     }
 
+    SECTION("replace same input category 1") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", {}, 1, "other_input");
+        auto other1 = GenericOperator("Other", {InputCategory::Data}, 1, "other1");
+        auto myOld = GenericOperator("myOld", {InputCategory::Data}, 1, "old");
+        auto other2 = GenericOperator("Other", {InputCategory::Data}, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myOld);
+        myOld->addChild(other2);
+        g->add({other1, myOld, other2});
+
+        auto myNew =  GenericOperator("myNew", {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData}, 1, "new");
+
+        GraphView::replace({myOld}, {myNew});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myNew, other2}));
+        REQUIRE(myNew->input(0).first == other1);
+        REQUIRE(myNew->input(1).first == nullptr);
+        REQUIRE(myNew->input(2).first == nullptr);
+    }
+
+    SECTION("replace same input category 2") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", {}, 1, "other_input");
+        auto other1 = GenericOperator("Other", {InputCategory::Data}, 1, "other1");
+        auto myOld = GenericOperator("myOld", {InputCategory::Param}, 1, "old");
+        auto other2 = GenericOperator("Other", {InputCategory::Data}, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myOld, 0, 0);
+        myOld->addChild(other2);
+        g->add({other1, myOld, other2});
+
+        auto myNew =  GenericOperator("myNew", {InputCategory::Data, InputCategory::Param, InputCategory::Data}, 1, "new");
+
+        GraphView::replace({myOld}, {myNew});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myNew, other2}));
+        REQUIRE(myNew->input(0).first == nullptr);
+        REQUIRE(myNew->input(1).first == other1);
+        REQUIRE(myNew->input(2).first == nullptr);
+    }
+
+    SECTION("replace same input category 3") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", {}, 1, "other_input");
+        auto other1 = GenericOperator("Other", {InputCategory::Data}, 1, "other1");
+        auto myOld = GenericOperator("myOld", {InputCategory::Data}, 1, "old");
+        auto other2 = GenericOperator("Other", {InputCategory::Data}, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myOld);
+        myOld->addChild(other2);
+        g->add({other1, myOld, other2});
+
+        auto myNew =  GenericOperator("myNew", {InputCategory::Data, InputCategory::Data, InputCategory::Data}, 1, "new");
+
+        GraphView::replace({myOld}, {myNew});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myNew, other2}));
+        REQUIRE(myNew->input(0).first == other1);
+        REQUIRE(myNew->input(1).first == other1);
+        REQUIRE(myNew->input(2).first == other1);
+    }
+
     SECTION("Change every Nodes in a GraphView") {
         auto matmulWeight0 = GenericOperator("Producer", 0, 0, 1, "matmul_w0");
         auto addBias0 = GenericOperator("Producer", 0, 0, 1, "add_b0");
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index ed4afafe39a367ecabb25ff949eb3d03999d1ea9..d1b4e2e31e8c57e2c3eebd42019ba9f42c4d39e0 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -39,7 +39,9 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         REQUIRE(microGraph->outputNodes().size() == 1);
         REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv");
         REQUIRE(op->nbInputs() == 3);
-        REQUIRE(op->nbData() == 1);
+        REQUIRE(op->inputCategory(0) == InputCategory::Data);
+        REQUIRE(op->inputCategory(1) == InputCategory::Param);
+        REQUIRE(op->inputCategory(2) == InputCategory::OptionalParam);
         REQUIRE(op->nbOutputs() == 1);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(std::vector<std::size_t>({2,1,5,5}));
@@ -66,7 +68,13 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         microGraph->save("lstm", false, false);
 
         REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
-        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->inputCategory(0) == InputCategory::Data);
+        for (size_t i = 1; i < 9; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::Param);
+        }
+        for (size_t i = 9; i < 17; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::OptionalParam);
+        }
         REQUIRE(myLSTM->nbOutputs() == 2);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>();
@@ -94,7 +102,13 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         auto op = std::static_pointer_cast<OperatorTensor>(myLSTM->getOperator());
 
         REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
-        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->inputCategory(0) == InputCategory::Data);
+        for (size_t i = 1; i < 9; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::Param);
+        }
+        for (size_t i = 9; i < 17; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::OptionalParam);
+        }
         REQUIRE(myLSTM->nbOutputs() == 2);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
@@ -129,6 +143,6 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         REQUIRE(g->rootNode() == pop);
         g->save("lstm_expanded", true, true);
 
-        REQUIRE(g->getNodes().size() == 41);
+        REQUIRE(g->getNodes().size() == 33);
     }
 }
diff --git a/unit_tests/recipes/Test_removeFlatten.cpp b/unit_tests/recipes/Test_removeFlatten.cpp
index 84099ac0b77a633893af6a7550464e539c95d806..24f5aa2e231b5204add1c8f87cdeb7a71175ea05 100644
--- a/unit_tests/recipes/Test_removeFlatten.cpp
+++ b/unit_tests/recipes/Test_removeFlatten.cpp
@@ -27,8 +27,8 @@ namespace Aidge {
 TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
   std::shared_ptr<Node> flatten =
       GenericOperator("Flatten", 1, 0, 1, "myFlatten");
-  std::shared_ptr<Node> fc0 = FC(10, 10, "FC_1");
-  std::shared_ptr<Node> fc1 = FC(10, 10, "FC_2");
+  std::shared_ptr<Node> fc0 = FC(10, 10, false, "FC_1");
+  std::shared_ptr<Node> fc1 = FC(10, 10, false, "FC_2");
   std::shared_ptr<Node> prod = Producer(std::array<DimSize_t, 10>(), "myProd");
 
   SECTION("flatten last layer : nothing removed because pattern searched is "