diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000000000000000000000000000000000000..7a01a2d6c0caf880738df8393567fb169a07be7e
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,6 @@
+include MANIFEST.in
+include LICENSE
+include README.md
+recursive-include aidge_core *
+include setup.py
+include version.txt
diff --git a/aidge_core/__init__.py b/aidge_core/__init__.py
index 4b5c448355a17fd4274ba45f5cd98afa70b1ae53..4234747b94b25b35a6836a36ad6331c1c8a4bc66 100644
--- a/aidge_core/__init__.py
+++ b/aidge_core/__init__.py
@@ -8,5 +8,6 @@ http://www.eclipse.org/legal/epl-2.0.
 SPDX-License-Identifier: EPL-2.0
 """
 from aidge_core.aidge_core import * # import so generated by PyBind
-from aidge_core.export import ExportNode, generate_file, generate_str
+from aidge_core.export_utils import ExportNode, generate_file, generate_str
 import aidge_core.utils
+from aidge_core.aidge_export_aidge import *
diff --git a/aidge_core/aidge_export_aidge/__init__.py b/aidge_core/aidge_export_aidge/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5d6f96b3300c0e86147baac30d7cae8a3a0b798
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/__init__.py
@@ -0,0 +1,8 @@
+from pathlib import Path
+
+# Constants
+FILE = Path(__file__).resolve()
+ROOT_EXPORT = FILE.parents[0]
+
+from .operator_export import *
+from .export import export
diff --git a/aidge_core/aidge_export_aidge/export.py b/aidge_core/aidge_export_aidge/export.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0e859b71e77bb03b95acb6ca75dcf09a8af0722
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/export.py
@@ -0,0 +1,104 @@
+import aidge_core
+import shutil
+import os
+from pathlib import Path
+from .utils import supported_operators, OPERATORS_REGISTRY
+from . import ROOT_EXPORT
+from aidge_core import generate_file
+
+
+def export(export_folder: str,
+           graph_view: aidge_core.GraphView,
+           enable_python_binding: bool = True,
+           ):
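+    """Generate a standalone Aidge C++ export of ``graph_view``.
+
+    Static files (CMake helpers, main.cpp, ...) are copied first, then one
+    attribute header and one graph-constructor snippet are generated per node,
+    and finally src/dnn.cpp is assembled from them.
+
+    :param export_folder: folder in which the export is generated
+    :param graph_view: graph to export
+    :param enable_python_binding: also generate a pybind11 binding (default: True)
+    """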
+    export_folder_path = Path(export_folder)
+    export_name = export_folder_path.name
+
+    ### Create export folder ###
+    os.makedirs(export_folder_path, exist_ok=True)
+
+    ### Copy static files ###
+    shutil.copytree(ROOT_EXPORT / "static/include",
+                    export_folder_path / "include", dirs_exist_ok=True)
+    shutil.copytree(ROOT_EXPORT / "static/cmake",
+                    export_folder_path / "cmake", dirs_exist_ok=True)
+    shutil.copyfile(ROOT_EXPORT / "static/CMakeLists.txt",
+                    export_folder_path / "CMakeLists.txt")
+    shutil.copyfile(ROOT_EXPORT / "static/version.txt",
+                    export_folder_path / "version.txt")
+    shutil.copyfile(ROOT_EXPORT / "static/README.md",
+                    export_folder_path / "README.md")
+    shutil.copyfile(ROOT_EXPORT / "static/main.cpp",
+                    export_folder_path / "main.cpp")
+    shutil.copyfile(ROOT_EXPORT / "static/export-config.cmake.in",
+                    export_folder_path / f"{export_name}-config.cmake.in")
+
+    # Create project_name file
+    with open(export_folder_path / "project_name.txt", "w") as f:
+        f.write(export_name)
+
+    # Add the files related to the Python binding, if enabled
+    if enable_python_binding:
+        os.makedirs(export_folder_path / "python_binding", exist_ok=True)
+        generate_file(
+            export_folder_path / "python_binding/pybind.cpp",
+            ROOT_EXPORT / "templates/pybind.jinja",
+            name=export_name,
+        )
+        # TODO: Add a main.py file ?
+
+    ### Generate an export for each node and the dnn file ###
+    list_configs = []  # List of headers to include in dnn.cpp to access attributes and parameters
+    list_actions = []  # List of strings used to construct the graph
+    set_operator = set()
+    # Queue of Aidge nodes to explore, guaranteeing a topological exploration of the graph
+    open_nodes = list(graph_view.get_input_nodes())
+    # List of Aidge nodes already explored
+    closed_nodes = []
+
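+    # Invariant: a node is converted only after all of its parents have been;
+    # when an unconverted parent is found, the node is pushed back behind that
+    # parent and re-examined later, yielding a topological ordering.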
+    while open_nodes:
+        node = open_nodes.pop(0)
+        if node in closed_nodes:
+            continue  # Node already converted, moving on ...
+        parents_not_converted = False
+        # Check that all parents have been converted
+        for parent in node.get_parents():
+            if parent is not None and \
+                    parent not in closed_nodes:
+                # If a parent has not been converted, push back the current node
+                if not parents_not_converted:
+                    open_nodes.insert(0, node)
+                    parents_not_converted = True
+                # Push the unconverted parent as the next node to convert
+                open_nodes.insert(0, parent)
+        if parents_not_converted:
+            continue
+        # The next nodes to process are the children of the current node
+        open_nodes += list(node.get_children())
+
+        if node.type() in supported_operators():
+            set_operator.add(node.type())
+            op = OPERATORS_REGISTRY[node.type()](node)
+
+            # TODO: list_configs and list_actions don't need to be passed as arguments
+            # Export the configuration
+            list_configs = op.export(export_folder_path, list_configs)
+
+            # Add forward kernel
+            list_actions = op.forward(list_actions)
+        else:
+            raise RuntimeError(f"Operator: {node.type()} is not supported")
+        closed_nodes.append(node)
+    # Generate full dnn.cpp
+    aidge_core.generate_file(
+        export_folder_path / "src/dnn.cpp",
+        ROOT_EXPORT / "templates/dnn.jinja",
+        headers=list_configs,
+        operators=set_operator,
+        actions=list_actions,
+    )
diff --git a/aidge_core/aidge_export_aidge/operator_export/__init__.py b/aidge_core/aidge_export_aidge/operator_export/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..37d674ac84f72d643ba1a628a86fbcde9780f4a4
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/__init__.py
@@ -0,0 +1,14 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+from pathlib import Path
+
+DIR_PATH = Path(__file__).parent
+modules = [Path(module).stem for module in DIR_PATH.glob("*.py")]
+__all__ = [f for f in modules if f != "__init__"]
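+# e.g. with conv.py, fc.py, relu.py present, __all__ == ["conv", "fc", "relu"]
+# (modulo filesystem ordering)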
diff --git a/aidge_core/aidge_export_aidge/operator_export/conv.py b/aidge_core/aidge_export_aidge/operator_export/conv.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb7092fb18982a3cc3f11a1ca47394ce2f77d0b6
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/conv.py
@@ -0,0 +1,31 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import ExportNode, generate_file, generate_str
+from pathlib import Path
+
+@operator_register("Conv")
+class Conv(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        include_path = f"attributes/{self.name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/attributes/conv.jinja",
+            name=self.name,
+            **self.attributes
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT /"templates/graph_ctor/conv.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/fc.py b/aidge_core/aidge_export_aidge/operator_export/fc.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcd528528707dc6eec917790b46e509c2984fa66
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/fc.py
@@ -0,0 +1,37 @@
+from aidge_core.aidge_export_aidge.utils import operator_register,parse_node_input
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import ExportNode, generate_file, generate_str
+from pathlib import Path
+
+@operator_register("FC")
+class FC(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        include_path = f"attributes/{self.name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/attributes/fc.jinja",
+            name=self.name,
+            InChannels=self.inputs_dims[1][1],
+            OutChannels=self.operator.out_channels(),
+            **self.attributes
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/fc.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/maxpooling.py b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c63e71b423b90f62536cafd25c61101e76e0562
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
@@ -0,0 +1,32 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import ExportNode, generate_file, generate_str
+from pathlib import Path
+
+@operator_register("MaxPooling")
+class MaxPooling(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        include_path = f"attributes/{self.name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/attributes/maxpooling.jinja",
+            name=self.name,
+            **self.attributes
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/maxpooling.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/producer.py b/aidge_core/aidge_export_aidge/operator_export/producer.py
new file mode 100644
index 0000000000000000000000000000000000000000..c04019043a6cd7d1a0de95a0ba32f2bb7a3a4bec
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/producer.py
@@ -0,0 +1,65 @@
+from aidge_core.aidge_export_aidge.utils import operator_register
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import DataType, ExportNode, generate_file, generate_str
+import numpy as np
+from pathlib import Path
+
+# Convert aidge datatype to C++ type
+datatype_converter = {
+    DataType.Float64 : "double",
+    DataType.Float32 : "float",
+    DataType.Float16 : "half_float::half",
+    DataType.Int8    : "int8_t",
+    DataType.Int16   : "int16_t",
+    DataType.Int32   : "int32_t",
+    DataType.Int64   : "int64_t",
+    DataType.UInt8   : "uint8_t",
+    DataType.UInt16  : "uint16_t",
+    DataType.UInt32  : "uint32_t",
+    DataType.UInt64  : "uint64_t"
+}
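+# e.g. datatype_converter[DataType.Float32] == "float"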
+
+
+@operator_register("Producer")
+class Producer(ExportNode):
+    """
+    If there is a standardization of the export operators
+    then this class should be just a inheritance of ProducerCPP
+    """
+    def __init__(self, node):
+        super().__init__(node)
+        child, in_idx = self.node.output(0)[0]
+        self.tensor_name = f"{child.name()}_{in_idx}"
+        self.values = np.array(self.operator.get_output(0))
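+        # NOTE: the exported tensor is named after its single consumer, e.g.
+        # "conv0_1" for input #1 of a node named "conv0" (hypothetical names);
+        # export() below asserts that the producer feeds exactly one input.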
+
+    def export(self, export_folder:Path, list_configs:list):
+        assert len(self.node.output(0)) == 1
+
+        include_path = f"parameters/{self.tensor_name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        aidge_tensor = self.operator.get_output(0)
+        aidge_type = aidge_tensor.dtype()
+        if aidge_type in datatype_converter:
+            datatype = datatype_converter[aidge_type]
+        else:
+            raise RuntimeError(f"No conversion found for data type {aidge_type}.")
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/parameter.jinja",
+            dims=aidge_tensor.dims(),
+            data_t=datatype,  # TODO: get data from producer
+            name=self.tensor_name,
+            values=str(aidge_tensor)
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/producer.jinja",
+            name=self.name,
+            tensor_name=self.tensor_name,
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/relu.py b/aidge_core/aidge_export_aidge/operator_export/relu.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0f4f6afdc35737a8967f51c1859bda0c9773f88
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/relu.py
@@ -0,0 +1,21 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core import ExportNode, generate_str
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from pathlib import Path
+
+@operator_register("ReLU")
+class ReLU(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/relu.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/sub.py b/aidge_core/aidge_export_aidge/operator_export/sub.py
new file mode 100644
index 0000000000000000000000000000000000000000..efcdd0924fbcf6944b0fb95a967e1a3e16ccc3c5
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/sub.py
@@ -0,0 +1,21 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core import ExportNode, generate_str
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from pathlib import Path
+
+@operator_register("Sub")
+class Sub(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/sub.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/static/CMakeLists.txt b/aidge_core/aidge_export_aidge/static/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c52e09e82699b1f2c0f8e10ed9f4ab83fbb0f10f
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/CMakeLists.txt
@@ -0,0 +1,148 @@
+cmake_minimum_required(VERSION 3.15)
+
+
+file(STRINGS "${CMAKE_SOURCE_DIR}/version.txt" version)
+file(STRINGS "${CMAKE_SOURCE_DIR}/project_name.txt" project)
+
+message(STATUS "Project name: ${project}")
+message(STATUS "Project version: ${version}")
+
+# Note: the project name is ${project} and the python module name is also ${project}
+set(module_name _${project}) # target name
+
+project(${project})
+set(CXX_STANDARD 14)
+
+##############################################
+# Define options
+option(PYBIND "python binding" ON)
+option(WERROR "Warning as error" OFF)
+option(TEST "Enable tests" ON)
+option(COVERAGE "Enable coverage" OFF)
+option(ENABLE_ASAN "Enable ASan (AddressSanitizer) for runtime analysis of memory use (over/underflow, memory leak, ...)" OFF)
+
+##############################################
+# Import utils CMakeLists
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
+include(PybindModuleCreation)
+
+if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
+    Include(CodeCoverage)
+endif()
+
+##############################################
+# Find system dependencies
+find_package(aidge_core REQUIRED)
+# Example of adding a dependency to the CPU backend
+# find_package(aidge_backend_cpu REQUIRED)
+
+##############################################
+# Create target and set properties
+file(GLOB_RECURSE src_files "src/*.cpp")
+file(GLOB_RECURSE inc_files "include/*.hpp")
+
+add_library(${module_name} ${src_files} ${inc_files})
+target_link_libraries(${module_name}
+    PUBLIC
+        _aidge_core # _ is added because we link the target not the project
+        # _aidge_backend_cpu  # Example of adding a dependency to the CPU backend
+)
+
+# Set target properties
+set_property(TARGET ${module_name} PROPERTY POSITION_INDEPENDENT_CODE ON)
+
+if( ${ENABLE_ASAN} )
+    message("Building ${module_name} with ASAN.")
+    set(SANITIZE_FLAGS -fsanitize=address -fno-omit-frame-pointer)
+    target_link_libraries(${module_name}
+        PUBLIC
+            -fsanitize=address
+    )
+    target_compile_options(${module_name}
+        PRIVATE
+            ${SANITIZE_FLAGS}
+    )
+endif()
+
+target_include_directories(${module_name}
+    PUBLIC
+        $<INSTALL_INTERFACE:include>
+        $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+    PRIVATE
+        ${CMAKE_CURRENT_SOURCE_DIR}/src
+)
+
+# PYTHON BINDING
+if (PYBIND)
+    generate_python_binding(${project} ${module_name})
+
+    # Handles Python + pybind11 headers dependencies
+    target_link_libraries(${module_name}
+        PUBLIC
+            pybind11::pybind11
+        )
+endif()
+
+target_compile_features(${module_name} PRIVATE cxx_std_14)
+
+target_compile_options(${module_name} PRIVATE
+    $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
+    -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror>>)
+target_compile_options(${module_name} PRIVATE
+    $<$<CXX_COMPILER_ID:MSVC>:
+    /W4>)
+
+if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
+    append_coverage_compiler_flags()
+endif()
+
+##############################################
+# Installation instructions
+
+include(GNUInstallDirs)
+set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR}/cmake/${project})
+
+install(TARGETS ${module_name} EXPORT ${project}-targets
+  LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+  ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+  RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+)
+
+install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+
+# Export the targets to a script
+
+install(EXPORT ${project}-targets
+ FILE "${project}-targets.cmake"
+ DESTINATION ${INSTALL_CONFIGDIR}
+ COMPONENT ${module_name}
+)
+
+# Create a ConfigVersion.cmake file
+include(CMakePackageConfigHelpers)
+write_basic_package_version_file(
+    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config-version.cmake"
+    VERSION ${version}
+    COMPATIBILITY AnyNewerVersion
+)
+
+configure_package_config_file("${project}-config.cmake.in"
+    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config.cmake"
+    INSTALL_DESTINATION ${INSTALL_CONFIGDIR}
+)
+
+# Install the config, config-version and custom find modules
+install(FILES
+    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config.cmake"
+    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config-version.cmake"
+    DESTINATION ${INSTALL_CONFIGDIR}
+)
+
+##############################################
+## Exporting from the build tree
+export(EXPORT ${project}-targets
+    FILE "${CMAKE_CURRENT_BINARY_DIR}/${project}-targets.cmake")
+
+# Compile executable
+add_executable(main main.cpp)
+target_link_libraries(main PUBLIC _aidge_core ${module_name})
diff --git a/aidge_core/aidge_export_aidge/static/README.md b/aidge_core/aidge_export_aidge/static/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1ce48d5274cbc2f007bde7c7be01e41e617cb19a
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/README.md
@@ -0,0 +1,5 @@
+To compile:
+
+> mkdir build && cd build
+> cmake -DCMAKE_INSTALL_PREFIX:PATH=<your/aidge/install/prefix> ..
+> make all install
+
+Replace `<your/aidge/install/prefix>` with the directory where Aidge is
+installed, so that CMake can locate the `aidge_core` package.
diff --git a/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake b/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..87e70fc38c9e4ec4ddb44cbe5d7fb2a31c2e94d6
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
@@ -0,0 +1,21 @@
+function(generate_python_binding name target_to_bind)
+    add_definitions(-DPYBIND)
+    Include(FetchContent)
+
+    FetchContent_Declare(
+        PyBind11
+        GIT_REPOSITORY https://github.com/pybind/pybind11.git
+        GIT_TAG        v2.10.4 # or a later release
+    )
+
+    # Use the new FindPython mode (recommended). Requires CMake 3.15+
+    find_package(Python COMPONENTS Interpreter Development)
+    FetchContent_MakeAvailable(PyBind11)
+
+    message(STATUS "Creating binding for module ${name}")
+    file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
+
+    pybind11_add_module(${name} MODULE ${pybind_src_files} NO_EXTRAS) # NO_EXTRAS required for pip install
+    target_include_directories(${name} PUBLIC "python_binding")
+    target_link_libraries(${name} PUBLIC ${target_to_bind})
+endfunction()
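+
+# Example usage (as done by the generated CMakeLists.txt):
+#     generate_python_binding(${project} ${module_name})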
diff --git a/aidge_core/aidge_export_aidge/static/export-config.cmake.in b/aidge_core/aidge_export_aidge/static/export-config.cmake.in
new file mode 100644
index 0000000000000000000000000000000000000000..f3604be11c27d86caf1ad8a48b333b9bd8f30625
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/export-config.cmake.in
@@ -0,0 +1,3 @@
+include(${CMAKE_CURRENT_LIST_DIR}/@project@-config-version.cmake)
+
+include(${CMAKE_CURRENT_LIST_DIR}/@project@-targets.cmake)
diff --git a/aidge_core/aidge_export_aidge/static/include/dnn.hpp b/aidge_core/aidge_export_aidge/static/include/dnn.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3a4d5c02eceee1054c93b8ad635a71d3d04ac2fc
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/include/dnn.hpp
@@ -0,0 +1,17 @@
+#ifndef DNN_HPP
+#define DNN_HPP
+#include <aidge/graph/GraphView.hpp>
+/**
+ * @brief This file contains everything related to the construction of the
+ * neural network.
+ */
+
+/**
+ * @brief This function generates the exported Aidge::GraphView.
+ *
+ * @return std::shared_ptr<Aidge::GraphView>
+ */
+std::shared_ptr<Aidge::GraphView> generateModel();
+
+#endif /* DNN_HPP */
diff --git a/aidge_core/aidge_export_aidge/static/main.cpp b/aidge_core/aidge_export_aidge/static/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ab8bac1851b6d2dae4bf97bd3af10e19e0b71c1e
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/main.cpp
@@ -0,0 +1,16 @@
+#include <iostream>
+#include <aidge/backend/cpu.hpp>
+
+#include "include/dnn.hpp"
+
+int main()
+{
+
+    std::cout << "BEGIN" << std::endl;
+
+    std::shared_ptr<Aidge::GraphView> graph = generateModel();
+
+    std::cout << "END" << std::endl;
+
+    return 0;
+}
diff --git a/aidge_core/aidge_export_aidge/static/project_name.txt b/aidge_core/aidge_export_aidge/static/project_name.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d5637593fe045bb602bd181bf0f242f0943fb9fd
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/project_name.txt
@@ -0,0 +1 @@
+export
diff --git a/aidge_core/aidge_export_aidge/static/python_binding/pybind.cpp b/aidge_core/aidge_export_aidge/static/python_binding/pybind.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..072fc4cd6012996a4fcda1d18b8244209c69797a
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/python_binding/pybind.cpp
@@ -0,0 +1,14 @@
+#include <pybind11/pybind11.h>
+
+#include "dnn.hpp"
+
+namespace py = pybind11;
+
+
+void init_export(py::module& m){
+    m.def("generate_model", generateModel);
+}
+
+PYBIND11_MODULE(export, m) {
+    init_export(m);
+}
diff --git a/aidge_core/aidge_export_aidge/static/version.txt b/aidge_core/aidge_export_aidge/static/version.txt
new file mode 100644
index 0000000000000000000000000000000000000000..77d6f4ca23711533e724789a0a0045eab28c5ea6
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/version.txt
@@ -0,0 +1 @@
+0.0.0
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..88b976a47fdeaad587e9ec50c27a085e88de23c5
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
@@ -0,0 +1,19 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+#define _{{name|upper}}_IN_CHANNELS  {{InChannels}}
+#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}}
+
+{% for i in range(KernelDims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}}
+{%- endfor %}
+{% for i in range(StrideDims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}}
+{%- endfor %}
+{% for i in range(DilationDims|length) %}
+#define _{{name|upper}}_DILATION_{{i}} {{DilationDims[i]}}
+{%- endfor %}
+
+#define _{{name|upper}}_NO_BIAS {{NoBias|int}}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..96eabf09d47a9b72eeac8e95ce8f5eb8e6d30243
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
@@ -0,0 +1,9 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+#define _{{name|upper}}_IN_CHANNELS  {{InChannels}}
+#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}}
+
+#define _{{name|upper}}_NO_BIAS {{NoBias|int}}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..d258f580e6ff9c523a87b834fdccf2f3b14fb133
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
@@ -0,0 +1,13 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+{% for i in range(KernelDims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}}
+{%- endfor %}
+{% for i in range(StrideDims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}}
+{%- endfor %}
+
+#define _{{name|upper}}_CEIL_MODE {{CeilMode|int}}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/dnn.jinja b/aidge_core/aidge_export_aidge/templates/dnn.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..5da46b2d8a439a359dfb1c7ec8ebc18e8d516767
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/dnn.jinja
@@ -0,0 +1,36 @@
+/********************************************************************************
+ * This file has been generated by the Aidge export.
+ ********************************************************************************/
+
+/*** STD INCLUDES ***/
+#include <memory>  // std::shared_ptr
+
+/*** AIDGE INCLUDES ***/
+#include <aidge/graph/GraphView.hpp>  // Aidge::GraphView
+#include <aidge/graph/Node.hpp>       // Aidge::Node
+#include <aidge/graph/OpArgs.hpp>     // Aidge::Sequential
+
+/*** AIDGE OPERATORS ***/
+{%- for operator in operators %}
+#include <aidge/operator/{{operator}}.hpp>
+{%- endfor %}
+
+/*** OPERATOR ATTRIBUTES & PARAMETERS ***/
+{%- for header in headers %}
+#include "{{ header }}"
+{%- endfor %}
+
+/*** HEADER ***/
+#include "dnn.hpp"
+
+
+std::shared_ptr<Aidge::GraphView> generateModel() {
+    /*** BUILDING GRAPH ***/
+    std::shared_ptr<Aidge::GraphView> graph = std::make_shared<Aidge::GraphView>();
+
+    {%- for action in actions %}
+    {{ action }}
+    {%- endfor %}
+
+    return graph;
+}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8e841ea2a10c71b884736dcbd7cfd03b52c5ad4f
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
@@ -0,0 +1,7 @@
+{# NOTE: A shorter notation such as {%- for input in inputs if input[0] %}
+would break loop.index, as inputs set to None would not increment it! #}
+{%- for input in inputs %}
+{%- if input[0] %}
+{{input[0]}}->addChild({{name}}, {{input[1]}}, {{loop.index - 1}}); {# NOTE: loop.index begin at 1 #}
+{%- endif %}
+{%- endfor %}
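+{# Example of a generated line (hypothetical node names):
+   conv0_w->addChild(conv0, 0, 1); #}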
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..72a12c6015e3962eaecc0301f54bf228c16fb29c
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
@@ -0,0 +1,27 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Conv(
+            _{{name|upper}}_IN_CHANNELS,
+            _{{name|upper}}_OUT_CHANNELS,
+            {
+            {%- for i in range(KernelDims|length) -%}
+                _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            "{{name}}",
+            {
+            {%- for i in range(StrideDims|length) -%}
+                _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            {
+            {%- for i in range(DilationDims|length) -%}
+                _{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            _{{name|upper}}_NO_BIAS
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..80d978543fd15d4cce3b4329a9bd3481fb88afaa
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja
@@ -0,0 +1,12 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::FC(
+            _{{name|upper}}_IN_CHANNELS,
+            _{{name|upper}}_OUT_CHANNELS,
+            _{{name|upper}}_NO_BIAS,
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..c6587c128509712e1a8e903e7484476548e9347d
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
@@ -0,0 +1,20 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::MaxPooling(
+            {
+            {%- for i in range(KernelDims|length) -%}
+                _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            "{{name}}",
+            {
+            {%- for i in range(StrideDims|length) -%}
+                _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            _{{name|upper}}_CEIL_MODE
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/producer.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/producer.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8e0a465a044ebfcc249206f9f5886c7a38fc3252
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/producer.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Producer(
+            {{tensor_name}},
+            "{{name}}"
+        );
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/relu.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/relu.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8fd58a30bddd39647dd3b25b2982e67174220381
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/relu.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::ReLU(
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/sub.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/sub.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..d9417751e4117cb296ec874d946b93c2c64df614
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/sub.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Sub(
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/parameter.jinja b/aidge_core/aidge_export_aidge/templates/parameter.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..11a407cc89f72f24167871a594decc6d90ab489d
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/parameter.jinja
@@ -0,0 +1,11 @@
+#ifndef EXPORT_PARAMETERS_{{name|upper}}_H
+#define EXPORT_PARAMETERS_{{name|upper}}_H
+
+#include <aidge/data/Tensor.hpp>
+#include <memory>
+
+std::shared_ptr<Aidge::Tensor> {{name}} = std::make_shared<Aidge::Tensor>(Aidge::Array{{dims|length}}D<{{data_t}}, {{ dims|join(", ") }}> {
+{{ values }}
+});
+
+#endif /* EXPORT_PARAMETERS_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/pybind.jinja b/aidge_core/aidge_export_aidge/templates/pybind.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..9b48cc97f242813cc33b77bfc028e85c08b0cec7
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/pybind.jinja
@@ -0,0 +1,14 @@
+#include <pybind11/pybind11.h>
+
+#include "dnn.hpp"
+
+namespace py = pybind11;
+
+
+void init_{{name}}(py::module& m){
+    m.def("generate_model", generateModel);
+}
+
+PYBIND11_MODULE({{name}}, m) {
+    init_{{name}}(m);
+}
diff --git a/aidge_core/aidge_export_aidge/utils/__init__.py b/aidge_core/aidge_export_aidge/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecdf2aec2692a48e108d5f4ad05ed05803319525
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/utils/__init__.py
@@ -0,0 +1,11 @@
+from .operator_registry import *
+
+def parse_node_input(node_inputs: list) -> list:
+    """Parse node intputs in order to adapt the list for Jinja.
+
+    :param node_inputs: return of node.inputs()
+    :type node_inputs: list of tuple of aidge_core.Node, output idx.
+    :return: list of tuple of node name, output idx.
+    :rtype: list
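+
+    A sketch of the mapping (hypothetical names):
+    ``[(node_a, 0), None]`` becomes ``[("node_a", 0), None]``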
+    """
+    return [None if parent_node is None else (parent_node.name(), outId) for parent_node, outId in node_inputs]
diff --git a/aidge_core/aidge_export_aidge/utils/operator_registry.py b/aidge_core/aidge_export_aidge/utils/operator_registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd6fbaaceeba9c2125b38354eca9cc116acd29b1
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/utils/operator_registry.py
@@ -0,0 +1,18 @@
+OPERATORS_REGISTRY = {}
+
+def operator_register(*args):
+    key_list = list(args)
+
+    def decorator(operator):
+        def wrapper(*args, **kwargs):
+            return operator(*args, **kwargs)
+
+        for key in key_list:
+            OPERATORS_REGISTRY[key] = operator
+
+        return wrapper
+    return decorator
+
+def supported_operators():
+    return list(OPERATORS_REGISTRY.keys())
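+
+# Example usage (a minimal sketch; "MyOp" is a hypothetical operator type):
+#
+#     @operator_register("MyOp")
+#     class MyOp(ExportNode):
+#         ...
+#
+# Each key is mapped to the decorated class itself (the decorator returns a
+# wrapper function, but the registry stores the original class), so
+# OPERATORS_REGISTRY["MyOp"](node) instantiates the exporter and "MyOp" is
+# listed by supported_operators().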
diff --git a/aidge_core/export/code_generation.py b/aidge_core/export/code_generation.py
deleted file mode 100644
index b18b5476f8e083bcbe3d4f6c4a57132ebe7b780f..0000000000000000000000000000000000000000
--- a/aidge_core/export/code_generation.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-from jinja2 import Environment, FileSystemLoader
-
-
-def generate_file(file_path: str, template_path: str, **kwargs) -> None:
-    """Generate a file at `file_path` using the jinja template located at `file_path`.
-
-    kwargs are used to fill the template.
-
-    :param file_path: path where to generate the file
-    :type file_path: str
-    :param template_path: Path to the template to use for code generation
-    :type template_path: str
-    """
-    # Get directory name of the file
-    dirname = os.path.dirname(file_path)
-
-    # If directory doesn't exist, create it
-    if not os.path.exists(dirname):
-        os.makedirs(dirname)
-
-    # Get directory name and name of the template
-    template_dir = os.path.dirname(template_path)
-    template_name = os.path.basename(template_path)
-
-    # Select template
-    template = Environment(loader=FileSystemLoader(
-        template_dir)).get_template(template_name)
-
-    # Generate file
-    content = template.render(kwargs)
-    with open(file_path, mode="w", encoding="utf-8") as message:
-        message.write(content)
-
-def generate_str(template_path:str, **kwargs) -> str:
-    """Generate a string using the jinja template located at `file_path`.
-    kwargs are used to fill the template.
-
-    :param template_path: Path to the template to use for code generation
-    :type template_path: str
-    :return: A string of the interpreted template
-    :rtype: str
-    """
-    dirname = os.path.dirname(template_path)
-    filename = os.path.basename(template_path)
-    template = Environment(loader=FileSystemLoader(dirname)).get_template(filename)
-    return template.render(kwargs)
diff --git a/aidge_core/export/__init__.py b/aidge_core/export_utils/__init__.py
similarity index 100%
rename from aidge_core/export/__init__.py
rename to aidge_core/export_utils/__init__.py
diff --git a/aidge_core/export_utils/code_generation.py b/aidge_core/export_utils/code_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..a02fc0966702cec7a2cbe33f8411bb71e3035e90
--- /dev/null
+++ b/aidge_core/export_utils/code_generation.py
@@ -0,0 +1,46 @@
+from pathlib import Path
+from jinja2 import Environment, FileSystemLoader
+from typing import Union
+
+
+def generate_file(file_path: Union[Path, str], template_path: Union[Path, str], **kwargs) -> None:
+    """Generate a file at `file_path` using the jinja template located at `file_path`.
+
+    kwargs are used to fill the template.
+
+    :param file_path: path where to generate the file
+    :type file_path: pathlib.Path or str
+    :param template_path: Path to the template to use for code generation
+    :type template_path: pathlib.Path or str
+    """
+    # Convert str -> Path for compatibility
+    if isinstance(file_path, str):
+        file_path = Path(file_path)
+    if isinstance(template_path, str):
+        template_path = Path(template_path)
+    # Make dir
+    file_path.parent.mkdir(parents=True, exist_ok=True)
+
+    # Select template
+    template = Environment(loader=FileSystemLoader(
+        template_path.parent)).get_template(template_path.name)
+
+    # Generate file
+    with open(file_path, mode="w", encoding="utf-8") as file:
+        file.write(template.render(kwargs))
+
+
+def generate_str(template_path: Union[Path, str], **kwargs) -> str:
+    """Generate a string using the jinja template located at `file_path`.
+    kwargs are used to fill the template.
+
+    :param template_path: Path to the template to use for code generation
+    :type template_path: pathlib.Path or str
+    :return: A string of the interpreted template
+    :rtype: str
+    """
+    # Convert str -> Path for compatibility
+    if isinstance(template_path, str):
+        template_path = Path(template_path)
+    return Environment(loader=FileSystemLoader(
+        template_path.parent)).get_template(template_path.name).render(kwargs)
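+
+
+# Example usage (a sketch; paths and parameters are illustrative):
+#     generate_file("export/include/net.hpp", "templates/net.jinja", name="net")
+#     code = generate_str("templates/net.jinja", name="net")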
diff --git a/aidge_core/export/node_export.py b/aidge_core/export_utils/node_export.py
similarity index 100%
rename from aidge_core/export/node_export.py
rename to aidge_core/export_utils/node_export.py
diff --git a/aidge_core/unit_tests/static/main.cpp b/aidge_core/unit_tests/static/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..06171e2a036a18b0dea3dca40de34c296d99222d
--- /dev/null
+++ b/aidge_core/unit_tests/static/main.cpp
@@ -0,0 +1,19 @@
+/*
+Example main.cpp used to test aidge export.
+This file is copied in the test export.
+*/
+#include <iostream>
+
+#include "include/dnn.hpp"
+
+int main()
+{
+
+    std::cout << "BEGIN" << std::endl;
+
+    std::shared_ptr<Aidge::GraphView> graph = generateModel();
+
+    std::cout << "END" << std::endl;
+
+    return 0;
+}
diff --git a/aidge_core/unit_tests/test_export.py b/aidge_core/unit_tests/test_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..3061d7940603b8eeca2c07368bc1bbd6756fa1f3
--- /dev/null
+++ b/aidge_core/unit_tests/test_export.py
@@ -0,0 +1,83 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+import pathlib
+import os
+import sys
+import subprocess
+import shutil
+
+def initFiller(model):
+    # Initialize parameters (weights and biases)
+    for node in model.get_nodes():
+        if node.type() == "Producer":
+            prod_op = node.get_operator()
+            value = prod_op.get_output(0)
+            value.set_backend("cpu")
+            tuple_out = node.output(0)[0]
+            # No conv in current network
+            if tuple_out[0].type() == "Conv" and tuple_out[1]==1:
+                # Conv weight
+                aidge_core.xavier_uniform_filler(value)
+            elif tuple_out[0].type() == "Conv" and tuple_out[1]==2:
+                # Conv bias
+                aidge_core.constant_filler(value, 0.01)
+            elif tuple_out[0].type() == "FC" and tuple_out[1]==1:
+                # FC weight
+                aidge_core.normal_filler(value)
+            elif tuple_out[0].type() == "FC" and tuple_out[1]==2:
+                # FC bias
+                aidge_core.constant_filler(value, 0.01)
+            else:
+                pass
+
+
+class test_export(unittest.TestCase):
+    """Test aidge export
+    """
+    def setUp(self):
+        self.EXPORT_PATH = pathlib.Path("myexport")
+    def tearDown(self):
+        pass
+
+    def test_generate_export(self):
+        # Create model
+
+        model = aidge_core.sequential([
+            aidge_core.FC(in_channels=32*32*3, out_channels=512, name="InputNode"),
+            aidge_core.ReLU(name="Relu0"),
+            aidge_core.FC(in_channels=512, out_channels=256, name="FC1"),
+            aidge_core.ReLU(name="Relu1"),
+            aidge_core.FC(in_channels=256, out_channels=128, name="FC2"),
+            aidge_core.ReLU(name="Relu2"),
+            aidge_core.FC(in_channels=128, out_channels=10, name="OutputNode"),
+        ])
+
+        initFiller(model)
+
+        # Export model
+        aidge_core.export(self.EXPORT_PATH, model)
+        self.assertTrue(self.EXPORT_PATH.is_dir(), "Export folder has not been generated")
+        os.makedirs(self.EXPORT_PATH / "build", exist_ok=True)
+
+        # Test compilation of export
+        install_path = (
+            os.environ["AIDGE_INSTALL"]
+            if "AIDGE_INSTALL" in os.environ
+            else os.path.join(sys.prefix, "lib", "libAidge")
+        )
+
+        shutil.copyfile(pathlib.Path(__file__).parent / "static/main.cpp", self.EXPORT_PATH / "main.cpp")
+
+        subprocess.check_call(['cmake', str(self.EXPORT_PATH.absolute()), f'-DCMAKE_INSTALL_PREFIX:PATH={install_path}'], cwd=str(self.EXPORT_PATH / "build"))
+        subprocess.check_call(['make', 'all', 'install'], cwd=str(self.EXPORT_PATH / "build"))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 940440bad52e367fe04872a308c99e4c802fa242..651a5de69596ee867a97b06ba683f49b05a41303 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -59,6 +59,7 @@
 #include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Reshape.hpp"
+#include "aidge/operator/Resize.hpp"
 #include "aidge/operator/Shape.hpp"
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/operator/Slice.hpp"
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 5a2f4e348ebb62d18151b797c4843a9743ee8bba..80f8408a01be5a9b1f485251af0b13b8069404c5 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -283,6 +283,11 @@ class Tensor : public Data,
      */
     Tensor operator/(const Tensor& other) const;
 
+    /**
+     * @brief Element-wise sqrt operation for Tensor.
+     * @return Tensor
+     */
+    Tensor sqrt() const;
 
     ~Tensor() noexcept;
 
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 3e7999cef20d8a2dbc8d5b403d59cb257e5a4722..4206331a2bea9f4ba51ffef6fc17447a23735951 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -133,19 +133,20 @@ inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, cons
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) {
+std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) {
     assert(inputIdx != gk_IODefaultIndex);
     static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
     const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
     auto prod = Producer(dims, prodName);
     prod->addChild(otherNode, 0, inputIdx);
     otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
+    return prod;
 }
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <std::size_t DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
-    addProducer(otherNode, inputIdx, to_array(dims), extension);
+std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
+    return addProducer(otherNode, inputIdx, to_array(dims), extension);
 }
 } // namespace Aidge
 
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c4c0e6fba9a7f26c411ad25ec0b6b488374613fa
--- /dev/null
+++ b/include/aidge/operator/Resize.hpp
@@ -0,0 +1,83 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_Resize_H_
+#define AIDGE_CORE_OPERATOR_Resize_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Resize_Op : public OperatorTensor,
+                  public Registrable<Resize_Op, std::string, std::shared_ptr<OperatorImpl>(const Resize_Op&)>{
+
+public:
+    static const std::string Type;
+
+    Resize_Op()
+        : OperatorTensor(Type, 4, 0, 1){}
+        
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Resize_Op(const Resize_Op& op)
+        : OperatorTensor(op)
+    {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Resize_Op, *this, op.backend());
+        }
+        else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Resize_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Resize_Op>(*this);
+    }
+
+    bool dimsForwarded() const override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    static const std::vector<std::string> getInputsName(){
+        // roi, scales and sizes are inputs, even though they are effectively constant parameters
+        return {"data_input", "roi", "scales", "sizes"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Resize(const std::string &name = "") {
+
+    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
+}
+
+}  // namespace Aidge
+
+
+#endif /* AIDGE_CORE_OPERATOR_Resize_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6e27cd6532db0e17751f6c664d14665909fb14b4
--- /dev/null
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -0,0 +1,75 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SHIFTGELU_H_
+#define AIDGE_CORE_OPERATOR_SHIFTGELU_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class ShiftGELU_Op : public OperatorTensor,
+    public Registrable<ShiftGELU_Op, std::string, std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)> {
+public:
+    static const std::string Type;
+
+    ShiftGELU_Op() : OperatorTensor(Type, 1, 0, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ShiftGELU_Op(const ShiftGELU_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ShiftGELU_Op, *this, op.backend());
+        }else{
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ShiftGELU_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ShiftGELU_Op>(*this);
+    }
+
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> ShiftGELU(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<ShiftGELU_Op>(), name);
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_SHIFTGELU_H_ */
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..bb54d55ba609239e74b40f4ca96b646860e83a3a
--- /dev/null
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -0,0 +1,75 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SHIFTMAX_H_
+#define AIDGE_CORE_OPERATOR_SHIFTMAX_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class ShiftMax_Op : public OperatorTensor,
+    public Registrable<ShiftMax_Op, std::string, std::shared_ptr<OperatorImpl>(const ShiftMax_Op&)> {
+public:
+    static const std::string Type;
+
+    ShiftMax_Op() : OperatorTensor(Type, 1, 0, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ShiftMax_Op(const ShiftMax_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ShiftMax_Op, *this, op.backend());
+        }else{
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ShiftMax_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ShiftMax_Op>(*this);
+    }
+
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> ShiftMax(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<ShiftMax_Op>(), name);
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_SHIFTMAX_H_ */
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index bca246c94434b280a12d070526ad4ffb2c7fbe7b..955b510e6cce6712e4738c0064836dbb733a3c3d 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -22,9 +22,11 @@ void init_Data(py::module& m){
     .value("Float32", DataType::Float32)
     .value("Float16", DataType::Float16)
     .value("Int8", DataType::Int8)
+    .value("Int16", DataType::Int16)
     .value("Int32", DataType::Int32)
     .value("Int64", DataType::Int64)
     .value("UInt8", DataType::UInt8)
+    .value("UInt16", DataType::UInt16)
     .value("UInt32", DataType::UInt32)
     .value("UInt64", DataType::UInt64)
     ;
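
The two new enum values are usable anywhere a DataType is accepted; for example (sketch, using the set_datatype binding shown below in pybind_Tensor.cpp):

```python
import aidge_core

print(aidge_core.DataType.Int16, aidge_core.DataType.UInt16)  # new values

# e.g. requesting a cast on an existing tensor t with a backend attached:
# t.set_datatype(aidge_core.DataType.Int16)
```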
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index fbb389432a50528fc4ecd0f41e931565d2eab0bb..185e4771295f96cc02adfa3d669ffbb558195ca0 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -77,6 +77,7 @@ void init_Tensor(py::module& m){
     .def(py::self - py::self)
     .def(py::self * py::self)
     .def(py::self / py::self)
+    .def("sqrt", &Tensor::sqrt)
     .def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true)
     .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
     .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a925af8cf357dabc09f4e8e3c39af9519b4ed550
--- /dev/null
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -0,0 +1,29 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Resize.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Resize(py::module& m) {
+    py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
+        .def_static("get_inputs_name", &Resize_Op::getInputsName)
+        .def_static("get_outputs_name", &Resize_Op::getOutputsName);
+
+    declare_registrable<Resize_Op>(m, "ResizeOp");
+
+    m.def("Resize", &Resize, py::arg("name") = "");
+}
+}  // namespace Aidge
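
A hedged usage sketch: Resize takes only a name, and its data-dependent inputs (ROI, scales, sizes) are wired in as separate nodes. Here a Producer feeds integer scale factors into input #2; the Producer/add_child wiring is an assumption based on the usual aidge graph-building API:

```python
import numpy as np
import aidge_core

resize = aidge_core.Resize("resize0")
scales = aidge_core.Producer(
    aidge_core.Tensor(np.array([1, 1, 2, 2], dtype=np.int64)), "scales")
scales.add_child(resize, 0, 2)  # output 0 of the producer -> input #2 (Scales)
```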
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 42e29fd43324d12ea4cac2c16c88a056903b7c54..9443ed55eaaf6dc04ad9ee4612ed9d491aed54ae 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -51,6 +51,7 @@ void init_Pow(py::module&);
 void init_ReduceMean(py::module&);
 void init_ReLU(py::module&);
 void init_Reshape(py::module&);
+void init_Resize(py::module&);
 void init_Scaling(py::module&);
 void init_Shape(py::module&);
 void init_Sigmoid(py::module&);
@@ -121,6 +122,7 @@ void init_Aidge(py::module& m) {
     init_ReduceMean(m);
     init_ReLU(m);
     init_Reshape(m);
+    init_Resize(m);
     init_Scaling(m);
     init_Shape(m);
     init_Sigmoid(m);
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index 4bb7d50fa4f6d3da00dd176e3ff5be037017e3ad..ac35ce0a62408a69637a4160c9a008aba9dceb66 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -28,6 +28,7 @@ void init_Scheduler(py::module& m){
     .def("resetScheduling", &Scheduler::resetScheduling)
     .def("generate_scheduling", &Scheduler::generateScheduling)
     .def("get_static_scheduling", &Scheduler::getStaticScheduling, py::arg("step") = 0)
+    .def("graph_view", &Scheduler::graphView)
     .def("generate_memory", &Scheduler::generateMemory, py::arg("inc_producers") = false, py::arg("wrap_around_buffer") = false)
     ;
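
The new graph_view accessor makes the scheduled graph reachable from Python, e.g. for inspection after scheduling. A minimal sketch, assuming an existing GraphView gv:

```python
import aidge_core

scheduler = aidge_core.SequentialScheduler(gv)  # gv: an existing GraphView
scheduler.generate_scheduling()
assert scheduler.graph_view() is not None  # accessor added by this patch
```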
 
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index de9a4bd0ac7848052ba34068c7f5fa25b865f53c..28fb90cebf8e387e69f1ec39c46a6a47c8a4d316 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -20,6 +20,7 @@
 #include "aidge/operator/Div.hpp"
 #include "aidge/operator/Mul.hpp"
 #include "aidge/operator/Sub.hpp"
+#include "aidge/operator/Sqrt.hpp"
 #include "aidge/operator/Transpose.hpp"
 #include "aidge/utils/Types.h"
 
@@ -115,6 +116,17 @@ Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
     return div_.getOutput(0)->clone();
 }
 
+Aidge::Tensor Aidge::Tensor::sqrt() const {
+    AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
+    auto sqrt_ = Sqrt_Op();
+    sqrt_.associateInput(0, std::make_shared<Tensor>(*this));
+    sqrt_.setDataType(dataType());
+    sqrt_.setDataFormat(dataFormat());
+    sqrt_.setBackend(mImpl->backend());
+    sqrt_.forward();
+    return sqrt_.getOutput(0)->clone();
+}
+
 Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
     if (this == &other) {
         return *this;
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..085373301662a46fa8d03e17e98e5f98eb5216e6
--- /dev/null
+++ b/src/operator/Resize.cpp
@@ -0,0 +1,133 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Resize.hpp"
+
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int64_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+#include <fmt/core.h>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Resize_Op::Type = "Resize";
+
+bool Aidge::Resize_Op::dimsForwarded() const {
+    // Output dims are data-dependent as soon as the ROI (input #1),
+    // scales (input #2) or sizes (input #3) input is connected and non-empty
+    if ((getInput(1) && !getInput(1)->empty())
+        || (getInput(2) && !getInput(2)->empty())
+        || (getInput(3) && !getInput(3)->empty())
+        )
+    {
+        return false;
+    }
+
+    return OperatorTensor::dimsForwarded();
+}
+
+bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
+
+    // Check that the mandatory data input has been associated
+    if (!getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
+    }
+
+    AIDGE_ASSERT(getInput(0)->nbDims() == 4,
+            "input tensor must have 4 dimensions (batch, channel, height, width).");
+
+    if (getInput(0)->empty()) {
+        return false;
+    }
+
+    const bool input1ROIPresent    = getInput(1) && !getInput(1)->empty();
+    const bool input2ScalesPresent = getInput(2) && !getInput(2)->empty();
+    const bool input3SizesPresent  = getInput(3) && !getInput(3)->empty();
+
+    AIDGE_ASSERT(input2ScalesPresent != input3SizesPresent, "Exactly one of scales (input #2) and sizes (input #3) must be specified.");
+
+    if (input1ROIPresent) {
+
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input #1 (ROI) is provided, but it is not yet supported.");
+
+    } else if (input2ScalesPresent) {
+
+        if (!allowDataDependency) {
+            Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on input #2");
+            return false;
+        }
+
+        AIDGE_ASSERT(getInput(0)->nbDims() == getInput(2)->size(),
+             "input #2 (Scales) must contain one value per dimension of input #0.");
+
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        const std::vector<DimSize_t> inDims = getInput(0)->dims();
+
+        std::shared_ptr<Tensor> fallback;
+        // Note: scales are cast to int64, so only integer scale factors are handled here
+        const auto& scales = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+        for (std::size_t dim = 0; dim < getInput(2)->size(); ++dim) {
+            outDims[dim] = inDims[dim] * static_cast<int64_t*>(scales.getImpl()->hostPtr())[dim];
+        }
+
+        mOutputs[0]->resize(outDims);
+
+        return true;
+
+    } else if (input3SizesPresent) {
+
+        if (!allowDataDependency) {
+            Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on input #3");
+            return false;
+        }
+
+        AIDGE_ASSERT(getInput(0)->nbDims() == getInput(3)->size(),
+             "input #3 (Sizes) must contain one value per dimension of input #0.");
+
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+
+        std::shared_ptr<Tensor> fallback;
+        const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+
+        for (std::size_t dim = 0; dim < getInput(3)->size(); ++dim) {
+            outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
+        }
+
+        mOutputs[0]->resize(outDims);
+
+        return true;
+    } else {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Either input #2 (Scales) or input #3 (Sizes) must be present.");
+    }
+
+    return false;
+}
+
+void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Resize_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+
+    // By default, automatically set the backend for all inputs: ROI, scales and sizes
+    if (getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    if (getInput(2)) {
+        getInput(2)->setBackend(name, device);
+    }
+    if (getInput(3)) {
+        getInput(3)->setBackend(name, device);
+    }
+}
\ No newline at end of file
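
To make the two data-dependent paths of forwardDims() concrete: with a (1, 3, 224, 224) input, integer scales of [1, 1, 2, 2] produce (1, 3, 448, 448), while sizes are taken as the output shape directly. A sketch of the arithmetic the implementation performs:

```python
# Mirrors the shape computation in Resize_Op::forwardDims() (sketch only).
in_dims = [1, 3, 224, 224]

scales = [1, 1, 2, 2]  # input #2 path: out = in * scale, per dimension
assert [d * s for d, s in zip(in_dims, scales)] == [1, 3, 448, 448]

sizes = [1, 3, 448, 448]  # input #3 path: sizes are used as-is
assert list(sizes) == [1, 3, 448, 448]
```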
diff --git a/src/operator/ShiftGELU.cpp b/src/operator/ShiftGELU.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ede83e291bd1670885192e3ac8f4958e185c28e2
--- /dev/null
+++ b/src/operator/ShiftGELU.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ShiftGELU.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ShiftGELU_Op::Type = "ShiftGELU";
+
+void Aidge::ShiftGELU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(ShiftGELU_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file
diff --git a/src/operator/ShiftMax.cpp b/src/operator/ShiftMax.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eb77ae655354eac03fbdc0f1a84a44391795ee8c
--- /dev/null
+++ b/src/operator/ShiftMax.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ShiftMax.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ShiftMax_Op::Type = "ShiftMax";
+
+void Aidge::ShiftMax_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(ShiftMax_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 21009318cddae7ce60a01592b19ab237a77fbd2b..7c8c9c2ba2119e0dc708ef4b788690eb223ea0b3 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -32,10 +32,12 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
                           std::shared_ptr<Aidge::Node> batchnormNode) {
     // Case: convNode is a MetaOperator ending with a Convolution
     // eg. PaddedConv
+    std::shared_ptr<Node> metaNode;
     if (!(convNode -> getOperator() -> isAtomic())) {
-        const std::shared_ptr<MetaOperator_Op> metaNode = std::static_pointer_cast<MetaOperator_Op>(convNode -> getOperator());
-        const std::shared_ptr<GraphView>  metanodeGraph = metaNode -> getMicroGraph();
-        const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> outputNodes = metanodeGraph -> getOrderedOutputs();
+        metaNode = convNode;
+        const auto metaOp = std::static_pointer_cast<MetaOperator_Op>(convNode -> getOperator());
+        const std::shared_ptr<GraphView>  metaOpGraph = metaOp -> getMicroGraph();
+        const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> outputNodes = metaOpGraph -> getOrderedOutputs();
         if (outputNodes.size() != 1) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Bad MetaOperator argument for fuseBatchNorm recipe.");
         }
@@ -68,6 +70,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
         convNbOutChannels = convOpPtr->nbChannels();
         kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
     }
+    AIDGE_ASSERT(kernelDims.size() == 2, "fuseBatchNorm(): only 2D convolutions are supported");
 
     std::shared_ptr<Tensor> scaleBuf, shiftBuf, b_meanBuf, b_varBuf;
     const Tensor& scale = batchOp->getInput(1)->refCastFrom(scaleBuf, DataType::Float32, "cpu");
@@ -98,6 +101,51 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
         fmt::print("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
     }
 
+    // Add a bias if none exists, as there will be one after the fusion
+    if (!convOp->getInput(2)) {
+        if (metaNode) {
+            // Conv is inside a meta-operator: add the bias outside of it.
+            // Find the input index of the meta-operator that corresponds
+            // to the bias:
+            const auto metaOp = std::static_pointer_cast<MetaOperator_Op>(metaNode->getOperator());
+            const auto metaOpGraph = metaOp->getMicroGraph();
+            IOIndex_t inputIdx = 0;
+            for (auto input : metaOpGraph->getOrderedInputs()) {
+                if (input.first == convNode && input.second == 2) {
+                    break;
+                }
+                ++inputIdx;
+            }
+
+            auto prod = addProducer(metaNode, inputIdx, {convNbOutChannels}, "b");
+            // Add the new bias node to the same views as the meta node
+            for (auto g : metaNode->views()) {
+                g->add(prod);
+            }
+        }
+        else {
+            auto prod = addProducer(convNode, 2, {convNbOutChannels}, "b");
+            if (convNode->input(1).first) {
+                // Add the new bias node to the same views as the weights node
+                // if possible
+                for (auto g : convNode->input(1).first->views()) {
+                    g->add(prod);
+                }
+            }
+            else {
+                for (auto g : convNode->views()) {
+                    g->add(prod);
+                }
+            }
+        }
+
+        AIDGE_INTERNAL_ASSERT(convOp->getInput(2) != nullptr);
+
+        // Use the same backend for the bias as for the weights
+        convOp->getInput(2)->setBackend(convOp->getInput(1)->backend());
+        convOp->getInput(2)->zeros();
+    }
+
     std::shared_ptr<Tensor> weightBuf, biasBuf;
     Tensor& weight = convOp->getInput(1)->refCastFrom(weightBuf, DataType::Float32, "cpu");
     Tensor& bias = convOp->getInput(2)->refCastFrom(biasBuf, DataType::Float32, "cpu");
@@ -112,7 +160,6 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
                         ? b_var.get<float>(outChId) : meanVariance));
         // Weights adjustments
         for (std::size_t channel = 0; channel < channelsSize; ++channel) {
-            // TODO : Suppose kerneldims = 2
             for (std::size_t k0 = 0; k0 < kernelDims[0]; ++k0) {
                 for (std::size_t k1 = 0; k1 < kernelDims[1]; ++k1) {
                     std::vector<DimSize_t> currentIdx = {outChId, channel, k0, k1};
@@ -122,7 +169,6 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
             }
         }
 
-        // TODO : check if noBias==true is set, then set biasValue to 0
         float biasValue = bias.get<float>(outChId);
 
         biasValue = shift.get<float>(outChId) + (biasValue - b_mean.get<float>(outChId)) * factor;
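
For context, this recipe performs the standard batch-norm folding: per output channel, with factor = scale / sqrt(epsilon + variance), the convolution weights become weight * factor and the bias becomes shift + (bias - mean) * factor; the new block above only guarantees that a zero-initialized bias tensor exists before this rewrite. A hedged sketch of invoking the recipe from Python, assuming it is exposed as aidge_core.fuse_batchnorm:

```python
import aidge_core

# gv: a GraphView assumed to contain Conv -> BatchNorm (or PaddedConv -> BatchNorm) chains.
aidge_core.fuse_batchnorm(gv)  # folds each BatchNorm into the preceding convolution
```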