diff --git a/aidge_core/export_utils/export_registry.py b/aidge_core/export_utils/export_registry.py
index 70c3e5fa47cb35cbf6611a5359e6a37e0f17620d..2490bfa2e11cc37ef9f40d9762795c713133bd13 100644
--- a/aidge_core/export_utils/export_registry.py
+++ b/aidge_core/export_utils/export_registry.py
@@ -3,23 +3,34 @@ import aidge_core
 from aidge_core.export_utils import ExportNode
 
 class classproperty:
-    """Helper class to define class properties,
-    Is equivalent to applying the decorator ``@property`` and ``@classmethod``.
-    But these two decorator are exclusive with python > 12.
-    See discussion https://discuss.python.org/t/add-a-supported-read-only-classproperty-decorator-in-the-stdlib/18090/12
+    """Helper class to define class-level properties.
+
+    Equivalent to applying both the ``@property`` and ``@classmethod`` decorators,
+    allowing methods to be accessed as class properties. These two decorators
+    cannot be chained in recent Python versions (deprecated in 3.11, removed in 3.13).
+
+    See discussion: https://discuss.python.org/t/add-a-supported-read-only-classproperty-decorator-in-the-stdlib/18090/12
     """
 
     def __init__(self, fget):
+        """
+        :param fget: Function to be wrapped as a class property.
+        :type fget: Callable
+        """
         self.fget = fget
 
     def __get__(self, instance, owner):
         return self.fget(owner)
 
 
-# TODO: very naive implementation !
-# error handling should be added !
 class ExportLib(aidge_core.OperatorImpl):
-    """Aidge export lib, define a registry
+    """Aidge export library that manages a registry for operators and static files.
+
+    This class provides a structure for registering different operator types and
+    export nodes, facilitating access and management of these elements.
+
+    :ivar _name: The name of the export library, used for namespacing.
+    :ivar static_files: A dictionary mapping paths of static files to their target locations relative to the export root.
     """
     # PUBLIC
     # Lib name useful ?
@@ -46,25 +57,48 @@ class ExportLib(aidge_core.OperatorImpl):
         """
         return cls._cls_export_node_registry.setdefault(cls, {})
 
-    # Override the virtual OperatorImpl method, in order to provide available
-    # implementation specifications
-    def get_available_impl_specs(self):
+    def get_available_impl_specs(self) -> List[aidge_core.ImplSpec]:
+        """Override the virtual OperatorImpl method, in order to provide available
+        implementation specifications.
+
+        :return: List of implementation specifications available for this operator type.
+        :rtype: List[aidge_core.ImplSpec]
+        """
         if self.get_operator().type() in self._export_node_registry:
             spec_vec = [i for i, _ in self._export_node_registry[self.get_operator().type()]]
             return spec_vec
         else:
             return []
 
-    def get_export_node(self, spec: aidge_core.aidge_core.ImplSpec):
+    def get_export_node(self, spec: aidge_core.ImplSpec) -> ExportNode:
+        """Given an :py:class:`aidge_core.ImplSpec`, return the ExportNode that is the closest match.
+
+        :param spec: Implementation specification to match
+        :type spec: :py:class:`aidge_core.ImplSpec`
+        :return: The class ExportNode that is the closest match
+        :rtype: ExportNode
+        """
         for registered_spec, export_node in self._export_node_registry[self.get_operator().type()]:
             if registered_spec == spec:
 
                 return export_node
         return None
 
-    # Decorator to register kernels for this export
     @classmethod
     def register(cls, op_type, spec):
+        """Decorator to register an operator implementation for a specified operator type.
+
+        Registers an operator under a given operator type and specification,
+        adding it to the export library registry. This method supports both
+        single operator types (str) and lists of types (List[str]).
+
+        :param op_type: The operator type(s) to register.
+        :type op_type: Union[str, List[str]]
+        :param spec: Implementation specification for the operator.
+    :type spec: :py:class:`aidge_core.ImplSpec`
+        :return: A wrapper class that initializes the registered operator.
+        :rtype: Callable
+        """
         def decorator(operator):
             class Wrapper(operator):
                 def __init__(self, *args, **kwargs):
@@ -92,9 +126,20 @@ class ExportLib(aidge_core.OperatorImpl):
             return Wrapper
         return decorator
 
-    # Decorator to register kernels for this export
     @classmethod
     def register_metaop(cls, op_type, spec):
+        """Decorator to register a MetaOperator with the export library.
+
+        Registers a MetaOperator under a given operator type and specification. This decorator
+        is intended for operator types that are grouped as meta operators.
+
+        :param op_type: Operator type(s) to register as a ``MetaOperator``.
+        :type op_type: Union[str, List[str]]
+        :param spec: Implementation specification for the MetaOperator.
+        :type spec: aidge_core.ImplSpec
+        :return: A wrapper class that initializes the registered MetaOperator.
+        :rtype: Callable
+        """
         def decorator(operator):
             class Wrapper(operator):
                 def __init__(self, *args, **kwargs):
@@ -114,3 +159,38 @@ class ExportLib(aidge_core.OperatorImpl):
                 spec.attrs.add_attr("type", type_name) # MetaOperator specs need to verify the type
             return Wrapper
         return decorator
+
+
+    @classmethod
+    def register_generic(cls, op_type, spec):
+        """Decorator to register a GenericOperator with the export library.
+
+        Registers a GenericOperator under a given operator type and specification. This decorator
+        is intended for operator types that are registered as generic operators.
+
+        :param op_type: Operator type(s) to register as a ``GenericOperator``.
+        :type op_type: Union[str, List[str]]
+        :param spec: Implementation specification for the GenericOperator.
+        :type spec: aidge_core.ImplSpec
+        :return: A wrapper class that initializes the registered GenericOperator.
+        :rtype: Callable
+        """
+        def decorator(operator):
+            class Wrapper(operator):
+                def __init__(self, *args, **kwargs):
+                    return operator(*args, **kwargs)
+            type_list = []
+            if isinstance(op_type, list):
+                type_list = op_type
+            elif isinstance(op_type, str):
+                type_list = [op_type]
+            else:
+                raise TypeError("Argument 'op_type' of register method should be of type 'List[str]' or 'str', got {type(type)}")
+            for type_name in type_list:
+                if (type_name not in cls._export_node_registry):
+                    cls._export_node_registry[type_name] = []
+                cls._export_node_registry[type_name].append((spec, operator))
+                aidge_core.register_GenericOperatorOp([cls._name, type_name], cls)
+                spec.attrs.add_attr("type", type_name) # GenericOperator specs need to verify the type
+            return Wrapper
+        return decorator
diff --git a/aidge_core/export_utils/generate_main.py b/aidge_core/export_utils/generate_main.py
index b7eee930669cc5f15f88714acf4884f5e30333b1..58946136fbdbeb35a0919d656efcc1de587e5136 100644
--- a/aidge_core/export_utils/generate_main.py
+++ b/aidge_core/export_utils/generate_main.py
@@ -2,7 +2,7 @@ import aidge_core
 from pathlib import Path
 from aidge_core.export_utils import generate_file, data_conversion
 
-def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView) -> None:
+def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView, inputs_tensor=None) -> None:
     """
     Generate a C++ file to manage the forward pass of a model using the given graph structure.
 
@@ -10,11 +10,15 @@ def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView) -> N
     and tensor sizes. It uses this data to populate a C++ file template (`main.jinja`), creating a file (`main.cpp`)
     that call the `model_forward` function, which handles data flow and processing for the exported model.
 
+    This function also generates files containing the input tensors if they have been set.
+
     :param export_folder: Path to the folder where the generated C++ file (`main.cpp`) will be saved.
     :type export_folder: str
     :param graph_view: An instance of :py:class:`aidge_core.graph_view`, providing access to nodes and
                        ordered input/output data within the computational graph.
     :type graph_view: aidge_core.graph_view
+    :param inputs_tensor: **For future** argument to provide tensor to use in the main function, not implemented yet!
+    :type inputs_tensor: None
     :raises RuntimeError: If there is an inconsistency in the output arguments (names, data types, sizes),
                           indicating an internal bug in the graph representation.
     """
@@ -27,14 +31,23 @@ def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView) -> N
 
     for in_node, in_idx in gv_inputs:
         in_node_input, in_node_input_idx = in_node.input(in_idx)
-        inputs_name.append(f"{in_node.name()}_input_{in_idx}" if in_node_input is None else f"{in_node_input.name()}_output_{in_node_input_idx}")
+        in_name = f"{in_node.name()}_input_{in_idx}" if in_node_input is None else f"{in_node_input.name()}_output_{in_node_input_idx}"
+        inputs_name.append(in_name)
+        input_tensor = in_node.get_operator().get_input(in_idx)
+        if input_tensor is None or input_tensor.undefined() or not input_tensor.has_impl():
+            if inputs_tensor is not None:
+                aidge_core.Log.notice("No support for inputs_tensor argument yet.")
+                aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functionnal after code generation.")
+            else:
+                aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functionnal after code generation.")
+        else:
+            aidge_core.export_utils.generate_input_file(export_folder=export_folder, array_name=in_name, tensor=input_tensor)
+
     for out_node, out_id in gv_outputs:
         outputs_name.append(f"{out_node.name()}_output_{out_id}")
         out_tensor = out_node.get_operator().get_output(out_id)
         outputs_dtype.append(data_conversion.aidge2c(out_tensor.dtype()))
         outputs_size.append(out_tensor.size())
-        print(out_tensor.size())
-
 
     if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
             raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")
diff --git a/aidge_core/export_utils/node_export.py b/aidge_core/export_utils/node_export.py
index 479eaf01ff8c8e85a3bf83adac88f5ee7fe86857..5777814a0b10c49d0f75245bdc4e9681027bdfb8 100644
--- a/aidge_core/export_utils/node_export.py
+++ b/aidge_core/export_utils/node_export.py
@@ -7,8 +7,6 @@ from typing import List
 
 
 def get_chan(tensor: aidge_core.Tensor) -> int:
-    """Given a tensor return the number of channel
-    """
     dformat = tensor.dformat()
     dims = tensor.dims()
     if dformat == aidge_core.dformat.default:
@@ -89,39 +87,78 @@ def get_width(tensor: aidge_core.Tensor) -> int:
 class ExportNode(ABC):
     """Abstract class to interface node with export generation.
 
-    This class expose a dictionary ``attributes`` which contains all the information required to generate an export:
-    - All the attributes of the Aidge node are automatically fetch, the key to get an attribute is the attribute name in python format, example ``no_bias``
-    - **name**: Name of the Node, ``str``
-    - **node**: Aidge Node, :py:class:`aidge_core.Node`
-    - **nb_in**: Number of inputs, ``int``
-    - **in_name**: unique name for each input, if no input node the name is ``{node_name}_input_{in_id}``, if there is a parent, the name is ``{parent_name}_output_{out_id}``, ``list[str]``
-    - **in_dims**: A list of the dimension for each inputs, ``list[list[int]]``
-    - **in_node**: A list of Node associated for each inputs, ``list[aidge_core.Node]``
-    - **in_size**: A list of the size for each inputs, ``list[int]``
-    - **in_chan**: A list of channel for each inputs, deduced by the dataformat, ``list[int]``
-    - **in_height**: A list of height for each inputs, deduced by the dataformat, ``list[int]``
-    - **in_width**: A list of width for each inputs, deduced by the dataformat, ``list[int]``
-    - **in_dtype**: A list of type (Aidge format) for each input, ``List[:py:class:`aidge_core.dtype`]``
-    - **in_cdtype**: A list of type (C/C++ format) for each input, ``List[str]``
-    - **out_name**: unique name for each output, the name is ``{name}_output_{out_id}``, ``list[str]``
-    - **out_node**: A list of list of Node associated for each outputs, ``list[list[aidge_core.Node]]``
-    - **nb_out**: Number of outputs, ``int``
-    - **out_dims**: A list of the dimension for each inputs, ``list[list[int]]``
-    - **out_size**: A list of the size for each outputs, ``list[int]``
-    - **out_chan**: A list of channel for each outputs, deduced by the dataformat, ``list[int]``
-    - **out_height**: A list of height for each outputs, deduced by the dataformat, ``list[int]``
-    - **out_width**: A list of width for each outputs, deduced by the dataformat, ``list[int]``
-    - **out_dtype**: A list of type (Aidge format) for each output, ``List[:py:class:`aidge_core.dtype`]``
-    - **out_cdtype**: A list of type (C/C++ format) for each output, ``List[str]``
-    - **mem_info**: True if mem_info is available for this node, ``bool``
-    - **mem_info_size**: A list of memory size for each output, ``List[int]``
-    - **mem_info_offset**: A list of offset to access each output, ``List[int]``
-    - **mem_info_stride**: old N2D2
-    - **mem_info_length**: old N2D2
-    - **mem_info_cont_size**: old N2D2
-    - **mem_info_cont_offset**: old N2D2
-    - **mem_info_wrap_offset**: old N2D2
-    - **mem_info_wrap_size**: old N2D2
+    This class exposes a dictionary, ``attributes``, which contains all the information
+    required to generate an export for a given node, including input/output names,
+    dimensions, types, and optional memory information.
+
+    - All the attributes of the Aidge Operator are automatically fetch, the key to get an attribute is the attribute name in python format, example ``no_bias``
+
+    - **node** (aidge_core.Node): The Aidge Node instance associated with this ExportNode.
+
+    - **name** (str): Name of the node, typically set via the Aidge node.
+
+    - **nb_in** (int): Number of input connections for the node.
+
+    - **nb_out** (int): Number of output connections for the node.
+
+    - **in_name** (list[str]): Unique name for each input connection.
+
+      Format:
+        - If no input node: ``{node_name}_input_{in_id}``
+        - If there is a parent node: ``{parent_name}_output_{out_id}``
+
+    - **in_dims** (list[list[int]]): Dimensions of each input.
+
+    - **in_node** (list[aidge_core.Node]): List of associated input nodes.
+
+    - **in_size** (list[int]): Size of each input.
+
+    - **in_chan** (list[int]): Channels in each input, based on data format.
+
+    - **in_height** (list[int]): Height of each input, based on data format.
+
+    - **in_width** (list[int]): Width of each input, based on data format.
+
+    - **in_dtype** (list[:py:class:`aidge_core.dtype`]): Data type for each input (Aidge format).
+
+    - **in_cdtype** (list[str]): Data type for each input (C/C++ format).
+
+    - **out_name** (list[str]): Unique name for each output, formatted as ``{name}_output_{out_id}``.
+
+    - **out_node** (list[list[aidge_core.Node]]): Associated output nodes for each output.
+
+    - **out_dims** (list[list[int]]): Dimensions of each output.
+
+    - **out_size** (list[int]): Size of each output.
+
+    - **out_chan** (list[int]): Channels in each output, based on data format.
+
+    - **out_height** (list[int]): Height of each output, based on data format.
+
+    - **out_width** (list[int]): Width of each output, based on data format.
+
+    - **out_dtype** (list[:py:class:`aidge_core.dtype`]): Data type for each output (Aidge format).
+
+    - **out_cdtype** (list[str]): Data type for each output (C/C++ format).
+
+    - **mem_info** (bool): True if `mem_info` is available for this node.
+
+    - **mem_info_size** (list[int]): Memory size for each output, if applicable.
+
+    - **mem_info_offset** (list[int]): Offset to access each output, if applicable.
+
+    - **mem_info_stride** (list[int]): Stride for accessing each output.
+
+    - **mem_info_length** (list[int]): Length of each output.
+
+    - **mem_info_cont_size** (list[int]): Continuous size for each output.
+
+    - **mem_info_cont_offset** (list[int]): Continuous offset for each output.
+
+    - **mem_info_wrap_offset** (list[int]): Wrap offset for each output.
+
+    - **mem_info_wrap_size** (list[int]): Wrap size for each output.
+
     """
 
     @abstractmethod
@@ -260,7 +297,64 @@ class ExportNode(ABC):
             else:
                 raise RuntimeError(f"No output for {self.node.name()}")
 
+    # **Class Attributes:**
+
+    # - **config_template** (str): Path to the template defining how to export the node definition.
+    #   This template is required for exporting; if undefined, raises an error, if no config
+    #   template is required set this to an empty string.
+
+    # - **forward_template** (str): Path to the template for generating code to perform a forward pass
+    #   of the node. Required for exporting the forward pass; raises an error if undefined.
+
+    # - **include_list** (list[str]): List of include paths (e.g., "include/toto.hpp") to be added to
+    #   the generated export files. Must be defined before export; raises an error if undefined.
+
+    # - **kernels_to_copy** (list[str]): List of paths to kernel files that should be copied during
+    #   export. The kernels are copied to ``kernels_path``, and are automatically
+    #   added to the include list.
+
+    # - **kernels_path** (str): Path where all kernels are stored in the export, prefixed by the
+    #   `export_root`. Defaults to "include/kernels".
+
+    # - **config_path** (str): Path of the configuration folder where node definitions are exported.
+    #   Defaults to "include/layers".
+
+    # - **config_extension** (str): File extension for the configuration files, typically for header
+    #   files. Defaults to "h".
+
 class ExportNodeCpp(ExportNode):
+    """Class for exporting Aidge nodes with C++ code generation.
+
+    This subclass of :class:`ExportNode` defines specific templates,
+    configuration paths, and other attributes required to generate
+    C++ code for Aidge nodes, including header and source files
+    for node definitions and forward passes.
+
+    :var config_template: Path to the template defining how to export the node definition.
+        This template is required for exporting; if undefined, raises an error, if no config
+        template is required set this to an empty string.
+    :vartype config_template: str
+    :var forward_template: Path to the template for generating code to perform a forward pass
+        of the node. Required for exporting the forward pass; raises an error if undefined.
+    :vartype forward_template: str
+    :var include_list: List of include paths (e.g., "include/toto.hpp") to be added to
+        the generated export files. Must be defined before export; raises an error if undefined.
+    :vartype include_list: list[str]
+    :var kernels_to_copy: List of paths to kernel files that should be copied during
+        export. The kernels are copied to ``kernels_path``, and are automatically
+        added to the include list.
+    :vartype kernels_to_copy: list[str]
+    :var kernels_path: Path where all kernels are stored in the export, prefixed by the
+        `export_root`. Defaults to "include/kernels".
+    :vartype kernels_path: str
+    :var config_path: Path of the configuration folder where node definitions are exported.
+        Defaults to "include/layers".
+    :vartype config_path: str
+    :var config_extension: File extension for the configuration files, typically for header
+        files. Defaults to "h".
+    :vartype config_extension: str
+    """
+
     # Path to the template defining how to export the node definition
     config_template: str = None
     # Path to the template defining how to export the node definition
@@ -277,8 +371,20 @@ class ExportNodeCpp(ExportNode):
     config_path: str = "include/layers"
     # Config_folder_extension
     config_extension: str = "h"
+
+
     def export(self, export_folder: str):
-        """Define how to export the node definition.
+        """Defines how to export the node definition.
+
+        This method checks that `config_template`, `include_list`, and
+        `kernels_to_copy` are defined, then copies each kernel to the export folder,
+        appends their paths to the include list, and generates the configuration file
+        based on the `config_template`.
+
+        :param export_folder: Folder path where the files are exported.
+        :type export_folder: str
+        :return: List of include paths with the paths for kernels and configuration files.
+        :rtype: list[str]
         """
         if self.config_template is None:
             raise ValueError("config_template have not been defined")
@@ -313,7 +419,7 @@ class ExportNodeCpp(ExportNode):
         return self.include_list + kernel_include_list
 
     def forward(self):
-        """Define how to generate code to perform a forward pass.
+        """Generates code for a forward pass using the `forward_template`.
         """
         if self.forward_template is None:
             raise ValueError("forward_template have not been defined")
diff --git a/aidge_core/export_utils/scheduler_export.py b/aidge_core/export_utils/scheduler_export.py
index f1de6f823356b0d217dc7fd98db006a8c97e450c..40823b00191aab8c6a53481a340f1f2f8a719102 100644
--- a/aidge_core/export_utils/scheduler_export.py
+++ b/aidge_core/export_utils/scheduler_export.py
@@ -6,130 +6,184 @@ from aidge_core.export_utils import ExportLib, generate_file, copy_file
 from typing import List, Tuple
 
 
-def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib = None, memory_manager=None, memory_manager_args=None, labels=False) -> None:
-        graphview = scheduler.graph_view()
-        export_folder = Path().absolute() / export_folder_path
-
-        os.makedirs(str(export_folder), exist_ok=True)
-
-        dnn_folder = export_folder / "dnn"
-        os.makedirs(str(dnn_folder), exist_ok=True)
-
-        if memory_manager_args is None:
-            memory_manager_args = {}
-
-        if memory_manager is None:
-            raise ValueError("A memory manager is required (no default value yet).")
-        peak_mem, mem_info = memory_manager(
-            scheduler, **memory_manager_args)
-
-        # List of function call for forward.cpp
-        list_actions: List[str] = []
-        # List of headers for forward.cpp
-        list_configs: List[str] = []
-
-        inputs_name: List[str] = []
-        inputs_dtype: List[str] = []
-        outputs_name: List[str] = []
-        outputs_dtype: List[str] = []
-        outputs_size: List[int] = []
-
-        # List of aidge_core.Node ordered by scheduler
-        list_forward_nodes: List[aidge_core.Node] = scheduler.get_static_scheduling()
-
-        # If exportLib define use it
-        # else parse component in platform
-        # if export_lib is None:
-        #     raise ValueError("Export need an ExportLib.")
-        for node in list_forward_nodes:
-            if export_lib is not None:
-                aidge_core.Log.debug(f"Setting backend {export_lib._name} to {node.name()}[{node.type()}].")
-                node.get_operator().set_backend(export_lib._name)
-
-            op_impl = node.get_operator().get_impl()
-            if op_impl is None:
-                raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an implementation.")
-            if not isinstance(op_impl, ExportLib):
-                raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an exportable backend ({op_impl}).")
-
-            is_input:bool  = node in graphview.get_input_nodes()
-            is_output:bool = node in graphview.get_output_nodes()
-
-            if is_input:
-                # GraphView.get_inputs_nodes() returns the nodes that have an Input set to None or not in the graph
-                # However, some inputs are Optional and thus the node may not be an input of the graph!
-                # So we need ot check that all the inputs of the nodes or in the graph or not optional
-                # This is what the following code block is checking.
-                for idx, node_in in enumerate(node.inputs()):
-                    optional:bool = node.get_operator().input_category(idx) == aidge_core.InputCategory.OptionalData
-                    # Note: node_in is a Tuple(Node, out_idx)
-                    in_graph:bool = node_in[0] in graphview.get_nodes()
-                    is_input &= (in_graph or not optional)
-
-            # Get operator current specs
-            required_specs = op_impl.get_required_spec()
-            # Get specs of the implementation that match current specs
-            specs = op_impl.get_best_match(required_specs)
-            # Retrieve said implementation
-            export_node = op_impl.get_export_node(specs)
-
-            if export_node is None:
-                raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].")
-            # Instanciate ExportNode
-            op = export_node(node, mem_info[node])
-
-            # For configuration files
-            list_configs += op.export(dnn_folder)
-            # For forward file
-            list_actions += op.forward()
-            if is_input:
-                for idx, node_in in enumerate(node.inputs()):
-                    if node_in[0] not in graphview.get_nodes():
-                        inputs_name.append(op.attributes["in_name"][idx])
-                        inputs_dtype.append(
-                            op.attributes["in_cdtype"][idx]
-                        )
-            if is_output:
-                for idx in range(len(node.outputs())):
-                    outputs_name.append(op.attributes["out_name"][idx])
-                    outputs_dtype.append(
-                        op.attributes["out_cdtype"][idx]
-                    )
-                    outputs_size.append(op.attributes["out_size"][idx])
-
-        func_name = "model_forward"
-        ROOT = Path(__file__).resolve().parents[0]
-        generate_file(
-            str(dnn_folder / "src" / "forward.cpp"),
-            str(ROOT / "templates" / "forward.jinja"),
-            func_name=func_name,
-            headers=set(list_configs),
-            actions=list_actions,
-            mem_ctype=inputs_dtype[0],  # Legacy behavior ...
-            mem_section=export_lib.mem_section,
-            peak_mem=peak_mem,
-            inputs_name=inputs_name,
-            inputs_dtype=inputs_dtype,
-            outputs_name=outputs_name,
-            outputs_dtype=outputs_dtype
-        )
-
-        # Generate dnn API
-        generate_file(
-            str(dnn_folder / "include" / "forward.hpp"),
-            str(ROOT / "templates" / "forward_header.jinja"),
-            libraries=[],
-            func_name=func_name,
-            inputs_name=inputs_name,
-            inputs_dtype=inputs_dtype,
-            outputs_name=outputs_name,
-            outputs_dtype=outputs_dtype
-        )
-
-        if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
-            raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")
-
+def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib = None, memory_manager=None, memory_manager_args=None) -> None:
+    """Exports an aidge_core.Scheduler to C++ code.
+
+    This function generates files for a given computation graph, including forward-pass functions,
+    configuration headers, and the main API entry point for the exported model. It requires a
+    memory manager to allocate resources, and optionally an `ExportLib` instance to handle backend
+    configurations for node operators.
+
+
+    1. **Export Preparation**:
+        - Initializes export and DNN folders, checking that required memory management functions are defined.
+        - Retrieves peak memory usage and memory details for each node using the `memory_manager`.
+
+    2. **Configuration Generation**:
+        - Iterates over nodes scheduled by `scheduler`, configuring backends if `export_lib` is specified.
+        - Exports configuration headers and forward-pass actions for each node by invoking `op.export()`
+            and `op.forward()`, appending these to `list_configs` and `list_actions`, respectively.
+        - Collects information on input and output nodes, including their names, data types, and sizes.
+
+    3. **Code Generation**:
+        - Defines the forward-pass function, `model_forward`, with inputs and outputs based on node attributes.
+        - Generates the following files:
+
+            - **forward.cpp**: Implements the model forward pass using templates, applying configurations
+            and actions for each node.
+
+            - **forward.hpp**: Exports the forward API, defining inputs and outputs.
+
+            - **main.cpp**: Main entry file, serving as the model's forward-pass interface.
+
+    4. **Static File Export (Optional)**:
+        - If `export_lib` is specified, static files are copied to the export folder based on `export_lib`
+        specifications.
+
+
+    :param scheduler: Scheduler instance managing the computation graph.
+                    Uses `graph_view` and `get_static_scheduling` methods
+                    to retrieve the computation graph layout and ordered nodes.
+    :type scheduler: aidge_core.Scheduler
+    :param export_folder_path: Path to the folder where the generated export files will be saved.
+                            Creates this folder, along with subdirectories for model and source files.
+    :type export_folder_path: str
+    :param export_lib: Library providing the backend implementation for node operators.
+                    Defaults to None. If provided, each node's backend is set to the library's name.
+    :type export_lib: ExportLib, optional
+    :param memory_manager: Required function for managing memory allocation. It should take
+                        `scheduler` and optional `memory_manager_args` as parameters, returning
+                        `peak_mem` (peak memory usage) and `mem_info` (memory details for each node).
+    :type memory_manager: callable
+    :param memory_manager_args: Additional arguments passed to `memory_manager`. Defaults to an empty dictionary.
+    :type memory_manager_args: dict, optional
+    """
+    graphview = scheduler.graph_view()
+    export_folder = Path().absolute() / export_folder_path
+
+    os.makedirs(str(export_folder), exist_ok=True)
+
+    dnn_folder = export_folder / "dnn"
+    os.makedirs(str(dnn_folder), exist_ok=True)
+
+    if memory_manager_args is None:
+        memory_manager_args = {}
+
+    if memory_manager is None:
+        raise ValueError("A memory manager is required (no default value yet).")
+    peak_mem, mem_info = memory_manager(
+        scheduler, **memory_manager_args)
+
+    # List of function calls for forward.cpp
+    list_actions: List[str] = []
+    # List of headers for forward.cpp
+    list_configs: List[str] = []
+
+    inputs_name: List[str] = []
+    inputs_dtype: List[str] = []
+    outputs_name: List[str] = []
+    outputs_dtype: List[str] = []
+    outputs_size: List[int] = []
+
+    # List of aidge_core.Node ordered by scheduler
+    list_forward_nodes: List[aidge_core.Node] = scheduler.get_static_scheduling()
+
+    # If an ExportLib is defined, use it;
+    # otherwise parse the components available in the platform.
+    # if export_lib is None:
+    #     raise ValueError("Export need an ExportLib.")
+    for node in list_forward_nodes:
         if export_lib is not None:
-            # Copy all static files in the export
-            for source, destination in export_lib.static_files.items():
-                copy_file(source, str(export_folder / destination))
+            aidge_core.Log.debug(f"Setting backend {export_lib._name} to {node.name()}[{node.type()}].")
+            node.get_operator().set_backend(export_lib._name)
+
+        op_impl = node.get_operator().get_impl()
+        if op_impl is None:
+            raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an implementation.")
+        if not isinstance(op_impl, ExportLib):
+            raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an exportable backend ({op_impl}).")
+
+        is_input:bool  = node in graphview.get_input_nodes()
+        is_output:bool = node in graphview.get_output_nodes()
+
+        if is_input:
+            # GraphView.get_input_nodes() returns the nodes that have an input set to None or not in the graph.
+            # However, some inputs are optional and thus the node may not be an input of the graph!
+            # So we need to check that every input of the node is either in the graph or not optional.
+            # This is what the following code block checks.
+            for idx, node_in in enumerate(node.inputs()):
+                optional:bool = node.get_operator().input_category(idx) == aidge_core.InputCategory.OptionalData
+                # Note: node_in is a Tuple(Node, out_idx)
+                in_graph:bool = node_in[0] in graphview.get_nodes()
+                is_input &= (in_graph or not optional)
+
+        # Get operator current specs
+        required_specs = op_impl.get_required_spec()
+        # Get specs of the implementation that match current specs
+        specs = op_impl.get_best_match(required_specs)
+        # Retrieve said implementation
+        export_node = op_impl.get_export_node(specs)
+
+        if export_node is None:
+            raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].")
+        # Instantiate the ExportNode
+        op = export_node(node, mem_info[node])
+
+        # For configuration files
+        list_configs += op.export(dnn_folder)
+        # For forward file
+        list_actions += op.forward()
+        if is_input:
+            for idx, node_in in enumerate(node.inputs()):
+                if node_in[0] not in graphview.get_nodes():
+                    inputs_name.append(op.attributes["in_name"][idx])
+                    inputs_dtype.append(
+                        op.attributes["in_cdtype"][idx]
+                    )
+        if is_output:
+            for idx in range(len(node.outputs())):
+                outputs_name.append(op.attributes["out_name"][idx])
+                outputs_dtype.append(
+                    op.attributes["out_cdtype"][idx]
+                )
+                outputs_size.append(op.attributes["out_size"][idx])
+
+    func_name = "model_forward"
+    ROOT = Path(__file__).resolve().parents[0]
+    generate_file(
+        str(dnn_folder / "src" / "forward.cpp"),
+        str(ROOT / "templates" / "forward.jinja"),
+        func_name=func_name,
+        headers=set(list_configs),
+        actions=list_actions,
+        # Note: the graph may not have inputs, so we need to check with an output.
+        # In the future, we should remove this as it is not compatible
+        # with a mixed-precision approach.
+        mem_ctype=outputs_dtype[0],  # Legacy behavior ...
+        mem_section=export_lib.mem_section,
+        peak_mem=peak_mem,
+        inputs_name=inputs_name,
+        inputs_dtype=inputs_dtype,
+        outputs_name=outputs_name,
+        outputs_dtype=outputs_dtype
+    )
+
+    # Generate dnn API
+    generate_file(
+        str(dnn_folder / "include" / "forward.hpp"),
+        str(ROOT / "templates" / "forward_header.jinja"),
+        libraries=[],
+        func_name=func_name,
+        inputs_name=inputs_name,
+        inputs_dtype=inputs_dtype,
+        outputs_name=outputs_name,
+        outputs_dtype=outputs_dtype
+    )
+
+    if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
+        raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")
+
+    if export_lib is not None:
+        # Copy all static files in the export
+        for source, destination in export_lib.static_files.items():
+            copy_file(source, str(export_folder / destination))
diff --git a/aidge_core/export_utils/templates/main.jinja b/aidge_core/export_utils/templates/main.jinja
index d0c22719a5d9b9eaa15d3ef9ef86307a060b54be..697a97b53ef05eedddf5ac66f262581475e655c6 100644
--- a/aidge_core/export_utils/templates/main.jinja
+++ b/aidge_core/export_utils/templates/main.jinja
@@ -26,7 +26,7 @@ int main()
     {% endfor %}
 
     // Call the forward function
-    {{ func_name }}({{ inputs_name|join(", ") }}, &{{ outputs_name|join(", &") }});
+    {{ func_name }}({{ inputs_name|join(", ") }}{% if inputs_name %}, {% endif %}&{{ outputs_name|join(", &") }});
 
     // Print the results of each output
     {%- for o in range(outputs_name | length) %}
diff --git a/aidge_core/mem_info.py b/aidge_core/mem_info.py
index 1f2f48dd64aba276e70940d5bf461da6f50a4b38..1a800f9488905a4aed3a907deca8327979effbe4 100644
--- a/aidge_core/mem_info.py
+++ b/aidge_core/mem_info.py
@@ -42,6 +42,26 @@ def compute_default_mem_info(scheduler: aidge_core.Scheduler) -> Tuple[int, List
 
 
 def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder: Path, wrapping: bool = False) -> Tuple[int, List[dict]]:
+    """Generates optimized memory information for a computation graph managed by a scheduler.
+
+    This function analyzes the memory usage of a computation graph, determining the memory peak
+    and detailed memory management information for each node in the scheduler. It supports optional
+    wrapping of memory buffers and logs memory usage statistics to facilitate memory optimization.
+
+    :param scheduler: Scheduler instance that organizes the computation graph. It manages the
+                      nodes and facilitates memory planning by invoking its `generate_memory` method.
+    :type scheduler: aidge_core.Scheduler
+    :param stats_folder: Directory path to store memory statistics and plots generated by `mem_manager`.
+                         If provided as a string, it is converted to a `Path` object.
+    :type stats_folder: Path
+    :param wrapping: Boolean flag to enable or disable wrap-around buffer optimization.
+                     Defaults to `False`.
+    :type wrapping: bool, optional
+    :return: A tuple containing the peak memory size and a list of memory information for each
+             scheduled node. The memory information for each node includes details such as size,
+             offset, stride, length, count, and optional wrap-around details.
+    :rtype: Tuple[int, List[dict]]
+    """
     # The forward dims has to done outside the function
     # Also supposed the generation of the scheduler has been performed outside
     # Otherwise decomment the following line
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 89b2c06a52f180ffb35363cb6ab07d4242e12033..327f4f7c3d43b5194f23cfaed8674ee0b47bd6a2 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 class GenericOperator_Op
     : public OperatorTensor,
-      public Registrable<GenericOperator_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>> {
+      public Registrable<GenericOperator_Op, std::array<std::string, 2>, std::function<std::shared_ptr<OperatorImpl>(const GenericOperator_Op &)>> {
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 6af8fef88e411af0a3ecbe5a771bf7af24de411a..f125291fafb89ec7ae81678a37e2bde2222a1054 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -64,5 +64,7 @@ void init_GenericOperator(py::module& m) {
             }
             return genericNode;
         }, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), py::arg("name") = "");
+
+    declare_registrable<GenericOperator_Op>(m, "GenericOperatorOp");
 }
 }  // namespace Aidge
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index 0f90a5a58dbd8d69b847022c336075ecc3f3d553..c5bca92406e518df593fcc6c3a40525a4ba81dfa 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -86,7 +86,15 @@ bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
 }
 
 void Aidge::GenericOperator_Op::setBackend(const std::string & name, DeviceIdx_t device) {
-    Log::warn("GenericOperator::setBackend(): cannot set backend for a generic operator, as no implementation has been provided!");
+    if (Registrar<GenericOperator_Op>::exists({name, type()})) {
+        // A custom implementation exists for this meta operator
+        mImpl = Registrar<GenericOperator_Op>::create({name, type()})(*this);
+    }else{
+        Log::warn("GenericOperator::setBackend(): cannot set backend for a generic operator, as no implementation has been provided!");
+    }
+
+
+
 
     for (std::size_t i = 0; i < nbOutputs(); ++i) {
         mOutputs[i]->setBackend(name, device);
@@ -108,4 +116,4 @@ std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
                                                 Aidge::IOIndex_t nbOut,
                                                 const std::string& name) {
     return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
-}
\ No newline at end of file
+}