diff --git a/CHANGELOG b/CHANGELOG
index db89edd1191865c81c16ea7accaae850ced7082c..46c726b24039613f57bd4ab676a9f833702d03cb 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,5 @@
+# Version 0.2.0 (December 6, 2024)
+
 # Version 0.0.1 (January 23, 2024)
 
 Initial release
diff --git a/aidge_export_cpp/__init__.py b/aidge_export_cpp/__init__.py
index 6ab2e03aa559ad9c8cf878f1968b73478592124c..99df1302f6183eb1301fe6863824e7a0bd67c229 100644
--- a/aidge_export_cpp/__init__.py
+++ b/aidge_export_cpp/__init__.py
@@ -2,6 +2,7 @@ r"""
 Aidge Export for CPP standalone projects
 
 """
+from .export_registry import ExportLibCpp
 
 from .operators import *
 from collections import defaultdict
diff --git a/aidge_export_cpp/export.py b/aidge_export_cpp/export.py
index 1d876bfab7769606540e3eebb5f9102d95b8c7f8..ebac7a809caef0b192bf030de9c98a9d18af9f34 100644
--- a/aidge_export_cpp/export.py
+++ b/aidge_export_cpp/export.py
@@ -1,18 +1,17 @@
 import re
 import os
-from pathlib import Path
-import shutil
 import numpy as np
-from typing import List, Union
-from jinja2 import Environment, FileSystemLoader
 
 import aidge_core
+
 from aidge_core.export_utils.code_generation import *
-from aidge_export_cpp.utils import (ROOT, OPERATORS_REGISTRY, supported_operators)
-from aidge_export_cpp.utils.converter import aidge_datatype2ctype, numpy_dtype2ctype
-import aidge_export_cpp.operators
+from aidge_core.mem_info import compute_default_mem_info
+
+from aidge_export_cpp.utils import ROOT
+from aidge_export_cpp.utils.converter import numpy_dtype2ctype
+from aidge_export_cpp import ExportLibCpp
 from aidge_export_cpp.utils.generation import *
-from aidge_export_cpp.memory import *
+# from aidge_export_cpp.memory import *
 
 
 def generate_input_file(export_folder:str,
@@ -33,86 +32,100 @@ def generate_input_file(export_folder:str,
     )
 
 
-def export(export_folder_name, graphview, scheduler):
-
-    export_folder = Path().absolute() / export_folder_name
-
-    os.makedirs(str(export_folder), exist_ok=True)
-
-    dnn_folder = export_folder / "dnn"
-    os.makedirs(str(dnn_folder), exist_ok=True)
-
-    list_actions = []
-    list_configs = []
-
-    list_forward_nodes = scheduler.get_static_scheduling()
-
-    for node in list_forward_nodes:
-        if node.type() in supported_operators():
-            op = OPERATORS_REGISTRY[node.type()](node)
-
-            # For configuration files
-            list_configs = op.export(dnn_folder, list_configs)
-
-            # For forward file
-            list_actions = op.forward(list_actions)
-
-
-    # Memory management
-    mem_size, mem_info = compute_default_mem_info(scheduler)
-
-    # Generate the memory file
-    generate_file(
-        str(dnn_folder / "memory" / "mem_info.h"),
-        str(ROOT / "templates" / "memory" / "mem_info.jinja"),
-        mem_size = mem_size,
-        mem_info_legends = MEMORY_INFO_TEMPLATE,
-        mem_info = mem_info
-    )
-    list_configs.append("memory/mem_info.h")
-
-    # Get entry nodes
-    # It supposes the entry nodes are producers with constant=false
-    # Store the datatype & name
-    list_inputs_name = []
-    for node in graphview.get_nodes():
-        if node.type() == "Producer":
-            if not node.get_operator().attr.constant:
-                export_type = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-                list_inputs_name.append((export_type, node.name()))
-
-    # Get output nodes
-    # Store the datatype & name, like entry nodes
-    list_outputs_name = []
-    for node in graphview.get_nodes():
-        if len(node.get_children()) == 0:
-            export_type = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-            list_outputs_name.append((export_type, node.name()))
-
-    # Generate forward file
-    # TODO: for now the mem type is bound for all intermediate results, should change.
-    # Note that we may have all inputs constants, hence select output type
-    assert len(list_outputs_name) >= 1, f"TODO: requires some output to determine mem type"
-    mem_ctype = list_outputs_name[0][0]
-    generate_file(
-        str(dnn_folder / "src" / "forward.cpp"),
-        str(ROOT / "templates" / "network" / "network_forward.jinja"),
-        headers=list_configs,
-        actions=list_actions,
-        inputs= list_inputs_name,
-        outputs=list_outputs_name,
-        mem_ctype=mem_ctype,
-    )
-
-    # Generate dnn API
-    generate_file(
-        str(dnn_folder / "include" / "dnn.hpp"),
-        str(ROOT / "templates" / "network" / "dnn_header.jinja"),
-        libraries=[],
-        functions=get_functions_from_c_file(str(dnn_folder / "src" / "forward.cpp")),
+def export(export_folder_name, graphview, scheduler, mem_wrapping=False):
+    aidge_core.export_utils.scheduler_export(
+        scheduler,
+        export_folder_name,
+        ExportLibCpp,
+        memory_manager=compute_default_mem_info
     )
 
-    # Copy all static files in the export
-    shutil.copy(str(ROOT / "static" / "main.cpp"), str(export_folder))
-    shutil.copy(str(ROOT / "static" / "Makefile"), str(export_folder))
-    shutil.copytree(str(ROOT / "static" / "include"), str(dnn_folder / "include"), dirs_exist_ok=True)
+    # export_folder = Path().absolute() / export_folder_name
+
+    # os.makedirs(str(export_folder), exist_ok=True)
+
+    # dnn_folder = export_folder / "dnn"
+    # os.makedirs(str(dnn_folder), exist_ok=True)
+
+    # list_actions = []
+    # list_configs = []
+    # peak_mem, mem_info = compute_default_mem_info(scheduler)
+    # list_forward_nodes = scheduler.get_static_scheduling()
+
+    # for node in list_forward_nodes:
+    #     if ExportLibCpp.exportable(node):
+    #         op = ExportLibCpp.get_export_node(node)(node, mem_info[node])
+    #         # For configuration files
+    #         list_configs = op.export(dnn_folder, list_configs)
+
+    #         # For forward file
+    #         list_actions = op.forward(list_actions)
+    #     else:
+    #         raise RuntimeError(f"Operator not supported: {node.type()} !")
+
+    # # Memory management
+    # # stats_folder = export_folder / "statistics"
+    # # os.makedirs(str(stats_folder), exist_ok=True)
+    # # mem_size, mem_info = generate_optimized_memory_info(stats_folder, scheduler, mem_wrapping)
+    # # peak_mem, mem_info = compute_default_mem_info(scheduler)
+
+    # # Generate the memory file
+    # # generate_file(
+    # #     str(dnn_folder / "memory" / "mem_info.h"),
+    # #     str(ROOT / "templates" / "memory" / "mem_info.jinja"),
+    # #     mem_size = mem_size,
+    # #     mem_info_legends = MEMORY_INFO_TEMPLATE,
+    # #     mem_info = mem_info
+    # # )
+    # # list_configs.append("memory/mem_info.h")
+
+    # # Get entry nodes
+    # # Store the datatype & name
+    # list_inputs_name = []
+    # for node in graphview.get_input_nodes():
+    #     for idx, node_input_tuple in enumerate(node.inputs()):
+    #         node_input, _ = node_input_tuple
+    #         if node_input is None:
+    #             export_type = aidge2c(node.get_operator().get_output(0).dtype())
+    #             list_inputs_name.append((export_type, f"{node.name()}_input_{idx}"))
+    #         elif node_input not in graphview.get_nodes():
+    #             export_type = aidge2c(node_input.get_operator().get_output(0).dtype())
+    #             list_inputs_name.append((export_type, node_input.name()))
+
+
+    # # Get output nodes
+    # # Store the datatype & name, like entry nodes
+    # list_outputs_name = []
+    # for node in graphview.get_nodes():
+    #     if len(node.get_children()) == 0:
+    #         export_type = aidge2c(node.get_operator().get_output(0).dtype())
+    #         list_outputs_name.append((export_type, f"{node.name()}_output_0"))
+
+    # # Generate forward file
+    # # TODO: for now the mem type is bound for all intermediate results, should change.
+    # # Note that we may have all inputs constants, hence select output type
+    # assert len(list_outputs_name) >= 1, f"TODO: requires some output to determine mem type"
+    # mem_ctype = list_outputs_name[0][0]
+    # generate_file(
+    #     str(dnn_folder / "src" / "forward.cpp"),
+    #     str(ROOT / "templates" / "network" / "network_forward.jinja"),
+    #     headers=set(list_configs),
+    #     actions=list_actions,
+    #     inputs= list_inputs_name,
+    #     outputs=list_outputs_name,
+    #     mem_ctype=mem_ctype,
+    #     peak_mem=peak_mem
+    # )
+
+    # # Generate dnn API
+    # generate_file(
+    #     str(dnn_folder / "include" / "dnn.hpp"),
+    #     str(ROOT / "templates" / "network" / "dnn_header.jinja"),
+    #     libraries=[],
+    #     functions=get_functions_from_c_file(str(dnn_folder / "src" / "forward.cpp")),
+    # )
+
+    # # Copy all static files in the export
+    # shutil.copy(str(ROOT / "static" / "main.cpp"), str(export_folder))
+    # shutil.copy(str(ROOT / "static" / "Makefile"), str(export_folder))
+    # shutil.copytree(str(ROOT / "static" / "include"), str(dnn_folder / "include"), dirs_exist_ok=True)
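For reference, a minimal usage sketch of the refactored export() entry point. Everything around the call (building the GraphView, forwarding its dimensions, scheduling) is assumed to follow the usual aidge_core workflow and is not part of this patch; only export() and ExportLibCpp come from this repository.

import aidge_core
from aidge_export_cpp.export import export

# `model` is assumed to be an existing aidge_core GraphView whose dimensions
# have already been forwarded (e.g. model.forward_dims([[1, 3, 32, 32]])).
scheduler = aidge_core.SequentialScheduler(model)
scheduler.generate_scheduling()

# The graphview argument is kept for API compatibility; the actual work is
# delegated to aidge_core.export_utils.scheduler_export() with ExportLibCpp.
export("export_cpp_demo", model, scheduler)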
diff --git a/aidge_export_cpp/export_registry.py b/aidge_export_cpp/export_registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1aa83b3beeb4cffa3d3076b28df7c8745c0b457
--- /dev/null
+++ b/aidge_export_cpp/export_registry.py
@@ -0,0 +1,10 @@
+from aidge_core.export_utils import ExportLib
+from aidge_export_cpp.utils import ROOT
+
+class ExportLibCpp(ExportLib):
+    _name="export_cpp"
+    static_files={
+        str(ROOT / "static" / "Makefile"): "",
+        str(ROOT / "static" / "include" / "network" / "typedefs.hpp"): "dnn/include/network",
+        str(ROOT / "static" / "include" / "network" / "utils.hpp"): "dnn/include/network",
+    }
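As a usage sketch of this registry (hypothetical operator: the Softmax kernel and templates named below are not shipped by this patch), an export node attaches itself to ExportLibCpp with the register() decorator, exactly as operators.py does further down for ReLU, Conv2D, FC, etc.:

import aidge_core
from aidge_core.export_utils import ExportNodeCpp
from aidge_export_cpp import ExportLibCpp
from aidge_export_cpp.utils import ROOT

@ExportLibCpp.register("Softmax", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class SoftmaxCPP(ExportNodeCpp):  # hypothetical operator, for illustration only
    def __init__(self, node, mem_info):
        super().__init__(node, mem_info)
        self.attributes["activation"] = "Linear"
        # Hypothetical template/kernel paths; real operators point at the
        # files under templates/ and kernels/ as shown in operators.py below.
        self.config_template = str(ROOT / "templates" / "configuration" / "softmax_config.jinja")
        self.forward_template = str(ROOT / "templates" / "kernel_forward" / "softmax_forward.jinja")
        self.include_list = []
        self.kernels_to_copy = [str(ROOT / "kernels" / "softmax.hpp")]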
diff --git a/aidge_export_cpp/kernels/activation.hpp b/aidge_export_cpp/kernels/activation.hpp
index b5fbd61d0791b6f60a836331e840cb874aadc816..d6695159255e4c2c12ced879a90cbe6b01dae0eb 100644
--- a/aidge_export_cpp/kernels/activation.hpp
+++ b/aidge_export_cpp/kernels/activation.hpp
@@ -4,36 +4,36 @@
 #include <type_traits>
 #include "network/typedefs.hpp"
 #include "network/utils.hpp"
-#include "network/rescaling.hpp"
+#include "kernels/rescaling.hpp"
 
-template<typename Output_T, typename T,  
+template<typename Output_T, typename T,
          typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
 __attribute__((always_inline)) inline
-Output_T saturate (T value, int32_t /*sat*/) 
+Output_T saturate (T value, int32_t /*sat*/)
 {
     return value;
 }
 
-template<typename Output_T, typename T,  
+template<typename Output_T, typename T,
          typename std::enable_if<!std::is_floating_point<T>::value>::type* = nullptr>
 __attribute__((always_inline)) inline
-Output_T saturate (T value, uint32_t sat) 
+Output_T saturate (T value, uint32_t sat)
 {
     if (std::is_unsigned<Output_T>::value) {
         return clamp(value, T(0), (T(1) << sat) - 1);
     } else {
         return clamp(value, -(T(1) << (sat - 1)), (T(1) << (sat - 1)) - 1);
-    }                                                 
+    }
 }
 
-template<typename Output_T, 
-         typename Sum_T, 
+template<typename Output_T,
+         typename Sum_T,
          typename Rescaling_T>
-__attribute__((always_inline)) inline 
-Output_T activation_forward_value (Sum_T weightedSum, 
-                                   int output, 
-                                   ActivationFunction_T func, 
-                                   const Rescaling_T& __restrict rescaling) 
+__attribute__((always_inline)) inline
+Output_T activation_forward_value (Sum_T weightedSum,
+                                   int output,
+                                   ActivationFunction_T func,
+                                   const Rescaling_T& __restrict rescaling)
 {
     switch(func) {
         case Linear:
@@ -49,7 +49,7 @@ Output_T activation_forward_value (Sum_T weightedSum,
             break;
     }
 
-    // Value fixed here for now but it should be generated by 
+    // Value fixed here for now but it should be generated by
     // the export module or determined by the type of Output_T
     // For now only works for int8_t and uint8_t
     const uint32_t NB_BITS = 8;
@@ -60,7 +60,7 @@ Output_T activation_forward_value (Sum_T weightedSum,
 template<int NB_DATA,
          ActivationFunction_T ACTIVATION,
          typename Input_T, typename Output_T, typename Rescaling_T>
-__attribute__((always_inline)) inline 
+__attribute__((always_inline)) inline
 void activation_forward (
     const Input_T* __restrict inputs,
     Output_T* __restrict outputs,
diff --git a/aidge_export_cpp/kernels/batchnorm.hpp b/aidge_export_cpp/kernels/batchnorm.hpp
index d63c961c06122cb0808c3c50640fe958bdd736db..740ea21e6f66ba338985db4f724a5d57377e1f81 100644
--- a/aidge_export_cpp/kernels/batchnorm.hpp
+++ b/aidge_export_cpp/kernels/batchnorm.hpp
@@ -2,17 +2,17 @@
 #define __AIDGE_EXPORT_CPP_KERNELS_BATCHNORM__
 
 #include "network/typedefs.hpp"
-#include "network/rescaling.hpp"
+#include "kernels/rescaling.hpp"
 #include <math.h>
 
 // WARNING: this kernel only works for 32-bits floating point values
 
-template<int NB_OUTPUTS, 
+template<int NB_OUTPUTS,
          int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
          ActivationFunction_T ACTIVATION,
-         typename Input_T, typename Output_T, 
+         typename Input_T, typename Output_T,
          typename Param_T>
-__attribute__((always_inline)) inline 
+__attribute__((always_inline)) inline
 void batchnorm_forward (
     const Input_T* __restrict inputs,
     Output_T* __restrict outputs,
diff --git a/aidge_export_cpp/kernels/convolution.hpp b/aidge_export_cpp/kernels/convolution.hpp
index 39f931a5b214aad068b5f927b7a75aecdbdf6196..efc7ee7a192112018d6c582207560d93b4548add 100644
--- a/aidge_export_cpp/kernels/convolution.hpp
+++ b/aidge_export_cpp/kernels/convolution.hpp
@@ -2,13 +2,13 @@
 #define __AIDGE_EXPORT_CPP_KERNELS_CONVOLUTION__
 
 #include "network/typedefs.hpp"
-#include "network/rescaling.hpp"
+#include "kernels/rescaling.hpp"
 #include "network/utils.hpp"
 #include "kernels/macs.hpp"
 #include "kernels/activation.hpp"
 
 
-template<int NB_CHANNELS, 
+template<int NB_CHANNELS,
          int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
          int NB_OUTPUTS,
          int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
@@ -17,10 +17,10 @@ template<int NB_CHANNELS,
          int DILATION_Y, int DILATION_X,
          int KERNEL_HEIGHT, int KERNEL_WIDTH,
          ActivationFunction_T ACTIVATION,
-         typename Input_T, typename Output_T, 
+         typename Input_T, typename Output_T,
          typename Weight_T, typename Bias_T,
          typename Rescaling_T>
-__attribute__((always_inline)) inline 
+__attribute__((always_inline)) inline
 void convolution_forward(
     const Input_T* __restrict inputs,
     Output_T* __restrict outputs,
@@ -28,10 +28,10 @@ void convolution_forward(
     const Bias_T* __restrict biases,
     const Rescaling_T& __restrict rescaling)
 {
-    constexpr int DILATED_KERNEL_HEIGHT 
+    constexpr int DILATED_KERNEL_HEIGHT
             = KERNEL_HEIGHT + (DILATION_Y - 1) * (KERNEL_HEIGHT - 1);
 
-    constexpr int DILATED_KERNEL_WIDTH 
+    constexpr int DILATED_KERNEL_WIDTH
             = KERNEL_WIDTH + (DILATION_X - 1) * (KERNEL_WIDTH - 1);
 
     constexpr int OUTPUTS_HEIGHT_NOPAD
@@ -44,7 +44,7 @@ void convolution_forward(
             : max(PADDING_Y - (oy * STRIDE_Y), 0);
         const int syMax = (PADDING_Y == 0
                 && OUTPUTS_HEIGHT == OUTPUTS_HEIGHT_NOPAD) ? DILATED_KERNEL_HEIGHT
-            : clamp(CHANNELS_HEIGHT + PADDING_Y - (oy * STRIDE_Y), 
+            : clamp(CHANNELS_HEIGHT + PADDING_Y - (oy * STRIDE_Y),
                     0, DILATED_KERNEL_HEIGHT);
         const int iy = (oy * STRIDE_Y) - PADDING_Y;
 
@@ -57,7 +57,7 @@ void convolution_forward(
                 const int sxMax = (PADDING_X == 0
                         && OUTPUTS_WIDTH == OUTPUTS_WIDTH_NOPAD)
                             ? DILATED_KERNEL_WIDTH
-                    : clamp(CHANNELS_WIDTH + PADDING_X - (ox * STRIDE_X), 
+                    : clamp(CHANNELS_WIDTH + PADDING_X - (ox * STRIDE_X),
                             0, DILATED_KERNEL_WIDTH);
                 const int ix = (ox * STRIDE_X) - PADDING_X;
 
@@ -85,8 +85,8 @@ void convolution_forward(
                         || sxMax - sxMin == KERNEL_WIDTH))
                     {
                         macsOnRange<KERNEL_WIDTH * NB_CHANNELS>(
-                            inputs + iOffset, 
-                            weights + wOffset, 
+                            inputs + iOffset,
+                            weights + wOffset,
                             weightedSum);
                     }
                     else {
@@ -100,11 +100,11 @@ void convolution_forward(
 
                             int iOffsetInRange = iOffset
                                 + sx * DILATION_X * NB_CHANNELS;
-                        
+
                             macsOnRange<NB_CHANNELS>(
                                 // same input line so no wrapping can occur
-                                inputs + iOffsetInRange, 
-                                weights + wOffset + sx * NB_CHANNELS, 
+                                inputs + iOffsetInRange,
+                                weights + wOffset + sx * NB_CHANNELS,
                                 weightedSum);
                         }
                     }
diff --git a/aidge_export_cpp/kernels/fullyconnected.hpp b/aidge_export_cpp/kernels/fullyconnected.hpp
index 92aef1501f7ef96a977f25ab4faf33521ac56540..895ed1c21d35e7e266f788407dd7f42719607ad7 100644
--- a/aidge_export_cpp/kernels/fullyconnected.hpp
+++ b/aidge_export_cpp/kernels/fullyconnected.hpp
@@ -2,20 +2,20 @@
 #define __AIDGE_EXPORT_CPP_KERNELS_FULLYCONNECTED__
 
 #include "network/typedefs.hpp"
-#include "network/rescaling.hpp"
+#include "kernels/rescaling.hpp"
 #include "network/utils.hpp"
 #include "kernels/macs.hpp"
 #include "kernels/activation.hpp"
 
-template<int NB_CHANNELS, 
+template<int NB_CHANNELS,
          int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
          int NB_OUTPUTS,
          int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
          ActivationFunction_T ACTIVATION,
-         typename Input_T, typename Output_T, 
+         typename Input_T, typename Output_T,
          typename Weight_T, typename Bias_T,
          typename Rescaling_T>
-__attribute__((always_inline)) inline 
+__attribute__((always_inline)) inline
 void fullyconnected_forward (
     const Input_T* __restrict inputs,
     Output_T* __restrict outputs,
@@ -35,7 +35,7 @@ void fullyconnected_forward (
         for (int iy = 0; iy < CHANNELS_HEIGHT; ++iy) {
             for (int ix = 0; ix < CHANNELS_WIDTH; ++ix) {
                 for (int ch = 0; ch < NB_CHANNELS; ++ch) {
-                    weightedSum += inputs[CHANNELS_WIDTH*NB_CHANNELS*iy + NB_CHANNELS*ix + ch] 
+                    weightedSum += inputs[CHANNELS_WIDTH*NB_CHANNELS*iy + NB_CHANNELS*ix + ch]
                                 * weights[CHANNELS_HEIGHT*CHANNELS_WIDTH*NB_CHANNELS*och + CHANNELS_HEIGHT*CHANNELS_WIDTH*ch + CHANNELS_HEIGHT*iy + ix];
                 }
             }
@@ -58,8 +58,8 @@ Here the kernel to use with inputs in NHWC and weights in NHWC
                                     * (iy + CHANNELS_HEIGHT * och);
 
             macsOnRange<NB_CHANNELS * CHANNELS_WIDTH>(
-                inputs + iOffset, 
-                weights + wOffset, 
+                inputs + iOffset,
+                weights + wOffset,
                 weightedSum);
         }
 
@@ -69,4 +69,4 @@ Here the kernel to use with inputs in NHWC and weights in NHWC
 }
 
 
-#endif  // __AIDGE_EXPORT_CPP_KERNELS_FULLYCONNECTED__
\ No newline at end of file
+#endif  // __AIDGE_EXPORT_CPP_KERNELS_FULLYCONNECTED__
diff --git a/aidge_export_cpp/static/include/network/rescaling.hpp b/aidge_export_cpp/kernels/rescaling.hpp
similarity index 100%
rename from aidge_export_cpp/static/include/network/rescaling.hpp
rename to aidge_export_cpp/kernels/rescaling.hpp
diff --git a/aidge_export_cpp/memory.py b/aidge_export_cpp/memory.py
deleted file mode 100644
index 780edcd7cd56852a89425d528bd37d79c9aa95a0..0000000000000000000000000000000000000000
--- a/aidge_export_cpp/memory.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import aidge_core
-import aidge_backend_cpu
-from typing import List
-
-# for each layer, name: [size, offset start]
-# Example:
-#define ENV_MEM_SIZE 3
-#define ENV_OFFSET 0
-MEMORY_INFO_TEMPLATE = ["layer_name", "size", "offset"]
-
-
-# Default memory management, which can be used for development
-def compute_default_mem_info(scheduler: aidge_core.Scheduler):
-    
-    list_forward_nodes = scheduler.get_static_scheduling()
-    mem_info = []
-    mem_size = 0
-
-    # Exclude Producers and the last layers (because the results are stored outside the export)
-    for i, node in enumerate(list_forward_nodes):
-        if node.type() != "Producer":
-            if len(node.get_children()) != 0:
-                dims = node.get_operator().get_output(0).dims()
-                mem = 1
-                for dim in dims:
-                    mem *= dim
-
-                # Add memeory info
-                mem_info.append([node.name(), mem, mem_size])
-                
-                # Increment offset for the next layer
-                mem_size += mem
-
-    return mem_size, mem_info
-
-
-def generate_optimized_memory_info(scheduler: aidge_core.Scheduler,
-                                   wrapping:bool = False):
-    
-    # The forward dims has to done outside the function
-
-    # Generate the memory manager
-    mem_manager = scheduler.generate_memory(inc_producers=False, wrap_around_buffer=wrapping)
-
-    mem_size = 0
-    mem_info = []
-
-
-    return mem_size, mem_info
\ No newline at end of file
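For context, the default allocator removed here (and now imported from aidge_core.mem_info in export.py) laid tensors out linearly along the static scheduling. A rough, simplified sketch of that behaviour, based on the deleted implementation:

import numpy as np

def naive_mem_info(scheduler):
    """Simplified sketch of the removed compute_default_mem_info()."""
    mem_info, mem_size = [], 0
    for node in scheduler.get_static_scheduling():
        # Producers and graph outputs live outside the export buffer
        if node.type() != "Producer" and len(node.get_children()) != 0:
            size = int(np.prod(node.get_operator().get_output(0).dims()))
            mem_info.append([node.name(), size, mem_size])  # [name, size, offset]
            mem_size += size
    return mem_size, mem_info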
diff --git a/aidge_export_cpp/operators.py b/aidge_export_cpp/operators.py
index 602aea8dcee699ced05863ecc627e94193e9e653..9654a20d3be3c258c195f6f6f35b706f56ccdda7 100644
--- a/aidge_export_cpp/operators.py
+++ b/aidge_export_cpp/operators.py
@@ -1,41 +1,19 @@
 import os
-import shutil
 import numpy as np
 from pathlib import Path
-from jinja2 import Environment, FileSystemLoader
-
-from aidge_core import ExportNode
-from aidge_core.export_utils.code_generation import *
-from aidge_export_cpp.utils import ROOT, operator_register
+import aidge_core
+from aidge_core.export_utils import ExportNode, ExportNodeCpp, generate_file
+from aidge_export_cpp.utils import ROOT
 from aidge_export_cpp.utils.converter import numpy_dtype2ctype
-from aidge_export_cpp.utils.generation import *
-
-##############################################
-################### Utils ####################
-##############################################
-
-def get_node_parents(node):
-    parents = []
-    for parent in node.get_parents():
-        if parent.type() != "Producer":
-            parents.append(parent)
-    return parents
-
-def get_producer_parents(node):
-    parents = []
-    for parent in node.get_parents():
-        if parent.type() == "Producer":
-            parents.append(parent)
-    return parents
-
+from aidge_export_cpp import ExportLibCpp
 
 ##############################################
 ############## Export functions ##############
 ##############################################
 
-def export_params(name:str,
+def export_params(name: str,
                   array: np.ndarray,
-                  filepath:str):
+                  filepath: str):
 
     # Get directory name of the file
     dirname = os.path.dirname(filepath)
@@ -47,474 +25,258 @@ def export_params(name:str,
     generate_file(
         filepath,
         str(ROOT / "templates" / "data" / "parameters.jinja"),
-        name = name,
-        data_t = numpy_dtype2ctype(array.dtype),
-        values = array.tolist()
+        name=name,
+        data_t=numpy_dtype2ctype(array.dtype),
+        values=array.tolist()
     )
 
 
-##############################################
-################### Actions ##################
-##############################################
-
-def set_up_output(name, datatype):
-    return f"{datatype}* {name} = ({datatype}*) mem + {name.upper()}_OFFSET;"
-
-
 ##############################################
 ############## Operators helper ##############
 ##############################################
 
-
-@operator_register("Producer")
+@ExportLibCpp.register("Producer", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
 class ProducerCPP(ExportNode):
-
-    def __init__(self, node):
-        super().__init__(node)
-        self.constant = self.operator.attr.constant
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.values = np.array(self.operator.get_output(0))
 
-        if len(self.values.shape) == 4:
+        if len(self.values.shape) == 4:  # Note: export in HWC
             self.values = np.transpose(self.values, (0, 2, 3, 1))
 
-    def export(self, export_folder:Path, list_configs:list):
-
-        # If not constant, it is a dataprovider
-        # and not a parameter provider
-        if (self.constant):
-            list_configs.append(f"parameters/{self.name}.h")
-
-            # Export in HWC
-            export_params(self.name,
-                          self.values.reshape(-1),
-                          str(export_folder / "parameters" / f"{self.name}.h"))
-
-        return list_configs
+    def export(self, export_folder: Path):
+        header_path = f"include/parameters/{self.attributes['name']}.h"
+        export_params(
+            self.attributes['out_name'][0],
+            self.values.reshape(-1),
+            str(export_folder / header_path))
+        return [header_path]
 
-    def forward(self, list_actions:list):
+    def forward(self):
         # A Producer does nothing during forward
-        return list_actions
-
-
-@operator_register("ReLU")
-class ReLUCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-        self.nb_data = 1
-        for i in self.inputs_dims[0]:
-            self.nb_data *= i
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        copyfile(str(ROOT / "kernels" / "activation.hpp"),
-                 str(export_folder / "include" / "kernels"))
-
-        list_configs.append("kernels/activation.hpp")
-        list_configs.append(f"layers/{self.name}.h")
-        generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
-            str(ROOT / "templates" / "configuration" / "activation_config.jinja"),
-            name=self.name,
-            nb_data=self.nb_data,
-            activation="Rectifier",
-            rescaling="NoScaling")
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_str(
-            str(ROOT / "templates" / "kernel_forward" / "activation_forward.jinja"),
-            name=self.name,
-            input_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
-            output_name=self.name
-        ))
-        return list_actions
-
-
-@operator_register("Conv")
-class ConvCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-        self.kernel = node.get_operator().attr.kernel_dims
-        self.stride = node.get_operator().attr.stride_dims
-        self.dilation = node.get_operator().attr.dilation_dims
-
+        return []
+
+# TODO: find a way to remove this dummy export node
+@ExportLibCpp.register("Pad2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class Pad_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        raise NotImplementedError("Pad2D nodes are not implemented")
+
+
+@ExportLibCpp.register("ReLU", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class ReLUCPP(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["activation"] = "Rectifier"
+        self.attributes["rescaling"] = "NoScaling"
+        self.config_template = str(
+            ROOT / "templates" / "configuration" / "activation_config.jinja")
+        self.forward_template = str(
+            ROOT / "templates" / "kernel_forward" / "activation_forward.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "kernels" / "activation.hpp"),
+            str(ROOT / "kernels" / "rescaling.hpp")
+        ]
+
+@ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class ConvCPP(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         # No padding with Conv
         # Use PaddedConv to add padding attribute
-        self.padding = [0, 0]
-
-        self.nb_channels = node.get_operator().in_channels()
-        self.nb_outputs = node.get_operator().out_channels()
-
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        copyfile(str(ROOT / "kernels" / "convolution.hpp"),
-                 str(export_folder / "include" / "kernels"))
-        copyfile(str(ROOT / "kernels" / "macs.hpp"),
-                 str(export_folder / "include" / "kernels"))
-        copyfile(str(ROOT / "kernels" / "activation.hpp"),
-                 str(export_folder / "include" / "kernels"))
-
-        list_configs.append("kernels/convolution.hpp")
-        list_configs.append(f"layers/{self.name}.h")
-        generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
-            str(ROOT / "templates" / "configuration" / "convolution_config.jinja"),
-            name=self.name,
-            input_dims=self.inputs_dims[0],
-            output_dims=self.outputs_dims[0],
-            kernel=self.kernel,
-            stride=self.stride,
-            padding=self.padding,
-            dilation=self.dilation,
-            activation="Linear",
-            rescaling="NoScaling")
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_str(
-            str(ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja"),
-            name=self.name,
-            input_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
-            output_name=self.name,
-            weights_name=self.inputs[1].name(),
-            biases_name=self.inputs[2].name()
-        ))
-        return list_actions
-
-
-@operator_register("PaddedConv")
-class PaddedConvCPP(ConvCPP):
-    def __init__(self, node):
-        ExportNode.__init__(self, node)
-
+        self.attributes["padding"] = [0, 0]
+        self.attributes["activation"] = "Linear"
+        self.attributes["rescaling"] = "NoScaling"
+        self.config_template = str(
+            ROOT / "templates" / "configuration" / "convolution_config.jinja")
+        self.forward_template = str(
+            ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "kernels" / "convolution.hpp"),
+            str(ROOT / "kernels" / "macs.hpp"),
+            str(ROOT / "kernels" / "activation.hpp"),
+            str(ROOT / "kernels" / "rescaling.hpp")
+        ]
+
+@ExportLibCpp.register_metaop("PaddedConv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class PaddedConvCPP(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        # TODO: find a way to retrieve attributes for meta operators
         for n in self.operator.get_micro_graph().get_nodes():
-            if n.type() == "Pad":
-                self.padding = n.get_operator().attr.begin_end_borders
-            if n.type() == "Conv":
-                self.kernel = n.get_operator().attr.kernel_dims
-                self.stride = n.get_operator().attr.stride_dims
-                self.dilation = n.get_operator().attr.dilation_dims
-
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
-
-@operator_register("Add")
-class AddCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-    def export(self, export_folder:str, list_configs:list):
-        list_configs.append(f"layers/{self.name}.h")
-        list_configs.append("kernels/elemwise.hpp")
-
-        copyfile(str(ROOT / "kernels" / "elemwise.hpp"),
-                 str(export_folder / "include" / "kernels"))
-        copyfile(str(ROOT / "kernels" / "activation.hpp"),
-                 str(export_folder / "include" / "kernels"))
-
-        generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
-            str(ROOT / "templates" / "configuration" / "elemwise_config.jinja"),
-            name=self.name,
-            nb_elts=np.prod(self.inputs_dims[0]),
-            activation="Linear",
-            elemwise_op="Add",
-            rescaling="NoScaling")
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-        list_actions.append(generate_str(
-            str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja"),
-            name=self.name,
-            inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
-            inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
-            output_name=self.name
-        ))
-        return list_actions
-
-@operator_register("Sub")
-class SubCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-    def export(self, export_folder:str, list_configs:list):
-        list_configs.append(f"layers/{self.name}.h")
-        list_configs.append("kernels/elemwise.hpp")
-        copyfile(str(ROOT / "kernels" / "elemwise.hpp"),
-                 str(export_folder / "include" / "kernels"))
-        copyfile(str(ROOT / "kernels" / "activation.hpp"),
-                 str(export_folder / "include" / "kernels"))
-
-        generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
-            str(ROOT / "templates" / "configuration" / "elemwise_config.jinja"),
-            name=self.name,
-            nb_elts=np.prod(self.inputs_dims[0]),
-            activation="Linear",
-            elemwise_op="Sub",
-            rescaling="NoScaling")
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_str(
-            str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja"),
-            name=self.name,
-            inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
-            inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
-            output_name=self.name
-        ))
-        return list_actions
-
-@operator_register("Mul")
-class MulCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-    def export(self, export_folder:str, list_configs:list):
-        list_configs.append(f"layers/{self.name}.h")
-        list_configs.append("kernels/elemwise.hpp")
-        copyfile(str(ROOT / "kernels" / "elemwise.hpp"),
-                 str(export_folder / "include" / "kernels"))
-        copyfile(str(ROOT / "kernels" / "activation.hpp"),
-                 str(export_folder / "include" / "kernels"))
-
-        generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
-            str(ROOT / "templates" / "configuration" / "elemwise_config.jinja"),
-            name=self.name,
-            nb_elts=np.prod(self.inputs_dims[0]),
-            activation="Linear",
-            elemwise_op="Mul",
-            rescaling="NoScaling")
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_str(
-            str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja"),
-            name=self.name,
-            inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
-            inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
-            output_name=self.name
-        ))
-        return list_actions
-
-@operator_register("MaxPooling")
-class MaxPoolCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-        self.kernel = node.get_operator().attr.kernel_dims
-        self.stride = node.get_operator().attr.stride_dims
+            if n.type() == "Pad2D":
+                self.attributes["padding"] = n.get_operator(
+                ).attr.begin_end_borders
+            if n.type() == "Conv2D":
+                self.attributes["kernel_dims"] = n.get_operator(
+                ).attr.kernel_dims
+                self.attributes["stride_dims"] = n.get_operator(
+                ).attr.stride_dims
+                self.attributes["dilation_dims"] = n.get_operator(
+                ).attr.dilation_dims
+        self.attributes["activation"] = "Linear"
+        self.attributes["rescaling"] = "NoScaling"
+        self.config_template = str(
+            ROOT / "templates" / "configuration" / "convolution_config.jinja")
+        self.forward_template = str(
+            ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "kernels" / "convolution.hpp"),
+            str(ROOT / "kernels" / "macs.hpp"),
+            str(ROOT / "kernels" / "activation.hpp"),
+            str(ROOT / "kernels" / "rescaling.hpp")
+        ]
+
+@ExportLibCpp.register("Add", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class AddCPP(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["elemwise_op"] = "Add"
+        self.attributes["activation"] = "Linear"
+        self.attributes["rescaling"] = "NoScaling"
+        self.config_template = str(
+            ROOT / "templates" / "configuration" / "elemwise_config.jinja")
+        self.forward_template = str(
+            ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "kernels" / "elemwise.hpp"),
+            str(ROOT / "kernels" / "activation.hpp"),
+            str(ROOT / "kernels" / "rescaling.hpp")
+        ]
+
+@ExportLibCpp.register("Sub", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class SubCPP(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["elemwise_op"] = "Sub"
+        self.attributes["activation"] = "Linear"
+        self.attributes["rescaling"] = "NoScaling"
+        self.config_template = str(
+            ROOT / "templates" / "configuration" / "elemwise_config.jinja")
+        self.forward_template = str(
+            ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "kernels" / "elemwise.hpp"),
+            str(ROOT / "kernels" / "activation.hpp"),
+            str(ROOT / "kernels" / "rescaling.hpp")
+        ]
+
+
+@ExportLibCpp.register("Mul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class MulCPP(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["elemwise_op"] = "Mul"
+        self.attributes["activation"] = "Linear"
+        self.attributes["rescaling"] = "NoScaling"
+        self.config_template = str(
+            ROOT / "templates" / "configuration" / "elemwise_config.jinja")
+        self.forward_template = str(
+            ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "kernels" / "elemwise.hpp"),
+            str(ROOT / "kernels" / "activation.hpp"),
+            str(ROOT / "kernels" / "rescaling.hpp")
+        ]
+
+@ExportLibCpp.register("MaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class MaxPoolCPP(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
 
         # No padding with MaxPooling
         # Use PaddedMaxPooling to add padding attribute
-        self.padding = [0, 0]
-
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        copyfile(str(ROOT / "kernels" / "pooling.hpp"),
-                 str(export_folder / "include" / "kernels"))
-
-        list_configs.append("kernels/pooling.hpp")
-        list_configs.append(f"layers/{self.name}.h")
-
-        generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
-            str(ROOT / "templates" / "configuration" / "pooling_config.jinja"),
-            name=self.name,
-            input_dims=self.inputs_dims[0],
-            output_dims=self.outputs_dims[0],
-            kernel=self.kernel,
-            stride=self.stride,
-            padding=self.padding,
-            pool_type="Max",
-            activation="Linear")
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_str(
-            str(ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja"),
-            name=self.name,
-            input_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
-            output_name=self.name
-        ))
-        return list_actions
-
-@operator_register("FC")
-class FcCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-        elif len(self.inputs_dims[0]) == 2:
-            # if dims == [batch, nb_channels]
-            # transform to [nb_channels, 1, 1]
-            self.inputs_dims[0] = [self.inputs_dims[0][1], 1, 1]
-
-        if len(self.outputs_dims[0]) == 2:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = [self.outputs_dims[0][1], 1, 1]
-
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        copyfile(str(ROOT / "kernels" / "fullyconnected.hpp"),
-                 str(export_folder / "include" / "kernels"))
-        copyfile(str(ROOT / "kernels" / "macs.hpp"),
-                 str(export_folder / "include" / "kernels"))
-        copyfile(str(ROOT / "kernels" / "activation.hpp"),
-                 str(export_folder / "include" / "kernels"))
-
-        # Add to config list the include of configurations
-        list_configs.append("kernels/fullyconnected.hpp")
-        list_configs.append(f"layers/{self.name}.h")
-
-        # Export configuration file
-        generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
-            str(ROOT / "templates" / "configuration" / "fullyconnected_config.jinja"),
-            name=self.name,
-            input_dims=self.inputs_dims[0],
-            output_dims=self.outputs_dims[0],
-            activation="Linear",
-            rescaling="NoScaling")
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-        list_actions.append(generate_str(
-            str(ROOT / "templates" / "kernel_forward" / "fullyconnected_forward.jinja"),
-            name=self.name,
-            inputs_name= self.inputs[0].name() if (self.inputs[0] is not None) else self.name + '_input',
-            weights_name=self.inputs[1].name(),
-            biases_name=self.inputs[2].name(),
-            outputs_name=self.name
-        ))
-        return list_actions
-
-@operator_register("MatMul")
-class MatMulCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-        dims0, dims1, outdims = [tuple(x) for x in [self.inputs_dims[0], self.inputs_dims[1], self.outputs_dims[0]]]
-
-        # TODO: MatMul aidge operator supports N-D multi broadcast dimensions where N > 2
-        assert len(dims0) <= 2 and len(dims1) <= 2, (
-            f"MatMul export do not support yet dimensions above 2D:  inputs shapes are: {dims0}, {dims1}")
-
-        # Cast to at least 1D
-        # Note that from MatMul::forwardDims(), scalar inputs are supported
-        # which is actually more general than np.matmul
-        dims0 = dims0 if len(dims0) >= 1 else (1, 1)
-        dims1 = dims1 if len(dims1) >= 1 else (1, 1)
-
-        # Cast to at least 2D
-        dims0 = dims0 if len(dims0) >= 2 else (1, dims0[0])
-        dims1 = dims1 if len(dims1) >= 2 else (dims1[0], 1)
-        assert dims0[1] == dims1[0], (
-            f"MatMul input dimensions do no match, expected (m, k), (k, n): inputs shapes are: {dims0}, {dims1}")
-
-        outdims = outdims if len(outdims) > 0 else (1, 1)
-        assert outdims == (dims0[0], dims1[1]), (
-            f"MatMul output dimensions do no match, expected (m, n) for inputs (m, k) (k, n): output shape is: {outdims}, inputs shapes are: {dims0}, {dims1}")
-
-        self.matmul_inputs_dims = dims0, dims1
-        self.matmul_output_dims = outdims
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        copyfile(str(ROOT / "kernels" / "matmul.hpp"),
-                 str(export_folder / "include" / "kernels"))
-        copyfile(str(ROOT / "kernels" / "activation.hpp"),
-                 str(export_folder / "include" / "kernels"))
-
-        # Add to config list the include of configurations
-        list_configs.append("kernels/matmul.hpp")
-        list_configs.append(f"layers/{self.name}.h")
-
-        # Export configuration file
-        generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
-            str(ROOT / "templates" / "configuration" / "matmul_config.jinja"),
-            name=self.name,
-            inputs_dims=self.matmul_inputs_dims,
-            output_dims=self.matmul_output_dims,
-            activation="Linear",
-            rescaling="NoScaling",
-        )
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-        list_actions.append(generate_str(
-            str(ROOT / "templates" / "kernel_forward" / "matmul_forward.jinja"),
-            name=self.name,
-            inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
-            inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
-            outputs_name=self.name
-        ))
-        return list_actions
+        self.attributes["padding"] = [0, 0]
+        self.attributes["pool_type"] = "Max"
+        self.attributes["activation"] = "Linear"
+
+        self.config_template = str(
+            ROOT / "templates" / "configuration" / "pooling_config.jinja")
+        self.forward_template = str(
+            ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "kernels" / "pooling.hpp"),
+            str(ROOT / "kernels" / "activation.hpp"),
+            str(ROOT / "kernels" / "rescaling.hpp")
+        ]
+
+
+@ExportLibCpp.register_metaop("PaddedMaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class PaddedMaxPoolCPP(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        for n in self.operator.get_micro_graph().get_nodes():
+            if n.type() == "Pad2D":
+                self.attributes["padding"] = n.get_operator(
+                ).attr.begin_end_borders
+            if n.type() == "MaxPooling2D":
+                self.attributes["kernel_dims"] = n.get_operator(
+                ).attr.kernel_dims
+                self.attributes["stride_dims"] = n.get_operator(
+                ).attr.stride_dims
+        self.attributes["pool_type"] = "Max"
+        self.attributes["activation"] = "Linear"
+
+        self.config_template = str(
+            ROOT / "templates" / "configuration" / "pooling_config.jinja")
+        self.forward_template = str(
+            ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "kernels" / "pooling.hpp"),
+            str(ROOT / "kernels" / "activation.hpp"),
+            str(ROOT / "kernels" / "rescaling.hpp")
+        ]
+
+
+@ExportLibCpp.register("GlobalAveragePooling", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class GlobalAveragePoolCPP(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        self.attributes["stride_dims"] = [1, 1]
+        # GlobalAveragePooling is mapped onto the generic pooling kernel:
+        # fixed stride, no padding, kernel covering the whole input feature map
+        self.attributes["padding"] = [0, 0]
+        self.attributes["kernel_dims"] = [
+            self.attributes["in_height"][0],
+            self.attributes["in_width"][0],
+        ]
+        self.attributes["pool_type"] = "Average"
+        self.attributes["activation"] = "Linear"
+
+        self.config_template = str(
+            ROOT / "templates" / "configuration" / "pooling_config.jinja")
+        self.forward_template = str(
+            ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "kernels" / "pooling.hpp"),
+            str(ROOT / "kernels" / "activation.hpp"),
+            str(ROOT / "kernels" / "rescaling.hpp")
+        ]
+
+@ExportLibCpp.register("FC", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class FcCPP(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["activation"] = "Linear"
+        self.attributes["rescaling"] = "NoScaling"
+        self.config_template = str(
+            ROOT / "templates" / "configuration" / "fullyconnected_config.jinja")
+        self.forward_template = str(
+            ROOT / "templates" / "kernel_forward" / "fullyconnected_forward.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "kernels" / "fullyconnected.hpp"),
+            str(ROOT / "kernels" / "macs.hpp"),
+            str(ROOT / "kernels" / "activation.hpp"),
+            str(ROOT / "kernels" / "rescaling.hpp")
+        ]
diff --git a/aidge_export_cpp/operators_old.py b/aidge_export_cpp/operators_old.py
deleted file mode 100644
index aa780fb293168800714d8fea827642510bc8597f..0000000000000000000000000000000000000000
--- a/aidge_export_cpp/operators_old.py
+++ /dev/null
@@ -1,474 +0,0 @@
-from aidge_core import ExportNode
-import numpy as np
-from jinja2 import Environment, FileSystemLoader
-import os
-import shutil
-from aidge_export_cpp.register import export_cpp_register
-
-dirpath = os.path.dirname(__file__)
-
-class KERNELS:
-    ACTIVATION = dirpath + "/kernels/activation.hpp"
-    BATCHNORM = dirpath + "/kernels/batchnorm.hpp"
-    CONV = dirpath + "/kernels/convolution.hpp"
-    ADD = dirpath + "/kernels/elemwise.hpp"
-    FC = dirpath + "/kernels//fullyconnected.hpp"
-    POOLING = dirpath + "/kernels/pooling.hpp"
-    LEAKYRELU = dirpath + "/kernels/leakyrelu.hpp"
-
-class KERNELS_FORWARD:
-    ACTIVATION = dirpath + "/templates/kernel_forward/activation_forward.jinja"
-    BATCHNORM = dirpath + "/templates/kernel_forward/batchnorm_forward.jinja"
-    CONV = dirpath + "/templates/kernel_forward/convolution_forward.jinja"
-    ADD = dirpath + "/templates/kernel_forward/elemwise_forward.jinja"
-    FC = dirpath + "/templates/kernel_forward/fullyconnected_forward.jinja"
-    POOLING = dirpath + "/templates/kernel_forward/pooling_forward.jinja"
-    LEAKYRELU = dirpath + "/templates/kernel_forward/leakyrelu_forward.jinja"
-
-class CONFIGURATIONS:
-    ACTIVATION = dirpath + "/templates/configuration/activation_config.jinja"
-    BATCHNORM = dirpath + "/templates/configuration/batchnorm_config.jinja"
-    CONV = dirpath + "/templates/configuration/convolution_config.jinja"
-    ADD = dirpath + "/templates/configuration/elemwise_config.jinja"
-    FC = dirpath + "/templates/configuration//fullyconnected_config.jinja"
-    POOLING = dirpath + "/templates/configuration/pooling_config.jinja"
-    LEAKYRELU = dirpath + "/templates/configuration/leakyrelu_config.jinja"
-
-##############################################
-############## Export functions ##############
-##############################################
-
-def generate_file(filename, templatename, **kwargs):
-
-    # Get directory name of the file
-    dirname = os.path.dirname(filename)
-
-    # If directory doesn't exist, create it
-    if not os.path.exists(dirname):
-        os.makedirs(dirname)
-
-    # Get directory name and name of the template
-    template_dir = os.path.dirname(templatename)
-    template_name = os.path.basename(templatename)
-
-    # Select template
-    template = Environment(loader=FileSystemLoader(template_dir)).get_template(template_name)
-
-    # Generate file
-    content = template.render(kwargs)
-    with open(filename, mode="w", encoding="utf-8") as message:
-        message.write(content)
-
-
-def generate_action(template_path, **kwargs):
-    dirname = os.path.dirname(template_path)
-    filename = os.path.basename(template_path)
-    template = Environment(loader=FileSystemLoader(dirname)).get_template(filename)
-    return template.render(kwargs)
-
-
-def copyfile(filename, dst_folder):
-
-    # If directory doesn't exist, create it
-    if not os.path.exists(dst_folder):
-        os.makedirs(dst_folder)
-
-    shutil.copy(filename, dst_folder)
-
-
-def export_to_static(name, array, filepath):
-
-    # Get directory name of the file
-    dirname = os.path.dirname(filepath)
-
-    # If directory doesn't exist, create it
-    if not os.path.exists(dirname):
-        os.makedirs(dirname)
-
-    generate_file(
-        filepath,
-        dirpath + "/templates/data/data_static.jinja",
-        dims = array.shape,
-        data_t = "float",
-        name = name,
-        values = array.tolist()
-    )
-
-
-##############################################
-################### Utils ####################
-##############################################
-
-def get_node_parents(node):
-    parents = []
-    for parent in node.get_parents():
-        if parent.type() != "Producer":
-            parents.append(parent)
-    return parents
-
-def get_producer_parents(node):
-    parents = []
-    for parent in node.get_parents():
-        if parent.type() == "Producer":
-            parents.append(parent)
-    return parents
-
-
-##############################################
-################### Actions ##################
-##############################################
-
-def set_up_output(name, datatype):
-    return f"{datatype}* {name} = ({datatype}*) mem + {name.upper()}_OFFSET;"
-
-
-##############################################
-############## Operators helper ##############
-##############################################
-
-@export_cpp_register("Conv")
-class ConvCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-        self.kernel = node.get_operator().get_attr("KernelDims")
-        self.stride = node.get_operator().get_attr("StrideDims")
-
-        # Not working anymore because Padding is a standalone operator
-        # self.padding = node.get_operator().get_attr("PaddingDims")
-        self.padding = [1, 1]
-        self.dilation = node.get_operator().get_attr("DilationDims")
-        self.nb_channels = node.get_operator().get_attr("InChannels")
-        self.nb_outputs = node.get_operator().get_attr("OutChannels")
-
-    def export(self, export_folder:str, list_configs:list):
-
-        copyfile(KERNELS.CONV, f"{export_folder}/include/kernels/")
-        copyfile(dirpath + "/kernels/macs.hpp", f"{export_folder}/include/kernels/")
-        copyfile(dirpath + "/kernels/activation.hpp", f"{export_folder}/include/kernels/")
-
-        list_configs.append("kernels/convolution.hpp")
-        list_configs.append(f"layers/{self.name}.h")
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            CONFIGURATIONS.CONV,
-            name=self.name,
-            input_dims=self.inputs_dims[0][1:],
-            output_dims=self.outputs_dims[0][1:],
-            kernel=self.kernel,
-            stride=self.stride,
-            padding=self.padding,
-            dilation=self.dilation,
-            activation="Linear",
-            rescaling="NoScaling")
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_action(
-            KERNELS_FORWARD.CONV,
-            name=self.name,
-            input_name=self.inputs[0].name(),
-            output_name=self.name,
-            weights_name=self.inputs[1].name(),
-            biases_name=self.inputs[2].name()
-        ))
-        return list_actions
-
-
-class BatchNormCPP:
-    def __init__(self, node):
-        self.name = node.name()
-        self.epsilon = node.get_operator().get_attr("Epsilon")
-
-        self.producers = get_producer_parents(node)
-
-        self.scales = np.array(self.producers[0].get_operator().get_output(0)).reshape(-1).tolist()
-        self.biases = np.array(self.producers[1].get_operator().get_output(0)).reshape(-1).tolist()
-        self.means = np.array(self.producers[2].get_operator().get_output(0)).reshape(-1).tolist()
-        self.vars = np.array(self.producers[3].get_operator().get_output(0)).reshape(-1).tolist()
-
-        parents = get_node_parents(node)
-        if len(parents) == 0:
-            self.input_name = "in"
-        else :
-            self.input_name = parents[0].name()
-
-    def export(self, export_folder:str, list_configs:list):
-        list_configs.append(f"layers/{self.name}.h")
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            "tensorrt/templates/configuration/batchnorm_config.jinja",
-            name=self.name,
-            input_dims=[0, 0, 0],
-            output_dims=[0, 0, 0],
-            activation="Linear",
-            epsilon=self.epsilon)
-
-        # export the batchnorm parameters
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        list_actions.append(set_up_output(self.name, "float"))
-        list_actions.append(generate_action(
-            "cpp/templates/kernel_forward/batchnorm_forward.jinja",
-            name=self.name,
-            input_name=self.input_name,
-            output_name=self.name,
-            biases_name=self.producers[0].name(),
-            variances_name=self.producers[1].name(),
-            means_name=self.producers[2].name(),
-            scales_name=self.producers[3].name()
-        ))
-        return list_actions
-
-@export_cpp_register("ReLU")
-class ReLUCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-        self.nb_data = 1
-        for i in self.inputs_dims[0]:
-            self.nb_data *= i
-
-    def export(self, export_folder:str, list_configs:list):
-
-        copyfile(KERNELS.ACTIVATION, f"{export_folder}/include/kernels/")
-
-        list_configs.append("kernels/activation.hpp")
-        list_configs.append(f"layers/{self.name}.h")
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            CONFIGURATIONS.ACTIVATION,
-            name=self.name,
-            nb_data=self.nb_data,
-            activation="Rectifier",
-            rescaling="NoScaling")
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_action(
-            KERNELS_FORWARD.ACTIVATION,
-            name=self.name,
-            input_name=self.inputs[0].name(),
-            output_name=self.name
-        ))
-        return list_actions
-
-
-@export_cpp_register("LeakyReLU")
-class LeakyReLUCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-        self.alpha = node.get_operator().get_attr("NegativeSlope")
-
-        self.nb_data = 1
-        for i in self.inputs_dims[0]:
-            self.nb_data *= i
-
-    def export(self, export_folder:str, list_configs:list):
-
-        copyfile(KERNELS.LEAKYRELU, f"{export_folder}/include/kernels/")
-
-        list_configs.append("kernels/activation.hpp")
-        list_configs.append(f"layers/{self.name}.h")
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            CONFIGURATIONS.LEAKYRELU,
-            name=self.name,
-            nb_data=self.nb_data,
-            alpha = self.alpha)
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_action(
-            KERNELS_FORWARD.LEAKYRELU,
-            name=self.name,
-            input_name=self.inputs[0].name(),
-            output_name=self.name
-        ))
-        return list_actions
-
-
-class AddCPP:
-    def __init__(self, node):
-        self.name = node.name()
-        self.parents = get_node_parents(node)
-
-    def export(self, export_folder:str, list_configs:list):
-        list_configs.append(f"layers/{self.name}.h")
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            CONFIGURATIONS.ADD,
-            name=self.name,
-            input_dims=[0, 0, 0],
-            output_dims=[0, 0, 0],
-            activation="Linear",
-            elemwise_op="Sum")
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        list_actions.append(set_up_output(self.name, "float"))
-        list_actions.append(generate_action(
-            "cpp/templates/kernel_forward/elemwise_forward.jinja",
-            name=self.name,
-            input1_name=self.parents[0].name(),
-            input2_name=self.parents[1].name(),
-            output_name=self.name
-        ))
-        return list_actions
-
-
-@export_cpp_register("MaxPooling")
-class MaxPoolCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-        self.kernel = node.get_operator().get_attr("KernelDims")
-        self.stride = node.get_operator().get_attr("StrideDims")
-
-        # Not supported by the core...
-        # self.padding = node.get_operator().get_attr("PaddingDims")
-        self.padding = [0, 0]
-
-    def export(self, export_folder:str, list_configs:list):
-
-        copyfile(KERNELS.POOLING, f"{export_folder}/include/kernels/")
-
-        list_configs.append("kernels/pooling.hpp")
-        list_configs.append(f"layers/{self.name}.h")
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            CONFIGURATIONS.POOLING,
-            name=self.name,
-            input_dims=self.inputs_dims[0],
-            output_dims=self.outputs_dims[0],
-            kernel=self.kernel,
-            stride=self.stride,
-            padding=self.padding,
-            pool_type="Max",
-            activation="Linear")
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_action(
-            KERNELS_FORWARD.POOLING,
-            name=self.name,
-            input_name=self.inputs[0].name(),
-            output_name=self.name
-        ))
-        return list_actions
-
-
-class GlobalAvgPoolCPP:
-    def __init__(self, node):
-
-        # node.get_operator().set_compute_output_dims(lambda x: [[x[0][0], x[0][1], 1, 1]])
-        pass
-
-    def export(self, export_folder:str, list_configs:list):
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        list_actions.append(set_up_output(self.name, "float"))
-        list_actions.append(generate_action(
-            "cpp/templates/kernel_forward/pooling_forward.jinja",
-            name=self.name,
-            input_name=self.input_name,
-            output_name=self.name
-        ))
-        return list_actions
-
-@export_cpp_register("FC")
-class FcCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-        if len(self.inputs_dims[0]) == 2:
-            self.inputs_dims[0] = [self.inputs_dims[0][1], 1, 1]
-        elif len(self.inputs_dims[0]) == 4:
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 2:
-            self.outputs_dims[0] = [self.outputs_dims[0][1], 1, 1]
-
-    def export(self, export_folder:str, list_configs:list):
-
-        copyfile(KERNELS.FC, f"{export_folder}/include/kernels/")
-        copyfile(dirpath + "/kernels/macs.hpp", f"{export_folder}/include/kernels/")
-        copyfile(dirpath + "/kernels/activation.hpp", f"{export_folder}/include/kernels/")
-
-        list_configs.append("kernels/fullyconnected.hpp")
-        list_configs.append(f"layers/{self.name}.h")
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            CONFIGURATIONS.FC,
-            name=self.name,
-            input_dims=self.inputs_dims[0],
-            output_dims=self.outputs_dims[0],
-            activation="Linear",
-            rescaling="NoScaling")
-
-        return list_configs
-
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_action(
-            KERNELS_FORWARD.FC,
-            name=self.name,
-            input_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
-            output_name=self.name,
-            weights_name=self.inputs[1].name(),
-            biases_name=self.inputs[2].name()
-        ))
-        return list_actions
-
-
-@export_cpp_register("Producer")
-class ProducerCPP(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-        self.values = np.array(self.operator.get_output(0))
-
-    def export(self, export_folder:str, list_configs:list):
-
-        list_configs.append(f"parameters/{self.name}.h")
-        export_to_static(self.name,
-                            self.values.reshape(-1),
-                            f"{export_folder}/parameters/{self.name}.h")
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-        return list_actions
diff --git a/aidge_export_cpp/static/include/network/utils.hpp b/aidge_export_cpp/static/include/network/utils.hpp
index 8942d3abd80eb362ca972f0c0710dc80df501add..e2bfbe2f35b3522d0600f10e8481e0879338f43a 100644
--- a/aidge_export_cpp/static/include/network/utils.hpp
+++ b/aidge_export_cpp/static/include/network/utils.hpp
@@ -1,16 +1,23 @@
 #ifndef __AIDGE_EXPORT_CPP_NETWORK_UTILS__
 #define __AIDGE_EXPORT_CPP_NETWORK_UTILS__
 
+#ifdef SAVE_OUTPUTS
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <cstdio>      // fprintf
+#include <type_traits> // std::is_floating_point
+#endif
+
 /**
  * @brief   Integer clamping
  * @param[in]  v   Value to be clamped
  * @param[in]  lo  Saturating lower bound
  * @param[in]  hi  Saturating higher bound
  * @returns         Value clamped between lo and hi
- * 
+ *
  */
 __attribute__((always_inline)) static inline
-int clamp (int v, int lo, int hi) 
+int clamp (int v, int lo, int hi)
 {
     if(v < lo) {
         return lo;
@@ -27,7 +34,7 @@ int clamp (int v, int lo, int hi)
  * @brief   Maximum of two integer values
  */
 __attribute__((always_inline)) static inline
-int max (int lhs, int rhs) 
+int max (int lhs, int rhs)
 {
     return (lhs >= rhs) ? lhs : rhs;
 }
@@ -36,9 +43,107 @@ int max (int lhs, int rhs)
  * @brief   Minimum of two integer values
  */
 __attribute__((always_inline)) static inline
-int min (int lhs, int rhs) 
+int min (int lhs, int rhs)
 {
     return (lhs <= rhs) ? lhs : rhs;
 }
 
-#endif  // __AIDGE_EXPORT_CPP_NETWORK_UTILS__
+
+#ifdef SAVE_OUTPUTS
+enum class Format {
+    Default,
+    NCHW,
+    NHWC,
+    CHWN,
+    NCDHW,
+    NDHWC,
+    CDHWN
+};
+
+
+template<typename Output_T>
+inline void saveOutputs(
+    int NB_OUTPUTS,
+    int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
+    int OUTPUT_MEM_CONT_OFFSET,
+    int OUTPUT_MEM_CONT_SIZE,
+    int OUTPUT_MEM_WRAP_OFFSET,
+    int OUTPUT_MEM_WRAP_SIZE,
+    int OUTPUT_MEM_STRIDE,
+    const Output_T* __restrict outputs,
+    FILE* pFile,
+    Format format)
+{
+    // The 'outputs' buffer is assumed to be stored in NHWC order
+    if (format == Format::NHWC) {
+        fprintf(pFile, "(");
+        for(int oy = 0; oy < OUTPUTS_HEIGHT; oy++) {
+            fprintf(pFile, "(");
+
+            for(int ox = 0; ox < OUTPUTS_WIDTH; ox++) {
+                fprintf(pFile, "(");
+
+                const int oPos = (ox + OUTPUTS_WIDTH * oy);
+                int oOffset = OUTPUT_MEM_STRIDE * oPos;
+
+                if (OUTPUT_MEM_WRAP_SIZE > 0
+                    && oOffset >= OUTPUT_MEM_CONT_SIZE)
+                {
+                    oOffset += OUTPUT_MEM_WRAP_OFFSET - OUTPUT_MEM_CONT_OFFSET
+                                - OUTPUT_MEM_CONT_SIZE;
+                }
+
+                for (int output = 0; output < NB_OUTPUTS; output++) {
+                    if (std::is_floating_point<Output_T>::value)
+                        fprintf(pFile, "%f", static_cast<float>(outputs[oOffset + output]));
+                    else
+                        fprintf(pFile, "%d", static_cast<int>(outputs[oOffset + output]));
+
+                    fprintf(pFile, ", ");
+                }
+
+                fprintf(pFile, "), \n");
+            }
+
+            fprintf(pFile, "), \n");
+        }
+
+        fprintf(pFile, ")\n");
+    }
+    else if (format == Format::NCHW || format == Format::Default) {
+        for(int output = 0; output < NB_OUTPUTS; output++) {
+            fprintf(pFile, "%d:\n", output);
+            for(int oy = 0; oy < OUTPUTS_HEIGHT; oy++) {
+                for(int ox = 0; ox < OUTPUTS_WIDTH; ox++) {
+                    const int oPos = (ox + OUTPUTS_WIDTH * oy);
+                    int oOffset = OUTPUT_MEM_STRIDE * oPos;
+                    if (OUTPUT_MEM_WRAP_SIZE > 0
+                        && oOffset >= OUTPUT_MEM_CONT_SIZE)
+                    {
+                        oOffset += OUTPUT_MEM_WRAP_OFFSET
+                            - OUTPUT_MEM_CONT_OFFSET - OUTPUT_MEM_CONT_SIZE;
+                    }
+
+                    if (std::is_floating_point<Output_T>::value)
+                        fprintf(pFile, "%f", static_cast<float>(outputs[oOffset + output]));
+                    else
+                        fprintf(pFile, "%d",  static_cast<int>(outputs[oOffset + output]));
+
+                    fprintf(pFile, " ");
+                }
+
+                fprintf(pFile, "\n");
+            }
+
+            fprintf(pFile, "\n");
+        }
+
+        fprintf(pFile, "\n");
+    }
+    else {
+        printf("Warning unsupported dataformat.\n");
+    }
+}
+#endif // SAVE_OUTPUTS
+
+#endif // __AIDGE_EXPORT_CPP_NETWORK_UTILS__
diff --git a/aidge_export_cpp/static/main.cpp b/aidge_export_cpp/static/main.cpp
deleted file mode 100644
index 3f2b930c733fe8c9bfe05e303357ef61ad59db2f..0000000000000000000000000000000000000000
--- a/aidge_export_cpp/static/main.cpp
+++ /dev/null
@@ -1,21 +0,0 @@
-
-#include <iostream>
-#include "dnn.hpp"
-#include "inputs.h"
-
-int main()
-{
-    // Example for MNIST dataset
-    // Feel free to change this file for your own projects
-    const unsigned int nb_classes = 10;
-
-    float results[nb_classes];
-    model_forward(inputs, results);
-
-    for (unsigned int i = 0; i < nb_classes; ++i)
-    {
-        std::cout << i << ": " << results[i] << std::endl;
-    }
-
-    return 0;
-}
\ No newline at end of file
diff --git a/aidge_export_cpp/templates/configuration/_def_io.jinja b/aidge_export_cpp/templates/configuration/_def_io.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..66756cf8f501035f7222272f9c410908f499f06f
--- /dev/null
+++ b/aidge_export_cpp/templates/configuration/_def_io.jinja
@@ -0,0 +1,14 @@
+{# NOTE: Assumes the inputs come first #}
+// INPUT CONF
+{% for inidx in range(nb_in) -%}
+#define {{ in_name[inidx]|upper }}_NB_CHANNELS {{ in_chan[inidx] }}
+#define {{ in_name[inidx]|upper }}_IN_HEIGHT {{ in_height[inidx] }}
+#define {{ in_name[inidx]|upper }}_IN_WIDTH {{ in_width[inidx] }}
+{% endfor %}
+
+// OUTPUT CONF
+{% for outidx in range(nb_out) -%}
+#define {{ out_name[outidx]|upper }}_NB_OUTPUTS {{ out_chan[outidx] }}
+#define {{ out_name[outidx]|upper }}_OUT_HEIGHT {{ out_height[outidx] }}
+#define {{ out_name[outidx]|upper }}_OUT_WIDTH {{ out_width[outidx] }}
+{% endfor %}
diff --git a/aidge_export_cpp/templates/configuration/_meminfo.jinja b/aidge_export_cpp/templates/configuration/_meminfo.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..15d15425b4330f68b4a97c31e9cf7a1076cc93e8
--- /dev/null
+++ b/aidge_export_cpp/templates/configuration/_meminfo.jinja
@@ -0,0 +1,11 @@
+// MEMINFO CONF
+{% for outidx in range(nb_out) -%}
+#define {{ out_name[outidx]|upper }}_SIZE {{ mem_info_size[outidx]}}
+#define {{ out_name[outidx]|upper }}_OFFSET {{ mem_info_offset[outidx]}}
+#define {{ out_name[outidx]|upper }}_STRIDE {{ mem_info_stride[outidx]}}
+#define {{ out_name[outidx]|upper }}_LENGTH {{ mem_info_length[outidx]}}
+#define {{ out_name[outidx]|upper }}_CONT_SIZE {{ mem_info_cont_size[outidx]}}
+#define {{ out_name[outidx]|upper }}_CONT_OFFSET {{ mem_info_cont_offset[outidx]}}
+#define {{ out_name[outidx]|upper }}_WRAP_OFFSET {{ mem_info_wrap_offset[outidx]}}
+#define {{ out_name[outidx]|upper }}_WRAP_SIZE {{ mem_info_wrap_size[outidx]}}
+{% endfor %}
diff --git a/aidge_export_cpp/templates/configuration/activation_config.jinja b/aidge_export_cpp/templates/configuration/activation_config.jinja
index 15fb3391fa6314eb90bda7de216eda6b8a929e5d..1ab5b21e2915a95318b531d14964077005839ffe 100644
--- a/aidge_export_cpp/templates/configuration/activation_config.jinja
+++ b/aidge_export_cpp/templates/configuration/activation_config.jinja
@@ -1,10 +1,14 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+#include "kernels/rescaling.hpp"
 
 {# For layer configuration -#}
+{%- set nb_data = in_chan[0] * in_height[0] * in_width[0] %}
 #define {{ name|upper }}_NB_DATA {{ nb_data }}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
 static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_cpp/templates/configuration/batchnorm_config.jinja b/aidge_export_cpp/templates/configuration/batchnorm_config.jinja
index 3431ade3691f44f791a4747d94dbfc27b0d4943c..701ba7c46e4727eca86fcabf3ed997cab69f4e92 100644
--- a/aidge_export_cpp/templates/configuration/batchnorm_config.jinja
+++ b/aidge_export_cpp/templates/configuration/batchnorm_config.jinja
@@ -3,12 +3,8 @@
 #define {{ name|upper }}_LAYER_H
 
 {# For layer configuration -#}
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
 #define {{ name|upper }}_EPSILON {{ epsilon }}
 
diff --git a/aidge_export_cpp/templates/configuration/convolution_config.jinja b/aidge_export_cpp/templates/configuration/convolution_config.jinja
index 34e74c2f8f772dfdf3208bb275ec04ad6a1ce58f..a4a2462d1f475424d799f38e52075fafb333c0d0 100644
--- a/aidge_export_cpp/templates/configuration/convolution_config.jinja
+++ b/aidge_export_cpp/templates/configuration/convolution_config.jinja
@@ -1,29 +1,25 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
-
+#include "kernels/rescaling.hpp"
 {# For layer configuration -#}
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
 #define {{ name|upper }}_PADDING_Y {{ padding[1] }}
 #define {{ name|upper }}_PADDING_X {{ padding[0] }}
-#define {{ name|upper }}_STRIDE_Y {{ stride[1] }}
-#define {{ name|upper }}_STRIDE_X {{ stride[0] }}
-#define {{ name|upper }}_DILATION_Y {{ dilation[1] }}
-#define {{ name|upper }}_DILATION_X {{ dilation[0] }}
-#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel[1] }}
-#define {{ name|upper }}_KERNEL_WIDTH {{ kernel[0] }}
+#define {{ name|upper }}_STRIDE_Y {{ stride_dims[1] }}
+#define {{ name|upper }}_STRIDE_X {{ stride_dims[0] }}
+#define {{ name|upper }}_DILATION_Y {{ dilation_dims[1] }}
+#define {{ name|upper }}_DILATION_X {{ dilation_dims[0] }}
+#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel_dims[1] }}
+#define {{ name|upper }}_KERNEL_WIDTH {{ kernel_dims[0] }}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
 static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
 
 {#- Calculate sizes #}
-{%- set weights_size = output_dims[0] * input_dims[0] * kernel[1] * kernel[0] %}
+{%- set weights_size = out_chan[0] * in_chan[0] * kernel_dims[1] * kernel_dims[0] %}
 #define {{ name|upper }}_WEIGHTS_SIZE {{ weights_size }}
-#define {{ name|upper }}_BIASES_SIZE {{ output_dims[0] }}
+#define {{ name|upper }}_BIASES_SIZE {{ out_chan[0] }}
 
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_cpp/templates/configuration/elemwise_config.jinja b/aidge_export_cpp/templates/configuration/elemwise_config.jinja
index 8073de9501fd7d09deea25d799ec1be70b3a6963..91a0be4cc4b6fc15e8b979ecd3ca01f122ebc63d 100644
--- a/aidge_export_cpp/templates/configuration/elemwise_config.jinja
+++ b/aidge_export_cpp/templates/configuration/elemwise_config.jinja
@@ -1,9 +1,12 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
+#include "kernels/rescaling.hpp"
 
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
 {# For layer configuration -#}
-#define {{ name|upper }}_NB_ELTS {{ nb_elts }}
+#define {{ name|upper }}_NB_ELTS {{ in_dims[0]|join('*') }}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
 #define {{ name|upper }}_ELEM_OP {{ elemwise_op }}
 static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
diff --git a/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja b/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
index dc2e6ae2cfadc8714680b8f6d849adff7a4b31b9..3c803388894935b99d60d740c7abdb0cfc853482 100644
--- a/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
+++ b/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
@@ -1,21 +1,16 @@
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
-
+#include "kernels/rescaling.hpp"
 {# For layer configuration -#}
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
 static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
 
 {#- Calculate sizes #}
-{%- set weights_size = output_dims[0] * input_dims[0] * input_dims[1] * input_dims[2] %}
+{%- set weights_size = out_chan[0] * in_chan[0] * in_height[0] * in_width[0] %}
 #define {{ name|upper }}_WEIGHTS_SIZE {{ weights_size }}
-#define {{ name|upper }}_BIASES_SIZE {{ output_dims[0] }}
-
+#define {{ name|upper }}_BIASES_SIZE {{ out_chan[0] }}
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_cpp/templates/configuration/leakyrelu_config.jinja b/aidge_export_cpp/templates/configuration/leakyrelu_config.jinja
index b8e770115dfe6577a59cfae12bbcc87e7c82399e..80903622d394bac9132ae3015f82ef72ac2242ea 100644
--- a/aidge_export_cpp/templates/configuration/leakyrelu_config.jinja
+++ b/aidge_export_cpp/templates/configuration/leakyrelu_config.jinja
@@ -3,6 +3,8 @@
 #define {{ name|upper }}_LAYER_H
 
 {# For layer configuration -#}
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
 #define {{ name|upper }}_NB_DATA {{ nb_data }}
 #define {{ name|upper }}_ALPHA {{ alpha }}
 
diff --git a/aidge_export_cpp/templates/configuration/pooling_config.jinja b/aidge_export_cpp/templates/configuration/pooling_config.jinja
index 3f2ca701227e7c828003d7e218758a01fd03a274..afdb51f21f1cea5679d6dc6dfab57ba313ed11a5 100644
--- a/aidge_export_cpp/templates/configuration/pooling_config.jinja
+++ b/aidge_export_cpp/templates/configuration/pooling_config.jinja
@@ -3,18 +3,14 @@
 #define {{ name|upper }}_LAYER_H
 
 {# For layer configuration -#}
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
 #define {{ name|upper }}_PADDING_Y {{ padding[1] }}
 #define {{ name|upper }}_PADDING_X {{ padding[0] }}
-#define {{ name|upper }}_STRIDE_Y {{ stride[1] }}
-#define {{ name|upper }}_STRIDE_X {{ stride[0] }}
-#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel[1] }}
-#define {{ name|upper }}_KERNEL_WIDTH {{ kernel[0] }}
+#define {{ name|upper }}_STRIDE_Y {{ stride_dims[1] }}
+#define {{ name|upper }}_STRIDE_X {{ stride_dims[0] }}
+#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel_dims[1] }}
+#define {{ name|upper }}_KERNEL_WIDTH {{ kernel_dims[0] }}
 #define {{ name|upper }}_POOLING_TYPE {{ pool_type }}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
 
diff --git a/aidge_export_cpp/templates/kernel_forward/_mem_offset.jinja b/aidge_export_cpp/templates/kernel_forward/_mem_offset.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..b85aae8f6cde13a9314b2ffef231f5dfbe416883
--- /dev/null
+++ b/aidge_export_cpp/templates/kernel_forward/_mem_offset.jinja
@@ -0,0 +1,6 @@
+{% filter indent(width=4, first=False) %}
+
+{% for outidx in range(nb_out) -%}
+{{out_cdtype[outidx]}}* {{out_name[outidx]}} = ({{out_cdtype[outidx]}}*) mem + {{out_name[outidx]|upper}}_OFFSET;
+{% endfor %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/_save_outputs.jinja b/aidge_export_cpp/templates/kernel_forward/_save_outputs.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..ddefc0c7bfbb5fc5e9298091f755b0438496fe53
--- /dev/null
+++ b/aidge_export_cpp/templates/kernel_forward/_save_outputs.jinja
@@ -0,0 +1,20 @@
+/*
+#ifdef SAVE_OUTPUTS
+{% for outidx in range(nb_out) -%}
+    FILE* {{out_name[outidx]|upper}}_STREAM = fopen("outputs/{{out_name[outidx]}}.txt", "w");
+    saveOutputs<{{out_cdtype[outidx]}}>(
+        {{out_name[outidx]|upper}}_NB_OUTPUTS,
+        {{out_name[outidx]|upper}}_OUT_HEIGHT,
+        {{out_name[outidx]|upper}}_OUT_WIDTH,
+        {{out_name[outidx]|upper}}_CONT_OFFSET,
+        {{out_name[outidx]|upper}}_CONT_SIZE,
+        {{out_name[outidx]|upper}}_WRAP_OFFSET,
+        {{out_name[outidx]|upper}}_WRAP_SIZE,
+        {{out_name[outidx]|upper}}_STRIDE,
+        {{out_name[outidx]}},
+        {{out_name[outidx]|upper}}_STREAM,
+        Format::{{out_format[outidx]}});
+    fclose({{out_name[outidx]|upper}}_STREAM);
+{% endfor %}
+#endif
+*/
diff --git a/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja b/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja
index c11935ab5bf6bd44dd88d7735f9f0b4bb5e7404b..9a39495e268361a16ee5215ecb15c3b3b9bd9479 100644
--- a/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja
@@ -1,3 +1,7 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
 activation_forward<{{name|upper}}_NB_DATA,
                    {{name|upper}}_ACTIVATION>
-                   ({{input_name}}, {{output_name}}, {{name|upper}}_RESCALING);
\ No newline at end of file
+                   ({{in_name[0]}}, {{out_name[0]}}, {{name|upper}}_RESCALING);
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja b/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja
index 3568b297477a68dce38c1f3a86f086d90937d9c3..5a759b839cd0b04b3b82f8ca4cb8dd1b0201f4f7 100644
--- a/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja
@@ -1,5 +1,9 @@
-batchnorm_forward<{{name|upper}}_NB_OUTPUTS,
-                  {{name|upper}}_OUTPUTS_HEIGHT,
-                  {{name|upper}}_OUTPUTS_WIDTH,
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+batchnorm_forward<{{ out_name[0]|upper }}_NB_OUTPUTS,
+                  {{ out_name[0]|upper }}_OUT_HEIGHT,
+                  {{ out_name[0]|upper }}_OUT_WIDTH,
                   {{name|upper}}_ACTIVATION>
-                  ({{input_name}}, {{output_name}}, {{biases_name}}, {{variances_name}}, {{means_name}}, {{scales_name}}, {{name|upper}}_EPSILON);
\ No newline at end of file
+                  ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{in_name[3]}}, {{in_name[4]}}, {{name|upper}}_EPSILON);
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja b/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja
index fa253e6af0003cfece3fd8515ab105bfbe16d829..421013b9590dabe6ee0ac12f969494913414a530 100644
--- a/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja
@@ -1,9 +1,11 @@
-convolution_forward<{{name|upper}}_NB_CHANNELS,
-                    {{name|upper}}_CHANNELS_HEIGHT,
-                    {{name|upper}}_CHANNELS_WIDTH,
-                    {{name|upper}}_NB_OUTPUTS,
-                    {{name|upper}}_OUTPUTS_HEIGHT,
-                    {{name|upper}}_OUTPUTS_WIDTH,
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+convolution_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
+                    {{ in_name[0]|upper }}_IN_HEIGHT,
+                    {{ in_name[0]|upper }}_IN_WIDTH,
+                    {{ out_name[0]|upper }}_NB_OUTPUTS,
+                    {{ out_name[0]|upper }}_OUT_HEIGHT,
+                    {{ out_name[0]|upper }}_OUT_WIDTH,
                     {{name|upper}}_PADDING_Y,
                     {{name|upper}}_PADDING_X,
                     {{name|upper}}_STRIDE_Y,
@@ -13,4 +15,6 @@ convolution_forward<{{name|upper}}_NB_CHANNELS,
                     {{name|upper}}_KERNEL_HEIGHT,
                     {{name|upper}}_KERNEL_WIDTH,
                     {{name|upper}}_ACTIVATION>
-                    ({{input_name}}, {{output_name}}, {{weights_name}}, {{biases_name}}, {{name|upper}}_RESCALING);
+                    ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{name|upper}}_RESCALING);
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja b/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja
index 30dce76d865d99994d9132932b868a54b9d7178b..f60d163dcbfd6eff75e6b66c37bc5e57cf2cfca9 100644
--- a/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja
@@ -1,4 +1,8 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
 elemwise_forward<{{name|upper}}_NB_ELTS,
                  {{name|upper}}_ELEM_OP,
                  {{name|upper}}_ACTIVATION>
-                 ({{output_name}}, {{name|upper}}_RESCALING, {{inputs1_name}}, {{inputs2_name}});
+                 ({{out_name[0]}}, {{name|upper}}_RESCALING, {{in_name[0]}}, {{in_name[1]}});
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja b/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja
index 67832b9ec27b87b6ba617e47e3e2fde39bb8cf4c..cac97de22b20c4c8e0953e0d6cb2f40a18d0cb30 100644
--- a/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja
@@ -1,8 +1,12 @@
-fullyconnected_forward<{{name|upper}}_NB_CHANNELS,
-                       {{name|upper}}_CHANNELS_HEIGHT,
-                       {{name|upper}}_CHANNELS_WIDTH,
-                       {{name|upper}}_NB_OUTPUTS,
-                       {{name|upper}}_OUTPUTS_HEIGHT,
-                       {{name|upper}}_OUTPUTS_WIDTH,
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+fullyconnected_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
+                       {{ in_name[0]|upper }}_IN_HEIGHT,
+                       {{ in_name[0]|upper }}_IN_WIDTH,
+                       {{ out_name[0]|upper }}_NB_OUTPUTS,
+                       {{ out_name[0]|upper }}_OUT_HEIGHT,
+                       {{ out_name[0]|upper }}_OUT_WIDTH,
                        {{name|upper}}_ACTIVATION>
-                       ({{inputs_name}}, {{outputs_name}}, {{weights_name}}, {{biases_name}}, {{name|upper}}_RESCALING);
\ No newline at end of file
+                       ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{name|upper}}_RESCALING);
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja b/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja
index e72ea2178f26107aaa4f7571cbf93f7c2e2f94d2..591fafeec996f9b7dc8f52a779cda5eea8a53eae 100644
--- a/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja
@@ -1,2 +1,6 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
 leakyrelu_forward<{{name|upper}}_NB_DATA>
-                   ({{input_name}}, {{output_name}}, {{name|upper}}_ALPHA);
\ No newline at end of file
+                   ({{in_name[0]}}, {{out_name[0]}}, {{name|upper}}_ALPHA);
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja b/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja
index 20dbb0f85db27f0e82905e073de49d924595a74f..c730923cfc4f8b534cab85a82b4fce5161a528de 100644
--- a/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja
@@ -1,9 +1,11 @@
-pooling_forward<{{name|upper}}_NB_CHANNELS,
-                {{name|upper}}_CHANNELS_HEIGHT,
-                {{name|upper}}_CHANNELS_WIDTH,
-                {{name|upper}}_NB_OUTPUTS,
-                {{name|upper}}_OUTPUTS_HEIGHT,
-                {{name|upper}}_OUTPUTS_WIDTH,
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+pooling_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
+                {{ in_name[0]|upper }}_IN_HEIGHT,
+                {{ in_name[0]|upper }}_IN_WIDTH,
+                {{ out_name[0]|upper }}_NB_OUTPUTS,
+                {{ out_name[0]|upper }}_OUT_HEIGHT,
+                {{ out_name[0]|upper }}_OUT_WIDTH,
                 {{name|upper}}_PADDING_Y,
                 {{name|upper}}_PADDING_X,
                 {{name|upper}}_STRIDE_Y,
@@ -12,4 +14,6 @@ pooling_forward<{{name|upper}}_NB_CHANNELS,
                 {{name|upper}}_KERNEL_WIDTH,
                 {{name|upper}}_POOLING_TYPE,
                 {{name|upper}}_ACTIVATION>
-                ({{input_name}}, {{output_name}});
\ No newline at end of file
+                ({{in_name[0]}}, {{out_name[0]}});
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/memory/mem_info.jinja b/aidge_export_cpp/templates/memory/mem_info.jinja
deleted file mode 100644
index fe1c3c00f5c4cc78b56502de3e7d91208e22ff35..0000000000000000000000000000000000000000
--- a/aidge_export_cpp/templates/memory/mem_info.jinja
+++ /dev/null
@@ -1,16 +0,0 @@
-{#- For name header -#}
-#ifndef MEM_INFO_H
-#define MEM_INFO_H
-
-#define MEMORY_SIZE {{ mem_size }}
-
-{% for i in range(mem_info|length) -%}
-{%- set layer_name = mem_info[i][0] %}
-/* {{layer_name}} memory */
-{% for j in range(1, mem_info[i]|length) %}
-#define {{ layer_name|upper }}_{{ mem_info_legends[j]|upper }} {{ mem_info[i][j] }}
-{%- endfor %}
-{% endfor %}
-
-
-#endif /* MEM_INFO_H */
\ No newline at end of file
diff --git a/aidge_export_cpp/templates/network/dnn_header.jinja b/aidge_export_cpp/templates/network/dnn_header.jinja
deleted file mode 100644
index 31733485302cbbd7f7001d36cdfa4338cb2e8be8..0000000000000000000000000000000000000000
--- a/aidge_export_cpp/templates/network/dnn_header.jinja
+++ /dev/null
@@ -1,15 +0,0 @@
-{#- For name header -#}
-#ifndef DNN_HPP
-#define DNN_HPP
-
-{#- For libraries #}
-{% for lib in libraries %}
-#include <{{ lib }}>
-{%- endfor %}
-
-{% for func in functions %}
-{{ func }}
-{% endfor %}
-
-
-#endif /* DNN_HPP */
\ No newline at end of file
diff --git a/aidge_export_cpp/templates/network/environment.jinja b/aidge_export_cpp/templates/network/environment.jinja
deleted file mode 100644
index ced5991779f79efd0a6a981d050937aac63f8257..0000000000000000000000000000000000000000
--- a/aidge_export_cpp/templates/network/environment.jinja
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef ENV_LAYER_H
-#define ENV_LAYER_H
-
-#include <stdint.h>
-
-#define ENV_SIZE_X {{ size_x }}
-#define ENV_SIZE_Y {{ size_y }}
-#define ENV_NB_OUTPUTS {{ nb_outputs }}
-
-#define ENV_DATA_UNSIGNED {{ is_unsigned }}
-
-#define ENV_OUTPUTS_SIZE (ENV_NB_OUTPUTS*ENV_SIZE_X*ENV_SIZE_Y)
-
-#define NETWORK_TARGETS 1
-//Output targets network dimension definition:
-static unsigned int OUTPUTS_HEIGHT[NETWORK_TARGETS] = {1};
-static unsigned int OUTPUTS_WIDTH[NETWORK_TARGETS] = {1};
-static unsigned int NB_OUTPUTS[NETWORK_TARGETS] = {1000};
-static unsigned int NB_TARGET[NETWORK_TARGETS] = {1000};
-static unsigned int OUTPUTS_SIZE[NETWORK_TARGETS] = {(OUTPUTS_WIDTH[0]*OUTPUTS_HEIGHT[0])};
-typedef int32_t Target_0_T;
-typedef Target_0_T Target_T;
-
-
-#endif // ENV_LAYER_H
diff --git a/aidge_export_cpp/templates/network/network_forward.jinja b/aidge_export_cpp/templates/network/network_forward.jinja
deleted file mode 100644
index e7bde0a1285bcc12931f73d97fb216362cdca6a8..0000000000000000000000000000000000000000
--- a/aidge_export_cpp/templates/network/network_forward.jinja
+++ /dev/null
@@ -1,23 +0,0 @@
-{#- For libraries #}
-
-#include <stdint.h>
-
-#include "network/rescaling.hpp"
-
-// Layer & memory configurations
-{%- for header in headers %}
-#include "{{ header }}"
-{%- endfor %}
-
-{# mem has the datatype of the firt input #}
-{#- Change here to improve it -#}
-static {{mem_ctype}} mem[MEMORY_SIZE];
-
-{# Forward function #}
-{#- Support multiple inputs with different datatypes and multiple outputs with different datatypes -#}
-void model_forward({% for inp in inputs %}const {{inp[0]}}* {{inp[1]}}, {% endfor %}{% for out in outputs %}{{out[0]}}* {{out[1]}}{{ ", " if not loop.last else "" }}{% endfor %})
-{
-    {%- for action in actions %}
-    {{ action }}
-    {%- endfor %}
-}
\ No newline at end of file
diff --git a/aidge_export_cpp/utils/converter.py b/aidge_export_cpp/utils/converter.py
index 0fe7a675a9a00aad6d5a9447097e744b6493bc5c..d4af124280e2c89ec44123c90ee509347003f960 100644
--- a/aidge_export_cpp/utils/converter.py
+++ b/aidge_export_cpp/utils/converter.py
@@ -1,5 +1,4 @@
 import numpy as np
-import aidge_core
 
 def numpy_dtype2ctype(dtype):
     if dtype == np.int8:
@@ -17,19 +16,3 @@ def numpy_dtype2ctype(dtype):
     # Add more dtype mappings as needed
     else:
         raise ValueError(f"Unsupported {dtype} dtype")
-
-
-def aidge_datatype2ctype(datatype):
-    if datatype == aidge_core.dtype.int8:
-        return "int8_t"
-    elif datatype == aidge_core.dtype.int32:
-        return "int32_t"
-    elif datatype == aidge_core.dtype.int64:
-        return "int64_t"
-    elif datatype == aidge_core.dtype.float32:
-        return "float"
-    elif datatype == aidge_core.dtype.float64:
-        return "double"
-    # Add more dtype mappings as needed
-    else:
-        raise ValueError(f"Unsupported {datatype} aidge datatype")
\ No newline at end of file
diff --git a/version.txt b/version.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5faa42c8a89ea0f5ab797259dce62bb190eb28c6
--- /dev/null
+++ b/version.txt
@@ -0,0 +1,2 @@
+0.2.0
+