diff --git a/aidge_export_cpp/export.py b/aidge_export_cpp/export.py
index 5b7198d7bf712a01cd05b2c6351e60d07e90dc72..d9dc2969d712deeeda977dbfa7309fde00aefc84 100644
--- a/aidge_export_cpp/export.py
+++ b/aidge_export_cpp/export.py
@@ -35,7 +35,7 @@ def generate_input_file(export_folder:str,
     )
 
 
-def export(export_folder_name, graphview, scheduler):
+def export(export_folder_name, graphview, scheduler, mem_wrapping=False):
 
     export_folder = Path().absolute() / export_folder_name
 
@@ -62,6 +62,9 @@ def export(export_folder_name, graphview, scheduler):
             raise RuntimeError(f"Operator not supported: {node.type()} !")
 
     # Memory management
+    # stats_folder = export_folder / "statistics"
+    # os.makedirs(str(stats_folder), exist_ok=True)
+    # mem_size, mem_info = generate_optimized_memory_info(stats_folder, scheduler, mem_wrapping)
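+    # To switch to the optimized, wrapping-aware layout, uncomment the three lines
+    # above and drop the default call below (generate_optimized_memory_info is
+    # defined in aidge_export_cpp/memory.py).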
     mem_size, mem_info = compute_default_mem_info(scheduler)
 
     # Generate the memory file
@@ -94,7 +97,7 @@ def export(export_folder_name, graphview, scheduler):
     for node in graphview.get_nodes():
         if len(node.get_children()) == 0:
             export_type = aidge2c(node.get_operator().get_output(0).dtype())
-            list_outputs_name.append((export_type, node.name()))
+            list_outputs_name.append((export_type, f"{node.name()}_output_0"))
 
     # Generate forward file
     generate_file(
diff --git a/aidge_export_cpp/memory.py b/aidge_export_cpp/memory.py
index 780edcd7cd56852a89425d528bd37d79c9aa95a0..249c36a1ccf6820432c0b841a1abf672234c622e 100644
--- a/aidge_export_cpp/memory.py
+++ b/aidge_export_cpp/memory.py
@@ -1,8 +1,25 @@
+import os
+import shutil
+from typing import List
+from pathlib import Path
 import aidge_core
 import aidge_backend_cpu
-from typing import List
 
-# for each layer, name: [size, offset start]
+# for each layer,
+# name              [size, stride, length, count, contiguous offset, contiguous size, wrapping offset, wrapping size]
+# true values       [nb_outputs, nb_outputs, width, height, offset start, total size, 0, 0]
+# Example:
+#define ENV_MEM_SIZE 3
+#define ENV_MEM_STRIDE 3
+#define ENV_MEM_LENGTH 224
+#define ENV_MEM_COUNT 224
+#define ENV_MEM_CONT_OFFSET 0
+#define ENV_MEM_CONT_SIZE 150528
+#define ENV_MEM_WRAP_OFFSET 0
+#define ENV_MEM_WRAP_SIZE 0
+# MEMORY_INFO_TEMPLATE = ["layer_name", "size", "offset", "stride", "length", "count", "cont_offset", "cont_size", "wrap_offset", "wrap_size"]
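+# e.g. a row ["env", 3, 0, 3, 224, 224, 0, 150528, 0, 0] following this template
+# would yield the ENV_MEM_* defines above (illustrative values only)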
+
+# for each layer, name: [size, offset start] (old style)
 # Example:
 #define ENV_MEM_SIZE 3
 #define ENV_OFFSET 0
@@ -11,7 +28,7 @@ MEMORY_INFO_TEMPLATE = ["layer_name", "size", "offset"]
 
 # Default memory management, which can be used for development
 def compute_default_mem_info(scheduler: aidge_core.Scheduler):
-    
+
     list_forward_nodes = scheduler.get_static_scheduling()
     mem_info = []
     mem_size = 0
@@ -27,23 +44,83 @@ def compute_default_mem_info(scheduler: aidge_core.Scheduler):
 
                # Add memory info
                 mem_info.append([node.name(), mem, mem_size])
-                
+
                 # Increment offset for the next layer
                 mem_size += mem
 
     return mem_size, mem_info
 
 
-def generate_optimized_memory_info(scheduler: aidge_core.Scheduler,
+def generate_optimized_memory_info(stats_folder: Path,
+                                   scheduler: aidge_core.Scheduler,
                                    wrapping:bool = False):
-    
-    # The forward dims has to done outside the function
 
+    # The forward dims have to be computed outside this function.
+    # It is also assumed that the scheduling has already been generated;
+    # otherwise, uncomment the following line:
+    # scheduler.generate_scheduling()
     # Generate the memory manager
+    # So far, Producers are not taken into account by the memory manager => inc_producers=False
     mem_manager = scheduler.generate_memory(inc_producers=False, wrap_around_buffer=wrapping)
 
-    mem_size = 0
+    # List of nodes receiving the graph inputs (None if an input is not connected)
+    nodes_at_input = [n[0] for n in scheduler.graph_view().inputs()]
+    # Use gnuplot to generate the log
+    try:
+        os.makedirs(str(stats_folder / "graph"), exist_ok=True)
+        mem_manager.log("memory_info")
+        os.chmod("memory_info_plot.gnu", 0o777)
+        os.system("./memory_info_plot.gnu")
+        shutil.move("memory_info", str(stats_folder / "graph"/ "memory_info"))
+        shutil.move("memory_info_plot.png", str(stats_folder / "graph" / "memory_info_plot.png"))
+        os.remove("memory_info_plot.gnu")
+    except Exception:
+        print("Please install gnuplot if you want the memory plot from the MemoryManager.")
+
+    # In the export, we currently use a unified memory buffer whose size
+    # is determined by the peak memory usage
+    mem_size = mem_manager.get_peak_usage()
     mem_info = []
 
+    mem_planes = mem_manager.get_planes()
+
+    for node in scheduler.get_static_scheduling():
+        if node.type() == "Producer":
+            continue # Skipping memory management for producers
+        if node in nodes_at_input:
+            # Input memory management (assumes tensor dims end with [:, channel, height, width])
+            tensor = node.get_operator().get_output(0)
+            if tensor is None:
+                raise RuntimeError("Input producer not provided")
+            if len(tensor.dims()) < 3:
+                raise RuntimeError(f"Input producer dimensions must match [:, channel, height, width] but got {tensor.dims()} instead")
 
-    return mem_size, mem_info
\ No newline at end of file
+            name = node.name()
+            offset = 0                  # Assume input data is stored outside the export,
+                                        # so the memory offset is irrelevant here
+            size = tensor.dims()[-3]    # Should be nb_channels
+            stride = tensor.dims()[-3]  # Should be nb_channels
+            length = tensor.dims()[-1]  # Should be width
+            count = tensor.dims()[-2]   # Should be height
+            cont_offset = 0             # Assume input data is stored outside the export,
+                                        # so the memory offset is irrelevant here
+            cont_size = tensor.dims()[-1] * tensor.dims()[-2] * tensor.dims()[-3] # Size of input
+            wrap_offset = 0     # No wrapping
+            wrap_size = 0       # No wrapping
+        else:
+            plane = mem_planes[node][0]
+            name = node.name()
+            offset = plane.offset
+            size = plane.size
+            stride = plane.stride
+            length = plane.length
+            count = plane.count
+            cont_offset = plane.get_contiguous_offset()
+            cont_size = plane.get_contiguous_size()
+            wrap_offset = plane.get_wrapped_offset()
+            wrap_size = plane.get_wrapped_size()
+
+        mem_info.append([name, size, offset, stride, length, count,
+                        cont_offset, cont_size, wrap_offset, wrap_size])
+
+    return mem_size, mem_info
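+
+# A minimal usage sketch (assuming dims have been forwarded and the scheduling
+# generated upstream), mirroring the commented-out call in export.py:
+#   stats_folder = export_folder / "statistics"
+#   os.makedirs(str(stats_folder), exist_ok=True)
+#   mem_size, mem_info = generate_optimized_memory_info(stats_folder, scheduler, mem_wrapping)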
diff --git a/aidge_export_cpp/operators.py b/aidge_export_cpp/operators.py
index 4f9833c4670d7ed796c9fd1cc70c321d2c2c1c72..288973971121b0122c1b314fb9f244afdb81a1cc 100644
--- a/aidge_export_cpp/operators.py
+++ b/aidge_export_cpp/operators.py
@@ -53,14 +53,6 @@ def export_params(name:str,
     )
 
 
-##############################################
-################### Actions ##################
-##############################################
-
-def set_up_output(name, datatype):
-    return f"{datatype}* {name} = ({datatype}*) mem + {name.upper()}_OFFSET;"
-
-
 ##############################################
 ############## Operators helper ##############
 ##############################################
@@ -71,7 +63,6 @@ class ProducerCPP(ExportNode):
 
     def __init__(self, node):
         super().__init__(node)
-        self.constant = self.operator.attr.constant
         self.values = np.array(self.operator.get_output(0))
 
         if len(self.values.shape) == 4: # Note: export in HWC
@@ -79,11 +70,11 @@ class ProducerCPP(ExportNode):
 
     def export(self, export_folder:Path, list_configs:list):
 
-        list_configs.append(f"parameters/{self.name}.h")
+        list_configs.append(f"parameters/{self.attributes['name']}.h")
         export_params(
-            self.name,
+            self.attributes['out_name'][0],
             self.values.reshape(-1),
-            str(export_folder / "parameters" / f"{self.name}.h"))
+            str(export_folder / "parameters" / f"{self.attributes['name']}.h"))
 
         return list_configs
 
@@ -97,9 +88,6 @@ class ReLUCPP(ExportNode):
     def __init__(self, node):
         super().__init__(node)
 
-        self.nb_data = 1
-        for i in self.inputs_dims[0]:
-            self.nb_data *= i
 
     def export(self, export_folder:Path, list_configs:list):
 
@@ -107,27 +95,22 @@ class ReLUCPP(ExportNode):
                  str(export_folder / "include" / "kernels"))
 
         list_configs.append("kernels/activation.hpp")
-        list_configs.append(f"layers/{self.name}.h")
+        list_configs.append(f"layers/{self.attributes['name']}.h")
+
         generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
+            str(export_folder / "layers" / f"{self.attributes['name']}.h"),
             str(ROOT / "templates" / "configuration" / "activation_config.jinja"),
-            name=self.name,
-            nb_data=self.nb_data,
             activation="Rectifier",
-            rescaling="NoScaling")
+            rescaling="NoScaling",
+            **self.attributes
+            )
 
         return list_configs
 
     def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "activation_forward.jinja"),
-            name=self.name,
-            input_name=f"{self.name}_input" if self.inputs[0] is None else self.inputs[0].name(),
-            output_name=self.name
+            **self.attributes
         ))
         return list_actions
 
@@ -137,26 +120,17 @@ class ConvCPP(ExportNode):
     def __init__(self, node):
         super().__init__(node)
 
-        self.kernel = node.get_operator().attr.kernel_dims
-        self.stride = node.get_operator().attr.stride_dims
-        self.dilation = node.get_operator().attr.dilation_dims
+        # self.kernel = node.get_operator().attr.kernel_dims
+        # self.stride = node.get_operator().attr.stride_dims
+        # self.dilation = node.get_operator().attr.dilation_dims
 
         # No padding with Conv
         # Use PaddedConv to add padding attribute
-        self.padding = [0, 0]
+        self.attributes["padding"] = [0, 0]
 
-        self.nb_channels = node.get_operator().in_channels()
-        self.nb_outputs = node.get_operator().out_channels()
+        # self.nb_channels = node.get_operator().in_channels()
+        # self.nb_outputs = node.get_operator().out_channels()
 
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
 
     def export(self, export_folder:Path, list_configs:list):
 
@@ -168,33 +142,21 @@ class ConvCPP(ExportNode):
                  str(export_folder / "include" / "kernels"))
 
         list_configs.append("kernels/convolution.hpp")
-        list_configs.append(f"layers/{self.name}.h")
+        list_configs.append(f"layers/{self.attributes['name']}.h")
         generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
+            str(export_folder / "layers" / f"{self.attributes['name']}.h"),
             str(ROOT / "templates" / "configuration" / "convolution_config.jinja"),
-            name=self.name,
-            input_dims=self.inputs_dims[0],
-            output_dims=self.outputs_dims[0],
-            kernel=self.kernel,
-            stride=self.stride,
-            padding=self.padding,
-            dilation=self.dilation,
             activation="Linear",
-            rescaling="NoScaling")
+            rescaling="NoScaling",
+            **self.attributes,
+            )
 
         return list_configs
 
     def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja"),
-            name=self.name,
-            input_name=f"{self.name}_input_0" if self.inputs[0] is None else self.inputs[0].name(),
-            output_name=self.name,
-            weights_name=f"{self.name}_input_1" if self.inputs[1] is None else self.inputs[1].name(),
-            biases_name=f"{self.name}_input_2" if self.inputs[2] is None else self.inputs[2].name()
+            **self.attributes
         ))
         return list_actions
 
@@ -204,23 +166,14 @@ class PaddedConvCPP(ConvCPP):
     def __init__(self, node):
         ExportNode.__init__(self, node)
 
+        # TODO: find a way to retrieve attrs for meta ops
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad":
-                self.padding = n.get_operator().attr.begin_end_borders
+                self.attributes["padding"] = n.get_operator().attr.begin_end_borders
             if n.type() == "Conv":
-                self.kernel = n.get_operator().attr.kernel_dims
-                self.stride = n.get_operator().attr.stride_dims
-                self.dilation = n.get_operator().attr.dilation_dims
-
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
+                self.attributes["kernel_dims"]   = n.get_operator().attr.kernel_dims
+                self.attributes["stride_dims"]   = n.get_operator().attr.stride_dims
+                self.attributes["dilation_dims"] = n.get_operator().attr.dilation_dims
 
 @operator_register("Add")
 class AddCPP(ExportNode):
@@ -228,32 +181,26 @@ class AddCPP(ExportNode):
         super().__init__(node)
 
     def export(self, export_folder:str, list_configs:list):
-        list_configs.append(f"layers/{self.name}.h")
+        list_configs.append(f"layers/{self.attributes['name']}.h")
         list_configs.append("kernels/elemwise.hpp")
 
         copyfile(str(ROOT / "kernels" / "elemwise.hpp"),
                  str(export_folder / "include" / "kernels"))
 
         generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
+            str(export_folder / "layers" / f"{self.attributes['name']}.h"),
             str(ROOT / "templates" / "configuration" / "elemwise_config.jinja"),
-            name=self.name,
-            nb_elts=np.prod(self.inputs_dims[0]),
             activation="Linear",
             elemwise_op="Add",
-            rescaling="NoScaling")
+            rescaling="NoScaling",
+            **self.attributes)
 
         return list_configs
 
     def forward(self, list_actions:list):
-
-        list_actions.append(set_up_output(self.name, "float"))
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja"),
-            name=self.name,
-            inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
-            inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
-            output_name=self.name
+            **self.attributes
         ))
         return list_actions
 
@@ -263,30 +210,24 @@ class SubCPP(ExportNode):
         super().__init__(node)
 
     def export(self, export_folder:str, list_configs:list):
-        list_configs.append(f"layers/{self.name}.h")
+        list_configs.append(f"layers/{self.attributes['name']}.h")
         list_configs.append("kernels/elemwise.hpp")
         copyfile(str(ROOT / "kernels" / "elemwise.hpp"),
                  str(export_folder / "include" / "kernels"))
         generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
+            str(export_folder / "layers" / f"{self.attributes['name']}.h"),
             str(ROOT / "templates" / "configuration" / "elemwise_config.jinja"),
-            name=self.name,
-            nb_elts=np.prod(self.inputs_dims[0]),
             activation="Linear",
             elemwise_op="Sub",
-            rescaling="NoScaling")
+            rescaling="NoScaling",
+            **self.attributes)
 
         return list_configs
 
     def forward(self, list_actions:list):
-
-        list_actions.append(set_up_output(self.name, "float"))
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja"),
-            name=self.name,
-            inputs1_name=f"{self.name}_input_0" if self.inputs[0] is None else self.inputs[0].name(),
-            inputs2_name=f"{self.name}_input_1" if self.inputs[1] is None else self.inputs[1].name(),
-            output_name=self.name
+            **self.attributes
         ))
         return list_actions
 
@@ -297,20 +238,10 @@ class PaddedMaxPoolCPP(ExportNode):
         super().__init__(node)
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad":
-                self.padding = n.get_operator().attr.begin_end_borders
+                self.attributes["padding"] = n.get_operator().attr.begin_end_borders
             if n.type() == "MaxPooling":
-                self.kernel = n.get_operator().attr.kernel_dims
-                self.stride = n.get_operator().attr.stride_dims
-
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
+                self.attributes["kernel_dims"] = n.get_operator().attr.kernel_dims
+                self.attributes["stride_dims"] = n.get_operator().attr.stride_dims
 
     def export(self, export_folder:Path, list_configs:list):
 
@@ -318,32 +249,21 @@ class PaddedMaxPoolCPP(ExportNode):
                  str(export_folder / "include" / "kernels"))
 
         list_configs.append("kernels/pooling.hpp")
-        list_configs.append(f"layers/{self.name}.h")
+        list_configs.append(f"layers/{self.attributes['name']}.h")
 
         generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
+            str(export_folder / "layers" / f"{self.attributes['name']}.h"),
             str(ROOT / "templates" / "configuration" / "pooling_config.jinja"),
-            name=self.name,
-            input_dims=self.inputs_dims[0],
-            output_dims=self.outputs_dims[0],
-            kernel=self.kernel,
-            stride=self.stride,
-            padding=self.padding,
             pool_type="Max",
-            activation="Linear")
+            activation="Linear",
+            **self.attributes)
 
         return list_configs
 
     def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja"),
-            name=self.name,
-            input_name=f"{self.name}_input_0" if self.inputs[0] is None else self.inputs[0].name(),
-            output_name=self.name
+            **self.attributes
         ))
         return list_actions
 
@@ -352,22 +272,9 @@ class MaxPoolCPP(ExportNode):
     def __init__(self, node):
         super().__init__(node)
 
-        self.kernel = node.get_operator().attr.kernel_dims
-        self.stride = node.get_operator().attr.stride_dims
-
         # No padding with MaxPooling
         # Use PaddedMaxPooling to add padding attribute
-        self.padding = [0, 0]
-
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
+        self.attributes["padding"] = [0, 0]
 
     def export(self, export_folder:Path, list_configs:list):
 
@@ -375,32 +282,21 @@ class MaxPoolCPP(ExportNode):
                  str(export_folder / "include" / "kernels"))
 
         list_configs.append("kernels/pooling.hpp")
-        list_configs.append(f"layers/{self.name}.h")
+        list_configs.append(f"layers/{self.attributes['name']}.h")
 
         generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
+            str(export_folder / "layers" / f"{self.attributes['name']}.h"),
             str(ROOT / "templates" / "configuration" / "pooling_config.jinja"),
-            name=self.name,
-            input_dims=self.inputs_dims[0],
-            output_dims=self.outputs_dims[0],
-            kernel=self.kernel,
-            stride=self.stride,
-            padding=self.padding,
             pool_type="Max",
-            activation="Linear")
+            activation="Linear",
+            **self.attributes)
 
         return list_configs
 
     def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja"),
-            name=self.name,
-            input_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
-            output_name=self.name
+            **self.attributes
         ))
         return list_actions
 
@@ -409,25 +305,14 @@ class GlobalAveragePoolCPP(ExportNode):
     def __init__(self, node):
         super().__init__(node)
 
-        self.stride = [1, 1]
-
+        self.attributes["stride_dims"] = [1, 1]
         # No padding with MaxPooling
         # Use PaddedMaxPooling to add padding attribute
-        self.padding = [0, 0]
-
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-            self.kernel = self.inputs_dims[0][1:]
-        else:
-            raise RuntimeError("Input dims != 4 not supported.")
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
-        elif len(self.outputs_dims[0]) == 2:
-            self.outputs_dims[0] = [self.outputs_dims[0][1], 1, 1]
+        self.attributes["padding"] = [0, 0]
+        self.attributes["kernel_dims"] = [
+            self.attributes["in_height"][0],
+            self.attributes["in_width"][0],
+        ]
 
     def export(self, export_folder:Path, list_configs:list):
 
@@ -435,32 +320,21 @@ class GlobalAveragePoolCPP(ExportNode):
                  str(export_folder / "include" / "kernels"))
 
         list_configs.append("kernels/pooling.hpp")
-        list_configs.append(f"layers/{self.name}.h")
+        list_configs.append(f"layers/{self.attributes['name']}.h")
 
         generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
+            str(export_folder / "layers" / f"{self.attributes['name']}.h"),
             str(ROOT / "templates" / "configuration" / "pooling_config.jinja"),
-            name=self.name,
-            input_dims=self.inputs_dims[0],
-            output_dims=self.outputs_dims[0],
-            kernel=self.kernel,
-            stride=self.stride,
-            padding=self.padding,
             pool_type="Average",
-            activation="Linear")
+            activation="Linear",
+            **self.attributes)
 
         return list_configs
 
     def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja"),
-            name=self.name,
-            input_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
-            output_name=self.name
+            **self.attributes
         ))
         return list_actions
 
@@ -469,22 +343,13 @@ class GlobalAveragePoolCPP(ExportNode):
 class FcCPP(ExportNode):
     def __init__(self, node):
         super().__init__(node)
 
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-        elif len(self.inputs_dims[0]) == 2:
-            # if dims == [batch, nb_channels]
-            # transform to [nb_channels, 1, 1]
-            self.inputs_dims[0] = [self.inputs_dims[0][1], 1, 1]
-
-        if len(self.outputs_dims[0]) == 2:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = [self.outputs_dims[0][1], 1, 1]
-
 
     def export(self, export_folder:Path, list_configs:list):
 
         copyfile(str(ROOT / "kernels" / "fullyconnected.hpp"),
@@ -496,30 +361,20 @@ class FcCPP(ExportNode):
 
         # Add to config list the include of configurations
         list_configs.append("kernels/fullyconnected.hpp")
-        list_configs.append(f"layers/{self.name}.h")
+        list_configs.append(f"layers/{self.attributes['name']}.h")
 
         # Export configuration file
         generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
+            str(export_folder / "layers" / f"{self.attributes['name']}.h"),
             str(ROOT / "templates" / "configuration" / "fullyconnected_config.jinja"),
-            name=self.name,
-            input_dims=self.inputs_dims[0],
-            output_dims=self.outputs_dims[0],
-            activation="Linear",
-            rescaling="NoScaling")
+            activation="Linear",
+            rescaling="NoScaling",
+            **self.attributes)
 
         return list_configs
 
     def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "fullyconnected_forward.jinja"),
-            name=self.name,
-            inputs_name=f"{self.name}_input" if self.inputs[0] is None else self.inputs[0].name(),
-            weights_name=self.inputs[1].name(),
-            biases_name=self.inputs[2].name(), # TODO we should check if bias
-            outputs_name=self.name
+            **self.attributes
         ))
         return list_actions
 
diff --git a/aidge_export_cpp/static/include/network/utils.hpp b/aidge_export_cpp/static/include/network/utils.hpp
index 8942d3abd80eb362ca972f0c0710dc80df501add..11c8e060eb5b7607e1d90be62114780828176d3d 100644
--- a/aidge_export_cpp/static/include/network/utils.hpp
+++ b/aidge_export_cpp/static/include/network/utils.hpp
@@ -7,10 +7,10 @@
  * @param[in]  lo  Saturating lower bound
  * @param[in]  hi  Saturating higher bound
  * @returns         Value clamped between lo and hi
- * 
+ *
  */
 __attribute__((always_inline)) static inline
-int clamp (int v, int lo, int hi) 
+int clamp (int v, int lo, int hi)
 {
     if(v < lo) {
         return lo;
@@ -27,7 +27,7 @@ int clamp (int v, int lo, int hi)
  * @brief   Maximum of two integer values
  */
 __attribute__((always_inline)) static inline
-int max (int lhs, int rhs) 
+int max (int lhs, int rhs)
 {
     return (lhs >= rhs) ? lhs : rhs;
 }
@@ -36,7 +36,7 @@ int max (int lhs, int rhs)
  * @brief   Minimum of two integer values
  */
 __attribute__((always_inline)) static inline
-int min (int lhs, int rhs) 
+int min (int lhs, int rhs)
 {
     return (lhs <= rhs) ? lhs : rhs;
 }
diff --git a/aidge_export_cpp/templates/configuration/_def_io.jinja b/aidge_export_cpp/templates/configuration/_def_io.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..9770465a0540a77052acc8ee1e1a618bc3e9491b
--- /dev/null
+++ b/aidge_export_cpp/templates/configuration/_def_io.jinja
@@ -0,0 +1,8 @@
+{# NOTE: Suppose input is first #}
+#define {{ name|upper }}_NB_CHANNELS {{ in_chan[0] }}
+#define {{ name|upper }}_CHANNELS_HEIGHT {{ in_height[0] }}
+#define {{ name|upper }}_CHANNELS_WIDTH {{ in_width[0] }}
+#define {{ name|upper }}_NB_OUTPUTS {{ out_chan[0] }}
+#define {{ name|upper }}_OUTPUTS_HEIGHT {{ out_height[0] }}
+#define {{ name|upper }}_OUTPUTS_WIDTH {{ out_width[0] }}
+
diff --git a/aidge_export_cpp/templates/configuration/activation_config.jinja b/aidge_export_cpp/templates/configuration/activation_config.jinja
index 15fb3391fa6314eb90bda7de216eda6b8a929e5d..0c7968150262df76416398afdf074fd1fd8fdb8c 100644
--- a/aidge_export_cpp/templates/configuration/activation_config.jinja
+++ b/aidge_export_cpp/templates/configuration/activation_config.jinja
@@ -3,6 +3,7 @@
 #define {{ name|upper }}_LAYER_H
 
 {# For layer configuration -#}
+{%- set nb_data = in_chan[0] * in_height[0] * in_width[0] %}
 #define {{ name|upper }}_NB_DATA {{ nb_data }}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
 static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
diff --git a/aidge_export_cpp/templates/configuration/batchnorm_config.jinja b/aidge_export_cpp/templates/configuration/batchnorm_config.jinja
index 3431ade3691f44f791a4747d94dbfc27b0d4943c..8b8d7207462fe05b7b89d268ce661f3e2a11d7c8 100644
--- a/aidge_export_cpp/templates/configuration/batchnorm_config.jinja
+++ b/aidge_export_cpp/templates/configuration/batchnorm_config.jinja
@@ -3,12 +3,7 @@
 #define {{ name|upper }}_LAYER_H
 
 {# For layer configuration -#}
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
+{% include "./_def_io.jinja" %}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
 #define {{ name|upper }}_EPSILON {{ epsilon }}
 
diff --git a/aidge_export_cpp/templates/configuration/convolution_config.jinja b/aidge_export_cpp/templates/configuration/convolution_config.jinja
index 34e74c2f8f772dfdf3208bb275ec04ad6a1ce58f..efd4fa2d5f94a6e9bc4ead20294c0075d99de9fb 100644
--- a/aidge_export_cpp/templates/configuration/convolution_config.jinja
+++ b/aidge_export_cpp/templates/configuration/convolution_config.jinja
@@ -3,27 +3,22 @@
 #define {{ name|upper }}_LAYER_H
 
 {# For layer configuration -#}
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
+{% include "./_def_io.jinja" %}
 #define {{ name|upper }}_PADDING_Y {{ padding[1] }}
 #define {{ name|upper }}_PADDING_X {{ padding[0] }}
-#define {{ name|upper }}_STRIDE_Y {{ stride[1] }}
-#define {{ name|upper }}_STRIDE_X {{ stride[0] }}
-#define {{ name|upper }}_DILATION_Y {{ dilation[1] }}
-#define {{ name|upper }}_DILATION_X {{ dilation[0] }}
-#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel[1] }}
-#define {{ name|upper }}_KERNEL_WIDTH {{ kernel[0] }}
+#define {{ name|upper }}_STRIDE_Y {{ stride_dims[1] }}
+#define {{ name|upper }}_STRIDE_X {{ stride_dims[0] }}
+#define {{ name|upper }}_DILATION_Y {{ dilation_dims[1] }}
+#define {{ name|upper }}_DILATION_X {{ dilation_dims[0] }}
+#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel_dims[1] }}
+#define {{ name|upper }}_KERNEL_WIDTH {{ kernel_dims[0] }}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
 static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
 
 {#- Calculate sizes #}
-{%- set weights_size = output_dims[0] * input_dims[0] * kernel[1] * kernel[0] %}
+{%- set weights_size = out_chan[0] * in_chan[0] * kernel_dims[1] * kernel_dims[0] %}
 #define {{ name|upper }}_WEIGHTS_SIZE {{ weights_size }}
-#define {{ name|upper }}_BIASES_SIZE {{ output_dims[0] }}
+#define {{ name|upper }}_BIASES_SIZE {{ out_chan[0] }}
 
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_cpp/templates/configuration/elemwise_config.jinja b/aidge_export_cpp/templates/configuration/elemwise_config.jinja
index 8073de9501fd7d09deea25d799ec1be70b3a6963..315667efa9582193ad97807cd740b1385d0d3b61 100644
--- a/aidge_export_cpp/templates/configuration/elemwise_config.jinja
+++ b/aidge_export_cpp/templates/configuration/elemwise_config.jinja
@@ -3,7 +3,7 @@
 #define {{ name|upper }}_LAYER_H
 
 {# For layer configuration -#}
-#define {{ name|upper }}_NB_ELTS {{ nb_elts }}
+#define {{ name|upper }}_NB_ELTS {{ in_dims[0]|join('*') }}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
 #define {{ name|upper }}_ELEM_OP {{ elemwise_op }}
 static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
diff --git a/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja b/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
index dc2e6ae2cfadc8714680b8f6d849adff7a4b31b9..848f832ceabf64d1f55242ac98a492512f11df16 100644
--- a/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
+++ b/aidge_export_cpp/templates/configuration/fullyconnected_config.jinja
@@ -3,19 +3,13 @@
 #define {{ name|upper }}_LAYER_H
 
 {# For layer configuration -#}
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
+{% include "./_def_io.jinja" %}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
 static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
 
 {#- Calculate sizes #}
-{%- set weights_size = output_dims[0] * input_dims[0] * input_dims[1] * input_dims[2] %}
+{%- set weights_size = out_chan[0] * in_chan[0] * in_height[0] * in_width[0] %}
 #define {{ name|upper }}_WEIGHTS_SIZE {{ weights_size }}
-#define {{ name|upper }}_BIASES_SIZE {{ output_dims[0] }}
-
+#define {{ name|upper }}_BIASES_SIZE {{ out_chan[0] }}
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_cpp/templates/configuration/pooling_config.jinja b/aidge_export_cpp/templates/configuration/pooling_config.jinja
index 3f2ca701227e7c828003d7e218758a01fd03a274..db4b18f91b005d2ae38e9022cdb4f92eca06cd42 100644
--- a/aidge_export_cpp/templates/configuration/pooling_config.jinja
+++ b/aidge_export_cpp/templates/configuration/pooling_config.jinja
@@ -3,18 +3,13 @@
 #define {{ name|upper }}_LAYER_H
 
 {# For layer configuration -#}
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
+{% include "./_def_io.jinja" %}
 #define {{ name|upper }}_PADDING_Y {{ padding[1] }}
 #define {{ name|upper }}_PADDING_X {{ padding[0] }}
-#define {{ name|upper }}_STRIDE_Y {{ stride[1] }}
-#define {{ name|upper }}_STRIDE_X {{ stride[0] }}
-#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel[1] }}
-#define {{ name|upper }}_KERNEL_WIDTH {{ kernel[0] }}
+#define {{ name|upper }}_STRIDE_Y {{ stride_dims[1] }}
+#define {{ name|upper }}_STRIDE_X {{ stride_dims[0] }}
+#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel_dims[1] }}
+#define {{ name|upper }}_KERNEL_WIDTH {{ kernel_dims[0] }}
 #define {{ name|upper }}_POOLING_TYPE {{ pool_type }}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
 
diff --git a/aidge_export_cpp/templates/kernel_forward/_save_outputs.jinja b/aidge_export_cpp/templates/kernel_forward/_save_outputs.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..796e38d44dcb0bc44609a98fffa380a0157dc810
--- /dev/null
+++ b/aidge_export_cpp/templates/kernel_forward/_save_outputs.jinja
@@ -0,0 +1,17 @@
+/* COMMENTED FOR THE MOMENT
+#ifdef SAVE_OUTPUTS
+    FILE* {{name|upper}}_STREAM = fopen("outputs/{{name}}_output.txt", "w");
+    saveOutputs(
+        {{name|upper}}_NB_OUTPUTS,
+        {{name|upper}}_OUTPUTS_HEIGHT,
+        {{name|upper}}_OUTPUTS_WIDTH,
+        {{name|upper}}_CONT_OFFSET,
+        {{name|upper}}_CONT_SIZE,
+        {{name|upper}}_WRAP_OFFSET,
+        {{name|upper}}_WRAP_SIZE,
+        {{name|upper}}_STRIDE,
+        {{out_name[0]}},
+        {{name|upper}}_STREAM, Network::Format::CHW);
+    fclose({{name|upper}}_STREAM);
+#endif
+*/
diff --git a/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja b/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja
index c11935ab5bf6bd44dd88d7735f9f0b4bb5e7404b..c3f032ea1fa5b8f69e7b0bd7245efa72dd6f3e78 100644
--- a/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja
@@ -1,3 +1,7 @@
+{% if not is_output %}
+{{out_cdtype[0]}}* {{out_name[0]}} = ({{out_cdtype[0]}}*) mem + {{name|upper}}_OFFSET;
+{% endif %}
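+{#- If the node is a graph output, out_name[0] is the buffer passed to model_forward;
+    otherwise it points into the shared `mem` arena at this layer's offset -#}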
 activation_forward<{{name|upper}}_NB_DATA,
                    {{name|upper}}_ACTIVATION>
-                   ({{input_name}}, {{output_name}}, {{name|upper}}_RESCALING);
\ No newline at end of file
+                   ({{in_name[0]}}, {{out_name[0]}}, {{name|upper}}_RESCALING);
+{% include "./_save_outputs.jinja" %}
diff --git a/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja b/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja
index 3568b297477a68dce38c1f3a86f086d90937d9c3..4bf5191b033bc952be43ca1799d13550a3a56b71 100644
--- a/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja
@@ -2,4 +2,5 @@ batchnorm_forward<{{name|upper}}_NB_OUTPUTS,
                   {{name|upper}}_OUTPUTS_HEIGHT,
                   {{name|upper}}_OUTPUTS_WIDTH,
                   {{name|upper}}_ACTIVATION>
-                  ({{input_name}}, {{output_name}}, {{biases_name}}, {{variances_name}}, {{means_name}}, {{scales_name}}, {{name|upper}}_EPSILON);
\ No newline at end of file
+                  ({{input_name}}, {{output_name}}, {{biases_name}}, {{variances_name}}, {{means_name}}, {{scales_name}}, {{name|upper}}_EPSILON);
+{% include "./_save_outputs.jinja" %}
diff --git a/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja b/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja
index fa253e6af0003cfece3fd8515ab105bfbe16d829..a79542ebd120cc125f22a72e3bc3f6140651b415 100644
--- a/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja
@@ -1,3 +1,6 @@
+{% if not is_output %}
+{{out_cdtype[0]}}* {{out_name[0]}} = ({{out_cdtype[0]}}*) mem + {{name|upper}}_OFFSET;
+{% endif %}
 convolution_forward<{{name|upper}}_NB_CHANNELS,
                     {{name|upper}}_CHANNELS_HEIGHT,
                     {{name|upper}}_CHANNELS_WIDTH,
@@ -13,4 +16,5 @@ convolution_forward<{{name|upper}}_NB_CHANNELS,
                     {{name|upper}}_KERNEL_HEIGHT,
                     {{name|upper}}_KERNEL_WIDTH,
                     {{name|upper}}_ACTIVATION>
-                    ({{input_name}}, {{output_name}}, {{weights_name}}, {{biases_name}}, {{name|upper}}_RESCALING);
+                    ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{name|upper}}_RESCALING);
+{% include "./_save_outputs.jinja" %}
diff --git a/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja b/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja
index 30dce76d865d99994d9132932b868a54b9d7178b..0ed82049f46bf81c5c8901d59af6cbae605dbe8c 100644
--- a/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja
@@ -1,4 +1,8 @@
+{% if not is_output %}
+{{out_cdtype[0]}}* {{out_name[0]}} = ({{out_cdtype[0]}}*) mem + {{name|upper}}_OFFSET;
+{% endif %}
 elemwise_forward<{{name|upper}}_NB_ELTS,
                  {{name|upper}}_ELEM_OP,
                  {{name|upper}}_ACTIVATION>
-                 ({{output_name}}, {{name|upper}}_RESCALING, {{inputs1_name}}, {{inputs2_name}});
+                 ({{out_name[0]}}, {{name|upper}}_RESCALING, {{in_name[0]}}, {{in_name[1]}});
+{% include "./_save_outputs.jinja" %}
diff --git a/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja b/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja
index 67832b9ec27b87b6ba617e47e3e2fde39bb8cf4c..1d2ba52382fc5ca3ca6b6e519f46d9a074661428 100644
--- a/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja
@@ -1,3 +1,6 @@
+{% if not is_output %}
+{{out_cdtype[0]}}* {{out_name[0]}} = ({{out_cdtype[0]}}*) mem + {{name|upper}}_OFFSET;
+{% endif %}
 fullyconnected_forward<{{name|upper}}_NB_CHANNELS,
                        {{name|upper}}_CHANNELS_HEIGHT,
                        {{name|upper}}_CHANNELS_WIDTH,
@@ -5,4 +8,5 @@ fullyconnected_forward<{{name|upper}}_NB_CHANNELS,
                        {{name|upper}}_OUTPUTS_HEIGHT,
                        {{name|upper}}_OUTPUTS_WIDTH,
                        {{name|upper}}_ACTIVATION>
-                       ({{inputs_name}}, {{outputs_name}}, {{weights_name}}, {{biases_name}}, {{name|upper}}_RESCALING);
\ No newline at end of file
+                       ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{name|upper}}_RESCALING);
+{% include "./_save_outputs.jinja" %}
diff --git a/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja b/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja
index e72ea2178f26107aaa4f7571cbf93f7c2e2f94d2..7c4148ca57c2665ca09da8e6f427db75ab19f88d 100644
--- a/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja
@@ -1,2 +1,3 @@
 leakyrelu_forward<{{name|upper}}_NB_DATA>
-                   ({{input_name}}, {{output_name}}, {{name|upper}}_ALPHA);
\ No newline at end of file
+                   ({{input_name}}, {{output_name}}, {{name|upper}}_ALPHA);
+{% include "./_save_outputs.jinja" %}
diff --git a/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja b/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja
index 20dbb0f85db27f0e82905e073de49d924595a74f..ea216fa76a95ccac7434e59e39c9328b678e59fb 100644
--- a/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja
@@ -1,3 +1,6 @@
+{% if not is_output %}
+{{out_cdtype[0]}}* {{out_name[0]}} = ({{out_cdtype[0]}}*) mem + {{name|upper}}_OFFSET;
+{% endif %}
 pooling_forward<{{name|upper}}_NB_CHANNELS,
                 {{name|upper}}_CHANNELS_HEIGHT,
                 {{name|upper}}_CHANNELS_WIDTH,
@@ -12,4 +15,5 @@ pooling_forward<{{name|upper}}_NB_CHANNELS,
                 {{name|upper}}_KERNEL_WIDTH,
                 {{name|upper}}_POOLING_TYPE,
                 {{name|upper}}_ACTIVATION>
-                ({{input_name}}, {{output_name}});
\ No newline at end of file
+                ({{in_name[0]}}, {{out_name[0]}});
+{% include "./_save_outputs.jinja" %}
diff --git a/aidge_export_cpp/templates/network/network_forward.jinja b/aidge_export_cpp/templates/network/network_forward.jinja
index b9c313cad157f90b40a94d47c2782d1f3f954bad..b594f5eef8abe260f0ff28a766bef210e0ad5eba 100644
--- a/aidge_export_cpp/templates/network/network_forward.jinja
+++ b/aidge_export_cpp/templates/network/network_forward.jinja
@@ -2,6 +2,11 @@
 
 #include <stdint.h>
 
+#ifdef SAVE_OUTPUTS
+#include <sys/types.h>
+#include <sys/stat.h>
+#endif
+
 #include "network/rescaling.hpp"
 
 // Layer & memory configurations
@@ -17,7 +22,15 @@ static {{inputs[0][0]}} mem[MEMORY_SIZE];
 {#- Support multiple inputs with different datatypes and multiple outputs with different datatypes -#}
 void model_forward({% for inp in inputs %}const {{inp[0]}}* {{inp[1]}}, {% endfor %}{% for out in outputs %}{{out[0]}}* {{out[1]}}{{ ", " if not loop.last else "" }}{% endfor %})
 {
+
+    #ifdef SAVE_OUTPUTS
+    // Creation of the outputs directory
+    struct stat st = {0};
+    if (stat("outputs", &st) == -1) {
+        mkdir("outputs", 0700);
+    }
+    #endif
     {%- for action in actions %}
     {{ action }}
     {%- endfor %}
-}
\ No newline at end of file
+}