diff --git a/aidge_export_arm_cortexm/memory.py b/aidge_export_arm_cortexm/memory.py
index 44c582c5004e42945451633e02578c4d27fdeb1f..5e2cd36de1130b55f8978ae80d57ac7c30facb6a 100644
--- a/aidge_export_arm_cortexm/memory.py
+++ b/aidge_export_arm_cortexm/memory.py
@@ -1,12 +1,29 @@
+import os
+import shutil
+from typing import List
+from pathlib import Path
 import aidge_core
 import aidge_backend_cpu
-from typing import List
 
-# for each layer, name: [size, offset start]
+# For each layer:
+# name              [size, stride, length, count, contiguous offset, contiguous size, wrapping offset, wrapping size]
+# true values       [nb_outputs, nb_outputs, width, height, offset start, total size, 0, 0]
+# Example:
+#define ENV_MEM_SIZE 3
+#define ENV_MEM_STRIDE 3
+#define ENV_MEM_LENGTH 224
+#define ENV_MEM_COUNT 224
+#define ENV_MEM_CONT_OFFSET 0
+#define ENV_MEM_CONT_SIZE 150528
+#define ENV_MEM_WRAP_OFFSET 0
+#define ENV_MEM_WRAP_SIZE 0
+MEMORY_INFO_TEMPLATE = ["layer_name", "size", "stride", "length", "count", "cont_offset", "cont_size", "wrap_offset", "wrap_size"]
+
+# Old format, kept for reference - for each layer, name: [size, offset start]
 # Example:
 #define ENV_MEM_SIZE 3
 #define ENV_OFFSET 0
-MEMORY_INFO_TEMPLATE = ["layer_name", "size", "offset"]
+# MEMORY_INFO_TEMPLATE = ["layer_name", "size", "offset"]
 
 
 # Default memory management, which can be used for development
@@ -25,8 +42,9 @@ def compute_default_mem_info(scheduler: aidge_core.Scheduler):
                 for dim in dims:
                     mem *= dim
 
-                # Add memeory info
-                mem_info.append([node.name(), mem, mem_size])
+                # Add memory info
+                # Only size, cont_offset and cont_size are relevant here; stride, length, count and the wrapping fields stay 0
+                mem_info.append([node.name(), mem, 0, 0, 0, mem_size, mem, 0, 0])
                 
                 # Increment offset for the next layer
                 mem_size += mem
@@ -34,17 +52,77 @@ def compute_default_mem_info(scheduler: aidge_core.Scheduler):
     return mem_size, mem_info
 
 
-def generate_optimized_memory_info(scheduler: aidge_core.Scheduler,
+def generate_optimized_memory_info(stats_folder: Path,
+                                   scheduler: aidge_core.Scheduler,
                                    wrapping:bool = False):
     
     # The forward dims have to be computed outside this function
+    # It is also assumed that the scheduling has been generated beforehand
+    # Otherwise, uncomment the following line
+    # scheduler.generate_scheduling()
 
     # Generate the memory manager
+    # For now, Producers are not taken into account by the memory manager => inc_producers=False
     mem_manager = scheduler.generate_memory(inc_producers=False, wrap_around_buffer=wrapping)
 
-    mem_size = 0
+    # In the export, we currently use a unified memory buffer whose size
+    # is determined by the peak memory usage
+    mem_size = mem_manager.get_peak_usage()
     mem_info = []
 
-    # Add the parsing of the memory manager
+    mem_planes = mem_manager.get_planes()
+
+    for node in scheduler.get_static_scheduling():
+
+        # Skip memory management for the parameter producers
+        if node.type() == "Producer":
+            if node.get_operator().get_attr("Constant"):
+                continue
+            else:
+                # Input memory management (assumes the tensor dims end with [:, channel, height, width])
+                tensor = node.get_operator().get_output(0)
+                if tensor is None:
+                    raise RuntimeError("Warning input producer not provided")
+                if len(tensor.dims()) < 3:
+                    raise RuntimeError("Input producer dimensions must be with [:, channel, height, width]")
+
+                name = node.name()
+                size = tensor.dims()[-3]    # Should be nb_channels
+                stride = tensor.dims()[-3]  # Should be nb_channels
+                length = tensor.dims()[-1]  # Should be width
+                count = tensor.dims()[-2]   # Should be height
+                cont_offset = 0             # Assume the input data is stored outside the exported memory buffer,
+                                            # so the offset is not relevant here
+                cont_size = tensor.dims()[-1] * tensor.dims()[-2] * tensor.dims()[-3] # Size of input
+                wrap_offset = 0     # No wrapping
+                wrap_size = 0       # No wrapping
+        else:
+            plane = mem_planes[node][0]
+
+            name = node.name()
+            size = plane.size
+            stride = plane.stride
+            length = plane.length
+            count = plane.count
+            cont_offset = plane.get_contiguous_offset()
+            cont_size = plane.get_contiguous_size()
+            wrap_offset = plane.get_wrapped_offset()
+            wrap_size = plane.get_wrapped_size()
+
+        mem_info.append([name, size, stride, length, count, 
+                        cont_offset, cont_size, wrap_offset, wrap_size])
+
+    # Use gnuplot to plot the memory log generated by the MemoryManager
+    try:
+        os.makedirs(str(stats_folder / "graph"), exist_ok=True)
+        mem_manager.log("memory_info")
+        os.chmod("memory_info_plot.gnu", 0o777)
+        os.system("./memory_info_plot.gnu")
+        shutil.move("memory_info", str(stats_folder / "graph"/ "memory_info"))
+        shutil.move("memory_info_plot.png", str(stats_folder / "graph" / "memory_info_plot.png"))
+        os.remove("memory_info_plot.gnu")
+    except Exception:
+        print("Please install gnuplot if you want a memory plot from the MemoryManager.")
+
 
     return mem_size, mem_info
\ No newline at end of file
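
For context, a minimal calling sketch (not part of the patch): it assumes an aidge_core.GraphView whose dimensions have already been forwarded and a SequentialScheduler, matching the preconditions noted in the comments above. The wrapper function name and the stats folder handling are illustrative only.

    from pathlib import Path
    import aidge_core
    from aidge_export_arm_cortexm.memory import (
        MEMORY_INFO_TEMPLATE,
        generate_optimized_memory_info,
    )

    def export_memory_layout(graph: aidge_core.GraphView, stats_folder: Path):
        # The scheduling must exist before the memory pass runs
        scheduler = aidge_core.SequentialScheduler(graph)  # assumed scheduler class
        scheduler.generate_scheduling()
        mem_size, mem_info = generate_optimized_memory_info(stats_folder, scheduler,
                                                            wrapping=False)
        # Pair each entry with MEMORY_INFO_TEMPLATE to get named fields per layer
        layers = [dict(zip(MEMORY_INFO_TEMPLATE, entry)) for entry in mem_info]
        return mem_size, layers
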
diff --git a/aidge_export_arm_cortexm/templates/memory/mem_info.jinja b/aidge_export_arm_cortexm/templates/memory/mem_info.jinja
index fe1c3c00f5c4cc78b56502de3e7d91208e22ff35..f835d9649a599c9339256c59d5941fcfc8f1b545 100644
--- a/aidge_export_arm_cortexm/templates/memory/mem_info.jinja
+++ b/aidge_export_arm_cortexm/templates/memory/mem_info.jinja
@@ -1,16 +1,16 @@
-{#- For name header -#}
 #ifndef MEM_INFO_H
 #define MEM_INFO_H
 
 #define MEMORY_SIZE {{ mem_size }}
+#define MEMORY_ALIGNMENT {{ mem_alignment }}
 
 {% for i in range(mem_info|length) -%}
 {%- set layer_name = mem_info[i][0] %}
 /* {{layer_name}} memory */
 {% for j in range(1, mem_info[i]|length) %}
-#define {{ layer_name|upper }}_{{ mem_info_legends[j]|upper }} {{ mem_info[i][j] }}
+#define {{ layer_name|upper }}_MEM_{{ mem_info_legends[j]|upper }} {{ mem_info[i][j] }}
 {%- endfor %}
 {% endfor %}
 
 
-#endif /* MEM_INFO_H */
\ No newline at end of file
+#endif /* MEM_INFO_H */
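
The renamed defines follow the pattern <LAYER>_MEM_<FIELD>, in line with the example in memory.py (ENV_MEM_SIZE, ENV_MEM_CONT_OFFSET, ...). Below is a minimal rendering sketch, assuming the export drives this template with jinja2 and that a 4-byte alignment is wanted; both the helper name and the alignment value are illustrative assumptions, not defined by this patch.

    from pathlib import Path
    from jinja2 import Environment, FileSystemLoader
    from aidge_export_arm_cortexm.memory import MEMORY_INFO_TEMPLATE

    def render_mem_info(mem_size: int, mem_info: list, out_path: Path):
        # mem_size and mem_info come from generate_optimized_memory_info()
        env = Environment(loader=FileSystemLoader("aidge_export_arm_cortexm/templates/memory"))
        header = env.get_template("mem_info.jinja").render(
            mem_size=mem_size,
            mem_alignment=4,                  # assumed alignment for Cortex-M targets
            mem_info=mem_info,
            mem_info_legends=MEMORY_INFO_TEMPLATE,
        )
        out_path.write_text(header)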