diff --git a/.gitignore b/.gitignore
index 8eb208c0cff93ea86a79a962adbc89978b7ae50e..55ab6d78711f9af47af0458596f474ba44379676 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,4 +31,4 @@ ENV/
 xml*/
 
 # Model parameters
-*.onnx
\ No newline at end of file
+*.onnx
diff --git a/aidge_export_arm_cortexm/export.py b/aidge_export_arm_cortexm/export.py
index 5179937e86974de4e3134f9652e99ef2918751bc..640766c7b5172a212cfd054676c1beec1298ba01 100644
--- a/aidge_export_arm_cortexm/export.py
+++ b/aidge_export_arm_cortexm/export.py
@@ -4,7 +4,7 @@ import shutil
 from pathlib import Path
 import numpy as np
 
-from aidge_core.export_utils.code_generation import *
+from aidge_core.export.code_generation import *
 from aidge_export_arm_cortexm.utils import (ROOT, AVAILABLE_BOARDS, has_board, \
                                             OPERATORS_REGISTRY, supported_operators)
 import aidge_export_arm_cortexm.operators
@@ -95,20 +95,28 @@ def export(export_folder_name,
     # This assumes the entry nodes are producers with constant=false
     # Store the datatype & name
     list_inputs_name = []
+    first_element_added = False
     for node in graphview.get_nodes():
         if node.type() == "Producer":
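+            # Always register the first producer as a network input, even if
+            # it is constant, so at least one input is emitted for the network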
-            if not node.get_operator().get_attr("Constant"):
+            if not first_element_added:
+                export_type = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+                list_inputs_name.append((export_type, node.name()))
+                first_element_added = True
+            elif not node.get_operator().attr.constant:
                 export_type = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
                 list_inputs_name.append((export_type, node.name()))
 
     # Get output nodes
     # Store the datatype & name, like entry nodes
+
     list_outputs_name = []
     for node in graphview.get_nodes():
         if len(node.get_children()) == 0:
-            if node.get_operator().has_attr("DataType"):
+            if node.get_operator().attr.has_attr("dtype"):
                 # Temporary fix because impossible to set DataType of a generic operator
-                export_type = aidge_datatype2ctype(node.get_operator().get_attr("DataType"))
+                export_type = aidge_datatype2ctype(node.get_operator().attr.dtype)
             else:
                 export_type = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
 
diff --git a/aidge_export_arm_cortexm/memory.py b/aidge_export_arm_cortexm/memory.py
index 5e2cd36de1130b55f8978ae80d57ac7c30facb6a..65e50c30a47ad0a519107faa5af61f606754a99c 100644
--- a/aidge_export_arm_cortexm/memory.py
+++ b/aidge_export_arm_cortexm/memory.py
@@ -29,7 +29,9 @@ MEMORY_INFO_TEMPLATE = ["layer_name", "size", "stride", "length", "count", "cont
 # Default memory management, which can be used for development
 def compute_default_mem_info(scheduler: aidge_core.Scheduler):
     
-    list_forward_nodes = scheduler.get_static_scheduling()
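+    # `scheduler` is assumed to already be the static scheduling (the node
+    # list formerly obtained here via scheduler.get_static_scheduling())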
+    list_forward_nodes = scheduler
     mem_info = []
     mem_size = 0
 
@@ -76,7 +76,7 @@ def generate_optimized_memory_info(stats_folder: Path,
 
         # Skip memory management for the parameter producers
         if node.type() == "Producer":
-            if node.get_operator().get_attr("Constant"):
+            if node.get_operator().attr.constant:
                 continue
             else:
                 # Input memory management (suppose tensor ends with [:, channel, height, width]))
diff --git a/aidge_export_arm_cortexm/operators.py b/aidge_export_arm_cortexm/operators.py
index b0f4f60b68d7ca414c50b821551e7664d05ca27a..9584ec8073d7d53a0d3fd55589e04e1bf2ad032e 100644
--- a/aidge_export_arm_cortexm/operators.py
+++ b/aidge_export_arm_cortexm/operators.py
@@ -8,7 +8,7 @@ from typing import Tuple, List, Union, Dict
 
 import aidge_core
 from aidge_core import ExportNode
-from aidge_core.export_utils.code_generation import *
+from aidge_core.export.code_generation import *
 from aidge_export_arm_cortexm.utils import ROOT, operator_register
 from aidge_export_arm_cortexm.utils.converter import numpy_dtype2ctype, aidge_datatype2dataformat, aidge_datatype2ctype
 from aidge_export_arm_cortexm.utils.generation import *
@@ -73,7 +73,7 @@ class Producer_ARMCortexM:
     def __init__(self, node):
         self.name = node.name()
         self.operator = node.get_operator()
-        self.constant = self.operator.get_attr("Constant")
+        self.constant = self.operator.attr.constant
         self.values = np.array(self.operator.get_output(0))
 
     def export(self, export_file:Path, format:str = "NHWC"):
@@ -267,17 +267,19 @@ class Conv_ARMCortexM(ExportNode):
         self.scaling = Scaling()("no_scaling")
         self.activation = "Linear"
 
-        self.kernel = node.get_operator().get_attr("KernelDims")
-        self.stride = node.get_operator().get_attr("StrideDims")
-        self.dilation = node.get_operator().get_attr("DilationDims")
+        self.kernel = node.get_operator().attr.kernel_dims
+        self.stride = node.get_operator().attr.stride_dims
+        self.dilation = node.get_operator().attr.dilation_dims
 
         # No padding with Conv
         # Use PaddedConv to add padding attribute
         self.padding = [0, 0]
 
-        self.nb_channels = node.get_operator().get_attr("InChannels")
-        self.nb_outputs = node.get_operator().get_attr("OutChannels")
-
+        self.nb_channels = node.get_operator().in_channels()
+        self.nb_outputs = node.get_operator().out_channels()
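+        # The export needs a connected input tensor to read its dimensions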
+        if self.inputs[0] is None:
+            raise RuntimeError(f"Conv node '{node.name()}' has no input tensor connected.")
         if len(self.inputs_dims[0]) == 4:
             # if dims == [batch, nb_channels, height, width]
             # transform to [nb_channels, height, width]
@@ -392,11 +393,11 @@ class PaddedConv_ARMCortexM(Conv_ARMCortexM):
 
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad":
-                self.padding = n.get_operator().get_attr("BeginEndBorders")
+                self.padding = n.get_operator().attr.begin_end_borders
             if n.type() == "Conv":
-                self.kernel = n.get_operator().get_attr("KernelDims")
-                self.stride = n.get_operator().get_attr("StrideDims")
-                self.dilation = n.get_operator().get_attr("DilationDims")
+                self.kernel = n.get_operator().attr.kernel_dims
+                self.stride = n.get_operator().attr.stride_dims
+                self.dilation = n.get_operator().attr.dilation_dims
 
         if len(self.inputs_dims[0]) == 4:
             # if dims == [batch, nb_channels, height, width]
@@ -414,23 +415,23 @@ class ConvReluScaling_ARMCortexM(Conv_ARMCortexM):
      def __init__(self, node, board, library):
         super(Conv_ARMCortexM, self).__init__(node, board, library)
 
-        if self.operator.has_attr("BeginEndBorders"):
-            self.padding = self.operator.get_attr("BeginEndBorders")
+        if self.operator.attr.has_attr("begin_end_borders"):
+            self.padding = self.operator.attr.begin_end_borders
 
         self.activation = "Rectifier"
 
         # Should do this line but there is a bug while changing the datatype of generic operator...
         # self.datatype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
         # Do this instead
-        if self.operator.get_attr("quantizedNbBits") == 8:
-            if self.operator.get_attr("isOutputUnsigned"):
+        if self.operator.attr.quantized_nb_bits == 8:
+            if self.operator.attr.is_output_unsigned:
                 self.datatype = aidge_datatype2ctype(aidge_core.DataType.UInt8)
             else:
                 self.datatype = aidge_datatype2ctype(aidge_core.DataType.Int8)
 
         # Impose Single Shift (perhaps change it to have a more modular system)
-        self.scaling = Scaling(self.operator.get_attr("scalingFactor"),
-                               self.operator.get_attr("quantizedNbBits"))("floating_point")
+        self.scaling = Scaling(self.operator.attr.scaling_factor,
+                               self.operator.attr.quantized_nb_bits)("floating_point")
 
 
 class Pooling_ARMCortexM(ExportNode):
@@ -444,8 +445,8 @@ class Pooling_ARMCortexM(ExportNode):
         self.pool_type = "None"
         self.activation = "Linear"
 
-        self.kernel = node.get_operator().get_attr("KernelDims")
-        self.stride = node.get_operator().get_attr("StrideDims")
+        self.kernel = node.get_operator().attr.kernel_dims
+        self.stride = node.get_operator().attr.stride_dims
 
         # No padding with MaxPooling or AvgPooling
         # Use PaddedMaxPooling/PaddedAvgPooling to add padding attribute
@@ -677,15 +678,15 @@ class FCScaling_ARMCortexM(FC_ARMCortexM):
         # Should do this line but there is a bug while changing the datatype of generic operator...
         # self.datatype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
         # Do this instead
-        if self.operator.get_attr("quantizedNbBits") == 8:
-            if self.operator.get_attr("isOutputUnsigned"):
+        if self.operator.attr.quantized_nb_bits == 8:
+            if self.operator.attr.is_output_unsigned:
                 self.datatype = aidge_datatype2ctype(aidge_core.DataType.UInt8)
             else:
                 self.datatype = aidge_datatype2ctype(aidge_core.DataType.Int8)
 
         # Impose Single Shift (perhaps change it to have a more modular system)
-        self.scaling = Scaling(self.operator.get_attr("scalingFactor"),
-                               self.operator.get_attr("quantizedNbBits"))("floating_point")
+        self.scaling = Scaling(self.operator.attr.scaling_factor,
+                               self.operator.attr.quantized_nb_bits)("floating_point")
 
 
 @operator_register("FcReluScaling")
diff --git a/aidge_export_arm_cortexm/operators_old.py b/aidge_export_arm_cortexm/operators_old.py
index 04a090b65c56a0a503618c80480e51ac4fbd6550..3440b248b1b719f4e5573d5c4dcc9df0b450122c 100644
--- a/aidge_export_arm_cortexm/operators_old.py
+++ b/aidge_export_arm_cortexm/operators_old.py
@@ -403,9 +403,10 @@ class Slice(ExportNode):
 
     def __init__(self, node, board, dataformat, library):
 
-        self.axes = node.get_operator().get_attr("axes")
-        self.starts = node.get_operator().get_attr("starts")
-        self.ends = node.get_operator().get_attr("ends")
+        self.axes = node.get_operator().attr.axes
+        self.starts = node.get_operator().attr.starts
+        self.ends = node.get_operator().attr.ends
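+        # `axes` appears to be 1-indexed here (hence the x-1 offsets below)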
 
         # Compute output dims
         out_dims = [self.ends[x-1] - self.starts[x-1] for x in self.axes]
@@ -460,7 +460,7 @@ class Concat(ExportNode):
 
     def __init__(self, node, board, dataformat, library):
 
-        self.axis = node.get_operator().get_attr("axis")
+        self.axis = node.get_operator().attr.axis
         out_dims = node.get_operator().get_input(0).dims()
 
         out_dims[self.axis - 1] = 0
diff --git a/aidge_export_arm_cortexm/templates/network/network_forward.jinja b/aidge_export_arm_cortexm/templates/network/network_forward.jinja
index b00e42f813066505cf03dd9f324a4ac418e45818..bde5553020d1a36f225a1402172715a7446c4496 100644
--- a/aidge_export_arm_cortexm/templates/network/network_forward.jinja
+++ b/aidge_export_arm_cortexm/templates/network/network_forward.jinja
@@ -12,7 +12,12 @@
 
 {# mem has the datatype of the first input #}
 {#- Change here to improve it -#}
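+{#- Fall back to float when the first input's C type is unknown -#}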
+{% if inputs[0][0] %}
 static {{inputs[0][0]}} mem[MEMORY_SIZE];
+{% else %}
+static float mem[MEMORY_SIZE];
+{% endif %}
 
 {# Forward function #}
 {#- Support multiple inputs with different datatypes and multiple outputs with different datatypes -#}
diff --git a/aidge_export_arm_cortexm/utils/converter.py b/aidge_export_arm_cortexm/utils/converter.py
index 426aa69d782417c699f790568fd5881b15ecdb1a..841c59e57975192b0968aef9187f9af257fca5c0 100644
--- a/aidge_export_arm_cortexm/utils/converter.py
+++ b/aidge_export_arm_cortexm/utils/converter.py
@@ -22,17 +22,17 @@ def numpy_dtype2ctype(dtype):
     
 
 def aidge_datatype2ctype(datatype):
-    if datatype == aidge_core.DataType.Int8:
+    if datatype == aidge_core.dtype.int8:
         return "int8_t"
-    elif datatype == aidge_core.DataType.UInt8:
+    elif datatype == aidge_core.dtype.uint8:
         return "uint8_t"
-    elif datatype == aidge_core.DataType.Int32:
+    elif datatype == aidge_core.dtype.int32:
         return "int32_t"
-    elif datatype == aidge_core.DataType.Int64:
+    elif datatype == aidge_core.dtype.int64:
         return "int64_t"
-    elif datatype == aidge_core.DataType.Float32:
+    elif datatype == aidge_core.dtype.float32:
         return "float"
-    elif datatype == aidge_core.DataType.Float64:
+    elif datatype == aidge_core.dtype.float64:
         return "double"
     # Add more dtype mappings as needed
     else:
@@ -40,15 +40,15 @@ def aidge_datatype2ctype(datatype):
     
 
 def aidge_datatype2dataformat(datatype):
-    if datatype == aidge_core.DataType.Int8:
+    if datatype == aidge_core.dtype.int8:
         return "int8"
-    elif datatype == aidge_core.DataType.Int32:
+    elif datatype == aidge_core.dtype.int32:
         return "int32"
-    elif datatype == aidge_core.DataType.Int64:
+    elif datatype == aidge_core.dtype.int64:
         return "int64"
-    elif datatype == aidge_core.DataType.Float32:
+    elif datatype == aidge_core.dtype.float32:
         return "float32"
-    elif datatype == aidge_core.DataType.Float64:
+    elif datatype == aidge_core.dtype.float64:
         return "float64"
     # Add more dtype mappings as needed
     else: