diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Relu/aidge_relu_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Relu/aidge_relu_float32.c
index 5e1bb6bca480aff223484653e2f08702299a33e3..baee52ebfe7e20331bd40b62c913135292ad65f8 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Relu/aidge_relu_float32.c
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Relu/aidge_relu_float32.c
@@ -1,10 +1,8 @@
-
-
-void aidge_relu_float32 (float* inputs, 
+void aidge_relu_float32 (float* inputs,
                          float* outputs,
                          unsigned int size)
 {
     for (unsigned int i = 0; i < size; ++i) {
         outputs[i] = (inputs[i] < 0.0f) ? 0.0f : inputs[i];
     }
-}
\ No newline at end of file
+}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Sigmoid/aidge_sigmoid_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Sigmoid/aidge_sigmoid_float32.h
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Sigmoid/aidge_sigmoid_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Sigmoid/aidge_sigmoid_float32.h
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Softmax/aidge_softmax_chw_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Softmax/aidge_softmax_chw_float32.h
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Softmax/aidge_softmax_chw_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Softmax/aidge_softmax_chw_float32.h
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Add/aidge_add_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Add/aidge_add_float32.h
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Add/aidge_add_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Add/aidge_add_float32.h
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Div/aidge_div_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Div/aidge_div_float32.h
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Div/aidge_div_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Div/aidge_div_float32.h
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Mul/aidge_mul_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Mul/aidge_mul_float32.h
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Mul/aidge_mul_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Mul/aidge_mul_float32.h
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Sub/aidge_sub_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Sub/aidge_sub_float32.h
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Sub/aidge_sub_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Sub/aidge_sub_float32.h
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/MatMul/aidge_matmul_chw_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/MatMul/aidge_matmul_chw_float32.h
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/MatMul/aidge_matmul_chw_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/MatMul/aidge_matmul_chw_float32.h
diff --git a/aidge_export_arm_cortexm/_N2D2/static/assert.h b/aidge_export_arm_cortexm/_Aidge_Arm/static/assert.h
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/assert.h
rename to aidge_export_arm_cortexm/_Aidge_Arm/static/assert.h
diff --git a/aidge_export_arm_cortexm/_N2D2/static/kernels/Conv.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/static/kernels/Conv.hpp
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/kernels/Conv.hpp
rename to aidge_export_arm_cortexm/_Aidge_Arm/static/kernels/Conv.hpp
diff --git a/aidge_export_arm_cortexm/_N2D2/static/kernels/Fc.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/static/kernels/Fc.hpp
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/kernels/Fc.hpp
rename to aidge_export_arm_cortexm/_Aidge_Arm/static/kernels/Fc.hpp
diff --git a/aidge_export_arm_cortexm/_N2D2/static/kernels/Macs.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/static/kernels/Macs.hpp
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/kernels/Macs.hpp
rename to aidge_export_arm_cortexm/_Aidge_Arm/static/kernels/Macs.hpp
diff --git a/aidge_export_arm_cortexm/_N2D2/static/kernels/Pooling.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/static/kernels/Pooling.hpp
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/kernels/Pooling.hpp
rename to aidge_export_arm_cortexm/_Aidge_Arm/static/kernels/Pooling.hpp
diff --git a/aidge_export_arm_cortexm/_N2D2/static/nn_scaling_functions.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/static/nn_scaling_functions.hpp
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/nn_scaling_functions.hpp
rename to aidge_export_arm_cortexm/_Aidge_Arm/static/nn_scaling_functions.hpp
diff --git a/aidge_export_arm_cortexm/_N2D2/static/typedefs.h b/aidge_export_arm_cortexm/_Aidge_Arm/static/typedefs.h
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/typedefs.h
rename to aidge_export_arm_cortexm/_Aidge_Arm/static/typedefs.h
diff --git a/aidge_export_arm_cortexm/_N2D2/static/utils.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/static/utils.hpp
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/utils.hpp
rename to aidge_export_arm_cortexm/_Aidge_Arm/static/utils.hpp
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/_def_io.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/_def_io.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..66756cf8f501035f7222272f9c410908f499f06f
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/_def_io.jinja
@@ -0,0 +1,14 @@
+{# NOTE: Suppose input is first #}
+// INPUT CONF
+{% for inidx in range(nb_in) -%}
+#define {{ in_name[inidx]|upper }}_NB_CHANNELS {{ in_chan[inidx] }}
+#define {{ in_name[inidx]|upper }}_IN_HEIGHT {{ in_height[inidx] }}
+#define {{ in_name[inidx]|upper }}_IN_WIDTH {{ in_width[inidx] }}
+{% endfor %}
+
+// OUTPUT CONF
+{% for outidx in range(nb_out) -%}
+#define {{ out_name[outidx]|upper }}_NB_OUTPUTS {{ out_chan[outidx] }}
+#define {{ out_name[outidx]|upper }}_OUT_HEIGHT {{ out_height[outidx] }}
+#define {{ out_name[outidx]|upper }}_OUT_WIDTH {{ out_width[outidx] }}
+{% endfor %}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/_meminfo.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/_meminfo.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..15d15425b4330f68b4a97c31e9cf7a1076cc93e8
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/_meminfo.jinja
@@ -0,0 +1,11 @@
+// MEMINFO CONF
+{% for outidx in range(nb_out) -%}
+#define {{ out_name[outidx]|upper }}_MEM_SIZE {{ mem_info_size[outidx]}}
+#define {{ out_name[outidx]|upper }}_MEM_OFFSET {{ mem_info_offset[outidx]}}
+#define {{ out_name[outidx]|upper }}_MEM_STRIDE {{ mem_info_stride[outidx]}}
+#define {{ out_name[outidx]|upper }}_MEM_LENGTH {{ mem_info_length[outidx]}}
+#define {{ out_name[outidx]|upper }}_MEM_CONT_SIZE {{ mem_info_cont_size[outidx]}}
+#define {{ out_name[outidx]|upper }}_MEM_CONT_OFFSET {{ mem_info_cont_offset[outidx]}}
+#define {{ out_name[outidx]|upper }}_MEM_WRAP_OFFSET {{ mem_info_wrap_offset[outidx]}}
+#define {{ out_name[outidx]|upper }}_MEM_WRAP_SIZE {{ mem_info_wrap_size[outidx]}}
+{% endfor %}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/activation.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/activation.jinja
index 82817e995ee5b4f684c6cdcc3073637b88d0e6d0..717757f32ea7009f6b3d7c0602a935f31c68502a 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/activation.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/activation.jinja
@@ -2,15 +2,15 @@
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
 
-/* Activation {{ activation_type|lower }} layer */
+/* Activation "{{ activation_type|lower }}" layer */
 
 {# For layer configuration -#}
-#define {{ name|upper }}_INPUTS_SIZE {{ nb_inputs }}
-#define {{ name|upper }}_OUTPUTS_SIZE {{ nb_outputs }}
+#define {{ name|upper }}_INPUTS_SIZE {{ in_size[0] }}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
 {% if axis is defined %}
 #define {{ name|upper }}_AXIS {{ axis }}
-#define {{name|upper}}_INPUT_DIMS_SIZE {{ input_dims|length}}
+#define {{ name|upper }}_INPUT_DIMS_SIZE {{ in_dims[0]|length}}
 {% endif %}
-#define {{ name|upper }}_ACTIVATION {{ activation_type }}
+#define {{ name|upper }}_ACTIVATION "{{ activation_type }}"
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/conv_config.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/conv_config.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..6a48f2b55c4889829823f0abf095cff40ebecc71
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/conv_config.jinja
@@ -0,0 +1,41 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+#include "typedefs.h"
+#include "nn_scaling_functions.hpp"
+
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
+
+// Attributes
+#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel_dims[1] }}
+#define {{ name|upper }}_KERNEL_WIDTH {{ kernel_dims[0] }}
+#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
+#define {{ name|upper }}_PADDING_X {{ padding[0] }}
+#define {{ name|upper }}_STRIDE_Y {{ stride_dims[1] }}
+#define {{ name|upper }}_STRIDE_X {{ stride_dims[0] }}
+#define {{ name|upper }}_DILATION_Y {{ dilation_dims[1] }}
+#define {{ name|upper }}_DILATION_X {{ dilation_dims[0] }}
+
+// Activation/Scaling
+#define {{ name|upper }}_ACTIVATION {{ activation }}
+
+{%- if scaling_type == "floating_point" %}
+static const N2D2_Export::FloatingPointScaling {{ name|upper }}_SCALING = { {{scaling_value}} };
+{%- elif scaling_type == "fixed_point" %}
+static const N2D2_Export::FixedPointScaling<{{scaling_value}}, {{fractional_bits}}> {{ name|upper }}_SCALING;
+{%- elif scaling_type == "single_shift" %}
+static const N2D2_Export::SingleShiftScaling<{{shift_value}}> {{ name|upper }}_SCALING;
+{%- else %}
+static const N2D2_Export::NoScaling {{ name|upper }}_SCALING;
+{%- endif %}
+
+// Sizes
+#define {{ name|upper }}_WEIGHTS_SIZE {{ out_chan[0] * in_chan[0] * kernel_dims[1] * kernel_dims[0] }}
+#define {{ name|upper }}_BIASES_SIZE {{ out_chan[0] }}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_chan[0] * out_height[0] * out_width[0] }}
+#define {{ name|upper }}_CHANNELS_SIZE {{ in_chan[0] * in_height[0] * in_width[0] }}
+
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/elemwise.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/elemwise.jinja
index c5e4281dba7b3146516bec019ed30b6136a10014..dc3bdc00f90f903f116f42e4825dc7613b2a2180 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/elemwise.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/elemwise.jinja
@@ -5,13 +5,17 @@
 /* ElemWise - {{ elemwise_op }} layer */
 
 {# For layer configuration -#}
-#define {{ name|upper }}_INPUTS_SIZE {{ nb_inputs }}
-#define {{ name|upper }}_OUTPUTS_SIZE {{ nb_outputs }}
+#define {{ name|upper }}_INPUTS_SIZE {{ in_size[0] }}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
 
 
-#define {{name|upper}}_INPUT_A_DIMS_SIZE {{ input_dims[0]|length}}
-#define {{name|upper}}_INPUT_B_DIMS_SIZE {{ input_dims[1]|length}}
-#define {{name|upper}}_OUTPUT_DIMS_SIZE {{ output_dims[0]|length}}
+static const int {{ in_name[0]|upper }}_DIMS[] = { {{ in_dims[0]|join(", ") }} };
+static const int {{ in_name[1]|upper }}_DIMS[] = { {{ in_dims[1]|join(", ") }} };
+static const int {{ out_name[0]|upper }}_DIMS[] = { {{ out_dims[0]|join(", ") }} };
+
+#define {{in_name[0]|upper}}_NB_DIM {{ in_dims[0]|length}}
+#define {{in_name[1]|upper}}_NB_DIM {{ in_dims[1]|length}}
+#define {{out_name[0]|upper}}_NB_DIM {{ out_dims[0]|length}}
 #define {{ name|upper }}_ELEM_OP {{ elemwise_op }}
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_N2D2/templates/configuration/fc_config.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/fc_config.jinja
similarity index 52%
rename from aidge_export_arm_cortexm/_N2D2/templates/configuration/fc_config.jinja
rename to aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/fc_config.jinja
index e09cc0425490a67968dd1c6294a1003b102e4e2f..ab33588ca642d54e6fb5fcc58fe7a5279d4ddb32 100644
--- a/aidge_export_arm_cortexm/_N2D2/templates/configuration/fc_config.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/fc_config.jinja
@@ -6,13 +6,8 @@
 #include "nn_scaling_functions.hpp"
 
 {# For layer configuration -#}
-// In/Out
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
 
 // Activation/Scaling
 #define {{ name|upper }}_ACTIVATION {{ activation }}
@@ -28,12 +23,10 @@ static const N2D2_Export::NoScaling {{ name|upper }}_SCALING;
 {%- endif %}
 
 {# Calculate sizes -#}
-{%- set weights_size = output_dims[0] * input_dims[0] * input_dims[1] * input_dims[2] -%}
 // Sizes
-#define {{ name|upper }}_WEIGHTS_SIZE {{ weights_size }}
-#define {{ name|upper }}_BIASES_SIZE {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_SIZE {{ output_dims[0] * output_dims[1] * output_dims[2] }}
-#define {{ name|upper }}_CHANNELS_SIZE {{ input_dims[0] * input_dims[1] * input_dims[2] }}
-
+#define {{ name|upper }}_WEIGHTS_SIZE {{ out_chan[0] * in_chan[0] * in_height[0] * in_width[0] }}
+#define {{ name|upper }}_BIASES_SIZE {{ out_chan[0] }}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_chan[0] * out_height[0] * out_width[0] }}
+#define {{ name|upper }}_CHANNELS_SIZE {{ in_chan[0] * in_height[0] * in_width[0] }}
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/matmul.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/matmul.jinja
index 86b40d40784ca8fb6a59fb1172627843d3df80db..2168a59be92c3e7d3e2ded20c03d9d4e57478b11 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/matmul.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/matmul.jinja
@@ -4,9 +4,14 @@
 
 {# For layer configuration -#}
 
-#define {{name|upper}}_INPUT_A_DIMS_SIZE {{ input_dims[0]|length}}
-#define {{name|upper}}_INPUT_B_DIMS_SIZE {{ input_dims[1]|length}}
-#define {{name|upper}}_OUTPUT_DIMS_SIZE {{ output_dims[0]|length}}
+
+static const int {{ in_name[0]|upper }}_DIMS[] = { {{ in_dims[0]|join(", ") }} };
+static const int {{ in_name[1]|upper }}_DIMS[] = { {{ in_dims[1]|join(", ") }} };
+static const int {{ out_name[0]|upper }}_DIMS[] = { {{ out_dims[0]|join(", ") }} };
+
+#define {{name|upper}}_INPUT_A_DIMS_SIZE {{ in_dims[0]|length}}
+#define {{name|upper}}_INPUT_B_DIMS_SIZE {{ in_dims[1]|length}}
+#define {{name|upper}}_OUTPUT_DIMS_SIZE {{ out_dims[0]|length}}
 
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/pool_config.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/pool_config.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..02586f2958896397b5d6b18cdc6f1ddfa27f476e
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/pool_config.jinja
@@ -0,0 +1,22 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+#include "typedefs.h"
+
+{# For layer configuration -#}
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
+
+// Attributes
+#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel_dims[1] }}
+#define {{ name|upper }}_KERNEL_WIDTH {{ kernel_dims[0] }}
+#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
+#define {{ name|upper }}_PADDING_X {{ padding[0] }}
+#define {{ name|upper }}_STRIDE_Y {{ stride_dims[1] }}
+#define {{ name|upper }}_STRIDE_X {{ stride_dims[0] }}
+
+#define {{ name|upper }}_POOLING_TYPE {{ pool_type }}
+#define {{ name|upper }}_ACTIVATION {{ activation }}
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/relu.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/relu.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..f896b95289a4baaeb03fad45d31c7cb640a17851
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/relu.jinja
@@ -0,0 +1,9 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+{# For layer configuration -#}
+#define {{ name|upper }}_INPUTS_SIZE {{ in_size[0] }}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation.jinja
index b1a2289e77ab5789812ad91f4bd4dfccbfcae64e..8baa4ea019d4624994aabf4a6685f41cd6de4b60 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation.jinja
@@ -1 +1 @@
-    aidge_{{activation_type|lower}}_{{dataformat}}({{input_name}}, {{output_name}}, {% if activation_type is eq('softmax') %} {{input_name}}_DIMS, {{name|upper}}_AXIS, {{name|upper}}_INPUT_DIMS_SIZE,{% endif %} {{name|upper}}_OUTPUTS_SIZE);
\ No newline at end of file
+    aidge_{{activation_type|lower}}_{{dataformat}}({{in_name[0]}}, {{out_name[0]}}, {% if activation_type is eq('softmax') %} {{in_name[0]}}_DIMS, {{name|upper}}_AXIS, {{name|upper}}_INPUT_DIMS_SIZE,{% endif %} {{name|upper}}_OUTPUTS_SIZE);
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/conv_kernel.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/conv_kernel.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..f637c89134cc5ff988edaab152917bdd694166f9
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/conv_kernel.jinja
@@ -0,0 +1,24 @@
+N2D2_Export::convcellPropagate<{{ in_name[0]|upper }}_NB_CHANNELS,
+                               {{ in_name[0]|upper }}_IN_HEIGHT,
+                               {{ in_name[0]|upper }}_IN_WIDTH,
+                               {{ out_name[0]|upper }}_NB_OUTPUTS,
+                               {{ out_name[0]|upper }}_OUT_HEIGHT,
+                               {{ out_name[0]|upper }}_OUT_WIDTH,
+                               {{ name|upper }}_PADDING_Y,
+                               {{ name|upper }}_PADDING_X,
+                               {{ name|upper }}_STRIDE_Y,
+                               {{ name|upper }}_STRIDE_X,
+                               {{ name|upper }}_KERNEL_HEIGHT,
+                               {{ name|upper }}_KERNEL_WIDTH,
+                               {{ name|upper }}_ACTIVATION,
+                               {{ out_name[0]|upper }}_MEM_CONT_OFFSET,
+                               {{ out_name[0]|upper }}_MEM_CONT_SIZE,
+                               {{ out_name[0]|upper }}_MEM_WRAP_OFFSET,
+                               {{ out_name[0]|upper }}_MEM_WRAP_SIZE,
+                               {{ out_name[0]|upper }}_MEM_STRIDE,
+                               {{ in_name[0]|upper }}_MEM_CONT_OFFSET,
+                               {{ in_name[0]|upper }}_MEM_CONT_SIZE,
+                               {{ in_name[0]|upper }}_MEM_WRAP_OFFSET,
+                               {{ in_name[0]|upper }}_MEM_WRAP_SIZE,
+                               {{ in_name[0]|upper }}_MEM_STRIDE>
+                               ({{in_name[0]}}, {{out_name[0]}}, {{in_name[2]}}, {{in_name[1]}}, {{ name|upper }}_SCALING);
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/convolution.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/convolution.jinja
index 35aaaa09b916cead955eb3f2ad49dfc296b890af..9fcde23663b5e13d020087e9ce4b097de428babe 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/convolution.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/convolution.jinja
@@ -1 +1 @@
-aidge_conv2d_hwc_{{dataformat}} ({{input_name}}, {{weight_name}}, {{bias_name}}, {{output_name}}, {{name|upper}}_NB_CHANNELS, {{name|upper}}_CHANNELS_WIDTH, {{name|upper}}_CHANNELS_HEIGHT, {{name|upper}}_KERNEL_Y, {{name|upper}}_KERNEL_X, {{name|upper}}_NB_OUTPUTS, {{name|upper}}_OUTPUTS_WIDTH, {{name|upper}}_OUTPUTS_HEIGHT, {{name|upper}}_PADDING_X, {{name|upper}}_PADDING_Y, {{name|upper}}_STRIDE_X, {{name|upper}}_STRIDE_Y, {{name|upper}}_DILATION_X, {{name|upper}}_DILATION_Y);
\ No newline at end of file
+aidge_conv2d_hwc_{{dataformat}} ({{input_name}}, {{weight_name}}, {{in_name[2]}}, {{output_name}}, {{name|upper}}_NB_CHANNELS, {{name|upper}}_CHANNELS_WIDTH, {{name|upper}}_CHANNELS_HEIGHT, {{name|upper}}_KERNEL_Y, {{name|upper}}_KERNEL_X, {{name|upper}}_NB_OUTPUTS, {{name|upper}}_OUTPUTS_WIDTH, {{name|upper}}_OUTPUTS_HEIGHT, {{name|upper}}_PADDING_X, {{name|upper}}_PADDING_Y, {{name|upper}}_STRIDE_X, {{name|upper}}_STRIDE_Y, {{name|upper}}_DILATION_X, {{name|upper}}_DILATION_Y);
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/elemwise.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/elemwise.jinja
index 0f3f1c8758a4d4d8944384973bf90054b5e91fca..c4ca30d6f9ef7cab5fbc3f260b23275feed3b6f0 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/elemwise.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/elemwise.jinja
@@ -1 +1 @@
-aidge_{{elemwise_type|lower}}_{{dataformat}}  ({{inputa_name}}, {{inputb_name}}, {{output_name}}, {{inputa_name}}_DIMS, {{inputb_name}}_DIMS, {{output_name}}_DIMS,{{name|upper}}_INPUT_A_DIMS_SIZE,{{name|upper}}_INPUT_B_DIMS_SIZE,{{name|upper}}_OUTPUT_DIMS_SIZE, {{name|upper}}_OUTPUTS_SIZE);
\ No newline at end of file
+aidge_{{elemwise_type|lower}}_{{dtype[0]}}  ({{in_name[0]}}, {{in_name[1]}}, {{out_name[0]}}, {{in_name[0]|upper}}_DIMS, {{in_name[1]|upper}}_DIMS, {{out_name[0]|upper}}_DIMS,{{in_name[0]|upper}}_NB_DIM,{{in_name[1]|upper}}_NB_DIM,{{out_name[0]|upper}}_NB_DIM, {{name|upper}}_OUTPUTS_SIZE);
diff --git a/aidge_export_arm_cortexm/_N2D2/templates/kernel/fc_kernel.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/fc_kernel.jinja
similarity index 54%
rename from aidge_export_arm_cortexm/_N2D2/templates/kernel/fc_kernel.jinja
rename to aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/fc_kernel.jinja
index 5d252e2dfc808d1dd86c313471d442bf8bef9af6..93637f2cc91a4004ded15aa35db7f19f829b997c 100644
--- a/aidge_export_arm_cortexm/_N2D2/templates/kernel/fc_kernel.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/fc_kernel.jinja
@@ -1,18 +1,18 @@
 N2D2_Export::fccellPropagate<{{ name | upper }}_NB_CHANNELS,
-                             {{ name | upper }}_CHANNELS_HEIGHT, 
+                             {{ name | upper }}_CHANNELS_HEIGHT,
                              {{ name | upper }}_CHANNELS_WIDTH,
                              {{ name | upper }}_NB_OUTPUTS,
-                             {{ name | upper }}_OUTPUTS_HEIGHT, 
+                             {{ name | upper }}_OUTPUTS_HEIGHT,
                              {{ name | upper }}_OUTPUTS_WIDTH,
                              {{ name | upper }}_ACTIVATION,
-                             {{ parent_name | upper }}_MEM_CONT_OFFSET,
-                             {{ parent_name | upper }}_MEM_CONT_SIZE,
-                             {{ parent_name | upper }}_MEM_WRAP_OFFSET,
-                             {{ parent_name | upper }}_MEM_WRAP_SIZE,
-                             {{ parent_name | upper }}_MEM_STRIDE,
+                             {{ out_name[0]|upper }}_MEM_CONT_OFFSET,
+                             {{ out_name[0]|upper }}_MEM_CONT_SIZE,
+                             {{ out_name[0]|upper }}_MEM_WRAP_OFFSET,
+                             {{ out_name[0]|upper }}_MEM_WRAP_SIZE,
+                             {{ out_name[0]|upper }}_MEM_STRIDE,
                              {{ name | upper }}_MEM_CONT_OFFSET,
                              {{ name | upper }}_MEM_CONT_SIZE,
                              {{ name | upper }}_MEM_WRAP_OFFSET,
                              {{ name | upper }}_MEM_WRAP_SIZE,
                              {{ name | upper }}_MEM_STRIDE>
-                             ({{ inputs_name }}, {{ outputs_name }}, {{ biases_name }}, {{ weights_name}}, {{ name | upper }}_SCALING);
+                             ({{ in_name[0] }}, {{ out_name[0] }}, {{ in_name[2] }}, {{ in_name[1] }}, {{ name | upper }}_SCALING);
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/matmul.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/matmul.jinja
index 15ff05fec3bb40332ac7968d20f594009f7903a4..8338cbeed7e5b79ee6311d1844d04f86f92dcb42 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/matmul.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/matmul.jinja
@@ -1 +1 @@
-aidge_matmul_chw_{{dataformat}} ({{inputa_name}}, {{inputb_name}}, {{output_name}}, {{inputa_name}}_DIMS, {{inputb_name}}_DIMS, {{output_name}}_DIMS ,{{name|upper}}_INPUT_A_DIMS_SIZE,{{name|upper}}_INPUT_B_DIMS_SIZE,{{name|upper}}_OUTPUT_DIMS_SIZE);
\ No newline at end of file
+aidge_matmul_chw_{{dataformat}} ({{in_name[0]}}, {{in_name[1]}}, {{out_name[0]}}, {{in_name[0]|upper}}_DIMS, {{in_name[1]|upper}}_DIMS, {{out_name[0]|upper}}_DIMS, {{name|upper}}_INPUT_A_DIMS_SIZE, {{name|upper}}_INPUT_B_DIMS_SIZE, {{name|upper}}_OUTPUT_DIMS_SIZE);
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/pool_kernel.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/pool_kernel.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..95b5b024d4f1d455af9a2ef4167ff5bdfb3b49c0
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/pool_kernel.jinja
@@ -0,0 +1,25 @@
+N2D2_Export::poolcellPropagate<{{ in_name[0]|upper }}_NB_CHANNELS,
+                               {{ in_name[0]|upper }}_IN_HEIGHT,
+                               {{ in_name[0]|upper }}_IN_WIDTH,
+                               {{ out_name[0]|upper }}_NB_OUTPUTS,
+                               {{ out_name[0]|upper }}_OUT_HEIGHT,
+                               {{ out_name[0]|upper }}_OUT_WIDTH,
+                               {{ name|upper }}_PADDING_Y,
+                               {{ name|upper }}_PADDING_X,
+                               {{ name|upper }}_STRIDE_Y,
+                               {{ name|upper }}_STRIDE_X,
+                               {{ name|upper }}_KERNEL_HEIGHT,
+                               {{ name|upper }}_KERNEL_WIDTH,
+                               {{ name|upper }}_POOLING_TYPE,
+                               {{ name|upper }}_ACTIVATION,
+                               {{ out_name[0]|upper }}_MEM_CONT_OFFSET,
+                               {{ out_name[0]|upper }}_MEM_CONT_SIZE,
+                               {{ out_name[0]|upper }}_MEM_WRAP_OFFSET,
+                               {{ out_name[0]|upper }}_MEM_WRAP_SIZE,
+                               {{ out_name[0]|upper }}_MEM_STRIDE,
+                               {{ in_name[0]|upper }}_MEM_CONT_OFFSET,
+                               {{ in_name[0]|upper }}_MEM_CONT_SIZE,
+                               {{ in_name[0]|upper }}_MEM_WRAP_OFFSET,
+                               {{ in_name[0]|upper }}_MEM_WRAP_SIZE,
+                               {{ in_name[0]|upper }}_MEM_STRIDE>
+                               ({{in_name[0]}}, {{out_name[0]}});
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/relu.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/relu.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..cb11a50802e4f713ef23930e88bafd014dbcd616
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/relu.jinja
@@ -0,0 +1 @@
+aidge_relu_float32({{in_name[0]}}, {{out_name[0]}}, {{name|upper}}_OUTPUTS_SIZE);
diff --git a/aidge_export_arm_cortexm/_N2D2/templates/configuration/conv_config.jinja b/aidge_export_arm_cortexm/_N2D2/templates/configuration/conv_config.jinja
deleted file mode 100644
index 1e46543bd51e6bc529000d4a0f48cb8cfb52f831..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_N2D2/templates/configuration/conv_config.jinja
+++ /dev/null
@@ -1,49 +0,0 @@
-{#- For name header -#}
-#ifndef {{ name|upper }}_LAYER_H
-#define {{ name|upper }}_LAYER_H
-
-#include "typedefs.h"
-#include "nn_scaling_functions.hpp"
-
-{# For layer configuration -#}
-// In/Out
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
-
-// Attributes
-#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel[1] }}
-#define {{ name|upper }}_KERNEL_WIDTH {{ kernel[0] }}
-#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
-#define {{ name|upper }}_PADDING_X {{ padding[0] }}
-#define {{ name|upper }}_STRIDE_Y {{ stride[1] }}
-#define {{ name|upper }}_STRIDE_X {{ stride[0] }}
-#define {{ name|upper }}_DILATION_Y {{ dilation[1] }}
-#define {{ name|upper }}_DILATION_X {{ dilation[0] }}
-
-// Activation/Scaling
-#define {{ name|upper }}_ACTIVATION {{ activation }}
-
-{%- if scaling_type == "floating_point" %}
-static const N2D2_Export::FloatingPointScaling {{ name|upper }}_SCALING = { {{scaling_value}} };
-{%- elif scaling_type == "fixed_point" %}
-static const N2D2_Export::FixedPointScaling<{{scaling_value}}, {{fractional_bits}}> {{ name|upper }}_SCALING;
-{%- elif scaling_type == "single_shift" %}
-static const N2D2_Export::SingleShiftScaling<{{shift_value}}> {{ name|upper }}_SCALING;
-{%- else %}
-static const N2D2_Export::NoScaling {{ name|upper }}_SCALING;
-{%- endif %}
-
-{# Calculate sizes -#}
-{%- set weights_size = output_dims[0] * input_dims[0] * kernel[1] * kernel[0] %}
-// Sizes
-#define {{ name|upper }}_WEIGHTS_SIZE {{ weights_size }}
-#define {{ name|upper }}_BIASES_SIZE {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_SIZE {{ output_dims[0] * output_dims[1] * output_dims[2] }}
-#define {{ name|upper }}_CHANNELS_SIZE {{ input_dims[0] * input_dims[1] * input_dims[2] }}
-
-
-#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_N2D2/templates/configuration/pool_config.jinja b/aidge_export_arm_cortexm/_N2D2/templates/configuration/pool_config.jinja
deleted file mode 100644
index f514b375d67e39f5f254862b5ec3dadb4b6c61d0..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_N2D2/templates/configuration/pool_config.jinja
+++ /dev/null
@@ -1,27 +0,0 @@
-{#- For name header -#}
-#ifndef {{ name|upper }}_LAYER_H
-#define {{ name|upper }}_LAYER_H
-
-#include "typedefs.h"
-
-{# For layer configuration -#}
-// In/Out
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
-
-// Attributes
-#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel[1] }}
-#define {{ name|upper }}_KERNEL_WIDTH {{ kernel[0] }}
-#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
-#define {{ name|upper }}_PADDING_X {{ padding[0] }}
-#define {{ name|upper }}_STRIDE_Y {{ stride[1] }}
-#define {{ name|upper }}_STRIDE_X {{ stride[0] }}
-
-#define {{ name|upper }}_POOLING_TYPE {{ pool_type }}
-#define {{ name|upper }}_ACTIVATION {{ activation }}
-
-#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_N2D2/templates/kernel/conv_kernel.jinja b/aidge_export_arm_cortexm/_N2D2/templates/kernel/conv_kernel.jinja
deleted file mode 100644
index 5a0798ea14a0acc4f8d8f0e08cf8c4ee4ef8a5b0..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_N2D2/templates/kernel/conv_kernel.jinja
+++ /dev/null
@@ -1,19 +0,0 @@
-N2D2_Export::convcellPropagate<{{ name | upper }}_NB_CHANNELS,
-                               {{ name | upper }}_CHANNELS_HEIGHT, {{ name | upper }}_CHANNELS_WIDTH,
-                               {{ name | upper }}_NB_OUTPUTS,
-                               {{ name | upper }}_OUTPUTS_HEIGHT, {{ name | upper }}_OUTPUTS_WIDTH,
-                               {{ name | upper }}_PADDING_Y, {{ name | upper }}_PADDING_X,
-                               {{ name | upper }}_STRIDE_Y, {{ name | upper }}_STRIDE_X,
-                               {{ name | upper }}_KERNEL_HEIGHT, {{ name | upper }}_KERNEL_WIDTH,
-                               {{ name | upper }}_ACTIVATION,
-                               {{ parent_name | upper }}_MEM_CONT_OFFSET,
-                               {{ parent_name | upper }}_MEM_CONT_SIZE,
-                               {{ parent_name | upper }}_MEM_WRAP_OFFSET,
-                               {{ parent_name | upper }}_MEM_WRAP_SIZE,
-                               {{ parent_name | upper }}_MEM_STRIDE,
-                               {{ name | upper }}_MEM_CONT_OFFSET,
-                               {{ name | upper }}_MEM_CONT_SIZE,
-                               {{ name | upper }}_MEM_WRAP_OFFSET,
-                               {{ name | upper }}_MEM_WRAP_SIZE,
-                               {{ name | upper }}_MEM_STRIDE>
-                               ({{ inputs_name }}, {{ outputs_name }}, {{ biases_name }}, {{ weights_name}}, {{ name | upper }}_SCALING);
diff --git a/aidge_export_arm_cortexm/_N2D2/templates/kernel/pool_kernel.jinja b/aidge_export_arm_cortexm/_N2D2/templates/kernel/pool_kernel.jinja
deleted file mode 100644
index d1207bd6575dc1f20f364af9d1365923c09a66b2..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_N2D2/templates/kernel/pool_kernel.jinja
+++ /dev/null
@@ -1,20 +0,0 @@
-N2D2_Export::poolcellPropagate<{{ name | upper }}_NB_CHANNELS,
-                               {{ name | upper }}_CHANNELS_HEIGHT, {{ name | upper }}_CHANNELS_WIDTH,
-                               {{ name | upper }}_NB_OUTPUTS,
-                               {{ name | upper }}_OUTPUTS_HEIGHT, {{ name | upper }}_OUTPUTS_WIDTH,
-                               {{ name | upper }}_PADDING_Y, {{ name | upper }}_PADDING_X,
-                               {{ name | upper }}_STRIDE_Y, {{ name | upper }}_STRIDE_X,
-                               {{ name | upper }}_KERNEL_HEIGHT, {{ name | upper }}_KERNEL_WIDTH,
-                               {{ name | upper }}_POOLING_TYPE,
-                               {{ name | upper }}_ACTIVATION,
-                               {{ parent_name | upper }}_MEM_CONT_OFFSET,
-                               {{ parent_name | upper }}_MEM_CONT_SIZE,
-                               {{ parent_name | upper }}_MEM_WRAP_OFFSET,
-                               {{ parent_name | upper }}_MEM_WRAP_SIZE,
-                               {{ parent_name | upper }}_MEM_STRIDE,
-                               {{ name | upper }}_MEM_CONT_OFFSET,
-                               {{ name | upper }}_MEM_CONT_SIZE,
-                               {{ name | upper }}_MEM_WRAP_OFFSET,
-                               {{ name | upper }}_MEM_WRAP_SIZE,
-                               {{ name | upper }}_MEM_STRIDE>
-                               ({{ inputs_name }}, {{ outputs_name }});
diff --git a/aidge_export_arm_cortexm/__init__.py b/aidge_export_arm_cortexm/__init__.py
index 681245bac492c2fe30eb9f3ad963c424d1f2dcc7..38bb61f7b153b0a6fc0cc2441453040170e99c7f 100644
--- a/aidge_export_arm_cortexm/__init__.py
+++ b/aidge_export_arm_cortexm/__init__.py
@@ -7,3 +7,5 @@ This module has to be used with the Aidge suite
 __version__ = "0.0.1"
 
 from .export import *
+from .export_registry import ExportLibAidgeARM, ExportLibCMSISNN
+from .operators import *
diff --git a/aidge_export_arm_cortexm/export.py b/aidge_export_arm_cortexm/export.py
index 3e8e2d520cb1b899737de7aae689254b9384aa1b..979e19458860d05299a54d9591e8a9fc62885812 100644
--- a/aidge_export_arm_cortexm/export.py
+++ b/aidge_export_arm_cortexm/export.py
@@ -1,19 +1,11 @@
-import re
 import os
 import shutil
 from pathlib import Path
-import numpy as np
-from aidge_core.export_utils.data_conversion import aidge2c
-from aidge_core.export_utils.code_generation import *
-from aidge_export_arm_cortexm.utils import (ROOT, AVAILABLE_BOARDS, has_board, \
-                                            OPERATORS_REGISTRY, supported_operators)
-import aidge_export_arm_cortexm.operators
-from aidge_export_arm_cortexm.utils.scheduler import topological_sort
-from aidge_export_arm_cortexm.utils.generation import get_functions_from_c_file, get_functions_from_c_folder, get_filenames_from_folder
-from aidge_export_arm_cortexm.utils.converter import *
-from aidge_export_arm_cortexm.memory import *
-
-
+from aidge_export_arm_cortexm.utils import (ROOT, AVAILABLE_BOARDS, has_board)
+from aidge_export_arm_cortexm.export_registry import ExportLibAidgeARM
+from aidge_core.mem_info import compute_default_mem_info
+from aidge_core.export_utils import scheduler_export
+# from aidge_core.export_utils.code_generation import *
 
 def export(export_folder_name,
            graphview,
@@ -22,6 +14,12 @@ def export(export_folder_name,
            library:str = "aidge",
            mem_wrapping = False):
 
+    scheduler_export(
+        scheduler,
+        export_folder_name,
+        ExportLibAidgeARM,
+        memory_manager=compute_default_mem_info
+    )
     # Create export directory
     export_folder = Path().absolute() / export_folder_name
     os.makedirs(str(export_folder), exist_ok=True)
@@ -32,131 +30,125 @@ def export(export_folder_name,
 
     # Determine which board the user wants
     # to select correct config
-    if has_board(board):
-        board_path = AVAILABLE_BOARDS[board]
-    else:
-        raise ValueError(f"{board} not found in the package. Please among those boards: {list(AVAILABLE_BOARDS.keys())}")
-
+    board_path = ROOT / "boards" / "stm32" / "H7"
     # Copy all static files in the export
     shutil.copytree(board_path, str(export_folder), dirs_exist_ok=True)
-
     # For N2D2 library, copy static folder to export/include
-    if library == "n2d2":
-        dnn_include_folder = dnn_folder / "include"
-        os.makedirs(str(dnn_include_folder), exist_ok=True)
-        shutil.copytree(str(ROOT / "_N2D2" / "static"), str(dnn_include_folder), dirs_exist_ok=True)
-
-    # Create statistics directory
-    stats_folder = export_folder / "statistics"
-    os.makedirs(str(stats_folder), exist_ok=True)
-
-    # Sort layers according to a scheduler
-    if not isinstance(scheduler, aidge_core.Scheduler):
-        # No scheduler provided by the user, use the default one
-        list_forward_nodes = topological_sort(graphview)
-        mem_size, mem_info = compute_default_mem_info(list_forward_nodes)
-    else:
-        list_forward_nodes = scheduler.get_static_scheduling()
-        mem_size, mem_info = generate_optimized_memory_info(stats_folder, scheduler, mem_wrapping)
+    dnn_include_folder = dnn_folder / "include"
+    os.makedirs(str(dnn_include_folder), exist_ok=True)
+    shutil.copytree(str(ROOT / "_N2D2" / "static"), str(dnn_include_folder), dirs_exist_ok=True)
+
+    # # Create statistics directory
+    # stats_folder = export_folder / "statistics"
+    # os.makedirs(str(stats_folder), exist_ok=True)
+
+    # # Sort layers according to a scheduler
+    # if not isinstance(scheduler, aidge_core.Scheduler):
+    #     # No scheduler provided by the user, use the default one
+    #     list_forward_nodes = topological_sort(graphview)
+    #     mem_size, mem_info = compute_default_mem_info(list_forward_nodes)
+    # else:
+    #     list_forward_nodes = scheduler.get_static_scheduling()
+    #     mem_size, mem_info = generate_optimized_memory_info(stats_folder, scheduler, mem_wrapping)
 
     # Set some lists of elements for generating forward file
-    list_actions = []
-    list_configs = []
+    # list_actions = []
+    # list_configs = []
 
-    # Export layer configurations
-    for node in list_forward_nodes:
-        if node.type() == "Producer":
-            # We do not treat Producer here but i the nodes which use them
-            continue
+    # # Export layer configurations
+    # for node in list_forward_nodes:
+    #     if node.type() == "Producer":
+    #         # We do not treat Producer here but i the nodes which use them
+    #         continue
 
-        if node.type() in supported_operators():
-            op = OPERATORS_REGISTRY[node.type()](node, board, library)
-            # Export the configuration
-            list_configs = op.export(dnn_folder, list_configs)
+    #     if node.type() in supported_operators():
+    #         op = OPERATORS_REGISTRY[node.type()](node, board, library)
+    #         # Export the configuration
+    #         list_configs = op.export(dnn_folder, list_configs)
 
-            # Add forward kernel
-            list_actions = op.forward(list_actions)
-        else:
-            print(f"Warning: {node.type()} is not supported in the export.\nPlease add the implementation.")
+    #         # Add forward kernel
+    #         list_actions = op.forward(list_actions)
+    #     else:
+    #         print(f"Warning: {node.type()} is not supported in the export.\nPlease add the implementation.")
 
     # Generate the memory file
-    generate_file(
-        str(dnn_folder / "memory" / "mem_info.h"),
-        str(ROOT / "templates" / "memory" / "mem_info.jinja"),
-        mem_size = mem_size,
-        mem_info_legends = MEMORY_INFO_TEMPLATE,
-        mem_info = mem_info,
-        mem_alignment = 1  # Fixed memory alignement so far, feel free to adapt it
-    )
-    list_configs.append("memory/mem_info.h")
+    # generate_file(
+    #     str(dnn_folder / "memory" / "mem_info.h"),
+    #     str(ROOT / "templates" / "memory" / "mem_info.jinja"),
+    #     mem_size = mem_size,
+    #     mem_info_legends = MEMORY_INFO_TEMPLATE,
+    #     mem_info = mem_info,
+    #     mem_alignment = 1  # Fixed memory alignement so far, feel free to adapt it
+    # )
+    # list_configs.append("memory/mem_info.h")
 
     # Get entry nodes
     # It supposes the entry nodes are producers with constant=false
     # Store the datatype & name
-    list_inputs_name = []
-    first_element_added = False
-    for node in graphview.get_nodes():
-        if node.type() == "Producer":
-            if not first_element_added:
-                    export_type = aidge2c(node.get_operator().get_output(0).dtype())
-                    list_inputs_name.append((export_type, node.name()))
-                    first_element_added = True
-            if not node.get_operator().attr.constant:
-                export_type = aidge2c(node.get_operator().get_output(0).dtype())
-                list_inputs_name.append((export_type, node.name()))
+    # list_inputs_name = []
+    # first_element_added = False
+    # for node in graphview.get_nodes():
+    #     if node.type() == "Producer":
+    #         if not first_element_added:
+    #                 export_type = aidge2c(node.get_operator().get_output(0).dtype())
+    #                 list_inputs_name.append((export_type, node.name()))
+    #                 first_element_added = True
+    #         if not node.get_operator().attr.constant:
+    #             export_type = aidge2c(node.get_operator().get_output(0).dtype())
+    #             list_inputs_name.append((export_type, node.name()))
 
     # Get output nodes
     # Store the datatype & name, like entry nodes
 
-    list_outputs_name = []
-    for node in graphview.get_nodes():
-        if len(node.get_children()) == 0:
-            if node.get_operator().attr.has_attr('dtype'):
-                # Temporary fix because impossible to set DataType of a generic operator
-                export_type = aidge2c(node.get_operator().attr.dtype)
-            else:
-                export_type = aidge2c(node.get_operator().get_output(0).dtype())
-
-            list_outputs_name.append((export_type, node.name()))
-
-    if library == "n2d2":
-        forward_file = "forward.cpp"
-    else:
-        forward_file = "forward.c"
-
-    # Generate forward file
-    generate_file(
-        str(dnn_folder / "src" / forward_file),
-        str(ROOT / "templates" / "network" / "network_forward.jinja"),
-        headers=set(list_configs),
-        actions=list_actions,
-        inputs= list_inputs_name,
-        outputs=list_outputs_name
-    )
-
-    # Generate dnn internal API
-    if library == "aidge":
-        # For Aidge, parse all kernels source code and retrieve function prototypes
-        generate_file(
-            str(dnn_folder / "include" / "network_functions.h"),
-            str(ROOT / "templates" / "network" / "network_prototypes.jinja"),
-            libraries=[],
-            functions=get_functions_from_c_folder(str(dnn_folder / "src" / "kernels")),
-        )
-    elif library == "n2d2":
-        # For N2D2, parse all the files in include/kernel/ and retrieve the names of the files
-        generate_file(
-            str(dnn_folder / "include" / "network_functions.h"),
-            str(ROOT / "templates" / "network" / "network_prototypes.jinja"),
-            libraries=[],
-            files=[str(Path("kernels") / x) for x in get_filenames_from_folder(str(dnn_folder / "include" / "kernels"), r'^.*\.hpp$')],
-        )
-
-    # Generate dnn API
-    generate_file(
-        str(dnn_folder / "include" / "dnn.h"),
-        str(ROOT / "templates" / "network" / "dnn_header.jinja"),
-        libraries=["stdint.h"],
-        functions=get_functions_from_c_file(str(dnn_folder / "src" / forward_file)),
-    )
+    # list_outputs_name = []
+    # for node in graphview.get_nodes():
+    #     if len(node.get_children()) == 0:
+    #         if node.get_operator().attr.has_attr('dtype'):
+    #             # Temporary fix because impossible to set DataType of a generic operator
+    #             export_type = aidge2c(node.get_operator().attr.dtype)
+    #         else:
+    #             export_type = aidge2c(node.get_operator().get_output(0).dtype())
+
+    #         list_outputs_name.append((export_type, node.name()))
+
+    # if library == "n2d2":
+    #     forward_file = "forward.cpp"
+    # else:
+    #     forward_file = "forward.c"
+
+    # # Generate forward file
+    # generate_file(
+    #     str(dnn_folder / "src" / forward_file),
+    #     str(ROOT / "templates" / "network" / "network_forward.jinja"),
+    #     headers=set(list_configs),
+    #     actions=list_actions,
+    #     inputs= list_inputs_name,
+    #     outputs=list_outputs_name
+    # )
+
+    # # Generate dnn internal API
+    # if library == "aidge":
+    #     # For Aidge, parse all kernels source code and retrieve function prototypes
+    #     generate_file(
+    #         str(dnn_folder / "include" / "network_functions.h"),
+    #         str(ROOT / "templates" / "network" / "network_prototypes.jinja"),
+    #         libraries=[],
+    #         functions=get_functions_from_c_folder(str(dnn_folder / "src" / "kernels")),
+    #     )
+    # elif library == "n2d2":
+    #     # For N2D2, parse all the files in include/kernel/ and retrieve the names of the files
+    #     generate_file(
+    #         str(dnn_folder / "include" / "network_functions.h"),
+    #         str(ROOT / "templates" / "network" / "network_prototypes.jinja"),
+    #         libraries=[],
+    #         files=[str(Path("kernels") / x) for x in get_filenames_from_folder(str(dnn_folder / "include" / "kernels"), r'^.*\.hpp$')],
+    #     )
+
+    # # Generate dnn API
+    # generate_file(
+    #     str(dnn_folder / "include" / "dnn.h"),
+    #     str(ROOT / "templates" / "network" / "dnn_header.jinja"),
+    #     libraries=["stdint.h"],
+    #     functions=get_functions_from_c_file(str(dnn_folder / "src" / forward_file)),
+    # )
 
diff --git a/aidge_export_arm_cortexm/export_registry.py b/aidge_export_arm_cortexm/export_registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..f51867dea0b894548fd9472d32062e860e89c7c2
--- /dev/null
+++ b/aidge_export_arm_cortexm/export_registry.py
@@ -0,0 +1,10 @@
+from aidge_core.export_utils import ExportLib
+
+class ExportLibAidgeARM(ExportLib):
+    _name="aidge_arm"
+    static_files={
+    }
+
+class ExportLibCMSISNN(ExportLib):
+    _name="export_cmsisnn"
+
diff --git a/aidge_export_arm_cortexm/memory.py b/aidge_export_arm_cortexm/memory.py
deleted file mode 100644
index 7f7983fc7898bbd2d7fa383ecc0b5f16f290918f..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/memory.py
+++ /dev/null
@@ -1,130 +0,0 @@
-import os
-import shutil
-from typing import List
-from pathlib import Path
-import aidge_core
-import aidge_backend_cpu
-
-# for each layer, 
-# name              [size, stride, length, count, contigious offset, contigious size, wrapping offset, wrapping size]
-# true values       [nb_outputs, nb_outputs, width, width, offset start, total size, 0, 0]
-# Example:
-#define ENV_MEM_SIZE 3
-#define ENV_MEM_STRIDE 3
-#define ENV_MEM_LENGTH 224
-#define ENV_MEM_COUNT 224
-#define ENV_MEM_CONT_OFFSET 0
-#define ENV_MEM_CONT_SIZE 150528
-#define ENV_MEM_WRAP_OFFSET 0
-#define ENV_MEM_WRAP_SIZE 0
-MEMORY_INFO_TEMPLATE = ["layer_name", "size", "stride", "length", "count", "cont_offset", "cont_size", "wrap_offset", "wrap_size"]
-
-# for each layer, name: [size, offset start] (old style)
-# Example:
-#define ENV_MEM_SIZE 3
-#define ENV_OFFSET 0
-# MEMORY_INFO_TEMPLATE = ["layer_name", "size", "offset"]
-
-
-# Default memory management, which can be used for development
-def compute_default_mem_info(scheduler: aidge_core.Scheduler):
-    
-    list_forward_nodes = scheduler
-    mem_info = []
-    mem_size = 0
-
-    # Exclude Producers and the last layers (because the results are stored outside the export)
-    for i, node in enumerate(list_forward_nodes):
-        if node.type() != "Producer" and node.type() != "Reshape":
-        # if node.type() != "Producer":
-            if len(node.get_children()) != 0:
-                dims = node.get_operator().get_output(0).dims()
-                mem = 1
-                for dim in dims:
-                    mem *= dim
-
-                # Add memory info
-                # Only size and cont_offset matter
-                mem_info.append([node.name(), mem, 0, 0, 0, mem_size, mem, 0, 0])
-                
-                # Increment offset for the next layer
-                mem_size += mem
-
-    return mem_size, mem_info
-
-
-def generate_optimized_memory_info(stats_folder: Path,
-                                   scheduler: aidge_core.Scheduler,
-                                   wrapping:bool = False):
-    
-    # The forward dims has to done outside the function
-    # Also supposed the generation of the scheduler has been performed outside
-    # Otherwise decomment the following line
-    # scheduler.generate_scheduling()
-
-    # Generate the memory manager
-    # So far, the Producers are not take in consideration in the meory manager => inc_producers=False
-    mem_manager = scheduler.generate_memory(inc_producers=False, wrap_around_buffer=wrapping)
-
-    # In the export, we currently use an unified memory buffer whose size 
-    # is determined by the memory peak usage
-    mem_size = mem_manager.get_peak_usage()
-    mem_info = []
-
-    mem_planes = mem_manager.get_planes()
-
-    for node in scheduler.get_static_scheduling():
-
-        # Skip memory management for the parameter producers
-        if node.type() == "Producer":
-            if node.get_operator().attr.constant:
-                continue
-            else:
-                # Input memory management (suppose tensor ends with [:, channel, height, width]))
-                tensor = node.get_operator().get_output(0)
-                if tensor is None:
-                    raise RuntimeError("Warning input producer not provided")
-                if len(tensor.dims()) < 3:
-                    raise RuntimeError("Input producer dimensions must be with [:, channel, height, width]")
-
-                name = node.name()
-                size = tensor.dims()[-3]    # Should be nb_channels
-                stride = tensor.dims()[-3]  # Should be nb_channels
-                length = tensor.dims()[-1]  # Should be width
-                count = tensor.dims()[-2]   # Should be height
-                cont_offset = 0             # Suppose input data is stored outside the export function
-                                            # so the memory offset is not important to consider
-                cont_size = tensor.dims()[-1] * tensor.dims()[-2] * tensor.dims()[-3] # Size of input
-                wrap_offset = 0     # No wrapping
-                wrap_size = 0       # No wrapping
-        # elif node.type() != "Reshape":
-        else:
-            plane = mem_planes[node][0]
-
-            name = node.name()
-            size = plane.size
-            stride = plane.stride
-            length = plane.length
-            count = plane.count
-            cont_offset = plane.get_contiguous_offset()
-            cont_size = plane.get_contiguous_size()
-            wrap_offset = plane.get_wrapped_offset()
-            wrap_size = plane.get_wrapped_size()
-
-        mem_info.append([name, size, stride, length, count, 
-                        cont_offset, cont_size, wrap_offset, wrap_size])
-
-    # Use gnuplot to generate the log
-    try:
-        os.makedirs(str(stats_folder / "graph"), exist_ok=True)
-        mem_manager.log("memory_info")
-        os.chmod("memory_info_plot.gnu", 0o777)
-        os.system("./memory_info_plot.gnu")
-        shutil.move("memory_info", str(stats_folder / "graph"/ "memory_info"))
-        shutil.move("memory_info_plot.png", str(stats_folder / "graph" / "memory_info_plot.png"))
-        os.remove("memory_info_plot.gnu")
-    except:
-        print("Please install gnuplot if you want memory plot from MemoryManager.")
-
-
-    return mem_size, mem_info
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/operators.py b/aidge_export_arm_cortexm/operators.py
index d6d6836b296516dee62f9fb77dda389e68cd6a5a..63bb7d341863d7881d4454fc802736b16896d19c 100644
--- a/aidge_export_arm_cortexm/operators.py
+++ b/aidge_export_arm_cortexm/operators.py
@@ -1,38 +1,16 @@
 import os
 import math
-import shutil
 import numpy as np
 from pathlib import Path
-from jinja2 import Environment, FileSystemLoader
-from typing import Tuple, List, Union, Dict
+from typing import Tuple, List
 
-import aidge_core
-from aidge_core import ExportNode
+# import aidge_core
+from aidge_core.export_utils import ExportNode, ExportNodeCpp, operator_register
 from aidge_core.export_utils.code_generation import *
-from aidge_core.export_utils.data_conversion import aidge2c
-
-from aidge_export_arm_cortexm.utils import ROOT, operator_register
-from aidge_export_arm_cortexm.utils.converter import numpy_dtype2ctype, aidge_datatype2dataformat, aidge2c
+from aidge_export_arm_cortexm.utils import ROOT
+from aidge_export_arm_cortexm.utils.converter import numpy_dtype2ctype
 from aidge_export_arm_cortexm.utils.generation import *
-
-##############################################
-################### Utils ####################
-##############################################
-
-def get_node_parents(node):
-    parents = []
-    for parent in node.get_parents():
-        if parent.type() != "Producer":
-            parents.append(parent)
-    return parents
-
-def get_producer_parents(node):
-    parents = []
-    for parent in node.get_parents():
-        if parent.type() == "Producer":
-            parents.append(parent)
-    return parents
-
+from aidge_export_arm_cortexm.export_registry import ExportLibAidgeARM
 
 ##############################################
 ############## Export functions ##############
@@ -66,37 +44,32 @@ def set_up_output(name, dtype):
     return f"{dtype}* {name} = ({dtype}*) mem + {name.upper()}_MEM_CONT_OFFSET;"
 
 
-##############################################
-############## Operators helper ##############
-##############################################
 
+@operator_register(ExportLibAidgeARM, "Producer")
+class Producer_ARMCortexM(ExportNode):
 
-class Producer_ARMCortexM:
-    def __init__(self, node):
-        self.name = node.name()
-        self.operator = node.get_operator()
-        self.constant = self.operator.attr.constant
+    def __init__(self, node, mem_info, is_input, is_output):
+        super().__init__(node, mem_info, is_input, is_output)
         self.values = np.array(self.operator.get_output(0))
 
-    def export(self, export_file:Path, format:str = "NHWC"):
-
-        if (len(self.values.shape) == 4):
-            # Suppose original dataformat is NCHW
-            if format == "NCHW":
-                export_params(self.name,
-                              self.values.reshape(-1),
-                              str(export_file))
-            elif format == "NHWC":
-                export_params(self.name,
-                              np.transpose(self.values, (0, 2, 3, 1)).reshape(-1),
-                              str(export_file))
-            else:
-                raise RuntimeError("Producer format export not supported.")
-        else:
-            export_params(self.name,
-                          self.values.reshape(-1),
-                          str(export_file))
+        if len(self.values.shape) == 4:  # Note: export in HWC
+            self.values = np.transpose(self.values, (0, 2, 3, 1))
+
+    def export(self, export_folder: Path):
+        header_path = f"include/parameters/{self.attributes['name']}.h"
+        export_params(
+            self.attributes['out_name'][0],
+            self.values.reshape(-1),
+            str(export_folder / header_path))
+        return [header_path]
+
+    def forward(self):
+        # A Producer does nothing during forward
+        return []
 
+    @classmethod
+    def exportable(cls, node):
+        return True # TODO add check i/o NCHW
 
 class Scaling():
     class ScalingMode:
@@ -204,1035 +177,441 @@ class Scaling():
         return self.scaling
 
 
-@operator_register("ReLU")
-class ReLU_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Activation" / "Relu" / "aidge_relu_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "activation.jinja"),
-                name=self.name,
-                activation_type="\"RELU\"",
-                nb_inputs=np.prod(self.inputs_dims[0]),
-                nb_outputs=np.prod(self.outputs_dims[0]))
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "activation.jinja"),
-                name=self.name,
-                activation_type="relu",
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name
-            ))
-
-        return list_actions
-
-
-@operator_register("Conv")
-class Conv_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
+@operator_register(ExportLibAidgeARM, "ReLU")
+class ReLU_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node)
+        super().__init__(node, mem_info, is_input, is_output)
-
-        self.producers = []
-        # Exclude first input which is a real input
-        for i in range(1, len(node.inputs())):
-            producer = node.input(i)[0]
-            self.producers.append(Producer_ARMCortexM(producer))
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
-        self.scaling = Scaling()("no_scaling")
-        self.activation = "Linear"
-
-        self.kernel = node.get_operator().attr.kernel_dims
-        self.stride = node.get_operator().attr.stride_dims
-        self.dilation = node.get_operator().attr.dilation_dims
-
+        self.attributes["activation_type"] = "RELU"
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "relu.jinja")
+
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "relu.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Activation" / "Relu" / "aidge_relu_float32.c"),
+        ]
+    @classmethod
+    def exportable(cls, node):
+        return True # TODO add check i/o NCHW
+
+@operator_register(ExportLibAidgeARM, "Conv")
+class Conv_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, is_input, is_output):
+        super().__init__(node, mem_info, is_input, is_output)
+        self.attributes["activation"] = "Linear"
+        self.attributes.update(Scaling()("no_scaling"))
         # No padding with Conv
         # Use PaddedConv to add padding attribute
-        self.padding = [0, 0]
-
-        self.nb_channels = node.get_operator().in_channels()
-        self.nb_outputs = node.get_operator().out_channels()
-        if self.inputs[0] is None :
-            raise RuntimeError("")
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
-
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        # Export weights to NHWC format
-        self.producers[0].export(export_folder / "parameters" / f"{self.producers[0].name}.h")
-        list_configs.append(f"parameters/{self.producers[0].name}.h")
-
-        # Export biases
-        if (len(self.producers) > 1):
-            # Convert the biases to int32
-            if self.dataformat != "float32":
-                self.producers[1].values = self.producers[1].values.astype(np.int32)
-
-            self.producers[1].export(export_folder / "parameters" / f"{self.producers[1].name}.h")
-            list_configs.append(f"parameters/{self.producers[1].name}.h")
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "aidge_conv2d_hwc_float32.c"),
-                         str(Path(export_folder) / "src" / "kernels"))
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
-                         str(Path(export_folder) / "include"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "convolution.jinja"),
-                name=self.name,
-                input_dims=self.inputs_dims[0],
-                output_dims=self.outputs_dims[0],
-                kernel=self.kernel,
-                stride=self.stride,
-                padding=self.padding,
-                dilation=self.dilation)
-
-        elif self.library == "n2d2":
-            # Export configuration file
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_N2D2" / "templates" / "configuration" / "conv_config.jinja"),
-                name=self.name,
-                input_dims=self.inputs_dims[0],
-                output_dims=self.outputs_dims[0],
-                kernel=self.kernel,
-                stride=self.stride,
-                padding=self.padding,
-                dilation=self.dilation,
-                activation=self.activation,
-                **self.scaling)
-
-        return list_configs
-
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "convolution.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name,
-                weight_name=self.inputs[1].name(),
-                bias_name=self.inputs[2].name()
-            ))
-
-        elif self.library == "n2d2":
-            list_actions.append(generate_str(
-                str(ROOT / "_N2D2" / "templates" / "kernel" / "conv_kernel.jinja"),
-                name=self.name,
-                parent_name=self.inputs[0].name(),
-                inputs_name=self.inputs[0].name(),
-                weights_name=self.inputs[1].name(),
-                biases_name=self.inputs[2].name(),
-                outputs_name=self.name
-            ))
-
-        return list_actions
-
-
-@operator_register("PaddedConv")
-class PaddedConv_ARMCortexM(Conv_ARMCortexM):
-    def __init__(self, node, board, library):
-        ExportNode.__init__(self, node)
-
-        self.producers = []
-        # Exclude first input which is a real input
-        for i in range(1, len(node.inputs())):
-            producer = node.input(i)[0]
-            self.producers.append(Producer_ARMCortexM(producer))
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
-        self.scaling = Scaling()("no_scaling")
-        self.activation = "Linear"
-
+        self.attributes["padding"] = [0, 0]
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "conv_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = []
+    @classmethod
+    def exportable(cls, node):
+        return True # TODO add check i/o NCHW
+
+@operator_register(ExportLibAidgeARM, "PaddedConv")
+class PaddedConv_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, is_input, is_output):
+        super().__init__(node, mem_info, is_input, is_output)
+        self.attributes["activation"] = "Linear"
+        self.attributes.update(Scaling()("no_scaling"))
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad":
-                self.padding = n.get_operator().attr.begin_end_borders
+                self.attributes["padding"] = n.get_operator(
+                ).attr.begin_end_borders
             if n.type() == "Conv":
-                self.kernel = n.get_operator().attr.kernel_dims
-                self.stride = n.get_operator().attr.stride_dims
-                self.dilation = n.get_operator().attr.dilation_dims
-
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
+                self.attributes["kernel_dims"] = n.get_operator(
+                ).attr.kernel_dims
+                self.attributes["stride_dims"] = n.get_operator(
+                ).attr.stride_dims
+                self.attributes["dilation_dims"] = n.get_operator(
+                ).attr.dilation_dims
 
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "conv_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = []
+    @classmethod
+    def exportable(cls, node):
+        return True # TODO add check i/o NCHW
 
-@operator_register("ConvReluScaling")
-class ConvReluScaling_ARMCortexM(Conv_ARMCortexM):
-     def __init__(self, node, board, library):
-        super(Conv_ARMCortexM, self).__init__(node, board, library)
 
-        if self.operator.has_attr("Begin_End_Borders"):
-            self.padding = self.operator.attr.begin_end_borders
-
-        self.activation = "Rectifier"
-
-        # Should do this line but there is a bug while changing the dtype of generic operator...
-        # self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
-        # Do this instead
-        if self.operator.attr.quantized_nb_bits == 8:
-            if self.operator.attr.is_output_unsigned:
-                self.dtype = aidge2c(aidge_core.dtype.uint8)
-            else:
-                self.dtype = aidge2c(aidge_core.dtype.int8)
 
-        # Impose Single Shift (perhaps change it to have a more modular system)
-        self.scaling = Scaling(self.operator.attr.scaling_factor,
-                               self.operator.attr.quantized_nb_bits)("floating_point")
-
-
-class Pooling_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
-        self.pool_type = "None"
-        self.activation = "Linear"
-
-        self.kernel = node.get_operator().attr.kernel_dims
-        self.stride = node.get_operator().attr.stride_dims
 
+class Pooling_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, is_input, is_output):
+        super().__init__(node, mem_info, is_input, is_output)
+        self.attributes["activation"] = "Linear"
+        self.attributes["pool_type"] = "None"
         # No padding with MaxPooling or AvgPooling
         # Use PaddedMaxPooling/PaddedAvgPooling to add padding attribute
-        self.padding = [0, 0]
-
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
-
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
+        self.attributes["padding"] = [0, 0]
 
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Pooling" / "aidge_maxpool2d_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
-                         str(Path(export_folder) / "include"))
-
-            # Export configuration file
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "pooling.jinja"),
-                name=self.name,
-                input_dims=self.inputs_dims[0],
-                output_dims=self.outputs_dims[0],
-                kernel=self.kernel,
-                stride=self.stride,
-                padding=self.padding,
-                pool_type=self.pool_type)
-
-
-        elif self.library == "n2d2":
-
-            # Nothing to copy
-
-            # Export configuration file
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_N2D2" / "templates" / "configuration" / "pool_config.jinja"),
-                name=self.name,
-                input_dims=self.inputs_dims[0],
-                output_dims=self.outputs_dims[0],
-                kernel=self.kernel,
-                stride=self.stride,
-                padding=self.padding,
-                pool_type=self.pool_type,
-                activation=self.activation)
-
-
-        return list_configs
-
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "pooling.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                pool_type=self.pool_type.lower(),
-                input_name=self.inputs[0].name(),
-                output_name=self.name
-            ))
-
-        elif self.library == "n2d2":
-
-            list_actions.append(generate_str(
-                str(ROOT / "_N2D2" / "templates" / "kernel" / "pool_kernel.jinja"),
-                name=self.name,
-                parent_name=self.inputs[0].name(),
-                inputs_name=self.inputs[0].name(),
-                outputs_name=self.name
-            ))
-
-        return list_actions
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "pool_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "pool_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = []
+        self.kernel = node.get_operator().attr.kernel_dims
+        self.stride = node.get_operator().attr.stride_dims
+    @classmethod
+    def exportable(cls, node):
+        return True # TODO add check i/o NCHW
+
+@operator_register(ExportLibAidgeARM, "FC")
+class FC_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, is_input, is_output):
+        super().__init__(node, mem_info, is_input, is_output)
+        self.attributes["activation"] = "Linear"
+        self.attributes.update(Scaling()("no_scaling"))
+        # FC has no spatial padding; the attribute is fixed at zero.
+        # NOTE(review): kept for template compatibility — confirm fc_config.jinja reads it.
+        self.attributes["padding"] = [0, 0]
 
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "fc_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "fc_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = []
+    @classmethod
+    def exportable(cls, node):
+        return True # TODO add check i/o NCHW
 
-@operator_register("MaxPooling")
+@operator_register(ExportLibAidgeARM, "MaxPooling")
 class MaxPooling_ARMCortexM(Pooling_ARMCortexM):
-    def __init__(self, node, board, library):
-        super().__init__(node, board, library)
-        self.pool_type = "Max"
-
+    def __init__(self, node, mem_info, is_input, is_output):
+        super().__init__(node, mem_info, is_input, is_output)
+        self.attributes["pool_type"] = "Max"
 
-@operator_register("AvgPooling")
+@operator_register(ExportLibAidgeARM, "AvgPooling")
 class AvgPooling_ARMCortexM(Pooling_ARMCortexM):
-    def __init__(self, node, board, library):
-        super().__init__(node, board, library)
-        self.pool_type = "Avg"
-
-
-@operator_register("FC")
-class FC_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.producers = []
-        # Exclude first input which is a real input
-        for i in range(1, len(node.inputs())):
-            producer = node.input(i)[0]
-            self.producers.append(Producer_ARMCortexM(producer))
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
-        self.scaling = Scaling()("no_scaling")
-        self.activation = "Linear"
-
-        # if len(self.inputs_dims[0]) == 4:
-        #     # if dims == [batch, nb_channels, height, width]
-        #     # transform to [nb_channels, height, width]
-        #     self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        #     # It also means that we need to change the dataformat of the weights
-        #     weights = self.producers[0].values
-        #     if len(weights.shape) == 2:
-        #         weights = weights.reshape(weights.shape[0], weights.shape[1], 1, 1)
-
-        if len(self.inputs_dims[0]) == 3:
-            # if dims == [nb_channels, height, width]
-            # transform to [batch, nb_channels, height, width]
-            self.inputs_dims[0] = [1, self.inputs_dims[0][0], self.inputs_dims[0][1], self.inputs_dims[0][2]]
-
-
-        elif len(self.inputs_dims[0]) == 2:
-            # if dims == [batch, nb_channels]
-            # transform to [batch,nb_channels, 1, 1]
-            self.inputs_dims[0] = [self.inputs_dims[0][0], self.inputs_dims[0][1], 1, 1]
-
-
-        # if len(self.outputs_dims[0]) == 2:
-        #     # if dims == [batch, nb_outputs]
-        #     # transform to [nb_outputs, 1, 1]
-        #     self.outputs_dims[0] = [self.outputs_dims[0][1], 1, 1]
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        # Export weights to NHWC format
-        self.producers[0].export(export_folder / "parameters" / f"{self.producers[0].name}.h")
-        list_configs.append(f"parameters/{self.producers[0].name}.h")
-
-        # Export biases
-        if (len(self.producers) > 1):
-            # Convert the biases to int32
-            if self.dataformat != "float32":
-                self.producers[1].values = self.producers[1].values.astype(np.int32)
-
-            self.producers[1].export(export_folder / "parameters" / f"{self.producers[1].name}.h")
-            list_configs.append(f"parameters/{self.producers[1].name}.h")
-
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                # Take this kernel for now to avoid bad transpose weights (see aidge_export_cpp)
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "FullyConnected" / "aidge_fc_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            # Export configuration file
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "fullyconnected.jinja"),
-                name=self.name,
-                nb_batch=self.inputs_dims[0][0],
-                nb_channels=self.inputs_dims[0][1],
-                channel_height=self.inputs_dims[0][2],
-                channel_width=self.inputs_dims[0][3],
-                nb_outputs=self.outputs_dims[0][1])
-
-        elif self.library == "n2d2":
-
-            # Export configuration file
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_N2D2" / "templates" / "configuration" / "fc_config.jinja"),
-                name=self.name,
-                input_dims=self.inputs_dims[0],
-                output_dims=self.outputs_dims[0],
-                activation=self.activation,
-                **self.scaling)
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "fullyconnected.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                weight_name=self.inputs[1].name(),
-                bias_name=self.inputs[2].name(),
-                output_name=self.name
-            ))
-
-        elif self.library == "n2d2":
-            list_actions.append(generate_str(
-            str(ROOT / "_N2D2" / "templates" / "kernel" / "fc_kernel.jinja"),
-            name=self.name,
-            parent_name=self.inputs[0].name(),
-            inputs_name=self.inputs[0].name(),
-            weights_name=self.inputs[1].name(),
-            biases_name=self.inputs[2].name(),
-            outputs_name=self.name
-        ))
-
-
-        return list_actions
-
-
-@operator_register("FcScaling")
-class FCScaling_ARMCortexM(FC_ARMCortexM):
-
-    def __init__(self, node, board, library):
-        super(FC_ARMCortexM, self).__init__(node, board, library)
-
-        # Should do this line but there is a bug while changing the datatype of generic operator...
-        # self.datatype = aidge2c(node.get_operator().get_output(0).dtype())
-        # Do this instead
-        if self.operator.attr.quantized_nb_bits == 8:
-            if self.operator.attr.is_output_unsigned:
-                self.dtype = aidge2c(aidge_core.dtype.uint8)
-            else:
-                self.dtype = aidge2c(aidge_core.dtype.int8)
-
-        # Impose Single Shift (perhaps change it to have a more modular system)
-        self.scaling = Scaling(self.operator.attr.scaling_factor,
-                               self.operator.attr.quantized_nb_bits)("floating_point")
-
-
-@operator_register("FcReluScaling")
-class FCReluScaling_ARMCortexM(FCScaling_ARMCortexM):
-    def __init__(self, node, board, library):
-        super(FCScaling_ARMCortexM, self).__init__(node, board, library)
-
-        self.activation = "Rectifier"
-
-
-@operator_register("Add")
-class Add_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-
-        super().__init__(node)
-
-        self.producers = []
-
-        for i in range(0, len(node.inputs())):
-            if node.input(i)[0].type()=="Producer" and node.input(i)[0].get_operator().attr.constant == True:
-                producer = node.input(i)[0]
-                self.producers.append(Producer_ARMCortexM(producer))
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-
-
-
-    def export(self, export_folder:Path,list_configs:list):
-        for i in range(len(self.producers)):
-            self.producers[i].export(export_folder / "parameters" / f"{self.producers[i].name}.h")
-            list_configs.append(f"parameters/{self.producers[i].name}.h")
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
-
-        export_params(f"{self.inputs[1].name()}_DIMS", np.array(self.inputs_dims[1],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[1].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[1].name()}_DIMS.h")
-
-        export_params(f"{self.name}_DIMS", np.array(self.outputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.name}_DIMS.h")
-        list_configs.append(f"dimensions/{self.name}_DIMS.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "ElemWise" / "Add" / "aidge_add_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
-                         str(Path(export_folder) / "include"))
-
-                generate_file(
-                    str(export_folder / "layers" / f"{self.name}.h"),
-                    str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "elemwise.jinja"),
-                    name=self.name,
-                    nb_inputs=np.prod(self.inputs_dims[0]),
-                    nb_outputs=np.prod(self.outputs_dims[0]),
-                    input_dims=self.inputs_dims,
-                    output_dims=self.outputs_dims,
-                    elemwise_op="\"ADD\"")
-
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "elemwise.jinja"),
-                name=self.name,
-                elemwise_type = "add",
-                dataformat=self.dataformat,
-                inputa_name=self.inputs[0].name(),
-                inputb_name=self.inputs[1].name(),
-                output_name=self.name))
-
-
-        return list_actions
-
-@operator_register("Mul")
-class Mul_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-
-        super().__init__(node)
-
-        self.producers = []
-
-        for i in range(0, len(node.inputs())):
-            if node.input(i)[0].type()=="Producer" and node.input(i)[0].get_operator().attr.constant == True:
-                producer = node.input(i)[0]
-                self.producers.append(Producer_ARMCortexM(producer))
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-
-
-    def export(self, export_folder:Path,list_configs:list):
-        for i in range(len(self.producers)):
-            self.producers[i].export(export_folder / "parameters" / f"{self.producers[i].name}.h")
-            list_configs.append(f"parameters/{self.producers[i].name}.h")
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
-
-        export_params(f"{self.inputs[1].name()}_DIMS", np.array(self.inputs_dims[1],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[1].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[1].name()}_DIMS.h")
-
-        export_params(f"{self.name}_DIMS", np.array(self.outputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.name}_DIMS.h")
-        list_configs.append(f"dimensions/{self.name}_DIMS.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "ElemWise" / "Mul" / "aidge_mul_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
-                         str(Path(export_folder) / "include"))
-
-                generate_file(
-                    str(export_folder / "layers" / f"{self.name}.h"),
-                    str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "elemwise.jinja"),
-                    name=self.name,
-                    nb_inputs=np.prod(self.inputs_dims[0]),
-                    nb_outputs=np.prod(self.outputs_dims[0]),
-                    input_dims=self.inputs_dims,
-                    output_dims=self.outputs_dims,
-                    elemwise_op="\"MUL\"")
-
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "elemwise.jinja"),
-                name=self.name,
-                elemwise_type = "mul",
-                dataformat=self.dataformat,
-                inputa_name=self.inputs[0].name(),
-                inputb_name=self.inputs[1].name(),
-                output_name=self.name))
-
-
-        return list_actions
-
-@operator_register("Softmax")
-class Softmax_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-
-        super().__init__(node)
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-        self.axis =  node.get_operator().attr.axis
-
-
-    def export(self, export_folder:Path,list_configs:list):
-
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Activation" / "Softmax" / "aidge_softmax_chw_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "activation.jinja"),
-                name=self.name,
-                activation_type="\"SOFTMAX\"",
-                nb_inputs=np.prod(self.inputs_dims[0]),
-                nb_outputs=np.prod(self.outputs_dims[0]),
-                axis=self.axis,
-                input_dims = self.inputs_dims[0])
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "activation_chw.jinja"),
-                name=self.name,
-                activation_type="softmax",
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name
-            ))
-
-        return list_actions
-
-@operator_register("BatchNorm")
-class BatchNorm2D_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-
-        super().__init__(node)
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-        self.epsilon = node.get_operator().attr.epsilon
-
-
-        self.producers = []
-
-        for i in range(0, len(node.inputs())):
-            if node.input(i)[0].type()=="Producer":
-                producer = node.input(i)[0]
-                self.producers.append(Producer_ARMCortexM(producer))
-
-    def export(self, export_folder:Path,list_configs:list):
-        for i in range(len(self.producers)):
-            self.producers[i].export(export_folder / "parameters" / f"{self.producers[i].name}.h")
-            list_configs.append(f"parameters/{self.producers[i].name}.h")
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "BatchNorm" / "aidge_batchnorm2d_chw_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "batchnorm2d.jinja"),
-                name=self.name,
-                epsilon=self.epsilon,
-                input_dims = self.inputs_dims[0])
-
-        return list_configs
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "batchnorm2d.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                running_mean_name=self.inputs[3].name(),
-                running_var_name=self.inputs[4].name(),
-                weight_name=self.inputs[1].name(),
-                bias_name=self.inputs[2].name(),
-                output_name=self.name
-            ))
-        return list_actions
-
-@operator_register("Sigmoid")
-class Sigmoid_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Activation" / "Sigmoid" / "aidge_sigmoid_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "activation.jinja"),
-                name=self.name,
-                activation_type="\"SIGMOID\"",
-                nb_inputs=np.prod(self.inputs_dims[0]),
-                nb_outputs=np.prod(self.outputs_dims[0]))
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "activation.jinja"),
-                name=self.name,
-                activation_type="sigmoid",
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name
-            ))
-
-        return list_actions
-
-@operator_register("Reshape")
-class Reshape_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        # node.set_name(self.inputs[0].name())
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Reshape" / "aidge_reshape_chw_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "reshape.jinja"),
-                name=self.name,
-                nb_inputs=np.prod(self.inputs_dims[0]),
-                nb_outputs=np.prod(self.outputs_dims[0]))
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "reshape.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name,
-            ))
-
-        return list_actions
-
-@operator_register("MatMul")
-class Matmul_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-        self.producers = []
-
-        for i in range(0, len(node.inputs())):
-            if node.input(i)[0].type()=="Producer" and node.input(i)[0].get_operator().attr.constant == True:
-                producer = node.input(i)[0]
-                self.producers.append(Producer_ARMCortexM(producer))
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-
-
-    def export(self, export_folder:Path, list_configs:list):
-        for i in range(len(self.producers)):
-            self.producers[i].export(export_folder / "parameters" / f"{self.producers[i].name}.h")
-            list_configs.append(f"parameters/{self.producers[i].name}.h")
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
-
-        export_params(f"{self.inputs[1].name()}_DIMS", np.array(self.inputs_dims[1],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[1].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[1].name()}_DIMS.h")
-
-        export_params(f"{self.name}_DIMS", np.array(self.outputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.name}_DIMS.h")
-        list_configs.append(f"dimensions/{self.name}_DIMS.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "MatMul" / "aidge_matmul_chw_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
-                         str(Path(export_folder) / "include"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "matmul.jinja"),
-                name=self.name,
-                input_dims=self.inputs_dims,
-                output_dims=self.outputs_dims)
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "matmul.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                inputa_name=self.inputs[0].name(),
-                inputb_name=self.inputs[1].name(),
-                output_name=self.name))
-
-        return list_actions
-
-@operator_register("Gather")
-class Gather_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-        self.indices =  node.get_operator().attr.indices
-        self.axis =  node.get_operator().attr.axis
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
-
-        export_params(f"{self.name}_INDEXES", np.array(self.indices,dtype=np.int32),export_folder / "dimensions" / f"{self.name}_INDEXES.h")
-        list_configs.append(f"dimensions/{self.name}_INDEXES.h")
-
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Transform" / "Gather" / "aidge_gather_chw_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "gather.jinja"),
-                name=self.name,
-                axis = self.axis,
-                indices = self.indices,
-                input_dims=self.inputs_dims[0],
-                nb_outputs=np.prod(self.outputs_dims[0])
-            )
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "gather.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name
-            ))
+    def __init__(self, node, mem_info, is_input, is_output):
+        super().__init__(node, mem_info, is_input, is_output)
+        self.attributes["pool_type"] = "Avg"
+
+@operator_register(ExportLibAidgeARM, "FcReluScaling")
+class FC_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, is_input, is_output):
+        super().__init__(node, mem_info, is_input, is_output)
+        self.attributes["activation"] = "Rectifier"
+        self.attributes.update(Scaling(self.operator.attr.scaling_factor,
+                               self.operator.attr.quantized_nb_bits)("floating_point"))
+        # No padding with Conv
+        # Use PaddedConv to add padding attribute
+        self.attributes["padding"] = [0, 0]
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "fc_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "fc_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = []
+
+@operator_register(ExportLibAidgeARM, "Add")
+class Add_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, is_input, is_output):
+        super().__init__(node, mem_info, is_input, is_output)
+        self.attributes["elemwise_op"] = "\"ADD\""
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "elemwise.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "elemwise.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "ElemWise" / "Add" / "aidge_add_float32.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h")
+        ]
+
+@operator_register(ExportLibAidgeARM, "Mul")
+class Mul_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, is_input, is_output):
+        super().__init__(node, mem_info, is_input, is_output)
+        self.attributes["elemwise_op"] = "\"MUL\""
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "elemwise.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "elemwise.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "ElemWise" / "Mul" / "aidge_mul_float32.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h")
+        ]
+
+@operator_register(ExportLibAidgeARM, "Softmax")
+class Softmax_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, is_input, is_output):
+        super().__init__(node, mem_info, is_input, is_output)
+        self.attributes["activation_type"] = "\"SOFTMAX\""
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "activation.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "activation_chw.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Activation" / "Softmax" / "aidge_softmax_chw_float32.h"),
+        ]
+
+@operator_register(ExportLibAidgeARM, "Sigmoid")
+class Sigmoid_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, is_input, is_output):
+        super().__init__(node, mem_info, is_input, is_output)
+        self.attributes["activation_type"] = "\"SIGMOID\""
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "activation.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "activation_chw.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Activation" / "Sigmoid" / "aidge_sigmoid_float32.h"),
+        ]
+
+@operator_register(ExportLibAidgeARM, "MatMul")
+class MatMul_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, is_input, is_output):
+        super().__init__(node, mem_info, is_input, is_output)
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "matmul.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "matmul.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "MatMul" / "aidge_matmul_chw_float32.h"),
+        ]
+
+
+# TODO: Is this used ?
+# @operator_register("ConvReluScaling")
+# class ConvReluScaling_ARMCortexM(Conv_ARMCortexM):
+#      def __init__(self, node, board, library):
+#         super(Conv_ARMCortexM, self).__init__(node, board, library)
+
+#         if self.operator.has_attr("Begin_End_Borders"):
+#             self.padding = self.operator.attr.begin_end_borders
+
+#         self.activation = "Rectifier"
+
+#         # Should do this line but there is a bug while changing the dtype of generic operator...
+#         # self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
+#         # Do this instead
+#         if self.operator.attr.quantized_nb_bits == 8:
+#             if self.operator.attr.is_output_unsigned:
+#                 self.dtype = aidge2c(aidge_core.dtype.uint8)
+#             else:
+#                 self.dtype = aidge2c(aidge_core.dtype.int8)
+
+#         # Impose Single Shift (perhaps change it to have a more modular system)
+#         self.scaling = Scaling(self.operator.attr.scaling_factor,
+#                                self.operator.attr.quantized_nb_bits)("floating_point")
+
+# @operator_register("BatchNorm")
+# class BatchNorm2D_ARMCortexM(ExportNode):
+#     def __init__(self, node, board, library):
+
+#         super().__init__(node)
+#         self.board = board
+#         self.library = library
+#         self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
+#         self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+#         self.epsilon = node.get_operator().attr.epsilon
+
+
+#         self.producers = []
+
+#         for i in range(0, len(node.inputs())):
+#             if node.input(i)[0].type()=="Producer":
+#                 producer = node.input(i)[0]
+#                 self.producers.append(Producer_ARMCortexM(producer))
+
+#     def export(self, export_folder:Path,list_configs:list):
+#         for i in range(len(self.producers)):
+#             self.producers[i].export(export_folder / "parameters" / f"{self.producers[i].name}.h")
+#             list_configs.append(f"parameters/{self.producers[i].name}.h")
+
+#         list_configs.append(f"layers/{self.name}.h")
+
+#         if self.library == "aidge":
+#             if self.dataformat == "float32":
+#                 copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "BatchNorm" / "aidge_batchnorm2d_chw_float32.c"),
+#                          str(export_folder / "src" / "kernels"))
+
+#             generate_file(
+#                 str(export_folder / "layers" / f"{self.name}.h"),
+#                 str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "batchnorm2d.jinja"),
+#                 name=self.name,
+#                 epsilon=self.epsilon,
+#                 input_dims = self.inputs_dims[0])
+
+#         return list_configs
+#     def forward(self, list_actions:list):
+#         if not self.is_last:
+#             list_actions.append(set_up_output(self.name, self.dtype))
+
+#         if self.library == "aidge":
+
+#             list_actions.append(generate_str(
+#                 str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "batchnorm2d.jinja"),
+#                 name=self.name,
+#                 dataformat=self.dataformat,
+#                 input_name=self.inputs[0].name(),
+#                 running_mean_name=self.inputs[3].name(),
+#                 running_var_name=self.inputs[4].name(),
+#                 weight_name=self.inputs[1].name(),
+#                 bias_name=self.inputs[2].name(),
+#                 output_name=self.name
+#             ))
+#         return list_actions
+
+# @operator_register("Reshape")
+# class Reshape_ARMCortexM(ExportNode):
+#     def __init__(self, node, board, library):
+#         super().__init__(node)
+
+#         self.board = board
+#         self.library = library
+#         # node.set_name(self.inputs[0].name())
+#         self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
+#         self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+
+
+#     def export(self, export_folder:Path, list_configs:list):
+
+#         list_configs.append(f"layers/{self.name}.h")
+
+#         if self.library == "aidge":
+#             if self.dataformat == "float32":
+#                 copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Reshape" / "aidge_reshape_chw_float32.c"),
+#                          str(export_folder / "src" / "kernels"))
+
+#             generate_file(
+#                 str(export_folder / "layers" / f"{self.name}.h"),
+#                 str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "reshape.jinja"),
+#                 name=self.name,
+#                 nb_inputs=np.prod(self.inputs_dims[0]),
+#                 nb_outputs=np.prod(self.outputs_dims[0]))
+
+#         return list_configs
+
+#     def forward(self, list_actions:list):
+
+#         if not self.is_last:
+#             list_actions.append(set_up_output(self.name, self.dtype))
+
+#         if self.library == "aidge":
+#             list_actions.append(generate_str(
+#                 str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "reshape.jinja"),
+#                 name=self.name,
+#                 dataformat=self.dataformat,
+#                 input_name=self.inputs[0].name(),
+#                 output_name=self.name,
+#             ))
+
+#         return list_actions
+
+# @operator_register("Gather")
+# class Gather_ARMCortexM(ExportNode):
+#     def __init__(self, node, board, library):
+#         super().__init__(node)
 
-        return list_actions
+#         self.board = board
+#         self.library = library
+#         self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
+#         self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+#         self.indices =  node.get_operator().attr.indices
+#         self.axis =  node.get_operator().attr.axis
+
+#     def export(self, export_folder:Path, list_configs:list):
+
+#         list_configs.append(f"layers/{self.name}.h")
+
+#         export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
+#         list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
+
+#         export_params(f"{self.name}_INDEXES", np.array(self.indices,dtype=np.int32),export_folder / "dimensions" / f"{self.name}_INDEXES.h")
+#         list_configs.append(f"dimensions/{self.name}_INDEXES.h")
+
+
+#         if self.library == "aidge":
+#             if self.dataformat == "float32":
+#                 copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Transform" / "Gather" / "aidge_gather_chw_float32.c"),
+#                          str(export_folder / "src" / "kernels"))
+
+#             generate_file(
+#                 str(export_folder / "layers" / f"{self.name}.h"),
+#                 str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "gather.jinja"),
+#                 name=self.name,
+#                 axis = self.axis,
+#                 indices = self.indices,
+#                 input_dims=self.inputs_dims[0],
+#                 nb_outputs=np.prod(self.outputs_dims[0])
+#             )
+
+#         return list_configs
+
+#     def forward(self, list_actions:list):
+
+#         if not self.is_last:
+#             list_actions.append(set_up_output(self.name, self.dtype))
+
+#         if self.library == "aidge":
+#             list_actions.append(generate_str(
+#                 str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "gather.jinja"),
+#                 name=self.name,
+#                 dataformat=self.dataformat,
+#                 input_name=self.inputs[0].name(),
+#                 output_name=self.name
+#             ))
 
-@operator_register("Transpose")
-class Transpose_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
+#         return list_actions
 
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-        self.perm =  node.get_operator().attr.output_dims_order
+# @operator_register("Transpose")
+# class Transpose_ARMCortexM(ExportNode):
+#     def __init__(self, node, board, library):
+#         super().__init__(node)
 
-    def export(self, export_folder:Path, list_configs:list):
+#         self.board = board
+#         self.library = library
+#         self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
+#         self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+#         self.perm =  node.get_operator().attr.output_dims_order
 
-        list_configs.append(f"layers/{self.name}.h")
+#     def export(self, export_folder:Path, list_configs:list):
 
-        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
+#         list_configs.append(f"layers/{self.name}.h")
+
+#         export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
+#         list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
 
-        export_params(f"{self.name}_PERMUTATIONS", np.array(self.perm,dtype=np.int32),export_folder / "dimensions" / f"{self.name}_PERMUTATIONS.h")
-        list_configs.append(f"dimensions/{self.name}_PERMUTATIONS.h")
+#         export_params(f"{self.name}_PERMUTATIONS", np.array(self.perm,dtype=np.int32),export_folder / "dimensions" / f"{self.name}_PERMUTATIONS.h")
+#         list_configs.append(f"dimensions/{self.name}_PERMUTATIONS.h")
 
-        export_params(f"{self.name}_DIMS", np.array(self.outputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.name}_DIMS.h")
-        list_configs.append(f"dimensions/{self.name}_DIMS.h")
+#         export_params(f"{self.name}_DIMS", np.array(self.outputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.name}_DIMS.h")
+#         list_configs.append(f"dimensions/{self.name}_DIMS.h")
 
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Transform" / "Transpose" / "aidge_transpose_chw_float32.c"),
-                         str(export_folder / "src" / "kernels"))
+#         if self.library == "aidge":
+#             if self.dataformat == "float32":
+#                 copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Transform" / "Transpose" / "aidge_transpose_chw_float32.c"),
+#                          str(export_folder / "src" / "kernels"))
 
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "transpose.jinja"),
-                name=self.name,
-                perm = self.perm,
-                input_dims=self.inputs_dims[0],
-                output_dims=self.outputs_dims[0],
-                nb_outputs=np.prod(self.outputs_dims[0])
-            )
+#             generate_file(
+#                 str(export_folder / "layers" / f"{self.name}.h"),
+#                 str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "transpose.jinja"),
+#                 name=self.name,
+#                 perm = self.perm,
+#                 input_dims=self.inputs_dims[0],
+#                 output_dims=self.outputs_dims[0],
+#                 nb_outputs=np.prod(self.outputs_dims[0])
+#             )
 
-            # print(self.outputs_dims)
+#             # print(self.outputs_dims)
 
-        return list_configs
+#         return list_configs
 
-    def forward(self, list_actions:list):
+#     def forward(self, list_actions:list):
 
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
+#         if not self.is_last:
+#             list_actions.append(set_up_output(self.name, self.dtype))
 
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "transpose.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name
-                ))
+#         if self.library == "aidge":
+#             list_actions.append(generate_str(
+#                 str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "transpose.jinja"),
+#                 name=self.name,
+#                 dataformat=self.dataformat,
+#                 input_name=self.inputs[0].name(),
+#                 output_name=self.name
+#                 ))
 
-        return list_actions
+#         return list_actions
diff --git a/aidge_export_arm_cortexm/utils/__init__.py b/aidge_export_arm_cortexm/utils/__init__.py
index 7ff6e8b69b617013aa30feeb785e83789bdee575..bd48bd6a7b39cccf74f0f723124b6af2e7db478d 100644
--- a/aidge_export_arm_cortexm/utils/__init__.py
+++ b/aidge_export_arm_cortexm/utils/__init__.py
@@ -1,5 +1,4 @@
 from pathlib import Path
-import os
 
 # Constants
 FILE = Path(__file__).resolve()
@@ -24,7 +23,7 @@ def get_all_available_boards():
             board_name = relpath.replace('/', '').replace('\\', '')
 
             boards[board_name.lower()] = str(subfolder)
-            
+
     return boards
 
 AVAILABLE_BOARDS = get_all_available_boards()
@@ -32,24 +31,3 @@ AVAILABLE_BOARDS = get_all_available_boards()
 
 def has_board(board_name: str) -> bool:
     return board_name.lower() in AVAILABLE_BOARDS.keys()
-
-
-OPERATORS_REGISTRY = {}
-
-def operator_register(*args):
-   
-    key_list = [arg for arg in args]
-
-    def decorator(operator):
-        class Wrapper(operator):
-            def __init__(self, *args, **kwargs):
-                return operator(*args, **kwargs)
-        
-        for key in key_list:
-            OPERATORS_REGISTRY[key] = operator
-
-        return Wrapper
-    return decorator
-
-def supported_operators():
-    return list(OPERATORS_REGISTRY.keys())
diff --git a/aidge_export_arm_cortexm/utils/generation.py b/aidge_export_arm_cortexm/utils/generation.py
deleted file mode 100644
index b80ffbf7b1ac8cdb88aebbd9bb24037e6a4d9b92..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/utils/generation.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import re
-import os
-import shutil
-from jinja2 import Environment, FileSystemLoader
-
-def get_functions_from_c_file(file_path):
-    functions = []
-    pattern = r'\w+\s+(\w+)\s*\(([^)]*)\)\s*{'
-    keyword = ['else', 'for', 'if', 'while', 'do']
-
-
-    with open(file_path, 'r') as file:
-        file_content = file.read()
-
-    matches = re.findall(pattern, file_content)
-    for match in matches:
-        function_name = match[0]
-        if function_name in keyword:
-            continue
-        arguments = match[1].split(',')
-        arguments = [arg.strip() for arg in arguments]
-
-        return_type = get_return_type(file_content, function_name)
-
-        function_string = f"{return_type} {function_name}({', '.join(arguments)});"
-        functions.append(function_string)
-
-    return functions
-
-
-def get_return_type(file_content, function_name):
-    pattern = rf'\w+\s+{function_name}\s*\([^)]*\)\s*{{'
-    return_type = re.search(pattern, file_content).group()
-    return_type = return_type.split()[0].strip()
-    return return_type
-
-
-def get_functions_from_c_folder(folder_path):
-    functions = []
-    
-    for _, _, files in os.walk(folder_path):
-        for file in files:
-            functions += get_functions_from_c_file(os.path.join(folder_path, file))
-
-    return functions
-
-
-def get_filenames_from_folder(folder_path: str, pattern: str = r'.*'):
-    # Ensure the provided folder path exists
-    if not os.path.isdir(folder_path):
-        raise ValueError(f"The provided folder path '{folder_path}' does not exist.")
-
-    # Compile the regex pattern
-    regex = re.compile(pattern)
-
-    # List all files and directories in the provided folder path
-    all_entries = os.listdir(folder_path)
-
-    # Use a regex pattern to filter only filenames (excluding directories)
-    filenames = [entry for entry in all_entries if os.path.isfile(os.path.join(folder_path, entry)) and regex.match(entry)]
-
-    return filenames
-
-
-def copyfile(filename, dst_folder):
-
-    # If directory doesn't exist, create it
-    if not os.path.exists(dst_folder):
-        os.makedirs(dst_folder)
-
-    shutil.copy(filename, dst_folder)
diff --git a/aidge_export_arm_cortexm/utils/scheduler.py b/aidge_export_arm_cortexm/utils/scheduler.py
deleted file mode 100644
index 44540172c89506f75e6e5e0200d57edbea670d6a..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/utils/scheduler.py
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-def topological_sort(graphview):
-    """Take an Aidge Graphview 
-    and returns a list of nodes topologically sorting
-    """
-
-    nodes = graphview.get_nodes()
-    result = []
-    visited = set()
-    visiting = set()  # To track nodes being currently visited
-
-    def visit(node):
-        if node in visiting:
-            raise ValueError("Graph contains a cycle")
-        if node in visited:
-            return
-        visiting.add(node)
-        for parent in node.get_parents():
-            if parent and parent in nodes:
-                visit(parent)
-        visiting.remove(node)
-        visited.add(node)
-        result.append(node)
-
-    for node in nodes:
-        visit(node)
-
-    return result