diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Atan/aidge_atan_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Atan/aidge_atan_float32.c
deleted file mode 100644
index 469f3aec9364ea16c715cbf39e6e9d31213c246e..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Atan/aidge_atan_float32.c
+++ /dev/null
@@ -1,10 +0,0 @@
-#include <math.h>
-
-void aidge_atan_float32 (float* input,
-                         float* output,
-                         unsigned int size)
-{
-    for (unsigned int i = 0; i < size; ++i) {
-        output[i] = atanf(input[i]);
-    }
-}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Add/aidge_add_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Add/aidge_add_float32.h
similarity index 89%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Add/aidge_add_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Add/aidge_add_float32.h
index c9cfc152851099e4f307ee95450ce28baf76114e..ad11e15f0fb41e8487e037314de10bb360cd033e 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Add/aidge_add_float32.c
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Add/aidge_add_float32.h
@@ -1,17 +1,17 @@
-void aidge_add_float32(float* input_a, 
-                       float* input_b, 
-                       float* output, 
-                       int dim_a[],
-                       int dim_b[],
-                       int output_Dim[],
+void aidge_add_float32(const float* input_a,
+                       const float* input_b,
+                       float* output,
+                       const int dim_a[],
+                       const int dim_b[],
+                       const int output_Dim[],
                        int size_dima,
                        int size_dimb,
                        int size_outputDim,
                        int output_size)
 {
-    // Broadcast dims 
-    int ndim_a[size_outputDim];     
-    int ndim_b[size_outputDim];     
+    // Broadcast dims
+    int ndim_a[size_outputDim];
+    int ndim_b[size_outputDim];
 
     for (int i= 0; i<size_outputDim; i++){
     	int idx = size_outputDim-size_dima;
@@ -96,4 +96,4 @@ void aidge_add_float32(float* input_a,
         }
 
     }
-}
\ No newline at end of file
+}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Atan/aidge_atan.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Atan/aidge_atan.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d4da329a44eb8669bc0469a31beeca670b63e6c3
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Atan/aidge_atan.hpp
@@ -0,0 +1,10 @@
+#include <cmath>
+
+template <unsigned int SIZE, typename Input_T, typename Output_T>
+__attribute__((always_inline)) inline static
+void aidge_atan(Input_T* __restrict input, Output_T* __restrict output) {
+  for (unsigned int i = 0; i < SIZE; ++i) {
+    // Note: no explicit cast, so the compiler can warn if precision is lost in the implicit conversion.
+    output[i] = std::atan(input[i]);
+  }
+}
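
The deleted C kernel above is superseded by this header-only template, where the element count is a compile-time template parameter rather than a runtime argument. A minimal call-site sketch, assuming a generated config header provides the size macro (the `ATAN0_*` names below are illustrative, not emitted by the export as-is):

```cpp
#include "aidge_atan.hpp"

#define ATAN0_OUTPUTS_SIZE 16  // hypothetical generated macro

static float atan0_input[ATAN0_OUTPUTS_SIZE];
static float atan0_output[ATAN0_OUTPUTS_SIZE];

void run_atan0(void) {
    // SIZE is a compile-time constant, so the always_inline body can be
    // unrolled by the compiler; std::atan resolves to the float overload here.
    aidge_atan<ATAN0_OUTPUTS_SIZE, float, float>(atan0_input, atan0_output);
}
```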
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Concat/aidge_concat_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Concat/aidge_concat_float32.c
deleted file mode 100644
index 6faa94de4ae0c87e50d94b2ac6a3790937490412..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Concat/aidge_concat_float32.c
+++ /dev/null
@@ -1,92 +0,0 @@
-#include <stdarg.h>
-
-void aidge_concat2_float32 (unsigned int axis,
-                            float* input1,
-                            unsigned int size1,
-                            float* input2,
-                            unsigned int size2,
-                            float* output)
-{
-    for (unsigned int i = 0; i < size1; ++i) {
-        output[i] = input1[i];
-    }
-    for (unsigned int i = 0; i < size2; ++i) {
-        output[i + size1] = input2[i];
-    }
-}
-
-void aidge_concat3_float32 (unsigned int axis,
-                           float* input1,
-                           unsigned int size1,
-                           float* input2,
-                           unsigned int size2,
-                           float* input3,
-                           unsigned int size3,
-                           float* output)
-{
-    for (unsigned int i = 0; i < size1; ++i) {
-        output[i] = input1[i];
-    }
-    for (unsigned int i = 0; i < size2; ++i) {
-        output[i + size1] = input2[i];
-    }
-    for (unsigned int i = 0; i < size3; ++i) {
-        output[i + size1 + size2] = input3[i];
-    }
-}
-
-void aidge_concat4_float32 (unsigned int axis,
-                           float* input1,
-                           unsigned int size1,
-                           float* input2,
-                           unsigned int size2,
-                           float* input3,
-                           unsigned int size3,
-                           float* input4,
-                           unsigned int size4,
-                           float* output)
-{
-    for (unsigned int i = 0; i < size1; ++i) {
-        output[i] = input1[i];
-    }
-    for (unsigned int i = 0; i < size2; ++i) {
-        output[i + size1] = input2[i];
-    }
-    for (unsigned int i = 0; i < size3; ++i) {
-        output[i + size1 + size2] = input3[i];
-    }
-    for (unsigned int i = 0; i < size4; ++i) {
-        output[i + size1 + size2 + size3] = input4[i];
-    }
-}
-
-void aidge_concat5_float32 (unsigned int axis,
-                           float* input1,
-                           unsigned int size1,
-                           float* input2,
-                           unsigned int size2,
-                           float* input3,
-                           unsigned int size3,
-                           float* input4,
-                           unsigned int size4,
-                           float* input5,
-                           unsigned int size5,
-                           float* output)
-{
-    for (unsigned int i = 0; i < size1; ++i) {
-        output[i] = input1[i];
-    }
-    for (unsigned int i = 0; i < size2; ++i) {
-        output[i + size1] = input2[i];
-    }
-    for (unsigned int i = 0; i < size3; ++i) {
-        output[i + size1 + size2] = input3[i];
-    }
-    for (unsigned int i = 0; i < size4; ++i) {
-        output[i + size1 + size2 + size3] = input4[i];
-    }
-    for (unsigned int i = 0; i < size5; ++i) {
-        output[i + size1 + size2 + size3 + size4] = input5[i];
-    }
-}
-
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Concat/aidge_concat_float32.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Concat/aidge_concat_float32.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..305b2d3e078e27b135869c66aeb102f45eb1f41b
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Concat/aidge_concat_float32.hpp
@@ -0,0 +1,16 @@
+template<typename T, unsigned int NB_INPUTS>
+__attribute__((always_inline)) inline static
+void aidge_concat(
+    const unsigned int axis,
+    const T* const * __restrict inputs,
+    const unsigned int* __restrict sizes,
+    T* __restrict output)
+{
+    unsigned int offset = 0;
+    for (unsigned int n = 0; n < NB_INPUTS; ++n) {
+        for (unsigned int i = 0; i < sizes[n]; ++i) {
+            output[offset + i] = inputs[n][i];
+        }
+        offset += sizes[n];
+    }
+}
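
The four fixed-arity C functions above collapse into one template over `NB_INPUTS`. Note that, exactly as in the deleted variants, `axis` is accepted but unused: the copy is a flat back-to-back append. A usage sketch under that assumption (names are illustrative):

```cpp
#include "aidge_concat_float32.hpp"

static float in0[8], in1[4], in2[4];
static float out[16];

void run_concat0(void) {
    const float* inputs[3] = { in0, in1, in2 };
    const unsigned int sizes[3] = { 8, 4, 4 };
    // Copies in0, then in1, then in2 into out at increasing offsets.
    aidge_concat<float, 3>(/*axis=*/0, inputs, sizes, out);
}
```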
diff --git a/aidge_export_arm_cortexm/_N2D2/static/kernels/Conv.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Convolution/Conv.hpp
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/kernels/Conv.hpp
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Convolution/Conv.hpp
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Convolution/ConvDW.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Convolution/ConvDW.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..cefe933057f0e858e54dbf45e6f0b548407bd593
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Convolution/ConvDW.hpp
@@ -0,0 +1,177 @@
+/*
+    (C) Copyright 2017 CEA LIST. All Rights Reserved.
+    Contributor(s): N2D2 Team
+
+    This software is governed by the CeCILL-C license under French law and
+    abiding by the rules of distribution of free software.  You can  use,
+    modify and/ or redistribute the software under the terms of the CeCILL-C
+    license as circulated by CEA, CNRS and INRIA at the following URL
+    "http://www.cecill.info".
+
+    As a counterpart to the access to the source code and  rights to copy,
+    modify and redistribute granted by the license, users are provided only
+    with a limited warranty  and the software's author,  the holder of the
+    economic rights,  and the successive licensors  have only  limited
+    liability.
+
+    The fact that you are presently reading this means that you have had
+    knowledge of the CeCILL-C license and that you accept its terms.
+*/
+
+#ifndef __N2D2_EXPORT_CPP_CONV_DW_HPP__
+#define __N2D2_EXPORT_CPP_CONV_DW_HPP__
+
+#include "typedefs.h"
+#include "assert.h"
+#include "utils.hpp"
+#include "kernels/Macs.hpp"
+
+namespace N2D2_Export {
+
+template<int NB_CHANNELS,
+         int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
+         int NB_OUTPUTS,
+         int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
+         int PADDING_Y, int PADDING_X,
+         int STRIDE_Y, int STRIDE_X,
+         int KERNEL_HEIGHT, int KERNEL_WIDTH,
+         ActivationFunction_T ACTIVATION,
+         // Memory mapping: inputs
+         int INPUT_MEM_CONT_OFFSET,
+         int INPUT_MEM_CONT_SIZE,
+         int INPUT_MEM_WRAP_OFFSET,
+         int INPUT_MEM_WRAP_SIZE,
+         int INPUT_MEM_STRIDE,
+         // Memory mapping: outputs
+         int OUTPUT_MEM_CONT_OFFSET,
+         int OUTPUT_MEM_CONT_SIZE,
+         int OUTPUT_MEM_WRAP_OFFSET,
+         int OUTPUT_MEM_WRAP_SIZE,
+         int OUTPUT_MEM_STRIDE,
+         typename Input_T, typename Output_T,
+         typename Weight_T, typename Bias_T,
+         typename Rescaling_T>
+__attribute__((always_inline)) inline void convcellDWPropagate(
+    const Input_T* __restrict inputs,
+    Output_T* __restrict outputs,
+    const Bias_T* __restrict biasses,
+    const Weight_T* __restrict weights,
+    const Rescaling_T& __restrict rescaling)
+{
+    static_assert(NB_OUTPUTS % NB_CHANNELS == 0,
+        "NB_OUTPUTS should be a multiple of NB_CHANNELS.");
+
+    constexpr int OUTPUTS_HEIGHT_NOPAD
+        = (CHANNELS_HEIGHT - KERNEL_HEIGHT + STRIDE_Y) / STRIDE_Y;
+    constexpr int OUTPUTS_WIDTH_NOPAD
+        = (CHANNELS_WIDTH - KERNEL_WIDTH + STRIDE_X) / STRIDE_X;
+
+    for (int oy = 0; oy < OUTPUTS_HEIGHT; ++oy) {
+        const int syMin = (PADDING_Y == 0) ? 0
+            : max(PADDING_Y - (oy * STRIDE_Y), 0);
+        const int syMax = (PADDING_Y == 0
+                && OUTPUTS_HEIGHT == OUTPUTS_HEIGHT_NOPAD) ? KERNEL_HEIGHT
+            : clamp(CHANNELS_HEIGHT + PADDING_Y - (oy * STRIDE_Y),
+                    0, KERNEL_HEIGHT);
+        const int iy = (oy * STRIDE_Y) - PADDING_Y;
+
+        for (int ox = 0; ox < OUTPUTS_WIDTH; ++ox) {
+            const int sxMin = (PADDING_X == 0) ? 0
+                : max(PADDING_X - (ox * STRIDE_X), 0);
+            const int sxMax = (PADDING_X == 0
+                    && OUTPUTS_WIDTH == OUTPUTS_WIDTH_NOPAD)
+                        ? KERNEL_WIDTH
+                : clamp(CHANNELS_WIDTH + PADDING_X - (ox * STRIDE_X),
+                        0, KERNEL_WIDTH);
+            const int ix = (ox * STRIDE_X) - PADDING_X;
+
+            const int oPos = (ox + OUTPUTS_WIDTH * oy);
+            int oOffset = OUTPUT_MEM_STRIDE * oPos;
+
+            if (OUTPUT_MEM_WRAP_SIZE > 0 && oOffset >= OUTPUT_MEM_CONT_SIZE) {
+                oOffset += OUTPUT_MEM_WRAP_OFFSET - OUTPUT_MEM_CONT_OFFSET
+                            - OUTPUT_MEM_CONT_SIZE;
+            }
+
+            for (int output = 0; output < NB_OUTPUTS; ++output) {
+                const int channel = (output * NB_CHANNELS) / NB_OUTPUTS;
+
+                SUM_T weightedSum = biasses[output];
+
+                for (int sy = 0; sy < KERNEL_HEIGHT; ++sy) {
+                    if ((PADDING_Y != 0
+                            || OUTPUTS_HEIGHT != OUTPUTS_HEIGHT_NOPAD)
+                        && sy >= syMax - syMin)
+                    {
+                        break;
+                    }
+
+                    const int iPos = ((sxMin + ix)
+                                        + CHANNELS_WIDTH * (iy + syMin + sy));
+                    int iOffset = INPUT_MEM_STRIDE * iPos;
+
+                    // Wrapping cannot occur in the middle of a line, except if
+                    // there is only one line (1D)!
+                    bool wrapInRange = false;
+
+                    if (INPUT_MEM_WRAP_SIZE > 0
+                        && iOffset >= INPUT_MEM_CONT_SIZE)
+                    {
+                        iOffset += INPUT_MEM_WRAP_OFFSET - INPUT_MEM_CONT_OFFSET
+                                    - INPUT_MEM_CONT_SIZE;
+                    }
+                    else if (INPUT_MEM_WRAP_SIZE > 0 && KERNEL_WIDTH > 1
+                        && CHANNELS_HEIGHT == 1 // single line (1D)!
+                        && iOffset + KERNEL_WIDTH * INPUT_MEM_STRIDE
+                            > INPUT_MEM_CONT_SIZE)
+                    {
+                        wrapInRange = true;
+                    }
+
+                    const int wOffset = (sxMin
+                        + KERNEL_WIDTH * (syMin + sy + KERNEL_HEIGHT * output));
+
+                    if (!wrapInRange && ((PADDING_X == 0
+                            && OUTPUTS_WIDTH == OUTPUTS_WIDTH_NOPAD)
+                        || sxMax - sxMin == KERNEL_WIDTH))
+                    {
+                        macsOnRange<KERNEL_WIDTH, INPUT_MEM_STRIDE>(
+                            inputs + iOffset + channel,
+                            weights + wOffset,
+                            weightedSum);
+                    }
+                    else {
+                        for (int sx = 0; sx < KERNEL_WIDTH; ++sx) {
+                            if ((PADDING_X != 0
+                                    || OUTPUTS_WIDTH != OUTPUTS_WIDTH_NOPAD)
+                                && sx >= sxMax - sxMin)
+                            {
+                                break;
+                            }
+
+                            int iOffsetInRange = iOffset
+                                + sx * INPUT_MEM_STRIDE;
+
+                            if (wrapInRange &&
+                                iOffsetInRange >= INPUT_MEM_CONT_SIZE)
+                            {
+                                iOffsetInRange += INPUT_MEM_WRAP_OFFSET
+                                            - INPUT_MEM_CONT_OFFSET
+                                            - INPUT_MEM_CONT_SIZE;
+                            }
+
+                            weightedSum += inputs[channel + iOffsetInRange]
+                                * weights[wOffset + sx];
+                        }
+                    }
+                }
+
+                outputs[output + oOffset]
+                    = sat<Output_T>(weightedSum, output, ACTIVATION, rescaling);
+            }
+        }
+    }
+}
+}   // N2D2_Export
+
+#endif  // __N2D2_EXPORT_CPP_CONV_DW_HPP__
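
The line `const int channel = (output * NB_CHANNELS) / NB_OUTPUTS;` encodes the depthwise mapping: with depth multiplier `M = NB_OUTPUTS / NB_CHANNELS` (divisibility is guaranteed by the `static_assert`), each group of `M` consecutive outputs reads the same input channel. A standalone check of that identity (a sketch, independent of the export):

```cpp
#include <cassert>

int main() {
    const int NB_CHANNELS = 4, NB_OUTPUTS = 8;  // depth multiplier M = 2
    for (int output = 0; output < NB_OUTPUTS; ++output) {
        const int channel = (output * NB_CHANNELS) / NB_OUTPUTS;
        // outputs 0,1 -> channel 0; outputs 2,3 -> channel 1; ...
        assert(channel == output / (NB_OUTPUTS / NB_CHANNELS));
    }
    return 0;
}
```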
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Convolution/aidge_conv2d_hwc_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Convolution/aidge_conv2d_hwc_float32.c
deleted file mode 100644
index 531331e6b1386b8bd4c6b7395941fddc39726d11..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Convolution/aidge_conv2d_hwc_float32.c
+++ /dev/null
@@ -1,70 +0,0 @@
-#include "include/aidge_supportfunctions.h"
-
-void aidge_conv2d_hwc_float32(float* inputs,
-                              float* weights,
-                              float* biases,
-                              float* outputs,
-                              const int nb_channels,
-                              const int channel_width, const int channel_height,
-                              const int kernel_width, const int kernel_height,
-                              const int nb_outputs,
-                              const int output_width, const int output_height,
-                              const int padding_width, const int padding_height,
-                              const int stride_width, const int stride_height,
-                              const int dilation_width, const int dilation_height)
-{
-    int outputOffset = 0;
-
-    const int dilated_kernel_width 
-            = kernel_width + (dilation_width - 1) * (kernel_width - 1);
-    const int dilated_kernel_height 
-            = kernel_height + (dilation_height - 1) * (kernel_height - 1);
-
-    int iy = 0;
-    for (int oy = 0; oy < output_height; ++oy) {
-        const int syMin = (padding_height == 0)
-                            ? 0 : max(padding_height - iy, 0);
-        const int syMax = (padding_height == 0)
-                            ? dilated_kernel_height 
-                            : clamp(channel_height + padding_height - iy, 
-                                    0, dilated_kernel_height);
-     
-        int ix = 0;
-        for (int ox = 0; ox < output_width; ++ox) {
-            const int sxMin = (padding_width == 0)
-                                ? 0 : max(padding_width - ix, 0);
-            const int sxMax = (padding_width == 0)
-                                ? dilated_kernel_width 
-                                : clamp(channel_width + padding_width - ix,  
-                                        0, dilated_kernel_width);
-         
-            for (int och = 0; och < nb_outputs; ++och) {
-                float weightedSum = biases[och];
-
-                for (int sy = 0; sy < kernel_height; ++sy) {
-                    if (padding_height != 0 && (sy*dilation_height < syMin || sy*dilation_height >= syMax)) {
-                        continue;
-                    }
-                    const int inputsOffset  = (iy + sy*dilation_height - padding_height)*channel_width*nb_channels +
-                                              (ix - padding_width)*nb_channels;
-                    const int weightsOffset = och*kernel_height*kernel_width*nb_channels +
-                                              sy*kernel_width*nb_channels;
-
-                    for (int sx = 0; sx < kernel_width; ++sx) {
-                        if(sx*dilation_width < sxMin || sx*dilation_width >= sxMax) {
-                            continue;
-                        }
-                        for (int ch = 0; ch < nb_channels; ++ch) {
-                            weightedSum += inputs[inputsOffset + sx*dilation_width*nb_channels + ch] 
-                                           * weights[weightsOffset + sx*nb_channels + ch];
-                        }
-                    }
-                }
-                outputs[outputOffset] = weightedSum;
-                ++outputOffset;
-            }
-            ix += stride_width;
-        }
-        iy += stride_height;
-    }
-}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Div/aidge_div_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Div/aidge_div_float32.h
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Div/aidge_div_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Div/aidge_div_float32.h
diff --git a/aidge_export_arm_cortexm/_N2D2/static/kernels/Fc.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/FullyConnected/Fc.hpp
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/kernels/Fc.hpp
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/FullyConnected/Fc.hpp
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/FullyConnected/aidge_fc_chw_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/FullyConnected/aidge_fc_chw_float32.c
deleted file mode 100644
index 15d7fb7063d8caaaee2db0f3ab174bd48af8417b..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/FullyConnected/aidge_fc_chw_float32.c
+++ /dev/null
@@ -1,28 +0,0 @@
-
-
-void aidge_fc_chw_float32 (float* inputs,
-                           float* weights,
-                           float* biases,
-                           float* outputs,
-                           unsigned int nb_channels,
-                           unsigned int channels_height,
-                           unsigned int channels_width,
-                           unsigned int nb_outputs)
-{
-    for (unsigned int out = 0; out < nb_outputs; ++out) {
-        // Init with bias
-        float accum = biases[out]; 
-
-        for (int iy = 0; iy < channels_height; ++iy) {
-            for (int ix = 0; ix < channels_width; ++ix) {
-                for (int ch = 0; ch < nb_channels; ++ch) {
-                    accum += inputs[channels_width*nb_channels*iy + nb_channels*ix + ch] 
-                                * weights[channels_height*channels_width*nb_channels*out + channels_height*channels_width*ch + channels_height*iy + ix];
-                }
-            }
-        }
-
-        // Store result
-        outputs[out] = accum;
-    }
-}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/FullyConnected/aidge_fc_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/FullyConnected/aidge_fc_float32.c
deleted file mode 100644
index de169d49367eabf9c904a05e7cf3b9789f3ac9a4..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/FullyConnected/aidge_fc_float32.c
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-void aidge_fc_float32 (float* inputs,
-                       float* weights,
-                       float* biases,
-                       float* outputs,
-                       unsigned int batch_size,
-                       unsigned int nb_inputs,
-                       unsigned int nb_outputs)
-{
-    for (unsigned int batch = 0; batch < batch_size; ++batch){
-        for (unsigned int out = 0; out < nb_outputs; ++out) {
-            // Init with bias
-            float accum = biases[out]; 
-
-            for (unsigned int in = 0; in < nb_inputs; ++in) {
-                accum += inputs[batch*nb_inputs + in] * weights[out * nb_inputs + in];
-            }
-
-            // Store result
-            outputs[batch*nb_outputs + out] = accum;
-        }
-    }
-}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/MatMul/aidge_matmul_chw_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/MatMul/aidge_matmul_chw_float32.h
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/MatMul/aidge_matmul_chw_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/MatMul/aidge_matmul_chw_float32.h
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Mul/aidge_mul_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Mul/aidge_mul_float32.h
similarity index 89%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Mul/aidge_mul_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Mul/aidge_mul_float32.h
index dbbf908cee8699bd09b6bc83b8abeeb481c26c05..8045717cbee6ebeee82e7f379726d530166288b7 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Mul/aidge_mul_float32.c
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Mul/aidge_mul_float32.h
@@ -1,17 +1,17 @@
-void aidge_mul_float32(float* input_a, 
-                       float* input_b, 
-                       float* output, 
-                       int dim_a[],
-                       int dim_b[],
-                       int output_Dim[],
+void aidge_mul_float32(const float* input_a,
+                       const float* input_b,
+                       float* output,
+                       const int dim_a[],
+                       const int dim_b[],
+                       const int output_Dim[],
                        int size_dima,
                        int size_dimb,
                        int size_outputDim,
                        int output_size)
 {
-    // Broadcast dims 
-    int ndim_a[size_outputDim];     
-    int ndim_b[size_outputDim];     
+    // Broadcast dims
+    int ndim_a[size_outputDim];
+    int ndim_b[size_outputDim];
 
     for (int i= 0; i<size_outputDim; i++){
     	int idx = size_outputDim-size_dima;
@@ -96,4 +96,4 @@ void aidge_mul_float32(float* input_a,
         }
 
     }
-}
\ No newline at end of file
+}
diff --git a/aidge_export_arm_cortexm/_N2D2/static/kernels/Pooling.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Pooling/Pooling.hpp
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/kernels/Pooling.hpp
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Pooling/Pooling.hpp
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Pooling/aidge_maxpool2d_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Pooling/aidge_maxpool2d_float32.c
deleted file mode 100644
index ebfb8f7ebdbb060e75f56cdfb4e9abe78aa810aa..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Pooling/aidge_maxpool2d_float32.c
+++ /dev/null
@@ -1,75 +0,0 @@
-#include "include/aidge_supportfunctions.h"
-
-void aidge_maxpool2d_float32(float* inputs,
-                             float* outputs,
-                             const int nb_channels,
-                             const int channel_width, const int channel_height,
-                             const int kernel_width, const int kernel_height,
-                             const int nb_outputs,
-                             const int output_width, const int output_height,
-                             const int padding_width, const int padding_height,
-                             const int stride_width, const int stride_height)
-{
-    const int OUTPUTS_HEIGHT_NOPAD
-        = (channel_height - kernel_height + stride_height) / stride_height;
-    const int OUTPUTS_WIDTH_NOPAD
-        = (channel_width - kernel_width + stride_width) / stride_width;
-
-    for (int oy = 0; oy < output_height; ++oy) {
-        const int syMin = (padding_height == 0) ? 0
-            : max(padding_height - (oy * stride_height), 0);
-        const int syMax = (padding_height == 0
-                && output_height == OUTPUTS_HEIGHT_NOPAD) ? kernel_height
-            : clamp(channel_height + padding_height - (oy * stride_height), 
-                    0, kernel_height);
-        const int iy = (oy * stride_height) - padding_height;
-
-        for (int ox = 0; ox < output_width; ++ox) {
-            for (int output = 0; output < nb_outputs; ++output) {
-                
-                const int sxMin = (padding_width == 0) ? 0
-                    : max(padding_width - (ox * stride_width), 0);
-                const int sxMax = (padding_width == 0
-                        && output_width == OUTPUTS_WIDTH_NOPAD)
-                            ? kernel_width
-                    : clamp(channel_width + padding_width - (ox * stride_width), 
-                            0, kernel_width);
-                const int ix = (ox * stride_width) - padding_width;
-
-                const int oPos = (ox + output_width * oy);
-                int oOffset = nb_outputs * oPos;
-                                
-                float maxVal = -1000.f;
-
-                for (int sy = 0; sy < kernel_height; ++sy) {
-                    if ((padding_height != 0
-                            || output_height != OUTPUTS_HEIGHT_NOPAD)
-                        && sy >= syMax - syMin)
-                    {
-                        break;
-                    }
-
-                    const int iPos = ((sxMin + ix)
-                                        + channel_width * (iy + syMin + sy));
-                    int iOffset = nb_channels * iPos;
-
-                    for (int sx = 0; sx < kernel_width; ++sx) {
-                        if ((padding_width != 0
-                                || output_width != OUTPUTS_WIDTH_NOPAD)
-                            && sx >= sxMax - sxMin)
-                        {
-                            break;
-                        }
-
-                        int iOffsetInRange = iOffset + output + sx * nb_channels;
-
-                        if (inputs[iOffsetInRange] > maxVal)
-                            maxVal = inputs[iOffsetInRange];
-                    }
-                }
-
-                outputs[oOffset + output] = maxVal;
-            }
-        }
-    }
-}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Relu/aidge_relu_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Relu/aidge_relu_float32.h
similarity index 81%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Relu/aidge_relu_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Relu/aidge_relu_float32.h
index 5e1bb6bca480aff223484653e2f08702299a33e3..baee52ebfe7e20331bd40b62c913135292ad65f8 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Relu/aidge_relu_float32.c
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Relu/aidge_relu_float32.h
@@ -1,10 +1,8 @@
-
-
-void aidge_relu_float32 (float* inputs, 
+void aidge_relu_float32 (float* inputs,
                          float* outputs,
                          unsigned int size)
 {
     for (unsigned int i = 0; i < size; ++i) {
         outputs[i] = (inputs[i] < 0.0f) ? 0.0f : inputs[i];
     }
-}
\ No newline at end of file
+}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Sigmoid/aidge_sigmoid_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Sigmoid/aidge_sigmoid_float32.h
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Sigmoid/aidge_sigmoid_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Sigmoid/aidge_sigmoid_float32.h
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Slice/aidge_slice_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Slice/aidge_slice_float32.hpp
similarity index 55%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/Slice/aidge_slice_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Slice/aidge_slice_float32.hpp
index 8e5f5000ee5c75723532f5052ccd9932e4de5515..9ec6ebc034adf579cd10d007e698d5501233fdef 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Slice/aidge_slice_float32.c
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Slice/aidge_slice_float32.hpp
@@ -1,10 +1,8 @@
-
-
-void aidge_slice_float32 (float* inputs, 
-                          float* outputs, 
-                          int* axes, 
-                          int* starts, 
-                          int* ends,
+void aidge_slice_float32 (float* inputs,
+                          float* outputs,
+                          const int* axes,
+                          const int* starts,
+                          const int* ends,
                           unsigned int input_dims,
                           unsigned int nb_axes)
 {
@@ -13,4 +11,4 @@ void aidge_slice_float32 (float* inputs,
     for (int i = starts[axes[0] - 1]; i < ends[axes[0] - 1]; ++i) {
         outputs[out_index++] = inputs[i];
     }
-}
\ No newline at end of file
+}
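
As the body shows, only the first entry of `axes` is honored (with 1-based axis values, hence the `axes[0] - 1` lookup) and data is copied as one flat range, so this kernel effectively slices a 1-D or flattened-outermost view. A hedged call sketch in the array style of the slice configuration template further down in this diff (names and values illustrative):

```cpp
#include "aidge_slice_float32.hpp"

static const int SLICE0_AXES[]   = { 1 };  // 1-based in this kernel
static const int SLICE0_STARTS[] = { 2 };
static const int SLICE0_ENDS[]   = { 5 };

static float slice_in[8];
static float slice_out[3];

void run_slice0(void) {
    // Copies slice_in[2..4] into slice_out[0..2].
    aidge_slice_float32(slice_in, slice_out,
                        SLICE0_AXES, SLICE0_STARTS, SLICE0_ENDS,
                        /*input_dims=*/8, /*nb_axes=*/1);
}
```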
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Softmax/aidge_softmax_chw_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Softmax/aidge_softmax_chw_float32.h
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Softmax/aidge_softmax_chw_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Softmax/aidge_softmax_chw_float32.h
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Sub/aidge_sub_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Sub/aidge_sub_float32.h
similarity index 52%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Sub/aidge_sub_float32.c
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Sub/aidge_sub_float32.h
index 1f4f7d798eedcb9d4504087ba02179203ee46772..99bd9a0e54857004d0944e7f854db28732abe580 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Sub/aidge_sub_float32.c
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Sub/aidge_sub_float32.h
@@ -1,11 +1,11 @@
 
 
-void aidge_sub_float32(float* input_a, 
-                       float* input_b, 
-                       float* output, 
+void aidge_sub_float32(const float* input_a,
+                       const float* input_b,
+                       float* output,
                        unsigned int size)
 {
     for (unsigned int i = 0; i < size; ++i) {
         output[i] = input_a[i] - input_b[i];
     }
-}
\ No newline at end of file
+}
diff --git a/aidge_export_arm_cortexm/_N2D2/static/kernels/Macs.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Utils/Macs.hpp
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/kernels/Macs.hpp
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Utils/Macs.hpp
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/SupportFunctions/aidge_supportfunctions.h b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Utils/aidge_supportfunctions.h
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/kernels/SupportFunctions/aidge_supportfunctions.h
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Utils/aidge_supportfunctions.h
diff --git a/aidge_export_arm_cortexm/_N2D2/static/assert.h b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Utils/assert.h
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/assert.h
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Utils/assert.h
diff --git a/aidge_export_arm_cortexm/_N2D2/static/nn_scaling_functions.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Utils/nn_scaling_functions.hpp
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/nn_scaling_functions.hpp
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Utils/nn_scaling_functions.hpp
diff --git a/aidge_export_arm_cortexm/_N2D2/static/typedefs.h b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Utils/typedefs.h
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/typedefs.h
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Utils/typedefs.h
diff --git a/aidge_export_arm_cortexm/_N2D2/static/utils.hpp b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Utils/utils.hpp
similarity index 100%
rename from aidge_export_arm_cortexm/_N2D2/static/utils.hpp
rename to aidge_export_arm_cortexm/_Aidge_Arm/kernels/Utils/utils.hpp
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/_def_io.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/_def_io.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..66756cf8f501035f7222272f9c410908f499f06f
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/_def_io.jinja
@@ -0,0 +1,14 @@
+{# NOTE: Assumes graph inputs come first #}
+// INPUT CONF
+{% for inidx in range(nb_in) -%}
+#define {{ in_name[inidx]|upper }}_NB_CHANNELS {{ in_chan[inidx] }}
+#define {{ in_name[inidx]|upper }}_IN_HEIGHT {{ in_height[inidx] }}
+#define {{ in_name[inidx]|upper }}_IN_WIDTH {{ in_width[inidx] }}
+{% endfor %}
+
+// OUTPUT CONF
+{% for outidx in range(nb_out) -%}
+#define {{ out_name[outidx]|upper }}_NB_OUTPUTS {{ out_chan[outidx] }}
+#define {{ out_name[outidx]|upper }}_OUT_HEIGHT {{ out_height[outidx] }}
+#define {{ out_name[outidx]|upper }}_OUT_WIDTH {{ out_width[outidx] }}
+{% endfor %}
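
For orientation, here is roughly what this partial renders to for a node with one 3x32x32 input and one 8x30x30 output, assuming the tensor names come out as `conv1_input` and `conv1_output` (illustrative values only):

```cpp
// INPUT CONF
#define CONV1_INPUT_NB_CHANNELS 3
#define CONV1_INPUT_IN_HEIGHT 32
#define CONV1_INPUT_IN_WIDTH 32

// OUTPUT CONF
#define CONV1_OUTPUT_NB_OUTPUTS 8
#define CONV1_OUTPUT_OUT_HEIGHT 30
#define CONV1_OUTPUT_OUT_WIDTH 30
```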
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/_meminfo.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/_meminfo.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..24e79e8eb57aeeac2ba34b2a1fd56cf13e99bac7
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/_meminfo.jinja
@@ -0,0 +1,27 @@
+// MEMINFO {{ name }}
+
+{% for inidx in range(nb_in) -%}
+{%- if in_node[inidx] is none -%}
+// GRAPH INPUT MEMINFO
+#define {{ in_name[inidx]|upper }}_SIZE {{ in_chan[inidx] }}
+#define {{ in_name[inidx]|upper }}_OFFSET 0
+#define {{ in_name[inidx]|upper }}_STRIDE {{ in_chan[inidx] }}
+#define {{ in_name[inidx]|upper }}_LENGTH {{ in_size[inidx] }}
+#define {{ in_name[inidx]|upper }}_CONT_SIZE {{ in_size[inidx] }}
+#define {{ in_name[inidx]|upper }}_CONT_OFFSET 0
+#define {{ in_name[inidx]|upper }}_WRAP_OFFSET 0
+#define {{ in_name[inidx]|upper }}_WRAP_SIZE 0
+{% endif -%}
+{% endfor -%}
+
+{% for outidx in range(nb_out) -%}
+// OUTPUT {{ outidx }}
+#define {{ out_name[outidx]|upper }}_SIZE {{ mem_info_size[outidx]}}
+#define {{ out_name[outidx]|upper }}_OFFSET {{ mem_info_offset[outidx]}}
+#define {{ out_name[outidx]|upper }}_STRIDE {{ mem_info_stride[outidx]}}
+#define {{ out_name[outidx]|upper }}_LENGTH {{ mem_info_length[outidx]}}
+#define {{ out_name[outidx]|upper }}_CONT_SIZE {{ mem_info_cont_size[outidx]}}
+#define {{ out_name[outidx]|upper }}_CONT_OFFSET {{ mem_info_cont_offset[outidx]}}
+#define {{ out_name[outidx]|upper }}_WRAP_OFFSET {{ mem_info_wrap_offset[outidx]}}
+#define {{ out_name[outidx]|upper }}_WRAP_SIZE {{ mem_info_wrap_size[outidx]}}
+{% endfor -%}
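
These `_CONT_*`/`_WRAP_*` macros line up one-to-one with the memory-mapping template parameters of kernels such as `convcellDWPropagate` above (`OUTPUT_MEM_CONT_OFFSET`, `OUTPUT_MEM_WRAP_SIZE`, ...). An illustrative rendering for a non-wrapped output named `fc1_output` (all values hypothetical):

```cpp
// OUTPUT 0
#define FC1_OUTPUT_SIZE 10
#define FC1_OUTPUT_OFFSET 128
#define FC1_OUTPUT_STRIDE 10
#define FC1_OUTPUT_LENGTH 10
#define FC1_OUTPUT_CONT_SIZE 40
#define FC1_OUTPUT_CONT_OFFSET 128
#define FC1_OUTPUT_WRAP_OFFSET 0
#define FC1_OUTPUT_WRAP_SIZE 0  // zero disables the wrap branch in the kernels
```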
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/activation.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/activation.jinja
index 82817e995ee5b4f684c6cdcc3073637b88d0e6d0..717757f32ea7009f6b3d7c0602a935f31c68502a 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/activation.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/activation.jinja
@@ -2,15 +2,15 @@
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
 
-/* Activation {{ activation_type|lower }} layer */
+/* Activation "{{ activation_type|lower }}" layer */
 
 {# For layer configuration -#}
-#define {{ name|upper }}_INPUTS_SIZE {{ nb_inputs }}
-#define {{ name|upper }}_OUTPUTS_SIZE {{ nb_outputs }}
+#define {{ name|upper }}_INPUTS_SIZE {{ in_size[0] }}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
 {% if axis is defined %}
 #define {{ name|upper }}_AXIS {{ axis }}
-#define {{name|upper}}_INPUT_DIMS_SIZE {{ input_dims|length}}
+#define {{ name|upper }}_INPUT_DIMS_SIZE {{ in_dims[0]|length}}
 {% endif %}
-#define {{ name|upper }}_ACTIVATION {{ activation_type }}
+#define {{ name|upper }}_ACTIVATION "{{ activation_type }}"
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/add.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/add.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..67b1c33e026f3a682d389adc1b67e901ebe9c0e5
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/add.jinja
@@ -0,0 +1,21 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+/* ElemWise - add layer */
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
+
+
+{# For layer configuration -#}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
+
+#define {{name|upper}}_IN_0_NB_DIMS {{ in_dims[0]|length}}
+#define {{name|upper}}_IN_1_NB_DIMS {{ in_dims[1]|length}}
+#define {{name|upper}}_OUT_0_NB_DIMS {{ out_dims[0]|length}}
+
+static const int {{name|upper}}_IN_0_DIMS[] = { {{ in_dims[0]|join(", ") }} };
+static const int {{name|upper}}_IN_1_DIMS[] = { {{ in_dims[1]|join(", ") }} };
+static const int {{name|upper}}_OUT_0_DIMS[] = { {{ out_dims[0]|join(", ") }} };
+
+#endif /* {{ name|upper }}_LAYER_H */
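
These definitions match, in order, the parameter list of `aidge_add_float32` shown earlier in this diff, so a generated call would look roughly like the following sketch (the actual forward-call template is not part of this hunk; buffer names are hypothetical):

```cpp
aidge_add_float32(add0_input_0, add0_input_1, add0_output,
                  ADD0_IN_0_DIMS, ADD0_IN_1_DIMS, ADD0_OUT_0_DIMS,
                  ADD0_IN_0_NB_DIMS, ADD0_IN_1_NB_DIMS, ADD0_OUT_0_NB_DIMS,
                  ADD0_OUTPUTS_SIZE);
```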
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/atan.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/atan.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..70ab97008b2cc15ab00b132d5f96c696d7d61415
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/atan.jinja
@@ -0,0 +1,9 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+{% include "./_meminfo.jinja" %}
+
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_chan[0] * out_height[0] * out_width[0] }}
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/concat.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/concat.jinja
index d819f59019c2d464fde9f5105beb3960fee8640e..8aa63156a2d890bbfb6f0c7ddce700917ccef83b 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/concat.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/concat.jinja
@@ -2,17 +2,14 @@
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
 
-/* Concat layer */
-
-{# For layer configuration -#}
-#define {{ name|upper }}_NB_INPUTS {{ nb_inputs }}
+{% include "./_meminfo.jinja" %}
 
+// Attributes
+#define {{ name|upper }}_NB_INPUTS {{ nb_in }}
 #define {{ name|upper }}_AXIS {{ axis }}
-
-{%- for i in range(nb_inputs) %}
-#define {{ name|upper }}_INPUT{{i}}_SIZE {{ list_input_size[i] }}
+{%- for i in range(nb_in) %}
+#define {{ name|upper }}_INPUT_{{i}}_SIZE {{ in_chan[i] * in_height[i] * in_width[i] }}
 {%- endfor %}
-
-#define {{ name|upper }}_OUTPUT_SIZE {{ output_size }}
+#define {{ name|upper }}_OUTPUT_SIZE {{ out_chan[0] * out_height[0] * out_width[0] }}
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/conv_config.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/conv_config.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..6a48f2b55c4889829823f0abf095cff40ebecc71
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/conv_config.jinja
@@ -0,0 +1,41 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+#include "typedefs.h"
+#include "nn_scaling_functions.hpp"
+
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
+
+// Attributes
+#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel_dims[1] }}
+#define {{ name|upper }}_KERNEL_WIDTH {{ kernel_dims[0] }}
+#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
+#define {{ name|upper }}_PADDING_X {{ padding[0] }}
+#define {{ name|upper }}_STRIDE_Y {{ stride_dims[1] }}
+#define {{ name|upper }}_STRIDE_X {{ stride_dims[0] }}
+#define {{ name|upper }}_DILATION_Y {{ dilation_dims[1] }}
+#define {{ name|upper }}_DILATION_X {{ dilation_dims[0] }}
+
+// Activation/Scaling
+#define {{ name|upper }}_ACTIVATION {{ activation }}
+
+{%- if scaling_type == "floating_point" %}
+static const N2D2_Export::FloatingPointScaling {{ name|upper }}_SCALING = { {{scaling_value}} };
+{%- elif scaling_type == "fixed_point" %}
+static const N2D2_Export::FixedPointScaling<{{scaling_value}}, {{fractional_bits}}> {{ name|upper }}_SCALING;
+{%- elif scaling_type == "single_shift" %}
+static const N2D2_Export::SingleShiftScaling<{{shift_value}}> {{ name|upper }}_SCALING;
+{%- else %}
+static const N2D2_Export::NoScaling {{ name|upper }}_SCALING;
+{%- endif %}
+
+// Sizes
+#define {{ name|upper }}_WEIGHTS_SIZE {{ out_chan[0] * in_chan[0] * kernel_dims[1] * kernel_dims[0] }}
+#define {{ name|upper }}_BIASES_SIZE {{ out_chan[0] }}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_chan[0] * out_height[0] * out_width[0] }}
+#define {{ name|upper }}_CHANNELS_SIZE {{ in_chan[0] * in_height[0] * in_width[0] }}
+
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/convolution.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/convolution.jinja
deleted file mode 100644
index 19d5358d7990875447f9c6b10f1231ebd948f92d..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/convolution.jinja
+++ /dev/null
@@ -1,29 +0,0 @@
-{#- For name header -#}
-#ifndef {{ name|upper }}_LAYER_H
-#define {{ name|upper }}_LAYER_H
-
-{# For layer configuration -#}
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
-
-#define {{ name|upper }}_KERNEL_Y {{ kernel[1] }}
-#define {{ name|upper }}_KERNEL_X {{ kernel[0] }}
-#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
-#define {{ name|upper }}_PADDING_X {{ padding[0] }}
-#define {{ name|upper }}_STRIDE_Y {{ stride[1] }}
-#define {{ name|upper }}_STRIDE_X {{ stride[0] }}
-#define {{ name|upper }}_DILATION_Y {{ dilation[1] }}
-#define {{ name|upper }}_DILATION_X {{ dilation[0] }}
-
-
-{#- Calculate sizes #}
-{%- set weights_size = output_dims[0] * input_dims[0] * kernel[1] * kernel[0] %}
-#define {{ name|upper }}_WEIGHTS_SIZE {{ weights_size }}
-#define {{ name|upper }}_BIASES_SIZE {{ output_dims[0] }}
-
-
-#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/elemwise.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/elemwise.jinja
deleted file mode 100644
index c5e4281dba7b3146516bec019ed30b6136a10014..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/elemwise.jinja
+++ /dev/null
@@ -1,17 +0,0 @@
-{#- For name header -#}
-#ifndef {{ name|upper }}_LAYER_H
-#define {{ name|upper }}_LAYER_H
-
-/* ElemWise - {{ elemwise_op }} layer */
-
-{# For layer configuration -#}
-#define {{ name|upper }}_INPUTS_SIZE {{ nb_inputs }}
-#define {{ name|upper }}_OUTPUTS_SIZE {{ nb_outputs }}
-
-
-#define {{name|upper}}_INPUT_A_DIMS_SIZE {{ input_dims[0]|length}}
-#define {{name|upper}}_INPUT_B_DIMS_SIZE {{ input_dims[1]|length}}
-#define {{name|upper}}_OUTPUT_DIMS_SIZE {{ output_dims[0]|length}}
-#define {{ name|upper }}_ELEM_OP {{ elemwise_op }}
-
-#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_N2D2/templates/configuration/fc_config.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/fc_config.jinja
similarity index 52%
rename from aidge_export_arm_cortexm/_N2D2/templates/configuration/fc_config.jinja
rename to aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/fc_config.jinja
index e09cc0425490a67968dd1c6294a1003b102e4e2f..ab33588ca642d54e6fb5fcc58fe7a5279d4ddb32 100644
--- a/aidge_export_arm_cortexm/_N2D2/templates/configuration/fc_config.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/fc_config.jinja
@@ -6,13 +6,8 @@
 #include "nn_scaling_functions.hpp"
 
 {# For layer configuration -#}
-// In/Out
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
 
 // Activation/Scaling
 #define {{ name|upper }}_ACTIVATION {{ activation }}
@@ -28,12 +23,10 @@ static const N2D2_Export::NoScaling {{ name|upper }}_SCALING;
 {%- endif %}
 
 {# Calculate sizes -#}
-{%- set weights_size = output_dims[0] * input_dims[0] * input_dims[1] * input_dims[2] -%}
 // Sizes
-#define {{ name|upper }}_WEIGHTS_SIZE {{ weights_size }}
-#define {{ name|upper }}_BIASES_SIZE {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_SIZE {{ output_dims[0] * output_dims[1] * output_dims[2] }}
-#define {{ name|upper }}_CHANNELS_SIZE {{ input_dims[0] * input_dims[1] * input_dims[2] }}
-
+#define {{ name|upper }}_WEIGHTS_SIZE {{ out_chan[0] * in_chan[0] * in_height[0] * in_width[0] }}
+#define {{ name|upper }}_BIASES_SIZE {{ out_chan[0] }}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_chan[0] * out_height[0] * out_width[0] }}
+#define {{ name|upper }}_CHANNELS_SIZE {{ in_chan[0] * in_height[0] * in_width[0] }}
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/fullyconnected.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/fullyconnected.jinja
deleted file mode 100644
index 0ffda543b5b45809b40916ff57583683c194f36e..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/fullyconnected.jinja
+++ /dev/null
@@ -1,18 +0,0 @@
-{#- For name header -#}
-#ifndef {{ name|upper }}_LAYER_H
-#define {{ name|upper }}_LAYER_H
-
-{# For layer configuration -#}
-#define {{name|upper}}_BATCH_SIZE {{ nb_batch}}
-#define {{ name|upper }}_NB_CHANNELS {{ nb_channels }}
-#define {{ name|upper }}_CHANNEL_HEIGHT {{ channel_height }}
-#define {{ name|upper }}_CHANNEL_WIDTH {{ channel_width }}
-#define {{ name|upper }}_NB_OUTPUTS {{ nb_outputs }}
-#define {{ name|upper }}_NB_INPUTS {{ nb_channels*channel_height*channel_width }}
-
-{#- Calculate sizes #}
-{%- set weights_size = nb_channels * channel_height * channel_width * nb_outputs %}
-#define {{ name|upper }}_WEIGHTS_SIZE {{ weights_size }}
-#define {{ name|upper }}_BIASES_SIZE {{ nb_outputs }}
-
-#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/matmul.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/matmul.jinja
index 86b40d40784ca8fb6a59fb1172627843d3df80db..2168a59be92c3e7d3e2ded20c03d9d4e57478b11 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/matmul.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/matmul.jinja
@@ -4,9 +4,14 @@
 
 {# For layer configuration -#}
 
-#define {{name|upper}}_INPUT_A_DIMS_SIZE {{ input_dims[0]|length}}
-#define {{name|upper}}_INPUT_B_DIMS_SIZE {{ input_dims[1]|length}}
-#define {{name|upper}}_OUTPUT_DIMS_SIZE {{ output_dims[0]|length}}
+
+static const int {{ in_name[0]|upper }}_DIMS[] = { {{ in_dims[0]|join(", ") }} };
+static const int {{ in_name[1]|upper }}_DIMS[] = { {{ in_dims[1]|join(", ") }} };
+static const int {{ out_name[0]|upper }}_DIMS[] = { {{ out_dims[0]|join(", ") }} };
+
+#define {{name|upper}}_INPUT_A_DIMS_SIZE {{ in_dims[0]|length}}
+#define {{name|upper}}_INPUT_B_DIMS_SIZE {{ in_dims[1]|length}}
+#define {{name|upper}}_OUTPUT_DIMS_SIZE {{ out_dims[0]|length}}
 
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/mul.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/mul.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..e7c303e001983288c1286f5c5b006e9fdd2c61cd
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/mul.jinja
@@ -0,0 +1,20 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+/* ElemWise - mul layer */
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
+
+{# For layer configuration -#}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
+
+#define {{name|upper}}_IN_0_NB_DIMS {{ in_dims[0]|length}}
+#define {{name|upper}}_IN_1_NB_DIMS {{ in_dims[1]|length}}
+#define {{name|upper}}_OUT_0_NB_DIMS {{ out_dims[0]|length}}
+
+static const int {{name|upper}}_IN_0_DIMS[] = { {{ in_dims[0]|join(", ") }} };
+static const int {{name|upper}}_IN_1_DIMS[] = { {{ in_dims[1]|join(", ") }} };
+static const int {{name|upper}}_OUT_0_DIMS[] = { {{ out_dims[0]|join(", ") }} };
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/pool_config.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/pool_config.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..02586f2958896397b5d6b18cdc6f1ddfa27f476e
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/pool_config.jinja
@@ -0,0 +1,22 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+#include "typedefs.h"
+
+{# For layer configuration -#}
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
+
+// Attributes
+#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel_dims[1] }}
+#define {{ name|upper }}_KERNEL_WIDTH {{ kernel_dims[0] }}
+#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
+#define {{ name|upper }}_PADDING_X {{ padding[0] }}
+#define {{ name|upper }}_STRIDE_Y {{ stride_dims[1] }}
+#define {{ name|upper }}_STRIDE_X {{ stride_dims[0] }}
+
+#define {{ name|upper }}_POOLING_TYPE {{ pool_type }}
+#define {{ name|upper }}_ACTIVATION {{ activation }}
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/pooling.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/pooling.jinja
deleted file mode 100644
index 9c2eccb8fa4735840a4d28118a8e50e05f8f7098..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/pooling.jinja
+++ /dev/null
@@ -1,20 +0,0 @@
-{#- For name header -#}
-#ifndef {{ name|upper }}_LAYER_H
-#define {{ name|upper }}_LAYER_H
-
-{# For layer configuration -#}
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
-#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
-#define {{ name|upper }}_PADDING_X {{ padding[0] }}
-#define {{ name|upper }}_STRIDE_Y {{ stride[1] }}
-#define {{ name|upper }}_STRIDE_X {{ stride[0] }}
-#define {{ name|upper }}_KERNEL_Y {{ kernel[1] }}
-#define {{ name|upper }}_KERNEL_X {{ kernel[0] }}
-#define {{ name|upper }}_POOLING_TYPE {{ pool_type }}
-
-#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/relu.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/relu.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..9e1a0ab99c3bc036bdc02275801d8374d3868b56
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/relu.jinja
@@ -0,0 +1,12 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+{% include "./_meminfo.jinja" %}
+
+{# For layer configuration -#}
+#define {{ name|upper }}_INPUTS_SIZE {{ in_size[0] }}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
+
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/slice.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/slice.jinja
index 553bb2dbb2e233d5971916d7867734085722d24a..64ae093a0b513711ef74aafc01e99646070c106a 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/slice.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/slice.jinja
@@ -3,14 +3,15 @@
 #define {{ name|upper }}_LAYER_H
 
 /* Slice layer */
+{% include "./_meminfo.jinja" %}
 
 {# For layer configuration -#}
-#define {{ name|upper }}_NB_CHANNELS {{ nb_inputs }}
-#define {{ name|upper }}_NB_OUTPUTS {{ nb_outputs }}
+#define {{ name|upper }}_NB_CHANNELS {{ nb_in }}
+#define {{ name|upper }}_NB_OUTPUTS {{ nb_out }}
 
 #define {{ name|upper }}_NB_AXES {{ axes|length }}
-static const int {{ name|upper }}_AXES[] = { {%- for axe in axes %}{{ axe }}, {% endfor -%} };
-static const int {{ name|upper }}_STARTS[] = { {%- for start in starts %}{{ start }}, {% endfor -%} };
-static const int {{ name|upper }}_ENDS[] = { {%- for end in ends %}{{ end }}, {% endfor -%} };
+static const int {{ name|upper }}_AXES[] = { {{ axes | join(', ') }} };
+static const int {{ name|upper }}_STARTS[] = { {{ starts | join(', ') }} };
+static const int {{ name|upper }}_ENDS[] = { {{ ends | join(', ') }} };
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/softmax.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/softmax.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..9f1eb503251117c70c07e81e478bdd84d6beb566
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/softmax.jinja
@@ -0,0 +1,13 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+{# For layer configuration -#}
+#define {{ name|upper }}_INPUTS_SIZE {{ in_size[0] }}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
+static const int {{ name|upper }}_DIMS[] = { {{ in_dims[0]|join(", ") }} };
+#define {{ name|upper }}_AXIS {{ axis }}
+#define {{ name|upper }}_INPUT_DIMS_SIZE {{ in_dims[0]|length}}
+
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/sub.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/sub.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..9ed0a8f718bb9f66b3541755a86b24d4de82390f
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/sub.jinja
@@ -0,0 +1,22 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+/* ElemWise - sub layer */
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
+
+{# For layer configuration -#}
+#define {{ name|upper }}_INPUTS_SIZE {{ in_size[0] }}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
+
+
+#define {{in_name[0]|upper}}_NB_DIM {{ in_dims[0]|length }}
+#define {{in_name[1]|upper}}_NB_DIM {{ in_dims[1]|length }}
+#define {{out_name[0]|upper}}_NB_DIM {{ out_dims[0]|length }}
+
+static const int {{ in_name[0]|upper }}_DIMS[] = { {{ in_dims[0]|join(", ") }} };
+static const int {{ in_name[1]|upper }}_DIMS[] = { {{ in_dims[1]|join(", ") }} };
+static const int {{ out_name[0]|upper }}_DIMS[] = { {{ out_dims[0]|join(", ") }} };
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/_mem_offset.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/_mem_offset.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..4f44773ae901606c0ace5fe9af39099acf722498
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/_mem_offset.jinja
@@ -0,0 +1,3 @@
+{% for outidx in range(nb_out) -%}
+{{out_cdtype[outidx]}}* {{out_name[outidx]}} = ({{out_cdtype[outidx]}}*) mem + {{out_name[outidx]|upper}}_OFFSET;
+{% endfor %}
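+{#- Illustrative rendering for a hypothetical float32 output named "conv1_output":
+    float* conv1_output = (float*) mem + CONV1_OUTPUT_OFFSET;
+-#}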
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/_save_outputs.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/_save_outputs.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..f23238de69162d78b972670b3c814c48a003c428
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/_save_outputs.jinja
@@ -0,0 +1,30 @@
+{% for outidx in range(nb_out) -%}
+    {%if out_dims[outidx]|length == 4 %}
+    printf("{{ out_name[outidx] }} (NCHW format):\r\n");
+    int N_{{ out_name[outidx] }} = {{ out_dims[outidx][0] }};
+    int C_{{ out_name[outidx] }} = {{ out_dims[outidx][1] }};
+    int H_{{ out_name[outidx] }} = {{ out_dims[outidx][2] }};
+    int W_{{ out_name[outidx] }} = {{ out_dims[outidx][3] }};
+
+    for (int n = 0; n < N_{{ out_name[outidx] }}; ++n) {
+        for (int c = 0; c < C_{{ out_name[outidx] }}; ++c) {
+            printf("Batch %d, Channel %d:\r\n", n, c);
+            for (int h = 0; h < H_{{ out_name[outidx] }}; ++h) {
+                for (int w = 0; w < W_{{ out_name[outidx] }}; ++w) {
+                    printf("%f ", {{ out_name[outidx] }}[n * H_{{ out_name[outidx] }} * W_{{ out_name[outidx] }} * C_{{ out_name[outidx] }} + h * W_{{ out_name[outidx] }} * C_{{ out_name[outidx] }} + w * C_{{ out_name[outidx] }} + c]);
+                }
+                printf("\r\n");
+            }
+            printf("\r\n");
+        }
+    }
+    printf("\r\n");
+    {% else %}
+    printf("{{ out_name[outidx] }}:\r\n");
+    for (int o = 0; o < {{ out_size[outidx] }}; ++o) {
+        printf("%f ", {{ out_name[outidx] }}[o]);
+    }
+    printf("\r\n");
+    {% endif %}
+    printf("\r\n");
+{% endfor %}
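+{#- The 4-D branch walks the tensor in logical NCHW order but indexes the buffer
+    as NHWC (n*H*W*C + h*W*C + w*C + c), matching the HWC storage used by this export. -#}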
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/add.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/add.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..cd0cef77dc6675b6922049b622e449a80308ace6
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/add.jinja
@@ -0,0 +1,14 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+aidge_add_float32(
+    {{in_name[0]}},
+    {{in_name[1]}},
+    {{out_name[0]}},
+    {{name|upper}}_IN_0_DIMS,
+    {{name|upper}}_IN_1_DIMS,
+    {{name|upper}}_OUT_0_DIMS,
+    {{name|upper}}_IN_0_NB_DIMS,
+    {{name|upper}}_IN_1_NB_DIMS,
+    {{name|upper}}_OUT_0_NB_DIMS,
+    {{name|upper}}_OUTPUTS_SIZE);
+{% endfilter %}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/atan.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/atan.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..bb1bb89dfaa5e9e911b77a59b06d36fb38a867db
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/atan.jinja
@@ -0,0 +1,4 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+aidge_atan<{{name|upper}}_OUTPUTS_SIZE>({{in_name[0]}}, {{out_name[0]}});
+{% endfilter %}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/batchnorm2d.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/batchnorm2d.jinja
similarity index 94%
rename from aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/batchnorm2d.jinja
rename to aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/batchnorm2d.jinja
index 5e7c73c8e55233b2c3d93c99fe3dc6e7682fe503..ca13e3fa280405181d6894e59fc7f6eff46be600 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/batchnorm2d.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/batchnorm2d.jinja
@@ -1 +1 @@
-aidge_batchnorm2d_chw_{{dataformat}} ({{input_name}}, {{output_name}}, {{running_mean_name}}, {{running_var_name}}, {{weight_name}}, {{bias_name}}, {{ name|upper }}_EPSILON, {{ name|upper }}_NB_CHANNELS, {{ name|upper }}_CHANNELS_WIDTH, {{ name|upper }}_CHANNELS_HEIGHT);
\ No newline at end of file
+aidge_batchnorm2d_chw_{{dataformat}} ({{input_name}}, {{output_name}}, {{running_mean_name}}, {{running_var_name}}, {{weight_name}}, {{bias_name}}, {{ name|upper }}_EPSILON, {{ name|upper }}_NB_CHANNELS, {{ name|upper }}_CHANNELS_WIDTH, {{ name|upper }}_CHANNELS_HEIGHT);
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/concat.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/concat.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..46fe87e43c51c672b3d74bfbaabbb21aaac12ee7
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/concat.jinja
@@ -0,0 +1,20 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+float* {{ name|upper }}_INPUTS[] = {
+    {%- for i in range(nb_in) -%}
+        {{ in_name[i] }}{{ ", " if not loop.last else "" }}
+    {%- endfor -%}
+};
+
+unsigned int {{ name|upper }}_SIZES[] = {
+    {%- for i in range(nb_in) -%}
+        {{ name|upper }}_INPUT_{{i}}_SIZE{{ ", " if not loop.last else "" }}
+    {%- endfor -%}
+};
+
+aidge_concat<float, {{ nb_in }}> (
+    {{name|upper}}_AXIS,
+    {{ name|upper }}_INPUTS,
+    {{ name|upper }}_SIZES,
+    {{ out_name[0] }});
+{% endfilter %}
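+{#- Illustrative rendering for a hypothetical two-input concat named "cat0":
+    float* CAT0_INPUTS[] = { cat0_in0, cat0_in1 };
+    unsigned int CAT0_SIZES[] = { CAT0_INPUT_0_SIZE, CAT0_INPUT_1_SIZE };
+    aidge_concat<float, 2> (CAT0_AXIS, CAT0_INPUTS, CAT0_SIZES, cat0_output);
+-#}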
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/conv_kernel.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/conv_kernel.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..57027cdc7a750b5bb268bb6ce9c41731fdc11ae4
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/conv_kernel.jinja
@@ -0,0 +1,27 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+N2D2_Export::convcellPropagate<{{ in_name[0]|upper }}_NB_CHANNELS,
+                               {{ in_name[0]|upper }}_IN_HEIGHT,
+                               {{ in_name[0]|upper }}_IN_WIDTH,
+                               {{ out_name[0]|upper }}_NB_OUTPUTS,
+                               {{ out_name[0]|upper }}_OUT_HEIGHT,
+                               {{ out_name[0]|upper }}_OUT_WIDTH,
+                               {{ name|upper }}_PADDING_Y,
+                               {{ name|upper }}_PADDING_X,
+                               {{ name|upper }}_STRIDE_Y,
+                               {{ name|upper }}_STRIDE_X,
+                               {{ name|upper }}_KERNEL_HEIGHT,
+                               {{ name|upper }}_KERNEL_WIDTH,
+                               {{ name|upper }}_ACTIVATION,
+                               {{ in_name[0]|upper }}_CONT_OFFSET,
+                               {{ in_name[0]|upper }}_CONT_SIZE,
+                               {{ in_name[0]|upper }}_WRAP_OFFSET,
+                               {{ in_name[0]|upper }}_WRAP_SIZE,
+                               {{ in_name[0]|upper }}_STRIDE,
+                               {{ out_name[0]|upper }}_CONT_OFFSET,
+                               {{ out_name[0]|upper }}_CONT_SIZE,
+                               {{ out_name[0]|upper }}_WRAP_OFFSET,
+                               {{ out_name[0]|upper }}_WRAP_SIZE,
+                               {{ out_name[0]|upper }}_STRIDE>
+                               ({{in_name[0]}}, {{out_name[0]}}, {{in_name[2]}}, {{in_name[1]}}, {{ name|upper }}_SCALING);
+{% endfilter %}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/fc_kernel.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/fc_kernel.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..707fa15d5b681f8c6c17f9a60ac8b8c5771bf073
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/fc_kernel.jinja
@@ -0,0 +1,21 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+N2D2_Export::fccellPropagate<{{ in_name[0] | upper }}_NB_CHANNELS,
+                             {{ in_name[0] | upper }}_IN_HEIGHT,
+                             {{ in_name[0] | upper }}_IN_WIDTH,
+                             {{ out_name[0] | upper }}_NB_OUTPUTS,
+                             {{ out_name[0] | upper }}_OUT_HEIGHT,
+                             {{ out_name[0] | upper }}_OUT_WIDTH,
+                             {{name|upper}}_ACTIVATION,
+                             {{ in_name[0] | upper }}_CONT_OFFSET,
+                             {{ in_name[0] | upper }}_CONT_SIZE,
+                             {{ in_name[0] | upper }}_WRAP_OFFSET,
+                             {{ in_name[0] | upper }}_WRAP_SIZE,
+                             {{ in_name[0] | upper }}_STRIDE,
+                             {{ out_name[0] | upper }}_CONT_OFFSET,
+                             {{ out_name[0] | upper }}_CONT_SIZE,
+                             {{ out_name[0] | upper }}_WRAP_OFFSET,
+                             {{ out_name[0] | upper }}_WRAP_SIZE,
+                             {{ out_name[0] | upper }}_STRIDE>
+                             ({{ in_name[0] }}, {{ out_name[0] }}, {{ in_name[2] }}, {{ in_name[1] }}, {{ name | upper }}_SCALING);
+{% endfilter %}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/gather.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/gather.jinja
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/gather.jinja
rename to aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/gather.jinja
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/matmul.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/matmul.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8338cbeed7e5b79ee6311d1844d04f86f92dcb42
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/matmul.jinja
@@ -0,0 +1 @@
+aidge_matmul_chw_{{dataformat}} ({{in_name[0]}}, {{in_name[1]}}, {{out_name[0]}}, {{in_name[0]}}_DIMS, {{in_name[1]}}_DIMS, {{out_name[0]}}_DIMS, {{name|upper}}_INPUT_A_DIMS_SIZE, {{name|upper}}_INPUT_B_DIMS_SIZE, {{name|upper}}_OUTPUT_DIMS_SIZE);
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/mul.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/mul.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..1d6b80022b86338551a7b9b7fdef6e43ac940dc5
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/mul.jinja
@@ -0,0 +1,14 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+aidge_mul_float32(
+    {{in_name[0]}},
+    {{in_name[1]}},
+    {{out_name[0]}},
+    {{name|upper}}_IN_0_DIMS,
+    {{name|upper}}_IN_1_DIMS,
+    {{name|upper}}_OUT_0_DIMS,
+    {{name|upper}}_IN_0_NB_DIMS,
+    {{name|upper}}_IN_1_NB_DIMS,
+    {{name|upper}}_OUT_0_NB_DIMS,
+    {{name|upper}}_OUTPUTS_SIZE);
+{% endfilter %}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/pool_kernel.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/pool_kernel.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..b53480b11d30b2111bcfdf4f2116f7b165ad5749
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/pool_kernel.jinja
@@ -0,0 +1,28 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+N2D2_Export::poolcellPropagate<{{ in_name[0]|upper }}_NB_CHANNELS,
+                               {{ in_name[0]|upper }}_IN_HEIGHT,
+                               {{ in_name[0]|upper }}_IN_WIDTH,
+                               {{ out_name[0]|upper }}_NB_OUTPUTS,
+                               {{ out_name[0]|upper }}_OUT_HEIGHT,
+                               {{ out_name[0]|upper }}_OUT_WIDTH,
+                               {{ name|upper }}_PADDING_Y,
+                               {{ name|upper }}_PADDING_X,
+                               {{ name|upper }}_STRIDE_Y,
+                               {{ name|upper }}_STRIDE_X,
+                               {{ name|upper }}_KERNEL_HEIGHT,
+                               {{ name|upper }}_KERNEL_WIDTH,
+                               {{ name|upper }}_POOLING_TYPE,
+                               {{ name|upper }}_ACTIVATION,
+                               {{ in_name[0]|upper }}_CONT_OFFSET,
+                               {{ in_name[0]|upper }}_CONT_SIZE,
+                               {{ in_name[0]|upper }}_WRAP_OFFSET,
+                               {{ in_name[0]|upper }}_WRAP_SIZE,
+                               {{ in_name[0]|upper }}_STRIDE,
+                               {{ out_name[0]|upper }}_CONT_OFFSET,
+                               {{ out_name[0]|upper }}_CONT_SIZE,
+                               {{ out_name[0]|upper }}_WRAP_OFFSET,
+                               {{ out_name[0]|upper }}_WRAP_SIZE,
+                               {{ out_name[0]|upper }}_STRIDE>
+                               ({{in_name[0]}}, {{out_name[0]}});
+{% endfilter %}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/relu.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/relu.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..465a903b9bd2993d76bf33bf7363df6a51bdd627
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/relu.jinja
@@ -0,0 +1,4 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+aidge_relu_float32({{in_name[0]}}, {{out_name[0]}}, {{name|upper}}_INPUTS_SIZE);
+{% endfilter %}
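+{#- Assumed kernel signature (sketch, not defined in this template):
+    void aidge_relu_float32(const float* input, float* output, unsigned int size);
+-#}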
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/reshape.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/reshape.jinja
similarity index 100%
rename from aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/reshape.jinja
rename to aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/reshape.jinja
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/slice.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/slice.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..5394f4d55d1c3ee562e33e323d9a80ff9756f5a4
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/slice.jinja
@@ -0,0 +1,4 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+aidge_slice_float32 ({{in_name[0]}}, {{out_name[0]}}, {{name|upper}}_AXES, {{name|upper}}_STARTS, {{name|upper}}_ENDS, {{name|upper}}_NB_AXES, {{name|upper}}_NB_CHANNELS);
+{% endfilter %}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/softmax.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/softmax.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..0b2a30bfed9d85c9ec2e7766b2e9565c3f154c69
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/softmax.jinja
@@ -0,0 +1,4 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+aidge_softmax_chw_float32({{in_name[0]}}, {{out_name[0]}}, {{name|upper}}_DIMS, {{name|upper}}_AXIS, {{name|upper}}_INPUT_DIMS_SIZE, {{name|upper}}_OUTPUTS_SIZE);
+{% endfilter %}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/sub.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/sub.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..6f63d3c47f92ed68f281da9b16c2d292df50918f
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/sub.jinja
@@ -0,0 +1,4 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+aidge_sub_float32({{in_name[0]}}, {{in_name[1]}}, {{out_name[0]}}, {{name|upper}}_OUTPUTS_SIZE);
+{% endfilter %}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/transpose.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/transpose.jinja
similarity index 97%
rename from aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/transpose.jinja
rename to aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/transpose.jinja
index 748cc71ac86c31d32bf93396dfd9796948355bbc..5103a4aad758d8374f76587219a4c4038505b1d2 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/transpose.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/forward_call/transpose.jinja
@@ -1 +1 @@
-aidge_transpose_chw_{{dataformat}} ({{input_name}}, {{output_name}},{{input_name}}_DIMS, {{name}}_PERMUTATIONS, {{output_name}}_DIMS, {{name|upper}}_OUTPUT_DIMS_SIZE, {{name|upper}}_OUTPUT_SIZE);
\ No newline at end of file
+aidge_transpose_chw_{{dataformat}} ({{input_name}}, {{output_name}},{{input_name}}_DIMS, {{name}}_PERMUTATIONS, {{output_name}}_DIMS, {{name|upper}}_OUTPUT_DIMS_SIZE, {{name|upper}}_OUTPUT_SIZE);
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation.jinja
deleted file mode 100644
index b1a2289e77ab5789812ad91f4bd4dfccbfcae64e..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation.jinja
+++ /dev/null
@@ -1 +0,0 @@
-    aidge_{{activation_type|lower}}_{{dataformat}}({{input_name}}, {{output_name}}, {% if activation_type is eq('softmax') %} {{input_name}}_DIMS, {{name|upper}}_AXIS, {{name|upper}}_INPUT_DIMS_SIZE,{% endif %} {{name|upper}}_OUTPUTS_SIZE);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation_chw.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation_chw.jinja
deleted file mode 100644
index 796a3718483e4fe995e9904c3faeb24693ad5431..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation_chw.jinja
+++ /dev/null
@@ -1 +0,0 @@
-    aidge_{{activation_type|lower}}_chw_{{dataformat}}({{input_name}}, {{output_name}}, {% if activation_type is eq('softmax') %} {{input_name}}_DIMS, {{name|upper}}_AXIS, {{name|upper}}_INPUT_DIMS_SIZE,{% endif %} {{name|upper}}_OUTPUTS_SIZE);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/concat.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/concat.jinja
deleted file mode 100644
index af6999b3d526fd9bf7678c17c8f96efe745c1732..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/concat.jinja
+++ /dev/null
@@ -1,6 +0,0 @@
-aidge_concat{{nb_inputs}}_{{dataformat}} (
-    {{name|upper}}_AXIS,  
-    {%- for i in range(nb_inputs) -%}
-     {{list_in_names[i]}}, {{ name|upper }}_INPUT{{i}}_SIZE,
-    {%- endfor -%}
-    {{output_name}});
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/convolution.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/convolution.jinja
deleted file mode 100644
index 35aaaa09b916cead955eb3f2ad49dfc296b890af..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/convolution.jinja
+++ /dev/null
@@ -1 +0,0 @@
-aidge_conv2d_hwc_{{dataformat}} ({{input_name}}, {{weight_name}}, {{bias_name}}, {{output_name}}, {{name|upper}}_NB_CHANNELS, {{name|upper}}_CHANNELS_WIDTH, {{name|upper}}_CHANNELS_HEIGHT, {{name|upper}}_KERNEL_Y, {{name|upper}}_KERNEL_X, {{name|upper}}_NB_OUTPUTS, {{name|upper}}_OUTPUTS_WIDTH, {{name|upper}}_OUTPUTS_HEIGHT, {{name|upper}}_PADDING_X, {{name|upper}}_PADDING_Y, {{name|upper}}_STRIDE_X, {{name|upper}}_STRIDE_Y, {{name|upper}}_DILATION_X, {{name|upper}}_DILATION_Y);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/elemwise.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/elemwise.jinja
deleted file mode 100644
index 0f3f1c8758a4d4d8944384973bf90054b5e91fca..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/elemwise.jinja
+++ /dev/null
@@ -1 +0,0 @@
-aidge_{{elemwise_type|lower}}_{{dataformat}}  ({{inputa_name}}, {{inputb_name}}, {{output_name}}, {{inputa_name}}_DIMS, {{inputb_name}}_DIMS, {{output_name}}_DIMS,{{name|upper}}_INPUT_A_DIMS_SIZE,{{name|upper}}_INPUT_B_DIMS_SIZE,{{name|upper}}_OUTPUT_DIMS_SIZE, {{name|upper}}_OUTPUTS_SIZE);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/fc_chw.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/fc_chw.jinja
deleted file mode 100644
index 57aa7876f2d1f6a7d8327a6437eaadc976b03d6a..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/fc_chw.jinja
+++ /dev/null
@@ -1 +0,0 @@
-aidge_fc_chw_{{dataformat}} ({{input_name}}, {{weight_name}}, {{bias_name}}, {{output_name}}, {{name|upper}}_NB_CHANNELS, {{name|upper}}_CHANNEL_HEIGHT, {{name|upper}}_CHANNEL_WIDTH, {{name|upper}}_NB_OUTPUTS);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/fullyconnected.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/fullyconnected.jinja
deleted file mode 100644
index b57ffdfd3de8699b9c644c56d3c341ed764ee73e..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/fullyconnected.jinja
+++ /dev/null
@@ -1 +0,0 @@
-aidge_fc_{{dataformat}} ({{input_name}}, {{weight_name}}, {{bias_name}}, {{output_name}}, {{name|upper}}_BATCH_SIZE, {{name|upper}}_NB_INPUTS, {{name|upper}}_NB_OUTPUTS);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/matmul.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/matmul.jinja
deleted file mode 100644
index 15ff05fec3bb40332ac7968d20f594009f7903a4..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/matmul.jinja
+++ /dev/null
@@ -1 +0,0 @@
-aidge_matmul_chw_{{dataformat}} ({{inputa_name}}, {{inputb_name}}, {{output_name}}, {{inputa_name}}_DIMS, {{inputb_name}}_DIMS, {{output_name}}_DIMS ,{{name|upper}}_INPUT_A_DIMS_SIZE,{{name|upper}}_INPUT_B_DIMS_SIZE,{{name|upper}}_OUTPUT_DIMS_SIZE);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/pooling.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/pooling.jinja
deleted file mode 100644
index 86684af278e0014b12c1a7ef7f9de26131215dde..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/pooling.jinja
+++ /dev/null
@@ -1 +0,0 @@
-aidge_{{pool_type}}pool2d_{{dataformat}} ({{input_name}}, {{output_name}}, {{name|upper}}_NB_CHANNELS, {{name|upper}}_CHANNELS_WIDTH, {{name|upper}}_CHANNELS_HEIGHT, {{name|upper}}_KERNEL_X, {{name|upper}}_KERNEL_Y, {{name|upper}}_NB_OUTPUTS, {{name|upper}}_OUTPUTS_WIDTH, {{name|upper}}_OUTPUTS_HEIGHT, {{name|upper}}_PADDING_X, {{name|upper}}_PADDING_Y, {{name|upper}}_STRIDE_X, {{name|upper}}_STRIDE_Y);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/slice.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/slice.jinja
deleted file mode 100644
index 1ecfd3d5a41feeda44dd8542128147b917c1ff8d..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/slice.jinja
+++ /dev/null
@@ -1 +0,0 @@
-aidge_slice_{{dataformat}} ({{input_name}}, {{output_name}}, {{name|upper}}_AXES, {{name|upper}}_STARTS, {{name|upper}}_ENDS, {{name|upper}}_NB_AXES, {{name|upper}}_NB_CHANNELS);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_N2D2/templates/configuration/conv_config.jinja b/aidge_export_arm_cortexm/_N2D2/templates/configuration/conv_config.jinja
deleted file mode 100644
index 1e46543bd51e6bc529000d4a0f48cb8cfb52f831..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_N2D2/templates/configuration/conv_config.jinja
+++ /dev/null
@@ -1,49 +0,0 @@
-{#- For name header -#}
-#ifndef {{ name|upper }}_LAYER_H
-#define {{ name|upper }}_LAYER_H
-
-#include "typedefs.h"
-#include "nn_scaling_functions.hpp"
-
-{# For layer configuration -#}
-// In/Out
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
-
-// Attributes
-#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel[1] }}
-#define {{ name|upper }}_KERNEL_WIDTH {{ kernel[0] }}
-#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
-#define {{ name|upper }}_PADDING_X {{ padding[0] }}
-#define {{ name|upper }}_STRIDE_Y {{ stride[1] }}
-#define {{ name|upper }}_STRIDE_X {{ stride[0] }}
-#define {{ name|upper }}_DILATION_Y {{ dilation[1] }}
-#define {{ name|upper }}_DILATION_X {{ dilation[0] }}
-
-// Activation/Scaling
-#define {{ name|upper }}_ACTIVATION {{ activation }}
-
-{%- if scaling_type == "floating_point" %}
-static const N2D2_Export::FloatingPointScaling {{ name|upper }}_SCALING = { {{scaling_value}} };
-{%- elif scaling_type == "fixed_point" %}
-static const N2D2_Export::FixedPointScaling<{{scaling_value}}, {{fractional_bits}}> {{ name|upper }}_SCALING;
-{%- elif scaling_type == "single_shift" %}
-static const N2D2_Export::SingleShiftScaling<{{shift_value}}> {{ name|upper }}_SCALING;
-{%- else %}
-static const N2D2_Export::NoScaling {{ name|upper }}_SCALING;
-{%- endif %}
-
-{# Calculate sizes -#}
-{%- set weights_size = output_dims[0] * input_dims[0] * kernel[1] * kernel[0] %}
-// Sizes
-#define {{ name|upper }}_WEIGHTS_SIZE {{ weights_size }}
-#define {{ name|upper }}_BIASES_SIZE {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_SIZE {{ output_dims[0] * output_dims[1] * output_dims[2] }}
-#define {{ name|upper }}_CHANNELS_SIZE {{ input_dims[0] * input_dims[1] * input_dims[2] }}
-
-
-#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_N2D2/templates/configuration/pool_config.jinja b/aidge_export_arm_cortexm/_N2D2/templates/configuration/pool_config.jinja
deleted file mode 100644
index f514b375d67e39f5f254862b5ec3dadb4b6c61d0..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_N2D2/templates/configuration/pool_config.jinja
+++ /dev/null
@@ -1,27 +0,0 @@
-{#- For name header -#}
-#ifndef {{ name|upper }}_LAYER_H
-#define {{ name|upper }}_LAYER_H
-
-#include "typedefs.h"
-
-{# For layer configuration -#}
-// In/Out
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
-
-// Attributes
-#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel[1] }}
-#define {{ name|upper }}_KERNEL_WIDTH {{ kernel[0] }}
-#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
-#define {{ name|upper }}_PADDING_X {{ padding[0] }}
-#define {{ name|upper }}_STRIDE_Y {{ stride[1] }}
-#define {{ name|upper }}_STRIDE_X {{ stride[0] }}
-
-#define {{ name|upper }}_POOLING_TYPE {{ pool_type }}
-#define {{ name|upper }}_ACTIVATION {{ activation }}
-
-#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_N2D2/templates/kernel/conv_kernel.jinja b/aidge_export_arm_cortexm/_N2D2/templates/kernel/conv_kernel.jinja
deleted file mode 100644
index 5a0798ea14a0acc4f8d8f0e08cf8c4ee4ef8a5b0..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_N2D2/templates/kernel/conv_kernel.jinja
+++ /dev/null
@@ -1,19 +0,0 @@
-N2D2_Export::convcellPropagate<{{ name | upper }}_NB_CHANNELS,
-                               {{ name | upper }}_CHANNELS_HEIGHT, {{ name | upper }}_CHANNELS_WIDTH,
-                               {{ name | upper }}_NB_OUTPUTS,
-                               {{ name | upper }}_OUTPUTS_HEIGHT, {{ name | upper }}_OUTPUTS_WIDTH,
-                               {{ name | upper }}_PADDING_Y, {{ name | upper }}_PADDING_X,
-                               {{ name | upper }}_STRIDE_Y, {{ name | upper }}_STRIDE_X,
-                               {{ name | upper }}_KERNEL_HEIGHT, {{ name | upper }}_KERNEL_WIDTH,
-                               {{ name | upper }}_ACTIVATION,
-                               {{ parent_name | upper }}_MEM_CONT_OFFSET,
-                               {{ parent_name | upper }}_MEM_CONT_SIZE,
-                               {{ parent_name | upper }}_MEM_WRAP_OFFSET,
-                               {{ parent_name | upper }}_MEM_WRAP_SIZE,
-                               {{ parent_name | upper }}_MEM_STRIDE,
-                               {{ name | upper }}_MEM_CONT_OFFSET,
-                               {{ name | upper }}_MEM_CONT_SIZE,
-                               {{ name | upper }}_MEM_WRAP_OFFSET,
-                               {{ name | upper }}_MEM_WRAP_SIZE,
-                               {{ name | upper }}_MEM_STRIDE>
-                               ({{ inputs_name }}, {{ outputs_name }}, {{ biases_name }}, {{ weights_name}}, {{ name | upper }}_SCALING);
diff --git a/aidge_export_arm_cortexm/_N2D2/templates/kernel/fc_kernel.jinja b/aidge_export_arm_cortexm/_N2D2/templates/kernel/fc_kernel.jinja
deleted file mode 100644
index 5d252e2dfc808d1dd86c313471d442bf8bef9af6..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_N2D2/templates/kernel/fc_kernel.jinja
+++ /dev/null
@@ -1,18 +0,0 @@
-N2D2_Export::fccellPropagate<{{ name | upper }}_NB_CHANNELS,
-                             {{ name | upper }}_CHANNELS_HEIGHT, 
-                             {{ name | upper }}_CHANNELS_WIDTH,
-                             {{ name | upper }}_NB_OUTPUTS,
-                             {{ name | upper }}_OUTPUTS_HEIGHT, 
-                             {{ name | upper }}_OUTPUTS_WIDTH,
-                             {{ name | upper }}_ACTIVATION,
-                             {{ parent_name | upper }}_MEM_CONT_OFFSET,
-                             {{ parent_name | upper }}_MEM_CONT_SIZE,
-                             {{ parent_name | upper }}_MEM_WRAP_OFFSET,
-                             {{ parent_name | upper }}_MEM_WRAP_SIZE,
-                             {{ parent_name | upper }}_MEM_STRIDE,
-                             {{ name | upper }}_MEM_CONT_OFFSET,
-                             {{ name | upper }}_MEM_CONT_SIZE,
-                             {{ name | upper }}_MEM_WRAP_OFFSET,
-                             {{ name | upper }}_MEM_WRAP_SIZE,
-                             {{ name | upper }}_MEM_STRIDE>
-                             ({{ inputs_name }}, {{ outputs_name }}, {{ biases_name }}, {{ weights_name}}, {{ name | upper }}_SCALING);
diff --git a/aidge_export_arm_cortexm/_N2D2/templates/kernel/pool_kernel.jinja b/aidge_export_arm_cortexm/_N2D2/templates/kernel/pool_kernel.jinja
deleted file mode 100644
index d1207bd6575dc1f20f364af9d1365923c09a66b2..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/_N2D2/templates/kernel/pool_kernel.jinja
+++ /dev/null
@@ -1,20 +0,0 @@
-N2D2_Export::poolcellPropagate<{{ name | upper }}_NB_CHANNELS,
-                               {{ name | upper }}_CHANNELS_HEIGHT, {{ name | upper }}_CHANNELS_WIDTH,
-                               {{ name | upper }}_NB_OUTPUTS,
-                               {{ name | upper }}_OUTPUTS_HEIGHT, {{ name | upper }}_OUTPUTS_WIDTH,
-                               {{ name | upper }}_PADDING_Y, {{ name | upper }}_PADDING_X,
-                               {{ name | upper }}_STRIDE_Y, {{ name | upper }}_STRIDE_X,
-                               {{ name | upper }}_KERNEL_HEIGHT, {{ name | upper }}_KERNEL_WIDTH,
-                               {{ name | upper }}_POOLING_TYPE,
-                               {{ name | upper }}_ACTIVATION,
-                               {{ parent_name | upper }}_MEM_CONT_OFFSET,
-                               {{ parent_name | upper }}_MEM_CONT_SIZE,
-                               {{ parent_name | upper }}_MEM_WRAP_OFFSET,
-                               {{ parent_name | upper }}_MEM_WRAP_SIZE,
-                               {{ parent_name | upper }}_MEM_STRIDE,
-                               {{ name | upper }}_MEM_CONT_OFFSET,
-                               {{ name | upper }}_MEM_CONT_SIZE,
-                               {{ name | upper }}_MEM_WRAP_OFFSET,
-                               {{ name | upper }}_MEM_WRAP_SIZE,
-                               {{ name | upper }}_MEM_STRIDE>
-                               ({{ inputs_name }}, {{ outputs_name }});
diff --git a/aidge_export_arm_cortexm/__init__.py b/aidge_export_arm_cortexm/__init__.py
index c79216345f7afcefb09abcdfe6ee759cf99622c5..7794c236c2565ea9b06a24ba40c9933c3a7641ae 100644
--- a/aidge_export_arm_cortexm/__init__.py
+++ b/aidge_export_arm_cortexm/__init__.py
@@ -5,3 +5,5 @@ This module has to be used with the Aidge suite
 """
 
 from .export import *
+from .export_registry import ExportLibAidgeARM, ExportLibCMSISNN
+from .operators import *
diff --git a/aidge_export_arm_cortexm/boards/__init__.py b/aidge_export_arm_cortexm/boards/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/aidge_export_arm_cortexm/boards/stm32/H7/Src/main.c b/aidge_export_arm_cortexm/boards/stm32/H7/Src/main.c
index c3d1559ee4f1490863e0606d92ecf90789cd0782..203db42842226fcb0a0411c8b136f0bcfabec8a8 100644
--- a/aidge_export_arm_cortexm/boards/stm32/H7/Src/main.c
+++ b/aidge_export_arm_cortexm/boards/stm32/H7/Src/main.c
@@ -26,7 +26,7 @@
 
 #include <stdio.h>
 
-#include "dnn/include/dnn.h"
+#include "dnn/include/forward.hpp"
 
 /* USER CODE END Includes */
 
@@ -83,7 +83,7 @@ int main(void)
   /* USER CODE BEGIN 1 */
 
   /* USER CODE END 1 */
-  
+
 
   /* Enable I-Cache---------------------------------------------------------*/
   SCB_EnableICache();
@@ -125,7 +125,7 @@ int main(void)
   printf("********************** END DEMO *********************\r\n");
   printf("*****************************************************\r\n");
   printf("\r\n");
-    
+
   /* USER CODE END 3 */
 }
 
@@ -139,15 +139,15 @@ void SystemClock_Config(void)
   RCC_ClkInitTypeDef RCC_ClkInitStruct = {0};
   RCC_PeriphCLKInitTypeDef PeriphClkInitStruct = {0};
 
-  /** Supply configuration update enable 
+  /** Supply configuration update enable
   */
   HAL_PWREx_ConfigSupply(PWR_LDO_SUPPLY);
-  /** Configure the main internal regulator output voltage 
+  /** Configure the main internal regulator output voltage
   */
   __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE1);
 
   while(!__HAL_PWR_GET_FLAG(PWR_FLAG_VOSRDY)) {}
-  /** Initializes the CPU, AHB and APB busses clocks 
+  /** Initializes the CPU, AHB and APB busses clocks
   */
   RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSI;
   RCC_OscInitStruct.HSIState = RCC_HSI_DIV1;
@@ -166,7 +166,7 @@ void SystemClock_Config(void)
   {
     Error_Handler();
   }
-  /** Initializes the CPU, AHB and APB busses clocks 
+  /** Initializes the CPU, AHB and APB busses clocks
   */
   RCC_ClkInitStruct.ClockType = RCC_CLOCKTYPE_HCLK|RCC_CLOCKTYPE_SYSCLK
                               |RCC_CLOCKTYPE_PCLK1|RCC_CLOCKTYPE_PCLK2
@@ -355,7 +355,7 @@ void Error_Handler(void)
   * @retval None
   */
 void assert_failed(uint8_t *file, uint32_t line)
-{ 
+{
   /* USER CODE BEGIN 6 */
   /* User can add his own implementation to report the file name and line number,
      tex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */
diff --git a/aidge_export_arm_cortexm/boards/stm32/__init__.py b/aidge_export_arm_cortexm/boards/stm32/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/aidge_export_arm_cortexm/export.py b/aidge_export_arm_cortexm/export.py
index 3e8e2d520cb1b899737de7aae689254b9384aa1b..d1f6595e89eb327ddb67b39a21d708c9078ea3ab 100644
--- a/aidge_export_arm_cortexm/export.py
+++ b/aidge_export_arm_cortexm/export.py
@@ -1,162 +1,55 @@
-import re
 import os
 import shutil
 from pathlib import Path
-import numpy as np
-from aidge_core.export_utils.data_conversion import aidge2c
-from aidge_core.export_utils.code_generation import *
-from aidge_export_arm_cortexm.utils import (ROOT, AVAILABLE_BOARDS, has_board, \
-                                            OPERATORS_REGISTRY, supported_operators)
-import aidge_export_arm_cortexm.operators
-from aidge_export_arm_cortexm.utils.scheduler import topological_sort
-from aidge_export_arm_cortexm.utils.generation import get_functions_from_c_file, get_functions_from_c_folder, get_filenames_from_folder
-from aidge_export_arm_cortexm.utils.converter import *
-from aidge_export_arm_cortexm.memory import *
+from aidge_export_arm_cortexm.utils import (ROOT, AVAILABLE_BOARDS, has_board)
+from aidge_export_arm_cortexm.export_registry import ExportLibAidgeARM
+# from aidge_export_arm_cortexm.utils.converter import numpy_dtype2ctype
 
+from aidge_core.mem_info import compute_default_mem_info, generate_optimized_memory_info
+from aidge_core.export_utils import scheduler_export
 
 
+BOARD_PATH: Path = ROOT / "boards"
+
+BOARDS_MAP: dict[str, Path] = {
+    "stm32h7" : BOARD_PATH / "stm32" / "H7",
+}
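+# Adding a target is a one-line change here, e.g. (hypothetical board):
+# BOARDS_MAP["stm32f7"] = BOARD_PATH / "stm32" / "F7"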
+
 def export(export_folder_name,
            graphview,
            scheduler = None,
            board:str ="stm32h7",
-           library:str = "aidge",
            mem_wrapping = False):
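+    """Export a scheduled graph as an ARM Cortex-M project.
+
+    Illustrative call, assuming an already-generated scheduler ``sched``:
+        export("my_export", model, scheduler=sched, board="stm32h7")
+    """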
 
-    # Create export directory
-    export_folder = Path().absolute() / export_folder_name
-    os.makedirs(str(export_folder), exist_ok=True)
-
-    # Create dnn directory
-    dnn_folder = export_folder / "dnn"
-    os.makedirs(str(dnn_folder), exist_ok=True)
-
-    # Determine which board the user wants
-    # to select correct config
-    if has_board(board):
-        board_path = AVAILABLE_BOARDS[board]
-    else:
-        raise ValueError(f"{board} not found in the package. Please among those boards: {list(AVAILABLE_BOARDS.keys())}")
-
-    # Copy all static files in the export
-    shutil.copytree(board_path, str(export_folder), dirs_exist_ok=True)
-
-    # For N2D2 library, copy static folder to export/include
-    if library == "n2d2":
-        dnn_include_folder = dnn_folder / "include"
-        os.makedirs(str(dnn_include_folder), exist_ok=True)
-        shutil.copytree(str(ROOT / "_N2D2" / "static"), str(dnn_include_folder), dirs_exist_ok=True)
-
-    # Create statistics directory
-    stats_folder = export_folder / "statistics"
-    os.makedirs(str(stats_folder), exist_ok=True)
-
-    # Sort layers according to a scheduler
-    if not isinstance(scheduler, aidge_core.Scheduler):
-        # No scheduler provided by the user, use the default one
-        list_forward_nodes = topological_sort(graphview)
-        mem_size, mem_info = compute_default_mem_info(list_forward_nodes)
-    else:
-        list_forward_nodes = scheduler.get_static_scheduling()
-        mem_size, mem_info = generate_optimized_memory_info(stats_folder, scheduler, mem_wrapping)
-
-    # Set some lists of elements for generating forward file
-    list_actions = []
-    list_configs = []
-
-    # Export layer configurations
-    for node in list_forward_nodes:
-        if node.type() == "Producer":
-            # We do not treat Producer here but i the nodes which use them
-            continue
-
-        if node.type() in supported_operators():
-            op = OPERATORS_REGISTRY[node.type()](node, board, library)
-            # Export the configuration
-            list_configs = op.export(dnn_folder, list_configs)
-
-            # Add forward kernel
-            list_actions = op.forward(list_actions)
-        else:
-            print(f"Warning: {node.type()} is not supported in the export.\nPlease add the implementation.")
-
-    # Generate the memory file
-    generate_file(
-        str(dnn_folder / "memory" / "mem_info.h"),
-        str(ROOT / "templates" / "memory" / "mem_info.jinja"),
-        mem_size = mem_size,
-        mem_info_legends = MEMORY_INFO_TEMPLATE,
-        mem_info = mem_info,
-        mem_alignment = 1  # Fixed memory alignement so far, feel free to adapt it
+    scheduler_export(
+        scheduler,
+        export_folder_name,
+        ExportLibAidgeARM,
+        memory_manager=generate_optimized_memory_info,
+        memory_manager_args={"stats_folder": f"{export_folder_name}/stats", "wrapping": mem_wrapping }
     )
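+    # mem_wrapping is forwarded to the memory planner; as in the removed
+    # memory.py helper, it enables wrap-around buffers when True.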
-    list_configs.append("memory/mem_info.h")
 
-    # Get entry nodes
-    # It supposes the entry nodes are producers with constant=false
-    # Store the datatype & name
-    list_inputs_name = []
-    first_element_added = False
-    for node in graphview.get_nodes():
-        if node.type() == "Producer":
-            if not first_element_added:
-                    export_type = aidge2c(node.get_operator().get_output(0).dtype())
-                    list_inputs_name.append((export_type, node.name()))
-                    first_element_added = True
-            if not node.get_operator().attr.constant:
-                export_type = aidge2c(node.get_operator().get_output(0).dtype())
-                list_inputs_name.append((export_type, node.name()))
+    gen_board_files(export_folder_name, board)
 
-    # Get output nodes
-    # Store the datatype & name, like entry nodes
 
-    list_outputs_name = []
-    for node in graphview.get_nodes():
-        if len(node.get_children()) == 0:
-            if node.get_operator().attr.has_attr('dtype'):
-                # Temporary fix because impossible to set DataType of a generic operator
-                export_type = aidge2c(node.get_operator().attr.dtype)
-            else:
-                export_type = aidge2c(node.get_operator().get_output(0).dtype())
+def supported_boards() -> list[str]:
+    return list(BOARDS_MAP.keys())
 
-            list_outputs_name.append((export_type, node.name()))
-
-    if library == "n2d2":
-        forward_file = "forward.cpp"
-    else:
-        forward_file = "forward.c"
-
-    # Generate forward file
-    generate_file(
-        str(dnn_folder / "src" / forward_file),
-        str(ROOT / "templates" / "network" / "network_forward.jinja"),
-        headers=set(list_configs),
-        actions=list_actions,
-        inputs= list_inputs_name,
-        outputs=list_outputs_name
-    )
+def gen_board_files(path:str, board:str)->None:
+    if board not in supported_boards():
+        supported = "\n\t-".join(supported_boards())
+        raise ValueError(f"Board {board} is not supported, supported boards are:\n\t-{supported}")
 
-    # Generate dnn internal API
-    if library == "aidge":
-        # For Aidge, parse all kernels source code and retrieve function prototypes
-        generate_file(
-            str(dnn_folder / "include" / "network_functions.h"),
-            str(ROOT / "templates" / "network" / "network_prototypes.jinja"),
-            libraries=[],
-            functions=get_functions_from_c_folder(str(dnn_folder / "src" / "kernels")),
-        )
-    elif library == "n2d2":
-        # For N2D2, parse all the files in include/kernel/ and retrieve the names of the files
-        generate_file(
-            str(dnn_folder / "include" / "network_functions.h"),
-            str(ROOT / "templates" / "network" / "network_prototypes.jinja"),
-            libraries=[],
-            files=[str(Path("kernels") / x) for x in get_filenames_from_folder(str(dnn_folder / "include" / "kernels"), r'^.*\.hpp$')],
-        )
+    if isinstance(path, str): path = Path(path)
+    # Create the dnn directory if it does not exist
+    dnn_folder = path / "dnn"
+    os.makedirs(str(dnn_folder), exist_ok=True)
 
-    # Generate dnn API
-    generate_file(
-        str(dnn_folder / "include" / "dnn.h"),
-        str(ROOT / "templates" / "network" / "dnn_header.jinja"),
-        libraries=["stdint.h"],
-        functions=get_functions_from_c_file(str(dnn_folder / "src" / forward_file)),
-    )

+    # Copy the static files of the selected board into the export
+    shutil.copytree(BOARDS_MAP[board], str(path), dirs_exist_ok=True)
+    # Copy the N2D2 static folder to export/include (always done now that the library option is gone)
+    dnn_include_folder = dnn_folder / "include"
+    os.makedirs(str(dnn_include_folder), exist_ok=True)
+    shutil.copytree(str(ROOT / "_N2D2" / "static"), str(dnn_include_folder), dirs_exist_ok=True)
diff --git a/aidge_export_arm_cortexm/export_registry.py b/aidge_export_arm_cortexm/export_registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..46c0653ad05895d13ca0d9be7fdb5f4314f2f775
--- /dev/null
+++ b/aidge_export_arm_cortexm/export_registry.py
@@ -0,0 +1,13 @@
+from aidge_core.export_utils import ExportLib
+
+class ExportLibAidgeARM(ExportLib):
+    _name="aidge_arm"
+
+
+# TODO ugly fix for Tensor registration issue...
+import aidge_core
+aidge_core.register_Tensor(["aidge_arm", aidge_core.dtype.float32],
+                           aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.float32]))
+
+class ExportLibCMSISNN(ExportLib):
+    _name="export_cmsisnn"
diff --git a/aidge_export_arm_cortexm/memory.py b/aidge_export_arm_cortexm/memory.py
deleted file mode 100644
index 7f7983fc7898bbd2d7fa383ecc0b5f16f290918f..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/memory.py
+++ /dev/null
@@ -1,130 +0,0 @@
-import os
-import shutil
-from typing import List
-from pathlib import Path
-import aidge_core
-import aidge_backend_cpu
-
-# for each layer, 
-# name              [size, stride, length, count, contigious offset, contigious size, wrapping offset, wrapping size]
-# true values       [nb_outputs, nb_outputs, width, width, offset start, total size, 0, 0]
-# Example:
-#define ENV_MEM_SIZE 3
-#define ENV_MEM_STRIDE 3
-#define ENV_MEM_LENGTH 224
-#define ENV_MEM_COUNT 224
-#define ENV_MEM_CONT_OFFSET 0
-#define ENV_MEM_CONT_SIZE 150528
-#define ENV_MEM_WRAP_OFFSET 0
-#define ENV_MEM_WRAP_SIZE 0
-MEMORY_INFO_TEMPLATE = ["layer_name", "size", "stride", "length", "count", "cont_offset", "cont_size", "wrap_offset", "wrap_size"]
-
-# for each layer, name: [size, offset start] (old style)
-# Example:
-#define ENV_MEM_SIZE 3
-#define ENV_OFFSET 0
-# MEMORY_INFO_TEMPLATE = ["layer_name", "size", "offset"]
-
-
-# Default memory management, which can be used for development
-def compute_default_mem_info(scheduler: aidge_core.Scheduler):
-    
-    list_forward_nodes = scheduler
-    mem_info = []
-    mem_size = 0
-
-    # Exclude Producers and the last layers (because the results are stored outside the export)
-    for i, node in enumerate(list_forward_nodes):
-        if node.type() != "Producer" and node.type() != "Reshape":
-        # if node.type() != "Producer":
-            if len(node.get_children()) != 0:
-                dims = node.get_operator().get_output(0).dims()
-                mem = 1
-                for dim in dims:
-                    mem *= dim
-
-                # Add memory info
-                # Only size and cont_offset matter
-                mem_info.append([node.name(), mem, 0, 0, 0, mem_size, mem, 0, 0])
-                
-                # Increment offset for the next layer
-                mem_size += mem
-
-    return mem_size, mem_info
-
-
-def generate_optimized_memory_info(stats_folder: Path,
-                                   scheduler: aidge_core.Scheduler,
-                                   wrapping:bool = False):
-    
-    # The forward dims has to done outside the function
-    # Also supposed the generation of the scheduler has been performed outside
-    # Otherwise decomment the following line
-    # scheduler.generate_scheduling()
-
-    # Generate the memory manager
-    # So far, the Producers are not take in consideration in the meory manager => inc_producers=False
-    mem_manager = scheduler.generate_memory(inc_producers=False, wrap_around_buffer=wrapping)
-
-    # In the export, we currently use an unified memory buffer whose size 
-    # is determined by the memory peak usage
-    mem_size = mem_manager.get_peak_usage()
-    mem_info = []
-
-    mem_planes = mem_manager.get_planes()
-
-    for node in scheduler.get_static_scheduling():
-
-        # Skip memory management for the parameter producers
-        if node.type() == "Producer":
-            if node.get_operator().attr.constant:
-                continue
-            else:
-                # Input memory management (suppose tensor ends with [:, channel, height, width]))
-                tensor = node.get_operator().get_output(0)
-                if tensor is None:
-                    raise RuntimeError("Warning input producer not provided")
-                if len(tensor.dims()) < 3:
-                    raise RuntimeError("Input producer dimensions must be with [:, channel, height, width]")
-
-                name = node.name()
-                size = tensor.dims()[-3]    # Should be nb_channels
-                stride = tensor.dims()[-3]  # Should be nb_channels
-                length = tensor.dims()[-1]  # Should be width
-                count = tensor.dims()[-2]   # Should be height
-                cont_offset = 0             # Suppose input data is stored outside the export function
-                                            # so the memory offset is not important to consider
-                cont_size = tensor.dims()[-1] * tensor.dims()[-2] * tensor.dims()[-3] # Size of input
-                wrap_offset = 0     # No wrapping
-                wrap_size = 0       # No wrapping
-        # elif node.type() != "Reshape":
-        else:
-            plane = mem_planes[node][0]
-
-            name = node.name()
-            size = plane.size
-            stride = plane.stride
-            length = plane.length
-            count = plane.count
-            cont_offset = plane.get_contiguous_offset()
-            cont_size = plane.get_contiguous_size()
-            wrap_offset = plane.get_wrapped_offset()
-            wrap_size = plane.get_wrapped_size()
-
-        mem_info.append([name, size, stride, length, count, 
-                        cont_offset, cont_size, wrap_offset, wrap_size])
-
-    # Use gnuplot to generate the log
-    try:
-        os.makedirs(str(stats_folder / "graph"), exist_ok=True)
-        mem_manager.log("memory_info")
-        os.chmod("memory_info_plot.gnu", 0o777)
-        os.system("./memory_info_plot.gnu")
-        shutil.move("memory_info", str(stats_folder / "graph"/ "memory_info"))
-        shutil.move("memory_info_plot.png", str(stats_folder / "graph" / "memory_info_plot.png"))
-        os.remove("memory_info_plot.gnu")
-    except:
-        print("Please install gnuplot if you want memory plot from MemoryManager.")
-
-
-    return mem_size, mem_info
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/operators.py b/aidge_export_arm_cortexm/operators.py
index d6d6836b296516dee62f9fb77dda389e68cd6a5a..64eb57c5cb6d0780e33d3986b481384b484d4b2e 100644
--- a/aidge_export_arm_cortexm/operators.py
+++ b/aidge_export_arm_cortexm/operators.py
@@ -1,38 +1,16 @@
 import os
 import math
-import shutil
 import numpy as np
 from pathlib import Path
-from jinja2 import Environment, FileSystemLoader
-from typing import Tuple, List, Union, Dict
+from typing import Tuple, List
 
 import aidge_core
-from aidge_core import ExportNode
+from aidge_core.export_utils import ExportNode, ExportNodeCpp
 from aidge_core.export_utils.code_generation import *
-from aidge_core.export_utils.data_conversion import aidge2c
-
-from aidge_export_arm_cortexm.utils import ROOT, operator_register
-from aidge_export_arm_cortexm.utils.converter import numpy_dtype2ctype, aidge_datatype2dataformat, aidge2c
+from aidge_export_arm_cortexm.utils import ROOT
+from aidge_export_arm_cortexm.utils.converter import numpy_dtype2ctype
 from aidge_export_arm_cortexm.utils.generation import *
-
-##############################################
-################### Utils ####################
-##############################################
-
-def get_node_parents(node):
-    parents = []
-    for parent in node.get_parents():
-        if parent.type() != "Producer":
-            parents.append(parent)
-    return parents
-
-def get_producer_parents(node):
-    parents = []
-    for parent in node.get_parents():
-        if parent.type() == "Producer":
-            parents.append(parent)
-    return parents
-
+from aidge_export_arm_cortexm.export_registry import ExportLibAidgeARM
 
 ##############################################
 ############## Export functions ##############
@@ -62,40 +40,43 @@ def export_params(name:str,
 ################### Actions ##################
 ##############################################
 
-def set_up_output(name, dtype):
-    return f"{dtype}* {name} = ({dtype}*) mem + {name.upper()}_MEM_CONT_OFFSET;"
-
-
-##############################################
-############## Operators helper ##############
-##############################################
 
+@ExportLibAidgeARM.register("Producer", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class Producer_ARMCortexM(ExportNode):
 
-class Producer_ARMCortexM:
-    def __init__(self, node):
-        self.name = node.name()
-        self.operator = node.get_operator()
-        self.constant = self.operator.attr.constant
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.values = np.array(self.operator.get_output(0))
-
-    def export(self, export_file:Path, format:str = "NHWC"):
-
-        if (len(self.values.shape) == 4):
-            # Suppose original dataformat is NCHW
-            if format == "NCHW":
-                export_params(self.name,
-                              self.values.reshape(-1),
-                              str(export_file))
-            elif format == "NHWC":
-                export_params(self.name,
-                              np.transpose(self.values, (0, 2, 3, 1)).reshape(-1),
-                              str(export_file))
-            else:
-                raise RuntimeError("Producer format export not supported.")
-        else:
-            export_params(self.name,
-                          self.values.reshape(-1),
-                          str(export_file))
+        if len(self.values.shape) == 4:  # Note: export in HWC
+            self.values = np.transpose(self.values, (0, 2, 3, 1))
+        # The following block is a dirty fix for FC:
+        # FC weights in Aidge are laid out for a CHW input,
+        # while this export uses the HWC format,
+        # so the FC weights must be reordered.
+        # Note: the reorder is only needed when the FC input is 4-D
+        # (in_dims length == 4, i.e. H and W actually exist).
+
+        if len(self.values.shape) == 2:
+            children = node.get_children()
+            if len(children) == 1 and list(children)[0].type() == "FC":
+                data_in = list(children)[0].get_operator().get_input(0)
+                if len(data_in.dims()) == 4:
+                    C = data_in.dims()[1]
+                    H = data_in.dims()[2]
+                    W = data_in.dims()[3]
+                    # Transpose weights to match the HWC layout
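+                    # e.g. (illustrative) a (10, 32) weight with C=8, H=W=2 is
+                    # reshaped to (10, 8, 2, 2) and transposed to (10, 2, 2, 8),
+                    # so the flattened rows follow the HWC ordering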
+                    self.values = self.values.reshape(-1, C, H, W).transpose(0, 2, 3, 1)
+
+    def export(self, export_folder: Path):
+        header_path = f"include/parameters/{self.attributes['name']}.hpp"
+        export_params(
+            self.attributes['out_name'][0],
+            self.values.reshape(-1),
+            str(export_folder / header_path))
+        return [header_path]
+
+    def forward(self):
+        # A Producer does nothing during forward
+        return []
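
The CHW-to-HWC weight reorder above is easiest to sanity-check numerically. Below is a minimal, standalone NumPy sketch (hypothetical dimensions, not part of the export itself) showing that the transposed FC weights yield the same dot product on an HWC-flattened input as the original weights do on a CHW-flattened one:

```python
import numpy as np

# Hypothetical FC input dims (C, H, W) and output count; illustration only.
C, H, W, N_OUT = 3, 2, 2, 4
w_chw = np.random.rand(N_OUT, C * H * W).astype(np.float32)

# Same reorder as in Producer_ARMCortexM: each row is a flattened (C, H, W)
# kernel; permute it to (H, W, C), then flatten again.
w_hwc = w_chw.reshape(-1, C, H, W).transpose(0, 2, 3, 1).reshape(N_OUT, -1)

x = np.random.rand(C, H, W).astype(np.float32)
chw_result = w_chw @ x.reshape(-1)                     # input flattened in CHW order
hwc_result = w_hwc @ x.transpose(1, 2, 0).reshape(-1)  # input flattened in HWC order
assert np.allclose(chw_result, hwc_result, atol=1e-6)
```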
 
 
 class Scaling():
@@ -204,1035 +185,289 @@ class Scaling():
         return self.scaling
 
 
-@operator_register("ReLU")
-class ReLU_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Activation" / "Relu" / "aidge_relu_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "activation.jinja"),
-                name=self.name,
-                activation_type="\"RELU\"",
-                nb_inputs=np.prod(self.inputs_dims[0]),
-                nb_outputs=np.prod(self.outputs_dims[0]))
-
-        return list_configs
+# TODO: find a way to remove this dummy ExportNode
+@ExportLibAidgeARM.register("Pad2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class Pad_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        raise NotImplementedError("Standalone Pad2D nodes are not implemented")
 
-    def forward(self, list_actions:list):
 
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
 
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "activation.jinja"),
-                name=self.name,
-                activation_type="relu",
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name
-            ))
 
-        return list_actions
+@ExportLibAidgeARM.register("ReLU", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class ReLU_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
 
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "relu.jinja")
 
-@operator_register("Conv")
-class Conv_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.producers = []
-        # Exclude first input which is a real input
-        for i in range(1, len(node.inputs())):
-            producer = node.input(i)[0]
-            self.producers.append(Producer_ARMCortexM(producer))
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
-        self.scaling = Scaling()("no_scaling")
-        self.activation = "Linear"
-
-        self.kernel = node.get_operator().attr.kernel_dims
-        self.stride = node.get_operator().attr.stride_dims
-        self.dilation = node.get_operator().attr.dilation_dims
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "relu.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Relu" / "aidge_relu_float32.h"),
+        ]
 
+@ExportLibAidgeARM.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class Conv_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["activation"] = "Linear"
+        self.attributes.update(Scaling()("no_scaling"))
         # No padding with Conv
         # Use PaddedConv to add padding attribute
-        self.padding = [0, 0]
-
-        self.nb_channels = node.get_operator().in_channels()
-        self.nb_outputs = node.get_operator().out_channels()
-        if self.inputs[0] is None :
-            raise RuntimeError("")
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
-
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        # Export weights to NHWC format
-        self.producers[0].export(export_folder / "parameters" / f"{self.producers[0].name}.h")
-        list_configs.append(f"parameters/{self.producers[0].name}.h")
-
-        # Export biases
-        if (len(self.producers) > 1):
-            # Convert the biases to int32
-            if self.dataformat != "float32":
-                self.producers[1].values = self.producers[1].values.astype(np.int32)
-
-            self.producers[1].export(export_folder / "parameters" / f"{self.producers[1].name}.h")
-            list_configs.append(f"parameters/{self.producers[1].name}.h")
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "aidge_conv2d_hwc_float32.c"),
-                         str(Path(export_folder) / "src" / "kernels"))
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
-                         str(Path(export_folder) / "include"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "convolution.jinja"),
-                name=self.name,
-                input_dims=self.inputs_dims[0],
-                output_dims=self.outputs_dims[0],
-                kernel=self.kernel,
-                stride=self.stride,
-                padding=self.padding,
-                dilation=self.dilation)
-
-        elif self.library == "n2d2":
-            # Export configuration file
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_N2D2" / "templates" / "configuration" / "conv_config.jinja"),
-                name=self.name,
-                input_dims=self.inputs_dims[0],
-                output_dims=self.outputs_dims[0],
-                kernel=self.kernel,
-                stride=self.stride,
-                padding=self.padding,
-                dilation=self.dilation,
-                activation=self.activation,
-                **self.scaling)
-
-        return list_configs
-
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "convolution.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name,
-                weight_name=self.inputs[1].name(),
-                bias_name=self.inputs[2].name()
-            ))
-
-        elif self.library == "n2d2":
-            list_actions.append(generate_str(
-                str(ROOT / "_N2D2" / "templates" / "kernel" / "conv_kernel.jinja"),
-                name=self.name,
-                parent_name=self.inputs[0].name(),
-                inputs_name=self.inputs[0].name(),
-                weights_name=self.inputs[1].name(),
-                biases_name=self.inputs[2].name(),
-                outputs_name=self.name
-            ))
-
-        return list_actions
-
-
-@operator_register("PaddedConv")
-class PaddedConv_ARMCortexM(Conv_ARMCortexM):
-    def __init__(self, node, board, library):
-        ExportNode.__init__(self, node)
-
-        self.producers = []
-        # Exclude first input which is a real input
-        for i in range(1, len(node.inputs())):
-            producer = node.input(i)[0]
-            self.producers.append(Producer_ARMCortexM(producer))
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
-        self.scaling = Scaling()("no_scaling")
-        self.activation = "Linear"
-
+        self.attributes["padding"] = [0, 0]
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "conv_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "Conv.hpp")
+        ]
+
+@ExportLibAidgeARM.register("ConvDepthWise2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class ConvDW_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["activation"] = "Linear"
+        self.attributes.update(Scaling()("no_scaling"))
+        # No padding with ConvDepthWise
+        # Use PaddedConvDepthWise to add a padding attribute
+        self.attributes["padding"] = [0, 0]
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "conv_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "ConvDW.hpp")
+        ]
+
+@ExportLibAidgeARM.register_metaop("PaddedConvDepthWise2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class PaddedConvDW_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["activation"] = "Linear"
+        self.attributes.update(Scaling()("no_scaling"))
         for n in self.operator.get_micro_graph().get_nodes():
-            if n.type() == "Pad":
-                self.padding = n.get_operator().attr.begin_end_borders
-            if n.type() == "Conv":
-                self.kernel = n.get_operator().attr.kernel_dims
-                self.stride = n.get_operator().attr.stride_dims
-                self.dilation = n.get_operator().attr.dilation_dims
-
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
-
-
-@operator_register("ConvReluScaling")
-class ConvReluScaling_ARMCortexM(Conv_ARMCortexM):
-     def __init__(self, node, board, library):
-        super(Conv_ARMCortexM, self).__init__(node, board, library)
-
-        if self.operator.has_attr("Begin_End_Borders"):
-            self.padding = self.operator.attr.begin_end_borders
-
-        self.activation = "Rectifier"
-
-        # Should do this line but there is a bug while changing the dtype of generic operator...
-        # self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
-        # Do this instead
-        if self.operator.attr.quantized_nb_bits == 8:
-            if self.operator.attr.is_output_unsigned:
-                self.dtype = aidge2c(aidge_core.dtype.uint8)
-            else:
-                self.dtype = aidge2c(aidge_core.dtype.int8)
-
-        # Impose Single Shift (perhaps change it to have a more modular system)
-        self.scaling = Scaling(self.operator.attr.scaling_factor,
-                               self.operator.attr.quantized_nb_bits)("floating_point")
-
-
-class Pooling_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
-        self.pool_type = "None"
-        self.activation = "Linear"
-
-        self.kernel = node.get_operator().attr.kernel_dims
-        self.stride = node.get_operator().attr.stride_dims
-
+            if n.type() == "Pad2D":
+                self.attributes["padding"] = n.get_operator(
+                ).attr.begin_end_borders
+            if n.type() == "ConvDepthWise2D":
+                self.attributes["kernel_dims"] = n.get_operator(
+                ).attr.kernel_dims
+                self.attributes["stride_dims"] = n.get_operator(
+                ).attr.stride_dims
+                self.attributes["dilation_dims"] = n.get_operator(
+                ).attr.dilation_dims
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "conv_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "ConvDW.hpp")
+        ]
+
+
+@ExportLibAidgeARM.register_metaop("PaddedConv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class PaddedConv_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["activation"] = "Linear"
+        self.attributes.update(Scaling()("no_scaling"))
+        for n in self.operator.get_micro_graph().get_nodes():
+            if n.type() == "Pad2D":
+                self.attributes["padding"] = n.get_operator(
+                ).attr.begin_end_borders
+            if n.type() == "Conv2D":
+                self.attributes["kernel_dims"] = n.get_operator(
+                ).attr.kernel_dims
+                self.attributes["stride_dims"] = n.get_operator(
+                ).attr.stride_dims
+                self.attributes["dilation_dims"] = n.get_operator(
+                ).attr.dilation_dims
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "conv_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "Conv.hpp")
+        ]
+
+
+class Pooling_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["activation"] = "Linear"
+        self.attributes["pool_type"] = "None"
         # No padding with MaxPooling or AvgPooling
         # Use PaddedMaxPooling/PaddedAvgPooling to add padding attribute
-        self.padding = [0, 0]
-
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        if len(self.outputs_dims[0]) == 4:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = self.outputs_dims[0][1:]
-
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Pooling" / "aidge_maxpool2d_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
-                         str(Path(export_folder) / "include"))
-
-            # Export configuration file
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "pooling.jinja"),
-                name=self.name,
-                input_dims=self.inputs_dims[0],
-                output_dims=self.outputs_dims[0],
-                kernel=self.kernel,
-                stride=self.stride,
-                padding=self.padding,
-                pool_type=self.pool_type)
-
-
-        elif self.library == "n2d2":
-
-            # Nothing to copy
-
-            # Export configuration file
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_N2D2" / "templates" / "configuration" / "pool_config.jinja"),
-                name=self.name,
-                input_dims=self.inputs_dims[0],
-                output_dims=self.outputs_dims[0],
-                kernel=self.kernel,
-                stride=self.stride,
-                padding=self.padding,
-                pool_type=self.pool_type,
-                activation=self.activation)
-
-
-        return list_configs
-
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "pooling.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                pool_type=self.pool_type.lower(),
-                input_name=self.inputs[0].name(),
-                output_name=self.name
-            ))
-
-        elif self.library == "n2d2":
+        self.attributes["padding"] = [0, 0]
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "pool_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "pool_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Pooling" / "Pooling.hpp")
+        ]
+        self.kernel = node.get_operator().attr.kernel_dims
+        self.stride = node.get_operator().attr.stride_dims
 
-            list_actions.append(generate_str(
-                str(ROOT / "_N2D2" / "templates" / "kernel" / "pool_kernel.jinja"),
-                name=self.name,
-                parent_name=self.inputs[0].name(),
-                inputs_name=self.inputs[0].name(),
-                outputs_name=self.name
-            ))
 
-        return list_actions
+@ExportLibAidgeARM.register("FC", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class FC_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["activation"] = "Linear"
+        self.attributes.update(Scaling()("no_scaling"))
+        # Padding does not apply to FC; keep a neutral [0, 0] attribute
+        self.attributes["padding"] = [0, 0]
 
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "fc_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "fc_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "FullyConnected" / "Fc.hpp")
+        ]
 
-@operator_register("MaxPooling")
+@ExportLibAidgeARM.register("MaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class MaxPooling_ARMCortexM(Pooling_ARMCortexM):
-    def __init__(self, node, board, library):
-        super().__init__(node, board, library)
-        self.pool_type = "Max"
-
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["pool_type"] = "Max"
 
-@operator_register("AvgPooling")
+@ExportLibAidgeARM.register("AvgPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class AvgPooling_ARMCortexM(Pooling_ARMCortexM):
-    def __init__(self, node, board, library):
-        super().__init__(node, board, library)
-        self.pool_type = "Avg"
-
-
-@operator_register("FC")
-class FC_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.producers = []
-        # Exclude first input which is a real input
-        for i in range(1, len(node.inputs())):
-            producer = node.input(i)[0]
-            self.producers.append(Producer_ARMCortexM(producer))
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
-        self.scaling = Scaling()("no_scaling")
-        self.activation = "Linear"
-
-        # if len(self.inputs_dims[0]) == 4:
-        #     # if dims == [batch, nb_channels, height, width]
-        #     # transform to [nb_channels, height, width]
-        #     self.inputs_dims[0] = self.inputs_dims[0][1:]
-
-        #     # It also means that we need to change the dataformat of the weights
-        #     weights = self.producers[0].values
-        #     if len(weights.shape) == 2:
-        #         weights = weights.reshape(weights.shape[0], weights.shape[1], 1, 1)
-
-        if len(self.inputs_dims[0]) == 3:
-            # if dims == [nb_channels, height, width]
-            # transform to [batch, nb_channels, height, width]
-            self.inputs_dims[0] = [1, self.inputs_dims[0][0], self.inputs_dims[0][1], self.inputs_dims[0][2]]
-
-
-        elif len(self.inputs_dims[0]) == 2:
-            # if dims == [batch, nb_channels]
-            # transform to [batch,nb_channels, 1, 1]
-            self.inputs_dims[0] = [self.inputs_dims[0][0], self.inputs_dims[0][1], 1, 1]
-
-
-        # if len(self.outputs_dims[0]) == 2:
-        #     # if dims == [batch, nb_outputs]
-        #     # transform to [nb_outputs, 1, 1]
-        #     self.outputs_dims[0] = [self.outputs_dims[0][1], 1, 1]
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        # Export weights to NHWC format
-        self.producers[0].export(export_folder / "parameters" / f"{self.producers[0].name}.h")
-        list_configs.append(f"parameters/{self.producers[0].name}.h")
-
-        # Export biases
-        if (len(self.producers) > 1):
-            # Convert the biases to int32
-            if self.dataformat != "float32":
-                self.producers[1].values = self.producers[1].values.astype(np.int32)
-
-            self.producers[1].export(export_folder / "parameters" / f"{self.producers[1].name}.h")
-            list_configs.append(f"parameters/{self.producers[1].name}.h")
-
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                # Take this kernel for now to avoid bad transpose weights (see aidge_export_cpp)
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "FullyConnected" / "aidge_fc_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            # Export configuration file
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "fullyconnected.jinja"),
-                name=self.name,
-                nb_batch=self.inputs_dims[0][0],
-                nb_channels=self.inputs_dims[0][1],
-                channel_height=self.inputs_dims[0][2],
-                channel_width=self.inputs_dims[0][3],
-                nb_outputs=self.outputs_dims[0][1])
-
-        elif self.library == "n2d2":
-
-            # Export configuration file
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_N2D2" / "templates" / "configuration" / "fc_config.jinja"),
-                name=self.name,
-                input_dims=self.inputs_dims[0],
-                output_dims=self.outputs_dims[0],
-                activation=self.activation,
-                **self.scaling)
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "fullyconnected.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                weight_name=self.inputs[1].name(),
-                bias_name=self.inputs[2].name(),
-                output_name=self.name
-            ))
-
-        elif self.library == "n2d2":
-            list_actions.append(generate_str(
-            str(ROOT / "_N2D2" / "templates" / "kernel" / "fc_kernel.jinja"),
-            name=self.name,
-            parent_name=self.inputs[0].name(),
-            inputs_name=self.inputs[0].name(),
-            weights_name=self.inputs[1].name(),
-            biases_name=self.inputs[2].name(),
-            outputs_name=self.name
-        ))
-
-
-        return list_actions
-
-
-@operator_register("FcScaling")
-class FCScaling_ARMCortexM(FC_ARMCortexM):
-
-    def __init__(self, node, board, library):
-        super(FC_ARMCortexM, self).__init__(node, board, library)
-
-        # Should do this line but there is a bug while changing the datatype of generic operator...
-        # self.datatype = aidge2c(node.get_operator().get_output(0).dtype())
-        # Do this instead
-        if self.operator.attr.quantized_nb_bits == 8:
-            if self.operator.attr.is_output_unsigned:
-                self.dtype = aidge2c(aidge_core.dtype.uint8)
-            else:
-                self.dtype = aidge2c(aidge_core.dtype.int8)
-
-        # Impose Single Shift (perhaps change it to have a more modular system)
-        self.scaling = Scaling(self.operator.attr.scaling_factor,
-                               self.operator.attr.quantized_nb_bits)("floating_point")
-
-
-@operator_register("FcReluScaling")
-class FCReluScaling_ARMCortexM(FCScaling_ARMCortexM):
-    def __init__(self, node, board, library):
-        super(FCScaling_ARMCortexM, self).__init__(node, board, library)
-
-        self.activation = "Rectifier"
-
-
-@operator_register("Add")
-class Add_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-
-        super().__init__(node)
-
-        self.producers = []
-
-        for i in range(0, len(node.inputs())):
-            if node.input(i)[0].type()=="Producer" and node.input(i)[0].get_operator().attr.constant == True:
-                producer = node.input(i)[0]
-                self.producers.append(Producer_ARMCortexM(producer))
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-
-
-
-    def export(self, export_folder:Path,list_configs:list):
-        for i in range(len(self.producers)):
-            self.producers[i].export(export_folder / "parameters" / f"{self.producers[i].name}.h")
-            list_configs.append(f"parameters/{self.producers[i].name}.h")
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
-
-        export_params(f"{self.inputs[1].name()}_DIMS", np.array(self.inputs_dims[1],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[1].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[1].name()}_DIMS.h")
-
-        export_params(f"{self.name}_DIMS", np.array(self.outputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.name}_DIMS.h")
-        list_configs.append(f"dimensions/{self.name}_DIMS.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "ElemWise" / "Add" / "aidge_add_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
-                         str(Path(export_folder) / "include"))
-
-                generate_file(
-                    str(export_folder / "layers" / f"{self.name}.h"),
-                    str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "elemwise.jinja"),
-                    name=self.name,
-                    nb_inputs=np.prod(self.inputs_dims[0]),
-                    nb_outputs=np.prod(self.outputs_dims[0]),
-                    input_dims=self.inputs_dims,
-                    output_dims=self.outputs_dims,
-                    elemwise_op="\"ADD\"")
-
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "elemwise.jinja"),
-                name=self.name,
-                elemwise_type = "add",
-                dataformat=self.dataformat,
-                inputa_name=self.inputs[0].name(),
-                inputb_name=self.inputs[1].name(),
-                output_name=self.name))
-
-
-        return list_actions
-
-@operator_register("Mul")
-class Mul_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-
-        super().__init__(node)
-
-        self.producers = []
-
-        for i in range(0, len(node.inputs())):
-            if node.input(i)[0].type()=="Producer" and node.input(i)[0].get_operator().attr.constant == True:
-                producer = node.input(i)[0]
-                self.producers.append(Producer_ARMCortexM(producer))
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-
-
-    def export(self, export_folder:Path,list_configs:list):
-        for i in range(len(self.producers)):
-            self.producers[i].export(export_folder / "parameters" / f"{self.producers[i].name}.h")
-            list_configs.append(f"parameters/{self.producers[i].name}.h")
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
-
-        export_params(f"{self.inputs[1].name()}_DIMS", np.array(self.inputs_dims[1],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[1].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[1].name()}_DIMS.h")
-
-        export_params(f"{self.name}_DIMS", np.array(self.outputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.name}_DIMS.h")
-        list_configs.append(f"dimensions/{self.name}_DIMS.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "ElemWise" / "Mul" / "aidge_mul_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
-                         str(Path(export_folder) / "include"))
-
-                generate_file(
-                    str(export_folder / "layers" / f"{self.name}.h"),
-                    str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "elemwise.jinja"),
-                    name=self.name,
-                    nb_inputs=np.prod(self.inputs_dims[0]),
-                    nb_outputs=np.prod(self.outputs_dims[0]),
-                    input_dims=self.inputs_dims,
-                    output_dims=self.outputs_dims,
-                    elemwise_op="\"MUL\"")
-
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "elemwise.jinja"),
-                name=self.name,
-                elemwise_type = "mul",
-                dataformat=self.dataformat,
-                inputa_name=self.inputs[0].name(),
-                inputb_name=self.inputs[1].name(),
-                output_name=self.name))
-
-
-        return list_actions
-
-@operator_register("Softmax")
-class Softmax_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-
-        super().__init__(node)
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-        self.axis =  node.get_operator().attr.axis
-
-
-    def export(self, export_folder:Path,list_configs:list):
-
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Activation" / "Softmax" / "aidge_softmax_chw_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "activation.jinja"),
-                name=self.name,
-                activation_type="\"SOFTMAX\"",
-                nb_inputs=np.prod(self.inputs_dims[0]),
-                nb_outputs=np.prod(self.outputs_dims[0]),
-                axis=self.axis,
-                input_dims = self.inputs_dims[0])
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "activation_chw.jinja"),
-                name=self.name,
-                activation_type="softmax",
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name
-            ))
-
-        return list_actions
-
-@operator_register("BatchNorm")
-class BatchNorm2D_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-
-        super().__init__(node)
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-        self.epsilon = node.get_operator().attr.epsilon
-
-
-        self.producers = []
-
-        for i in range(0, len(node.inputs())):
-            if node.input(i)[0].type()=="Producer":
-                producer = node.input(i)[0]
-                self.producers.append(Producer_ARMCortexM(producer))
-
-    def export(self, export_folder:Path,list_configs:list):
-        for i in range(len(self.producers)):
-            self.producers[i].export(export_folder / "parameters" / f"{self.producers[i].name}.h")
-            list_configs.append(f"parameters/{self.producers[i].name}.h")
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "BatchNorm" / "aidge_batchnorm2d_chw_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "batchnorm2d.jinja"),
-                name=self.name,
-                epsilon=self.epsilon,
-                input_dims = self.inputs_dims[0])
-
-        return list_configs
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "batchnorm2d.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                running_mean_name=self.inputs[3].name(),
-                running_var_name=self.inputs[4].name(),
-                weight_name=self.inputs[1].name(),
-                bias_name=self.inputs[2].name(),
-                output_name=self.name
-            ))
-        return list_actions
-
-@operator_register("Sigmoid")
-class Sigmoid_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Activation" / "Sigmoid" / "aidge_sigmoid_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "activation.jinja"),
-                name=self.name,
-                activation_type="\"SIGMOID\"",
-                nb_inputs=np.prod(self.inputs_dims[0]),
-                nb_outputs=np.prod(self.outputs_dims[0]))
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "activation.jinja"),
-                name=self.name,
-                activation_type="sigmoid",
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name
-            ))
-
-        return list_actions
-
-@operator_register("Reshape")
-class Reshape_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        # node.set_name(self.inputs[0].name())
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Reshape" / "aidge_reshape_chw_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "reshape.jinja"),
-                name=self.name,
-                nb_inputs=np.prod(self.inputs_dims[0]),
-                nb_outputs=np.prod(self.outputs_dims[0]))
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "reshape.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name,
-            ))
-
-        return list_actions
-
-@operator_register("MatMul")
-class Matmul_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-        self.producers = []
-
-        for i in range(0, len(node.inputs())):
-            if node.input(i)[0].type()=="Producer" and node.input(i)[0].get_operator().attr.constant == True:
-                producer = node.input(i)[0]
-                self.producers.append(Producer_ARMCortexM(producer))
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-
-
-    def export(self, export_folder:Path, list_configs:list):
-        for i in range(len(self.producers)):
-            self.producers[i].export(export_folder / "parameters" / f"{self.producers[i].name}.h")
-            list_configs.append(f"parameters/{self.producers[i].name}.h")
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
-
-        export_params(f"{self.inputs[1].name()}_DIMS", np.array(self.inputs_dims[1],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[1].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[1].name()}_DIMS.h")
-
-        export_params(f"{self.name}_DIMS", np.array(self.outputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.name}_DIMS.h")
-        list_configs.append(f"dimensions/{self.name}_DIMS.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "MatMul" / "aidge_matmul_chw_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
-                         str(Path(export_folder) / "include"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "matmul.jinja"),
-                name=self.name,
-                input_dims=self.inputs_dims,
-                output_dims=self.outputs_dims)
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "matmul.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                inputa_name=self.inputs[0].name(),
-                inputb_name=self.inputs[1].name(),
-                output_name=self.name))
-
-        return list_actions
-
-@operator_register("Gather")
-class Gather_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-        self.indices =  node.get_operator().attr.indices
-        self.axis =  node.get_operator().attr.axis
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
-
-        export_params(f"{self.name}_INDEXES", np.array(self.indices,dtype=np.int32),export_folder / "dimensions" / f"{self.name}_INDEXES.h")
-        list_configs.append(f"dimensions/{self.name}_INDEXES.h")
-
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Transform" / "Gather" / "aidge_gather_chw_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "gather.jinja"),
-                name=self.name,
-                axis = self.axis,
-                indices = self.indices,
-                input_dims=self.inputs_dims[0],
-                nb_outputs=np.prod(self.outputs_dims[0])
-            )
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "gather.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name
-            ))
-
-        return list_actions
-
-@operator_register("Transpose")
-class Transpose_ARMCortexM(ExportNode):
-    def __init__(self, node, board, library):
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-        self.perm =  node.get_operator().attr.output_dims_order
-
-    def export(self, export_folder:Path, list_configs:list):
-
-        list_configs.append(f"layers/{self.name}.h")
-
-        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
-        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
-
-        export_params(f"{self.name}_PERMUTATIONS", np.array(self.perm,dtype=np.int32),export_folder / "dimensions" / f"{self.name}_PERMUTATIONS.h")
-        list_configs.append(f"dimensions/{self.name}_PERMUTATIONS.h")
-
-        export_params(f"{self.name}_DIMS", np.array(self.outputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.name}_DIMS.h")
-        list_configs.append(f"dimensions/{self.name}_DIMS.h")
-
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Transform" / "Transpose" / "aidge_transpose_chw_float32.c"),
-                         str(export_folder / "src" / "kernels"))
-
-            generate_file(
-                str(export_folder / "layers" / f"{self.name}.h"),
-                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "transpose.jinja"),
-                name=self.name,
-                perm = self.perm,
-                input_dims=self.inputs_dims[0],
-                output_dims=self.outputs_dims[0],
-                nb_outputs=np.prod(self.outputs_dims[0])
-            )
-
-            # print(self.outputs_dims)
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.dtype))
-
-        if self.library == "aidge":
-            list_actions.append(generate_str(
-                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "transpose.jinja"),
-                name=self.name,
-                dataformat=self.dataformat,
-                input_name=self.inputs[0].name(),
-                output_name=self.name
-                ))
-
-        return list_actions
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["pool_type"] = "Avg"
+
+@ExportLibAidgeARM.register_metaop("FcReluScaling", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class FCReluScaling_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["activation"] = "Rectifier"
+        self.attributes.update(Scaling(self.operator.attr.scaling_factor,
+                               self.operator.attr.quantized_nb_bits)("floating_point"))
+        # Padding does not apply to FC; keep a neutral [0, 0] attribute
+        self.attributes["padding"] = [0, 0]
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "fc_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "fc_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "FullyConnected" / "Fc.hpp")
+        ]
+
+@ExportLibAidgeARM.register("Add", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class Add_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "add.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "add.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Add" / "aidge_add_float32.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "aidge_supportfunctions.h")
+        ]
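
Since aidge_add_float32 receives both input shapes and the output shape, the generated forward call can broadcast operands of different sizes. A minimal NumPy sketch of the assumed NumPy-style broadcast semantics, with illustrative shapes:

```python
import numpy as np

# Illustrative shapes: a [2, 3] tensor plus a [3] vector broadcast over dim 0.
a = np.array([[1., 2., 3.],
              [4., 5., 6.]], dtype=np.float32)
b = np.array([10., 20., 30.], dtype=np.float32)

out = a + b  # dims are right-aligned: [2, 3] + [3] -> [2, 3]
assert out.shape == (2, 3)
# The exported call passes both input shapes, the output shape, their ranks,
# and the total output size (6 floats here) down to the C kernel.
```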
+
+@ExportLibAidgeARM.register("Sub", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class Sub_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "sub.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "sub.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Sub" / "aidge_sub_float32.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "aidge_supportfunctions.h")
+        ]
+
+@ExportLibAidgeARM.register("Mul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class Mul_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "mul.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "mul.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Mul" / "aidge_mul_float32.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "aidge_supportfunctions.h")
+        ]
+
+@ExportLibAidgeARM.register("Softmax", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class Softmax_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "softmax.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "softmax.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Softmax" / "aidge_softmax_chw_float32.h"),
+        ]
+
+@ExportLibAidgeARM.register("Atan", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class Atan_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "atan.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "atan.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Atan" / "aidge_atan.hpp"),
+        ]
+
+
+@ExportLibAidgeARM.register("Slice", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class Slice_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "slice.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "slice.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Slice" / "aidge_slice_float32.hpp"),
+        ]
+
+@ExportLibAidgeARM.register("Concat", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class Concat_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "concat.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "concat.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Concat" / "aidge_concat_float32.hpp"),
+        ]
+
+@ExportLibAidgeARM.register("Sigmoid", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class Sigmoid_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["activation_type"] = "\"SIGMOID\""
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "activation.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "activation_chw.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Sigmoid" / "aidge_sigmoid_float32.h"),
+        ]
+
+@ExportLibAidgeARM.register("MatMul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class MatMul_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "matmul.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "matmul.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Matmul" / "aidge_matmul_chw_float32.h"),
+        ]
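
All of the registrations above follow the same pattern: `ExportLibAidgeARM.register` binds an operator type and an `ImplSpec` to an `ExportNodeCpp` subclass, which only has to point at a configuration template, a forward-call template, and the kernel sources to copy into the generated export. A minimal sketch of wiring up a new operator the same way, assuming the `ROOT`, `ExportLibAidgeARM`, and `ExportNodeCpp` names used above (the "MyOp" operator and every path below are hypothetical):

    import aidge_core

    @ExportLibAidgeARM.register("MyOp", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
    class MyOp_ARMCortexM(ExportNodeCpp):
        def __init__(self, node, mem_info):
            super().__init__(node, mem_info)
            # Jinja template rendered into this layer's configuration header
            self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "myop.jinja")
            # Jinja template rendered into the kernel call inside the forward function
            self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "myop.jinja")
            self.include_list = []
            # Kernel sources copied verbatim into the export tree
            self.kernels_to_copy = [
                str(ROOT / "_Aidge_Arm" / "kernels" / "MyOp" / "aidge_myop_float32.hpp"),
            ]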
diff --git a/aidge_export_arm_cortexm/operators_old.py b/aidge_export_arm_cortexm/operators_old.py
deleted file mode 100644
index 3440b248b1b719f4e5573d5c4dcc9df0b450122c..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/operators_old.py
+++ /dev/null
@@ -1,551 +0,0 @@
-import os
-import shutil
-import numpy as np
-from pathlib import Path
-from jinja2 import Environment, FileSystemLoader
-
-from aidge_core import ExportNode
-from aidge_export_arm_cortexm.utils import ROOT, operator_register
-
-
-##############################################
-############## Export functions ##############
-##############################################
-
-def generate_file(filename, templatename, **kwargs):
-
-    # Get directory name of the file
-    dirname = os.path.dirname(filename)
-
-    # If directory doesn't exist, create it
-    if not os.path.exists(dirname):
-        os.makedirs(dirname)
-
-    # Get directory name and name of the template
-    template_dir = os.path.dirname(templatename)
-    template_name = os.path.basename(templatename)
-
-    # Select template
-    template = Environment(loader=FileSystemLoader(template_dir)).get_template(template_name)
-
-    # Generate file
-    content = template.render(kwargs)
-    with open(filename, mode="w", encoding="utf-8") as message:
-        message.write(content)
-
-
-def generate_action(template_path, **kwargs):
-    dirname = os.path.dirname(template_path)
-    filename = os.path.basename(template_path)
-    template = Environment(loader=FileSystemLoader(dirname)).get_template(filename)
-    return template.render(kwargs)
-
-def copyfile(filename, dst_folder):
-
-    # If directory doesn't exist, create it
-    if not os.path.exists(dst_folder):
-        os.makedirs(dst_folder)
-
-    shutil.copy(filename, dst_folder)
-
-
-def export_to_static(name, array, filepath):
-
-    # Get directory name of the file
-    dirname = os.path.dirname(filepath)
-
-    # If directory doesn't exist, create it
-    if not os.path.exists(dirname):
-        os.makedirs(dirname)
-
-    generate_file(
-        filepath,
-        str(ROOT) + "/templates/data/data_static.jinja",
-        dims = array.shape,
-        data_t = "float",
-        name = name,
-        values = array.tolist()
-    )
-
-##############################################
-################### Utils ####################
-##############################################
-
-def get_node_parents(node):
-    parents = []
-    for parent in node.get_parents():
-        if parent.type() != "Producer":
-            parents.append(parent)
-    return parents
-
-def get_producer_parents(node):
-    parents = []
-    for parent in node.get_parents():
-        if parent.type() == "Producer":
-            parents.append(parent)
-    return parents
-
-
-##############################################
-################### Actions ##################
-##############################################
-
-def set_up_output(name, datatype):
-    return f"{datatype}* {name} = ({datatype}*) mem + {name.upper()}_OFFSET;"
-
-
-##############################################
-############## Operators helper ##############
-##############################################
-
-@operator_register("Add")
-class Add(ExportNode):
-    def __init__(self, node, board, dataformat, library):
-        # Copy dims for first input
-        node.get_operator().get_output(0).resize(node.get_operator().get_input(0).dims())
-
-        super().__init__(node)
-        self.board = board
-        self.library = library
-        self.dataformat = dataformat
-
-    def export(self, export_folder:str, list_configs:list):
-        
-        # Copying kernel into export
-        # Find a more generic system for future dev
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "kernels" / "ElemWise" / "Add" / "aidge_add_float32.c"),
-                         str(Path(export_folder) / "src" / "kernels"))
-                
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        # Export configuration file
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            str(ROOT / "templates" / "configuration" / "elemwise.jinja"),
-            name=self.name,
-            elemwise_op="\"ADD\"",
-            nb_inputs=np.prod(self.inputs_dims[0]),
-            nb_outputs=np.prod(self.outputs_dims[0]))
-        
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_action(
-            str(ROOT / "templates" / "kernel" / "elemwise.jinja"),
-            name=self.name,
-            elemwise_type="add",
-            dataformat="float32",   # Only this choice so far
-            input1_name=self.inputs[0].name(),
-            input2_name=self.inputs[1].name(),
-            output_name=self.name
-        ))
-        return list_actions
-
-
-@operator_register("Sub")
-class Sub(ExportNode):
-    def __init__(self, node, board, dataformat, library):
-        # Copy dims for first input
-        node.get_operator().get_output(0).resize(node.get_operator().get_input(0).dims())
-
-        super().__init__(node)
-        self.board = board
-        self.library = library
-        self.dataformat = dataformat
-
-    def export(self, export_folder:str, list_configs:list):
-        
-        # Copying kernel into export
-        # Find a more generic system for future dev
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "kernels" / "ElemWise" / "Sub" / "aidge_sub_float32.c"),
-                         str(Path(export_folder) / "src" / "kernels"))
-                
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        # Export configuration file
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            str(ROOT / "templates" / "configuration" / "elemwise.jinja"),
-            name=self.name,
-            elemwise_op="\"SUB\"",
-            nb_inputs=np.prod(self.inputs_dims[0]),
-            nb_outputs=np.prod(self.outputs_dims[0]))
-        
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_action(
-            str(ROOT / "templates" / "kernel" / "elemwise.jinja"),
-            name=self.name,
-            elemwise_type="sub",
-            dataformat="float32",   # Only this choice so far
-            input1_name=self.inputs[0].name(),
-            input2_name=self.inputs[1].name(),
-            output_name=self.name
-        ))
-        return list_actions
-
-
-@operator_register("Mul")
-class Mul(ExportNode):
-    def __init__(self, node, board, dataformat, library):
-        # Copy dims for first input
-        node.get_operator().get_output(0).resize(node.get_operator().get_input(0).dims())
-
-        super().__init__(node)
-        self.board = board
-        self.library = library
-        self.dataformat = dataformat
-
-    def export(self, export_folder:str, list_configs:list):
-        
-        # Copying kernel into export
-        # Find a more generic system for future dev
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "kernels" / "ElemWise" / "Mul" / "aidge_mul_float32.c"),
-                         str(Path(export_folder) / "src" / "kernels"))
-                
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        # Export configuration file
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            str(ROOT / "templates" / "configuration" / "elemwise.jinja"),
-            name=self.name,
-            elemwise_op="\"MUL\"",
-            nb_inputs=np.prod(self.inputs_dims[0]),
-            nb_outputs=np.prod(self.outputs_dims[0]))
-        
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_action(
-            str(ROOT / "templates" / "kernel" / "elemwise.jinja"),
-            name=self.name,
-            elemwise_type="mul",
-            dataformat="float32",   # Only this choice so far
-            input1_name=self.inputs[0].name(),
-            input2_name=self.inputs[1].name(),
-            output_name=self.name
-        ))
-        return list_actions
-
-
-@operator_register("Div")
-class Div(ExportNode):
-    def __init__(self, node, board, dataformat, library):
-        # Copy dims for first input
-        node.get_operator().get_output(0).resize(node.get_operator().get_input(0).dims())
-
-        super().__init__(node)
-        self.board = board
-        self.library = library
-        self.dataformat = dataformat
-
-    def export(self, export_folder:str, list_configs:list):
-        
-        # Copying kernel into export
-        # Find a more generic system for future dev
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "kernels" / "ElemWise" / "Div" / "aidge_div_float32.c"),
-                         str(Path(export_folder) / "src" / "kernels"))
-                
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        # Export configuration file
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            str(ROOT / "templates" / "configuration" / "elemwise.jinja"),
-            name=self.name,
-            elemwise_op="\"DIV\"",
-            nb_inputs=np.prod(self.inputs_dims[0]),
-            nb_outputs=np.prod(self.outputs_dims[0]))
-        
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_action(
-            str(ROOT / "templates" / "kernel" / "elemwise.jinja"),
-            name=self.name,
-            elemwise_type="div",
-            dataformat="float32",   # Only this choice so far
-            input1_name=self.inputs[0].name(),
-            input2_name=self.inputs[1].name(),
-            output_name=self.name
-        ))
-        return list_actions
-
-
-@operator_register("Gemm")
-class Gemm(ExportNode):
-    def __init__(self, node, board, dataformat, library):
-
-        w_dims = node.get_operator().get_input(1).dims()
-        node.get_operator().get_output(0).resize([w_dims[1]])
-
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = dataformat
-
-    def export(self, export_folder:str, list_configs:list):
-        # Copying kernel into export
-        # Find a more generic system for future dev
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "kernels" / "FullyConnected" / "aidge_fc_float32.c"),
-                         str(Path(export_folder) / "src" / "kernels"))
-                
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        # Export configuration file
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            str(ROOT / "templates" / "configuration" / "fullyconnected.jinja"),
-            name=self.name,
-            nb_channels=self.inputs_dims[0][0],
-            nb_outputs=self.outputs_dims[0][0],
-            biases_size=self.outputs_dims[0][0])
-        
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_action(
-            str(ROOT / "templates" / "kernel" / "fullyconnected.jinja"),
-            name=self.name,
-            dataformat="float32",   # Only this choice so far
-            input_name=self.inputs[0].name(),
-            weight_name=self.inputs[1].name(),
-            bias_name=self.inputs[2].name(),
-            output_name=self.name
-        ))
-        return list_actions
-
-
-@operator_register("Atan")
-class Atan(ExportNode):
-    def __init__(self, node, board, dataformat, library):
-        # Copy dims for first input
-        node.get_operator().get_output(0).resize(node.get_operator().get_input(0).dims())
-
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = dataformat
-
-    def export(self, export_folder:str, list_configs:list):
-        # Copying kernel into export
-        # Find a more generic system for future dev
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "kernels" / "Activation" / "Atan" / "aidge_atan_float32.c"),
-                         str(Path(export_folder) / "src" / "kernels"))
-                
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        # Export configuration file
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            str(ROOT / "templates" / "configuration" / "activation.jinja"),
-            name=self.name,
-            activation_type="\"ATAN\"",
-            nb_inputs=np.prod(self.inputs_dims[0]),
-            nb_outputs=np.prod(self.outputs_dims[0]))
-        
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_action(
-            str(ROOT / "templates" / "kernel" / "activation.jinja"),
-            name=self.name,
-            activation_type="atan",
-            dataformat="float32",   # Only this choice so far
-            input_name=self.inputs[0].name(),
-            output_name=self.name
-        ))
-        return list_actions
-
-
-@operator_register("Slice")
-class Slice(ExportNode):
-
-    def __init__(self, node, board, dataformat, library):
-
-        self.axes = node.get_operator().attr.axes
-        self.starts = node.get_operator().attr.starts
-        self.ends = node.get_operator().attr.ends
-
-        # Compute output dims
-        out_dims = [self.ends[x-1] - self.starts[x-1] for x in self.axes]
-        node.get_operator().get_output(0).resize(out_dims)
-
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = dataformat
-
-    def export(self, export_folder:str, list_configs:list):
-        # Copying kernel into export
-        # Find a more generic system for future dev
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "kernels" / "Slice" / "aidge_slice_float32.c"),
-                         str(Path(export_folder) / "src" / "kernels"))
-                
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        # Export configuration file
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            str(ROOT / "templates" / "configuration" / "slice.jinja"),
-            name=self.name,
-            axes=self.axes,
-            starts=self.starts,
-            ends=self.ends,
-            nb_inputs=np.prod(self.inputs_dims[0]),
-            nb_outputs=np.prod(self.outputs_dims[0]))
-        
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_actions.append(generate_action(
-            str(ROOT / "templates" / "kernel" / "slice.jinja"),
-            name=self.name,
-            dataformat="float32",   # Only this choice so far
-            input_name=self.inputs[0].name(),
-            output_name=self.name
-        ))
-        return list_actions
-
-
-@operator_register("Concat")
-class Concat(ExportNode):
-
-    def __init__(self, node, board, dataformat, library):
-
-        self.axis = node.get_operator().attr.axis
-        out_dims = node.get_operator().get_input(0).dims()
-
-        out_dims[self.axis - 1] = 0
-        for parent in node.get_parents():
-            out_dims[self.axis - 1] += parent.get_operator().get_output(0).dims()[self.axis - 1]
-
-        node.get_operator().get_output(0).resize(out_dims)
-
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = dataformat
- 
-
-    def export(self, export_folder:str, list_configs:list):
-        # Copying kernel into export
-        # Find a more generic system for future dev
-        if self.library == "aidge":
-            if self.dataformat == "float32":
-                copyfile(str(ROOT / "kernels" / "Concat" / "aidge_concat_float32.c"),
-                         str(Path(export_folder) / "src" / "kernels"))
-                
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        # Get all input sizes
-        list_input_size = []
-        for i in range(len(self.inputs)):
-            list_input_size.append(np.prod(self.node.get_operator().get_input(i).dims()))
-
-        # Export configuration file
-        generate_file(
-            f"{export_folder}/layers/{self.name}.h",
-            str(ROOT / "templates" / "configuration" / "concat.jinja"),
-            name=self.name,
-            nb_inputs=len(self.node.get_parents()),
-            axis=self.axis,
-            list_input_size=list_input_size,
-            output_size=np.sum(list_input_size)
-        )
-        
-        return list_configs
-
-    def forward(self, list_actions:list):
-        if not self.is_last:
-            list_actions.append(set_up_output(self.name, "float"))
-
-        list_input_names = []
-        for i in range(len(self.inputs)):
-            list_input_names.append(self.inputs[i].name())
-
-        list_actions.append(generate_action(
-            str(ROOT / "templates" / "kernel" / "concat.jinja"),
-            name=self.name,
-            dataformat="float32",
-            nb_inputs=len(self.inputs),
-            list_in_names=list_input_names,
-            output_name=self.name,
-        ))
-        return list_actions
-
-
-@operator_register("Producer")
-class Producer(ExportNode):
-    """
-    If the export operators are ever standardized,
-    this class should simply inherit from ProducerCPP.
-    """
-    def __init__(self, node, board, dataformat, library):
-        super().__init__(node)
-
-        self.board = board
-        self.library = library
-        self.dataformat = dataformat
-        self.values = np.array(self.operator.get_output(0))
-
-    def export(self, export_folder:str, list_configs:list):
-
-        list_configs.append(f"parameters/{self.name}.h")
-        export_to_static(self.name,
-                         self.values.reshape(-1),
-                         f"{export_folder}/parameters/{self.name}.h")
-
-        return list_configs
-
-    def forward(self, list_actions:list):
-        return list_actions
diff --git a/aidge_export_arm_cortexm/templates/memory/mem_info.jinja b/aidge_export_arm_cortexm/templates/memory/mem_info.jinja
deleted file mode 100644
index f835d9649a599c9339256c59d5941fcfc8f1b545..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/templates/memory/mem_info.jinja
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef MEM_INFO_H
-#define MEM_INFO_H
-
-#define MEMORY_SIZE {{ mem_size }}
-#define MEMORY_ALIGNMENT {{ mem_alignment }}
-
-{% for i in range(mem_info|length) -%}
-{%- set layer_name = mem_info[i][0] %}
-/* {{layer_name}} memory */
-{% for j in range(1, mem_info[i]|length) %}
-#define {{ layer_name|upper }}_MEM_{{ mem_info_legends[j]|upper }} {{ mem_info[i][j] }}
-{%- endfor %}
-{% endfor %}
-
-
-#endif /* MEM_INFO_H */
diff --git a/aidge_export_arm_cortexm/templates/network/dnn_header.jinja b/aidge_export_arm_cortexm/templates/network/dnn_header.jinja
deleted file mode 100644
index 7b238c167b9849cb41bad7b61ef0c596f8d29abd..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/templates/network/dnn_header.jinja
+++ /dev/null
@@ -1,22 +0,0 @@
-{#- For name header -#}
-#ifndef DNN_H
-#define DNN_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-{#- For libraries #}
-{% for lib in libraries %}
-#include <{{ lib }}>
-{%- endfor %}
-
-{% for func in functions %}
-{{ func }}
-{% endfor %}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* DNN_H */
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/templates/network/network_forward.jinja b/aidge_export_arm_cortexm/templates/network/network_forward.jinja
deleted file mode 100644
index bde5553020d1a36f225a1402172715a7446c4496..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/templates/network/network_forward.jinja
+++ /dev/null
@@ -1,28 +0,0 @@
-{#- For libraries -#}
-
-#include <stdint.h>
-
-#include "dnn.h"
-#include "network_functions.h"
-
-// Layer & memory configurations
-{%- for header in headers %}
-#include "{{ header }}"
-{%- endfor %}
-
-{# mem has the datatype of the first input #}
-{#- Change here to improve it -#}
-{% if inputs[0][0] %}
-static {{inputs[0][0]}} mem[MEMORY_SIZE];
-{% else %}
-static float mem[MEMORY_SIZE];
-{% endif %}
-
-{# Forward function #}
-{#- Support multiple inputs with different datatypes and multiple outputs with different datatypes -#}
-void model_forward({% for inp in inputs %}const {{inp[0]}}* {{inp[1]}}, {% endfor %}{% for out in outputs %}{{out[0]}}* {{out[1]}}{{ ", " if not loop.last else "" }}{% endfor %})
-{
-    {%- for action in actions %}
-    {{ action }}
-    {%- endfor %}
-}
diff --git a/aidge_export_arm_cortexm/templates/network/network_prototypes.jinja b/aidge_export_arm_cortexm/templates/network/network_prototypes.jinja
deleted file mode 100644
index 4d2f3452f1c2434c1d767ba654a0ca26ac2bae2a..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/templates/network/network_prototypes.jinja
+++ /dev/null
@@ -1,19 +0,0 @@
-{#- For name header -#}
-#ifndef NETWORK_FUNCTIONS_HPP
-#define NETWORK_FUNCTIONS_HPP
-
-{#- For libraries #}
-{% for lib in libraries %}
-#include <{{ lib }}>
-{%- endfor %}
-
-{% for file in files %}
-#include "{{ file }}"
-{%- endfor %}
-
-{% for func in functions %}
-{{ func }}
-{% endfor %}
-
-
-#endif /* NETWORK_FUNCTIONS_HPP */
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/utils/__init__.py b/aidge_export_arm_cortexm/utils/__init__.py
deleted file mode 100644
index 7ff6e8b69b617013aa30feeb785e83789bdee575..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/utils/__init__.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from pathlib import Path
-import os
-
-# Constants
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[1]
-
-
-def get_all_available_boards():
-    boards = {}
-
-    directory_path = Path(str(ROOT / "boards"))
-
-    for subfolder in directory_path.rglob('*'):
-        if subfolder.is_dir() and \
-            subfolder.name != "__pycache__" and \
-            (subfolder.parent / '__init__.py').exists() and \
-            not (subfolder / '__init__.py').exists():
-
-            # Get relative path to boards directory
-            relpath = str(subfolder.relative_to(directory_path))
-
-            # Get board name
-            board_name = relpath.replace('/', '').replace('\\', '')
-
-            boards[board_name.lower()] = str(subfolder)
-            
-    return boards
-
-AVAILABLE_BOARDS = get_all_available_boards()
-
-
-def has_board(board_name: str) -> bool:
-    return board_name.lower() in AVAILABLE_BOARDS.keys()
-
-
-OPERATORS_REGISTRY = {}
-
-def operator_register(*args):
-   
-    key_list = [arg for arg in args]
-
-    def decorator(operator):
-        class Wrapper(operator):
-            def __init__(self, *args, **kwargs):
-                super().__init__(*args, **kwargs)
-        
-        for key in key_list:
-            OPERATORS_REGISTRY[key] = operator
-
-        return Wrapper
-    return decorator
-
-def supported_operators():
-    return list(OPERATORS_REGISTRY.keys())
diff --git a/aidge_export_arm_cortexm/utils/converter.py b/aidge_export_arm_cortexm/utils/converter.py
deleted file mode 100644
index 3bc2f392b9f48b96972f3ff744bbba3bf945ca13..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/utils/converter.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import numpy as np
-import aidge_core
-
-def numpy_dtype2ctype(dtype):
-    if dtype == np.int8:
-        return "int8_t"
-    elif dtype == np.uint8:
-        return "uint8_t"
-    elif dtype == np.int16:
-        return "int16_t"
-    elif dtype == np.int32:
-        return "int32_t"
-    elif dtype == np.int64:
-        return "int64_t"
-    elif dtype == np.float32:
-        return "float"
-    elif dtype == np.float64:
-        return "double"
-    # Add more dtype mappings as needed
-    else:
-        raise ValueError(f"Unsupported {dtype} dtype")
-
-
-def aidge_datatype2ctype(datatype):
-    if datatype == aidge_core.dtype.int8:
-        return "int8_t"
-    elif datatype == aidge_core.dtype.uint8:
-        return "uint8_t"
-    elif datatype == aidge_core.dtype.int32:
-        return "int32_t"
-    elif datatype == aidge_core.dtype.int64:
-        return "int64_t"
-    elif datatype == aidge_core.dtype.float32:
-        return "float"
-    elif datatype == aidge_core.dtype.float64:
-        return "double"
-    # Add more dtype mappings as needed
-    else:
-        raise ValueError(f"Unsupported {datatype} aidge dtype")
-
-
-def aidge_datatype2dataformat(datatype):
-    if datatype == aidge_core.dtype.int8:
-        return "int8"
-    elif datatype == aidge_core.dtype.int32:
-        return "int32"
-    elif datatype == aidge_core.dtype.int64:
-        return "int64"
-    elif datatype == aidge_core.dtype.float32:
-        return "float32"
-    elif datatype == aidge_core.dtype.float64:
-        return "float64"
-    # Add more dtype mappings as needed
-    else:
-        raise ValueError(f"Unsupported {datatype} aidge dtype")
diff --git a/aidge_export_arm_cortexm/utils/generation.py b/aidge_export_arm_cortexm/utils/generation.py
deleted file mode 100644
index b80ffbf7b1ac8cdb88aebbd9bb24037e6a4d9b92..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/utils/generation.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import re
-import os
-import shutil
-from jinja2 import Environment, FileSystemLoader
-
-def get_functions_from_c_file(file_path):
-    functions = []
-    pattern = r'\w+\s+(\w+)\s*\(([^)]*)\)\s*{'
-    keyword = ['else', 'for', 'if', 'while', 'do']
-
-
-    with open(file_path, 'r') as file:
-        file_content = file.read()
-
-    matches = re.findall(pattern, file_content)
-    for match in matches:
-        function_name = match[0]
-        if function_name in keyword:
-            continue
-        arguments = match[1].split(',')
-        arguments = [arg.strip() for arg in arguments]
-
-        return_type = get_return_type(file_content, function_name)
-
-        function_string = f"{return_type} {function_name}({', '.join(arguments)});"
-        functions.append(function_string)
-
-    return functions
-
-
-def get_return_type(file_content, function_name):
-    pattern = rf'\w+\s+{function_name}\s*\([^)]*\)\s*{{'
-    return_type = re.search(pattern, file_content).group()
-    return_type = return_type.split()[0].strip()
-    return return_type
-
-
-def get_functions_from_c_folder(folder_path):
-    functions = []
-    
-    for root, _, files in os.walk(folder_path):
-        for file in files:
-            functions += get_functions_from_c_file(os.path.join(root, file))
-
-    return functions
-
-
-def get_filenames_from_folder(folder_path: str, pattern: str = r'.*'):
-    # Ensure the provided folder path exists
-    if not os.path.isdir(folder_path):
-        raise ValueError(f"The provided folder path '{folder_path}' does not exist.")
-
-    # Compile the regex pattern
-    regex = re.compile(pattern)
-
-    # List all files and directories in the provided folder path
-    all_entries = os.listdir(folder_path)
-
-    # Use a regex pattern to filter only filenames (excluding directories)
-    filenames = [entry for entry in all_entries if os.path.isfile(os.path.join(folder_path, entry)) and regex.match(entry)]
-
-    return filenames
-
-
-def copyfile(filename, dst_folder):
-
-    # If directory doesn't exist, create it
-    if not os.path.exists(dst_folder):
-        os.makedirs(dst_folder)
-
-    shutil.copy(filename, dst_folder)
diff --git a/aidge_export_arm_cortexm/utils/scheduler.py b/aidge_export_arm_cortexm/utils/scheduler.py
deleted file mode 100644
index 44540172c89506f75e6e5e0200d57edbea670d6a..0000000000000000000000000000000000000000
--- a/aidge_export_arm_cortexm/utils/scheduler.py
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-def topological_sort(graphview):
-    """Take an Aidge Graphview 
-    and returns a list of nodes topologically sorting
-    """
-
-    nodes = graphview.get_nodes()
-    result = []
-    visited = set()
-    visiting = set()  # To track nodes being currently visited
-
-    def visit(node):
-        if node in visiting:
-            raise ValueError("Graph contains a cycle")
-        if node in visited:
-            return
-        visiting.add(node)
-        for parent in node.get_parents():
-            if parent and parent in nodes:
-                visit(parent)
-        visiting.remove(node)
-        visited.add(node)
-        result.append(node)
-
-    for node in nodes:
-        visit(node)
-
-    return result
diff --git a/examples/README.md b/examples/README.md
deleted file mode 100644
index 643f196717043d908c7e0342f61883abcb3806f6..0000000000000000000000000000000000000000
--- a/examples/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Examples of how to use the Aidge ARM CortexM Export module
-
-This folder contains examples of how to use the `Aidge ARM CortexM Export` module in your projects:
-- [LeNet export for the MNIST dataset](./export_LeNet/)
-
-Feel free to propose your own contributions to this module!
\ No newline at end of file
diff --git a/examples/export_LeNet/export_lenet_fp32.ipynb b/examples/export_LeNet/export_lenet_fp32.ipynb
deleted file mode 100644
index 08b64318ec66e55754aa1e2302d0576854557e05..0000000000000000000000000000000000000000
--- a/examples/export_LeNet/export_lenet_fp32.ipynb
+++ /dev/null
@@ -1,281 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Export a MNIST model to a CPP standalone project"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%pip install requests numpy ipywidgets ipycanvas"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Download the model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "import requests"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Download onnx file if it has not been done before\n",
-    "if not os.path.isfile(\"./lenet_mnist.onnx\"):\n",
-    "    response = requests.get(\"https://huggingface.co/vtemplier/LeNet_MNIST/resolve/main/lenet_mnist.onnx?download=true\")\n",
-    "    if response.status_code == 200:\n",
-    "        with open(\"lenet_mnist.onnx\", 'wb') as f:\n",
-    "            f.write(response.content)\n",
-    "        print(\"ONNX model downloaded successfully.\")\n",
-    "    else:\n",
-    "        print(\"Failed to download ONNX model. Status code:\", response.status_code)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Load the model in Aidge and manipulate it"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import aidge_core\n",
-    "import aidge_backend_cpu\n",
-    "import aidge_onnx\n",
-    "import aidge_export_cpp\n",
-    "import aidge_export_arm_cortexm"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "model = aidge_onnx.load_onnx(\"lenet_mnist.onnx\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Remove Flatten node, useless in the CPP export\n",
-    "aidge_core.remove_flatten(model)\n",
-    "\n",
-    "# Freeze the model by setting constant to parameters producers\n",
-    "for node in model.get_nodes():\n",
-    "    if node.type() == \"Producer\":\n",
-    "        node.get_operator().set_attr(\"Constant\", True)\n",
-    "\n",
-    "# Create Producer Node for the Graph\n",
-    "input_node = aidge_core.Producer([1, 1, 28, 28], \"input\")\n",
-    "input_node.add_child(model)\n",
-    "model.add(input_node)\n",
-    "\n",
-    "# Configuration for the model + forward dimensions\n",
-    "model.compile(\"cpu\", aidge_core.DataType.Float32)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Generate scheduling of the model\n",
-    "scheduler = aidge_core.SequentialScheduler(model)\n",
-    "scheduler.generate_scheduling()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Export the model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "aidge_export_arm_cortexm.export(\"lenet_export_fp32\", model, scheduler, board=\"stm32h7\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Draw your own number"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from ipywidgets import HBox, VBox, Button, Layout\n",
-    "from ipycanvas import RoughCanvas, hold_canvas\n",
-    "\n",
-    "img_name = \"my_number.png\"\n",
-    "\n",
-    "canvas = RoughCanvas(width=28, height=28, sync_image_data=True)\n",
-    "\n",
-    "button_gen = Button(description=\"Generate PNG\")\n",
-    "button_clear = Button(description=\"Clear\")\n",
-    "\n",
-    "drawing = False\n",
-    "position = None\n",
-    "shape = []\n",
-    "\n",
-    "def on_erase_button_clicked(b):\n",
-    "    canvas.clear()\n",
-    "\n",
-    "def on_generate_button_clicked(b):\n",
-    "    try:\n",
-    "        canvas.to_file(img_name)\n",
-    "        print(f\"Image generated to {img_name} !\")\n",
-    "    except:\n",
-    "        print(\"Draw a number before generating the image.\")\n",
-    "\n",
-    "button_clear.on_click(on_erase_button_clicked)\n",
-    "button_gen.on_click(on_generate_button_clicked)\n",
-    "\n",
-    "def on_mouse_down(x, y):\n",
-    "    global drawing\n",
-    "    global position\n",
-    "    global shape\n",
-    "\n",
-    "    drawing = True\n",
-    "    position = (x, y)\n",
-    "    shape = [position]\n",
-    "\n",
-    "def on_mouse_move(x, y):\n",
-    "    global drawing\n",
-    "    global position\n",
-    "    global shape\n",
-    "\n",
-    "    if not drawing:\n",
-    "        return\n",
-    "\n",
-    "    with hold_canvas():\n",
-    "        canvas.stroke_line(position[0], position[1], x, y)\n",
-    "        position = (x, y)\n",
-    "\n",
-    "    shape.append(position)\n",
-    "\n",
-    "def on_mouse_up(x, y):\n",
-    "    global drawing\n",
-    "    global position\n",
-    "    global shape\n",
-    "\n",
-    "    drawing = False\n",
-    "\n",
-    "    with hold_canvas():\n",
-    "        canvas.stroke_line(position[0], position[1], x, y)\n",
-    "\n",
-    "    shape = []\n",
-    "\n",
-    "canvas.on_mouse_down(on_mouse_down)\n",
-    "canvas.on_mouse_move(on_mouse_move)\n",
-    "canvas.on_mouse_up(on_mouse_up)\n",
-    "\n",
-    "canvas.stroke_style = \"#000000\"\n",
-    "\n",
-    "VBox((canvas, HBox((button_gen, button_clear))),\n",
-    "     layout=Layout(height='auto', width=\"300px\"))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Generate inputs for testing the model from your drawing"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "try:\n",
-    "    number_np = canvas.get_image_data()\n",
-    "    # We got a numpy array with the shape of (28,28,4)\n",
-    "    # Transform it to (28,28)\n",
-    "    x = number_np[:, :, 3].astype(\"float32\")\n",
-    "    # Convert from [0, 255] to [0, 1] and export it\n",
-    "    aidge_export_cpp.generate_input_file(export_folder=\"lenet_export_fp32\",\n",
-    "                                         array_name=\"inputs\",\n",
-    "                                         array=x / 255)\n",
-    "except:\n",
-    "    print(\"Please draw a number in the previous cell before running this one.\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Compile the export and test it"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!cd lenet_export_fp32 && make build_image_docker && make build_docker"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "env",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.16"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}