diff --git a/.gitignore b/.gitignore
index 8eb208c0cff93ea86a79a962adbc89978b7ae50e..55ab6d78711f9af47af0458596f474ba44379676 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,4 +31,4 @@ ENV/
 xml*/
 
 # Model parameters
-*.onnx
\ No newline at end of file
+*.onnx
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Sigmoid/aidge_sigmoid_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Sigmoid/aidge_sigmoid_float32.c
new file mode 100644
index 0000000000000000000000000000000000000000..00e04d858b67ed8d97abbbe071f0880c5ebba8da
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Sigmoid/aidge_sigmoid_float32.c
@@ -0,0 +1,11 @@
+#include <math.h>
+
+
+void aidge_sigmoid_float32 (float* inputs, 
+                            float* outputs,
+                            unsigned int size)
+{
+    for (unsigned int i = 0; i < size; ++i) {
+        outputs[i] = 1 / ( 1 + exp(-inputs[i]) );
+    }
+}
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Softmax/aidge_softmax_chw_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Softmax/aidge_softmax_chw_float32.c
new file mode 100644
index 0000000000000000000000000000000000000000..93d710e5955f478e11ead1c7f848dc5d716b28f0
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Activation/Softmax/aidge_softmax_chw_float32.c
@@ -0,0 +1,35 @@
+#include <math.h>
+
+void aidge_softmax_chw_float32 (float* inputs, 
+                            float* outputs,
+                            int inputDims[],
+                            int axis,
+                            unsigned int size_inputDim,
+                            unsigned int size)
+{
+    axis += (axis >= 0) ? 0 : size_inputDim;
+
+    int postAxisElems = 1;
+    for (int i = axis+1; i < size_inputDim; ++i) {
+        postAxisElems *= inputDims[i];
+    }
+    int preAxisElems = 1;
+    for (int i = 0; i < axis; ++i) {
+        preAxisElems *= inputDims[i];
+    }
+
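+    // For each (pre-axis, post-axis) position, a first pass accumulates the
+    // exponential sum along the softmax axis and a second pass normalises each element.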
+    for (int i = 0; i < preAxisElems; ++i) {
+        for (int j = 0; j < postAxisElems; ++j) {
+            float sumExp = 0.0;
+            for (int k = 0; k < inputDims[axis]; ++k) {
+                int inIdx = i * inputDims[axis] * postAxisElems + k * postAxisElems + j;
+                sumExp += exp(inputs[inIdx]);
+            }
+            for (int  k = 0; k < inputDims[axis]; ++k) {
+                int inIdx = i * inputDims[axis] * postAxisElems + k * postAxisElems + j;
+                outputs[inIdx] = exp(inputs[inIdx]) / sumExp;
+            }
+        }
+    }
+}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/BatchNorm/aidge_batchnorm2d_chw_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/BatchNorm/aidge_batchnorm2d_chw_float32.c
new file mode 100644
index 0000000000000000000000000000000000000000..62cc292e3e29bf81fbb878659899f8aa0d591f45
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/BatchNorm/aidge_batchnorm2d_chw_float32.c
@@ -0,0 +1,28 @@
+#include <math.h>
+
+
+void aidge_batchnorm2d_chw_float32 (float* inputs,
+                                float* outputs,
+                                float* input_mean,
+                                float* input_var,
+                                float* scale,
+                                float* bias,
+                                float epsilon,
+                                const int nb_channels,
+                                const int channel_width, const int channel_height)
+{
+    int featureMapSize = channel_width * channel_height;
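+    // Per-channel normalisation over the full feature map:
+    // y = bias + scale * (x - mean) / sqrt(var + epsilon)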
+    for (int ch = 0; ch < nb_channels; ++ch) 
+    {
+        int ioIndex = ch * featureMapSize;
+        for (int i = ioIndex; i < ioIndex + featureMapSize; i++){
+            outputs[i] = bias[ch];
+        }
+        float var = sqrt(input_var[ch] + epsilon);
+
+        for (int feature = 0; feature < featureMapSize; ++feature) {
+            outputs[ioIndex + feature] += scale[ch] * (inputs[ioIndex + feature]-input_mean[ch]) / var;
+        }
+    
+    }
+}
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Add/aidge_add_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Add/aidge_add_float32.c
index 2ba879665215fac5396cc83c06a714c16ea2cd4e..c9cfc152851099e4f307ee95450ce28baf76114e 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Add/aidge_add_float32.c
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Add/aidge_add_float32.c
@@ -1,11 +1,99 @@
-
-
 void aidge_add_float32(float* input_a, 
                        float* input_b, 
                        float* output, 
-                       unsigned int size)
+                       int dim_a[],
+                       int dim_b[],
+                       int output_Dim[],
+                       int size_dima,
+                       int size_dimb,
+                       int size_outputDim,
+                       int output_size)
 {
-    for (unsigned int i = 0; i < size; ++i) {
-        output[i] = input_a[i] + input_b[i];
+    // Broadcast dims 
+    int ndim_a[size_outputDim];     
+    int ndim_b[size_outputDim];     
+
+    for (int i= 0; i<size_outputDim; i++){
+    	int idx = size_outputDim-size_dima;
+    	ndim_a[i] = (i< idx) ? 1 : dim_a[i-idx];
+    }
+
+
+    for (int i= 0; i<size_outputDim; i++){
+    	int idx = size_outputDim-size_dimb;
+    	ndim_b[i] = (i< idx) ? 1 : dim_b[i-idx];
+    }
+
+    // Find the highest equal dimension
+    int contiguousIdx = size_outputDim-1;
+    while (contiguousIdx >= 0 && ndim_a[contiguousIdx] == ndim_b[contiguousIdx]){
+        contiguousIdx--;
+    }
+    contiguousIdx++;
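+    // From contiguousIdx onwards both inputs share the same dimensions, so that tail is
+    // processed as one contiguous block; the leading dimensions are broadcast below.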
+
+    // Compute the highest number of contiguous data for each Tensor
+    int input0_contiguous_size = 1;
+    for(int i = contiguousIdx; i<size_outputDim; ++i){
+    	input0_contiguous_size *= ndim_a[i];
+    }
+
+    int input1_contiguous_size = 1;
+    for(int i = contiguousIdx; i<size_outputDim; ++i){
+    	input1_contiguous_size *= ndim_b[i];
+    }
+
+    int output_contiguous_size = 1;
+    for(int i = contiguousIdx; i<size_outputDim; ++i){
+    	output_contiguous_size *= output_Dim[i];
+    }
+
+
+    // initialize strides to iterate through data because of broadcasting
+    int stride_post0[contiguousIdx] ;
+    int stride_post1[contiguousIdx] ;
+    int stride_step0[contiguousIdx] ;
+    int stride_step1[contiguousIdx] ;
+    if (contiguousIdx > 0) {
+        stride_post0[contiguousIdx - 1] = 1;
+        stride_post1[contiguousIdx - 1] = 1;
+        for (int i = contiguousIdx-2; i != -1; --i) {
+            stride_post0[i] = stride_post0[i+1]*ndim_a[i+1];
+            stride_post1[i] = stride_post1[i+1]*ndim_b[i+1];
+        }
+        for (int i = 0; i < contiguousIdx; ++i) {
+            stride_step0[i] = (ndim_a[i] == 1) ? 1 - stride_post0[i] : 1;
+            stride_step1[i] = (ndim_b[i] == 1) ? 1 - stride_post1[i] : 1;
+        }
+    }
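+    // stride_step* moves an input offset forward by one block when a dimension boundary is
+    // crossed, except on broadcast (size-1) dimensions where it rewinds so the data is reused.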
+
+    int offsetIn0 = 0;
+    int offsetIn1 = 0;
+    int offsetOut = 0;
+    int nbMatrices = 1;
+    for(int i = 0; i<contiguousIdx; ++i){
+        nbMatrices *= output_Dim[i];
+    }
+    int dim = contiguousIdx - 1;
+
+    for(int stack = 0; stack < nbMatrices;){
+
+    	for(int i = 0; i < output_contiguous_size; ++i){
+    		int in0_id = (input0_contiguous_size != 1) ? i : 0;
+    		int in1_id = (input1_contiguous_size != 1) ? i : 0;
+    		output[i + offsetOut*output_contiguous_size] = input_a[in0_id + offsetIn0*input0_contiguous_size] + input_b[in1_id + offsetIn1*input1_contiguous_size];
+
+    	}
+        if (++stack < nbMatrices) {
+            int tmp_stack = stack;
+            while(tmp_stack % output_Dim[dim] == 0) {
+                tmp_stack /= output_Dim[dim];
+                dim--;
+            }
+            offsetIn0 += stride_step0[dim];
+            offsetIn1 += stride_step1[dim];
+            ++offsetOut;
+            dim = contiguousIdx - 1;
+        }
+
     }
 }
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Mul/aidge_mul_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Mul/aidge_mul_float32.c
index 28c22ab6c1a1575a4e3d48d6a0f4fb1dcc90d797..dbbf908cee8699bd09b6bc83b8abeeb481c26c05 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Mul/aidge_mul_float32.c
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/ElemWise/Mul/aidge_mul_float32.c
@@ -1,11 +1,99 @@
-
-
 void aidge_mul_float32(float* input_a, 
                        float* input_b, 
                        float* output, 
-                       unsigned int size)
+                       int dim_a[],
+                       int dim_b[],
+                       int output_Dim[],
+                       int size_dima,
+                       int size_dimb,
+                       int size_outputDim,
+                       int output_size)
 {
-    for (unsigned int i = 0; i < size; ++i) {
-        output[i] = input_a[i] * input_b[i];
+    // Broadcast dims 
+    int ndim_a[size_outputDim];     
+    int ndim_b[size_outputDim];     
+
+    for (int i= 0; i<size_outputDim; i++){
+    	int idx = size_outputDim-size_dima;
+    	ndim_a[i] = (i< idx) ? 1 : dim_a[i-idx];
+    }
+
+
+    for (int i= 0; i<size_outputDim; i++){
+    	int idx = size_outputDim-size_dimb;
+    	ndim_b[i] = (i< idx) ? 1 : dim_b[i-idx];
+    }
+
+    // Find the highest equal dimension
+    int contiguousIdx = size_outputDim-1;
+    while (contiguousIdx >= 0 && ndim_a[contiguousIdx] == ndim_b[contiguousIdx]){
+        contiguousIdx--;
+    }
+    contiguousIdx++;
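+    // From contiguousIdx onwards both inputs share the same dimensions, so that tail is
+    // processed as one contiguous block; the leading dimensions are broadcast below.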
+
+    // Compute the highest number of contiguous data for each Tensor
+    int input0_contiguous_size = 1;
+    for(int i = contiguousIdx; i<size_outputDim; ++i){
+    	input0_contiguous_size *= ndim_a[i];
+    }
+
+    int input1_contiguous_size = 1;
+    for(int i = contiguousIdx; i<size_outputDim; ++i){
+    	input1_contiguous_size *= ndim_b[i];
+    }
+
+    int output_contiguous_size = 1;
+    for(int i = contiguousIdx; i<size_outputDim; ++i){
+    	output_contiguous_size *= output_Dim[i];
+    }
+
+
+    // initialize strides to iterate through data because of broadcasting
+    int stride_post0[contiguousIdx] ;
+    int stride_post1[contiguousIdx] ;
+    int stride_step0[contiguousIdx] ;
+    int stride_step1[contiguousIdx] ;
+    if (contiguousIdx > 0) {
+        stride_post0[contiguousIdx - 1] = 1;
+        stride_post1[contiguousIdx - 1] = 1;
+        for (int i = contiguousIdx-2; i != -1; --i) {
+            stride_post0[i] = stride_post0[i+1]*ndim_a[i+1];
+            stride_post1[i] = stride_post1[i+1]*ndim_b[i+1];
+        }
+        for (int i = 0; i < contiguousIdx; ++i) {
+            stride_step0[i] = (ndim_a[i] == 1) ? 1 - stride_post0[i] : 1;
+            stride_step1[i] = (ndim_b[i] == 1) ? 1 - stride_post1[i] : 1;
+        }
+    }
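+    // stride_step* moves an input offset forward by one block when a dimension boundary is
+    // crossed, except on broadcast (size-1) dimensions where it rewinds so the data is reused.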
+
+    int offsetIn0 = 0;
+    int offsetIn1 = 0;
+    int offsetOut = 0;
+    int nbMatrices = 1;
+    for(int i = 0; i<contiguousIdx; ++i){
+        nbMatrices *= output_Dim[i];
+    }
+    int dim = contiguousIdx - 1;
+
+    for(int stack = 0; stack < nbMatrices;){
+
+    	for(int i = 0; i < output_contiguous_size; ++i){
+    		int in0_id = (input0_contiguous_size != 1) ? i : 0;
+    		int in1_id = (input1_contiguous_size != 1) ? i : 0;
+    		output[i + offsetOut*output_contiguous_size] = input_a[in0_id + offsetIn0*input0_contiguous_size] * input_b[in1_id + offsetIn1*input1_contiguous_size];
+
+    	}
+        if (++stack < nbMatrices) {
+            int tmp_stack = stack;
+            while(tmp_stack % output_Dim[dim] == 0) {
+                tmp_stack /= output_Dim[dim];
+                dim--;
+            }
+            offsetIn0 += stride_step0[dim];
+            offsetIn1 += stride_step1[dim];
+            ++offsetOut;
+            dim = contiguousIdx - 1;
+        }
+
     }
 }
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/FullyConnected/aidge_fc_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/FullyConnected/aidge_fc_float32.c
index a38efabdcdacde1c0b457232e655a877b6f3ac63..de169d49367eabf9c904a05e7cf3b9789f3ac9a4 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/FullyConnected/aidge_fc_float32.c
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/FullyConnected/aidge_fc_float32.c
@@ -4,18 +4,21 @@ void aidge_fc_float32 (float* inputs,
                        float* weights,
                        float* biases,
                        float* outputs,
+                       unsigned int batch_size,
                        unsigned int nb_inputs,
                        unsigned int nb_outputs)
 {
-    for (unsigned int out = 0; out < nb_outputs; ++out) {
-        // Init with bias
-        float accum = biases[out]; 
+    for (unsigned int batch = 0; batch < batch_size; ++batch){
+        for (unsigned int out = 0; out < nb_outputs; ++out) {
+            // Init with bias
+            float accum = biases[out]; 
 
-        for (unsigned int in = 0; in < nb_inputs; ++in) {
-            accum += inputs[in] * weights[out * nb_inputs + in];
-        }
+            for (unsigned int in = 0; in < nb_inputs; ++in) {
+                accum += inputs[batch*nb_inputs + in] * weights[out * nb_inputs + in];
+            }
 
-        // Store result
-        outputs[out] = accum;
+            // Store result
+            outputs[batch*nb_outputs + out] = accum;
+        }
     }
 }
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/MatMul/aidge_matmul_chw_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/MatMul/aidge_matmul_chw_float32.c
new file mode 100644
index 0000000000000000000000000000000000000000..764d3d046e969227665a2bf55ef9e2c66fbf4b4f
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/MatMul/aidge_matmul_chw_float32.c
@@ -0,0 +1,99 @@
+void aidge_matmul_chw_float32(float* input_a, 
+                       float* input_b, 
+                       float* output,
+                       int dim_a[],
+                       int dim_b[],
+                       int output_Dim[],
+                       int size_dima,
+                       int size_dimb,
+                       int size_outputDim)
+{
+        //initialize arrays storing broadcasted(or not) dims
+        int ndim_a[size_outputDim];     
+        int ndim_b[size_outputDim];
+        if ( size_dima == 1){ 
+            ndim_a[0] = 1;
+            ndim_a[1] = dim_a[0];
+        }
+        if ( size_dimb == 1){ 
+            ndim_b[0] = dim_b[0];
+            ndim_b[1] = 1;
+        }
+        
+        // Broadcast leading dims only for operands that are not 1-D (those were promoted above)
+        if (size_dima > 1){
+            for (int i = 0; i < size_outputDim; i++){
+                int idx = size_outputDim - size_dima;
+                ndim_a[i] = (i < idx) ? 1 : dim_a[i-idx];
+            }
+        }
+
+        if (size_dimb > 1){
+            for (int i = 0; i < size_outputDim; i++){
+                int idx = size_outputDim - size_dimb;
+                ndim_b[i] = (i < idx) ? 1 : dim_b[i-idx];
+            }
+        }
+        
+    // initialize strides to iterate through data because of broadcasting
+    int stride_post0[size_outputDim-2] ;
+    int stride_post1[size_outputDim-2] ; 
+    int stride_step0[size_outputDim-2] ;
+    int stride_step1[size_outputDim-2] ; 
+    if (size_outputDim > 2){ 
+        stride_post0[size_outputDim - 3] = 1;
+        stride_post1[size_outputDim - 3] = 1;
+        for (int i = size_outputDim-4; i != -1; --i) {
+            stride_post0[i] = stride_post0[i+1]*ndim_a[i+1];
+            stride_post1[i] = stride_post1[i+1]*ndim_b[i+1];
+        }
+        for (int i = 0; i < size_outputDim-2; ++i) {
+            stride_step0[i] = (ndim_a[i] == 1) ? 1 - stride_post0[i] : 1;
+            stride_step1[i] = (ndim_b[i] == 1) ? 1 - stride_post1[i] : 1;
+        }
+
+    }
+
+    
+    // If size_dima == size_dimb, both already equal size_outputDim;
+    // otherwise the missing leading dimensions have been broadcast to 1 above.
+
+    int nbMatrices = 1;
+    for(int i = size_outputDim -3; i>=0; --i){
+        nbMatrices *= output_Dim[i];
+    }
+    int dim = size_outputDim -3;
+
+
+    int offsetIn0 = 0;
+    int offsetIn1 = 0;
+    int offsetOut = 0;
+    const int n = ndim_a[size_outputDim - 2];
+    const int k = ndim_a[size_outputDim - 1];
+    const int m = ndim_b[size_outputDim - 1];
+    const int matrix0Size = n*k;
+    const int matrix1Size = k*m;
+    const int matrixOutSize = n*m;
+
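+    // Each iteration multiplies one [n, k] block of input_a by one [k, m] block of input_b;
+    // the offsets advance (or stay, for broadcast dims) according to the strides above.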
+    for(int stack = 0; stack < nbMatrices;){
+        for (int i = 0; i < n; ++i) {
+            for (int j = 0; j < m; ++j) {
+                float sum = 0;
+                for (int l = 0; l < k; ++l) {
+                    sum += (input_a[ offsetIn0*matrix0Size + i*k + l] * input_b[offsetIn1*matrix1Size + l*m + j]);
+                }
+                output[ offsetOut*matrixOutSize + i*m + j] = sum;
+            }
+        } 
+
+        if (++stack < nbMatrices) {
+            int tmp_stack = stack;
+            while(tmp_stack % output_Dim[dim] == 0) {
+                tmp_stack /= output_Dim[dim];
+                dim--;
+            }
+            offsetIn0 += stride_step0[dim];
+            offsetIn1 += stride_step1[dim];
+            ++offsetOut;
+            dim = size_outputDim -3;
+        }
+
+    }
+
+}
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Reshape/aidge_reshape_chw_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Reshape/aidge_reshape_chw_float32.c
new file mode 100644
index 0000000000000000000000000000000000000000..e55d77c7cf43cb2fee719a24b835797175ebbfab
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Reshape/aidge_reshape_chw_float32.c
@@ -0,0 +1,9 @@
+
+void aidge_reshape_chw_float32(float* inputs,
+                           float* outputs,
+                           unsigned int size)
+{
+    for (unsigned int i = 0; i < size; i++){
+        outputs[i] = inputs[i];
+    }
+}
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Transform/Gather/aidge_gather_chw_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Transform/Gather/aidge_gather_chw_float32.c
new file mode 100644
index 0000000000000000000000000000000000000000..62fee42da890f2a07aa8cc4a943ce1ad017c2534
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Transform/Gather/aidge_gather_chw_float32.c
@@ -0,0 +1,39 @@
+
+
+void aidge_gather_chw_float32 (float* inputs, 
+                            float* outputs,
+                            int axis,
+                            int indices[],
+                            int input_dims[],
+                            int size_inputDim,
+                            int indices_size,
+                            unsigned int size)
+{
+    axis += (axis >= 0) ? 0 : size_inputDim;
+
+    int postAxisElems = 1;
+    for (int i = axis + 1; i < size_inputDim; ++i) {
+        postAxisElems *= input_dims[i];
+    }
+
+    int preAxisElems = 1;
+    for (int i = 0; i < axis; ++i) {
+    	preAxisElems *= input_dims[i];
+    }
+
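+    // For every pre-axis slice, copy the post-axis block selected by each index
+    // (negative indices wrap around the gathered axis) into the output.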
+    int outputOffset = 0;
+    for (int i=0; i<preAxisElems; ++i){
+        for(int j = 0; j< indices_size; j++){
+            int idx = indices[j] >= 0 ?
+                                        indices[j] :
+                                        indices[j] + input_dims[axis];
+
+            for(int k = 0; k<postAxisElems;++k){
+            	int in_idx = i * postAxisElems * input_dims[axis] + idx * postAxisElems +k;
+            	float tmp = inputs[in_idx];
+                outputs[outputOffset + k] = tmp;
+            }
+            outputOffset += postAxisElems;
+        }
+    }
+}
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Transform/Transpose/aidge_transpose_chw_float32.c b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Transform/Transpose/aidge_transpose_chw_float32.c
new file mode 100644
index 0000000000000000000000000000000000000000..7b0313abced3b30f98853e3f0c2557562b0f27a3
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/kernels/Transform/Transpose/aidge_transpose_chw_float32.c
@@ -0,0 +1,39 @@
+void aidge_transpose_chw_float32   (float* inputs,    
+                                float* outputs,
+                                int input_dims[],
+                                int perm[],
+                                int output_dims[],
+                                unsigned int size_outputDims,
+                                unsigned int size)
+{
+	int newStrides[size_outputDims];
+	for (int i = 0; i<size_outputDims;++i){newStrides[i] = 1;}
+	for (int i = 0; i < size_outputDims; ++i) {
+		for (int j = i + 1; j < size_outputDims; ++j) {
+			newStrides[i] *= output_dims[j];
+		}
+	}
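+	// newStrides holds the row-major strides of the permuted output; each input element's
+	// coordinates are remapped through perm[] to compute its destination index.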
+
+	int indices[size_outputDims];
+	for (int i = 0; i<size_outputDims;++i){indices[i] = 0;}
+
+	for (int i = 0; i < size; ++i) {
+		int idx = 0;
+		for (int j = size_outputDims -1; j >=0; --j) {
+			idx += indices[perm[j]] * newStrides[j];
+		}
+
+		outputs[idx] = inputs[i];
+
+
+		for (int j = size_outputDims - 1; j >= 0; --j) {
+			if (indices[j] < input_dims[j] - 1) {
+				indices[j]++;
+				break;
+			}
+			else {
+				indices[j] = 0;
+			}
+		}
+	}
+}
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/activation.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/activation.jinja
index 50a8087e9adfc7ba8c64c6b1af30eb0cc26aa069..82817e995ee5b4f684c6cdcc3073637b88d0e6d0 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/activation.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/activation.jinja
@@ -7,7 +7,10 @@
 {# For layer configuration -#}
 #define {{ name|upper }}_INPUTS_SIZE {{ nb_inputs }}
 #define {{ name|upper }}_OUTPUTS_SIZE {{ nb_outputs }}
-
+{% if axis is defined %}
+#define {{ name|upper }}_AXIS {{ axis }}
+#define {{ name|upper }}_INPUT_DIMS_SIZE {{ input_dims|length }}
+{% endif %}
 #define {{ name|upper }}_ACTIVATION {{ activation_type }}
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/batchnorm2d.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/batchnorm2d.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..7fefbccc6f0924b7d84c3e1880b30647351a31ca
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/batchnorm2d.jinja
@@ -0,0 +1,14 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+{# For layer configuration -#}
+#define {{ name|upper }}_NB_BATCH {{ input_dims[0] }}
+#define {{ name|upper }}_NB_CHANNELS {{ input_dims[1] }}
+#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[2] }}
+#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[3] }}
+
+#define {{ name|upper }}_EPSILON {{ epsilon }}
+
+
+#endif /* {{ name|upper }}_LAYER_H */
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/elemwise.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/elemwise.jinja
index e09dd25398a12a7627b87956175e373cfefdb68d..c5e4281dba7b3146516bec019ed30b6136a10014 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/elemwise.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/elemwise.jinja
@@ -8,6 +8,10 @@
 #define {{ name|upper }}_INPUTS_SIZE {{ nb_inputs }}
 #define {{ name|upper }}_OUTPUTS_SIZE {{ nb_outputs }}
 
+
+#define {{name|upper}}_INPUT_A_DIMS_SIZE {{ input_dims[0]|length}}
+#define {{name|upper}}_INPUT_B_DIMS_SIZE {{ input_dims[1]|length}}
+#define {{name|upper}}_OUTPUT_DIMS_SIZE {{ output_dims[0]|length}}
 #define {{ name|upper }}_ELEM_OP {{ elemwise_op }}
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/fullyconnected.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/fullyconnected.jinja
index 44d8c2ecd511762940d318f0d923bea459c64924..0ffda543b5b45809b40916ff57583683c194f36e 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/fullyconnected.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/fullyconnected.jinja
@@ -3,15 +3,16 @@
 #define {{ name|upper }}_LAYER_H
 
 {# For layer configuration -#}
+#define {{name|upper}}_BATCH_SIZE {{ nb_batch}}
 #define {{ name|upper }}_NB_CHANNELS {{ nb_channels }}
 #define {{ name|upper }}_CHANNEL_HEIGHT {{ channel_height }}
 #define {{ name|upper }}_CHANNEL_WIDTH {{ channel_width }}
 #define {{ name|upper }}_NB_OUTPUTS {{ nb_outputs }}
+#define {{ name|upper }}_NB_INPUTS {{ nb_channels*channel_height*channel_width }}
 
 {#- Calculate sizes #}
 {%- set weights_size = nb_channels * channel_height * channel_width * nb_outputs %}
 #define {{ name|upper }}_WEIGHTS_SIZE {{ weights_size }}
 #define {{ name|upper }}_BIASES_SIZE {{ nb_outputs }}
 
-
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/gather.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/gather.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..67e616b5d61dded2696860f6407ad27819e085e2
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/gather.jinja
@@ -0,0 +1,15 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+{# For layer configuration -#}
+
+#define {{name|upper}}_AXIS {{ axis }}
+
+#define {{name|upper}}_INDEXES_DIMS_SIZE {{ indices|length}}
+#define {{name|upper}}_INPUT_DIMS_SIZE {{ input_dims|length}}
+
+#define {{name|upper}}_OUTPUT_SIZE {{ nb_outputs}}
+
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/matmul.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/matmul.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..86b40d40784ca8fb6a59fb1172627843d3df80db
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/matmul.jinja
@@ -0,0 +1,12 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+{# For layer configuration -#}
+
+#define {{name|upper}}_INPUT_A_DIMS_SIZE {{ input_dims[0]|length}}
+#define {{name|upper}}_INPUT_B_DIMS_SIZE {{ input_dims[1]|length}}
+#define {{name|upper}}_OUTPUT_DIMS_SIZE {{ output_dims[0]|length}}
+
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/reshape.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/reshape.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..d30e6ef9849f0510c6ee849359de89cdec2c92f9
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/reshape.jinja
@@ -0,0 +1,9 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+{# For layer configuration -#}
+#define {{ name|upper }}_INPUTS_SIZE {{ nb_inputs }}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ nb_outputs }}
+
+#endif /* {{ name|upper }}_LAYER_H */
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/transpose.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/transpose.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..e9eea6ba070d7569dc42edbc50df94a2464d2786
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/configuration/transpose.jinja
@@ -0,0 +1,12 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+{# For layer configuration -#}
+
+#define {{name|upper}}_OUTPUT_DIMS_SIZE {{ output_dims|length}}
+
+#define {{name|upper}}_OUTPUT_SIZE {{ nb_outputs}}
+
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation.jinja
index 01a61a3b73e5176540a3972b8730fdef600e42bc..b1a2289e77ab5789812ad91f4bd4dfccbfcae64e 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation.jinja
@@ -1 +1 @@
-aidge_{{activation_type|lower}}_{{dataformat}} ({{input_name}}, {{output_name}}, {{name|upper}}_OUTPUTS_SIZE);
\ No newline at end of file
+    aidge_{{activation_type|lower}}_{{dataformat}}({{input_name}}, {{output_name}}, {% if activation_type is eq('softmax') %} {{input_name}}_DIMS, {{name|upper}}_AXIS, {{name|upper}}_INPUT_DIMS_SIZE,{% endif %} {{name|upper}}_OUTPUTS_SIZE);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation_chw.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation_chw.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..796a3718483e4fe995e9904c3faeb24693ad5431
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/activation_chw.jinja
@@ -0,0 +1 @@
+    aidge_{{activation_type|lower}}_chw_{{dataformat}}({{input_name}}, {{output_name}}, {% if activation_type is eq('softmax') %} {{input_name}}_DIMS, {{name|upper}}_AXIS, {{name|upper}}_INPUT_DIMS_SIZE,{% endif %} {{name|upper}}_OUTPUTS_SIZE);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/batchnorm2d.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/batchnorm2d.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..5e7c73c8e55233b2c3d93c99fe3dc6e7682fe503
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/batchnorm2d.jinja
@@ -0,0 +1 @@
+aidge_batchnorm2d_chw_{{dataformat}} ({{input_name}}, {{output_name}}, {{running_mean_name}}, {{running_var_name}}, {{weight_name}}, {{bias_name}}, {{ name|upper }}_EPSILON, {{ name|upper }}_NB_CHANNELS, {{ name|upper }}_CHANNELS_WIDTH, {{ name|upper }}_CHANNELS_HEIGHT);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/elemwise.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/elemwise.jinja
index 2f79268d60b274fd48917a38e2c1240682190b1c..0f3f1c8758a4d4d8944384973bf90054b5e91fca 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/elemwise.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/elemwise.jinja
@@ -1 +1 @@
-aidge_{{elemwise_type|lower}}_{{dataformat}} ({{input1_name}}, {{input2_name}}, {{output_name}}, {{name|upper}}_OUTPUTS_SIZE);
\ No newline at end of file
+aidge_{{elemwise_type|lower}}_{{dataformat}}  ({{inputa_name}}, {{inputb_name}}, {{output_name}}, {{inputa_name}}_DIMS, {{inputb_name}}_DIMS, {{output_name}}_DIMS,{{name|upper}}_INPUT_A_DIMS_SIZE,{{name|upper}}_INPUT_B_DIMS_SIZE,{{name|upper}}_OUTPUT_DIMS_SIZE, {{name|upper}}_OUTPUTS_SIZE);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/fullyconnected.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/fullyconnected.jinja
index d4f8a80a8945eb2e77b5a1d9e686797244db3762..b57ffdfd3de8699b9c644c56d3c341ed764ee73e 100644
--- a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/fullyconnected.jinja
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/fullyconnected.jinja
@@ -1 +1 @@
-aidge_fc_{{dataformat}} ({{input_name}}, {{weight_name}}, {{bias_name}}, {{output_name}}, {{name|upper}}_NB_CHANNELS, {{name|upper}}_NB_OUTPUTS);
\ No newline at end of file
+aidge_fc_{{dataformat}} ({{input_name}}, {{weight_name}}, {{bias_name}}, {{output_name}}, {{name|upper}}_BATCH_SIZE, {{name|upper}}_NB_INPUTS, {{name|upper}}_NB_OUTPUTS);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/gather.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/gather.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..f709c8699dbe75793df18d8e341010b3793b8ad6
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/gather.jinja
@@ -0,0 +1 @@
+aidge_gather_chw_{{dataformat}} ({{input_name}}, {{output_name}}, {{name|upper}}_AXIS, {{name}}_INDEXES , {{input_name}}_DIMS, {{name|upper}}_INPUT_DIMS_SIZE,{{name|upper}}_INDEXES_DIMS_SIZE,{{name|upper}}_OUTPUT_SIZE);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/matmul.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/matmul.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..15ff05fec3bb40332ac7968d20f594009f7903a4
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/matmul.jinja
@@ -0,0 +1 @@
+aidge_matmul_chw_{{dataformat}} ({{inputa_name}}, {{inputb_name}}, {{output_name}}, {{inputa_name}}_DIMS, {{inputb_name}}_DIMS, {{output_name}}_DIMS ,{{name|upper}}_INPUT_A_DIMS_SIZE,{{name|upper}}_INPUT_B_DIMS_SIZE,{{name|upper}}_OUTPUT_DIMS_SIZE);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/reshape.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/reshape.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..059e90e33512c1d0ba8da8b716e78e375b28eda6
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/reshape.jinja
@@ -0,0 +1 @@
+aidge_reshape_chw_{{dataformat}}({{input_name}}, {{output_name}}, {{name|upper}}_OUTPUTS_SIZE);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/transpose.jinja b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/transpose.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..748cc71ac86c31d32bf93396dfd9796948355bbc
--- /dev/null
+++ b/aidge_export_arm_cortexm/_Aidge_Arm/templates/kernel/transpose.jinja
@@ -0,0 +1 @@
+aidge_transpose_chw_{{dataformat}} ({{input_name}}, {{output_name}},{{input_name}}_DIMS, {{name}}_PERMUTATIONS, {{output_name}}_DIMS, {{name|upper}}_OUTPUT_DIMS_SIZE, {{name|upper}}_OUTPUT_SIZE);
\ No newline at end of file
diff --git a/aidge_export_arm_cortexm/export.py b/aidge_export_arm_cortexm/export.py
index 26e7d87f1259a16b8f3d3bdd85e7e1f640f43de0..3e8e2d520cb1b899737de7aae689254b9384aa1b 100644
--- a/aidge_export_arm_cortexm/export.py
+++ b/aidge_export_arm_cortexm/export.py
@@ -71,7 +71,6 @@ def export(export_folder_name,
 
         if node.type() in supported_operators():
             op = OPERATORS_REGISTRY[node.type()](node, board, library)
-
             # Export the configuration
             list_configs = op.export(dnn_folder, list_configs)
 
@@ -95,20 +94,26 @@ def export(export_folder_name,
     # It supposes the entry nodes are producers with constant=false
     # Store the datatype & name
     list_inputs_name = []
+    first_element_added = False
     for node in graphview.get_nodes():
         if node.type() == "Producer":
-            if not node.get_operator().get_attr("Constant"):
+            if not first_element_added:
+                export_type = aidge2c(node.get_operator().get_output(0).dtype())
+                list_inputs_name.append((export_type, node.name()))
+                first_element_added = True
+            if not node.get_operator().attr.constant:
                 export_type = aidge2c(node.get_operator().get_output(0).dtype())
                 list_inputs_name.append((export_type, node.name()))
 
     # Get output nodes
     # Store the datatype & name, like entry nodes
+
     list_outputs_name = []
     for node in graphview.get_nodes():
         if len(node.get_children()) == 0:
-            if node.get_operator().has_attr("DataType"):
+            if node.get_operator().attr.has_attr('dtype'):
                 # Temporary fix because impossible to set DataType of a generic operator
-                export_type = aidge2c(node.get_operator().get_attr("DataType"))
+                export_type = aidge2c(node.get_operator().attr.dtype)
             else:
                 export_type = aidge2c(node.get_operator().get_output(0).dtype())
 
@@ -123,7 +128,7 @@ def export(export_folder_name,
     generate_file(
         str(dnn_folder / "src" / forward_file),
         str(ROOT / "templates" / "network" / "network_forward.jinja"),
-        headers=list_configs,
+        headers=set(list_configs),
         actions=list_actions,
         inputs= list_inputs_name,
         outputs=list_outputs_name
diff --git a/aidge_export_arm_cortexm/memory.py b/aidge_export_arm_cortexm/memory.py
index 5e2cd36de1130b55f8978ae80d57ac7c30facb6a..7f7983fc7898bbd2d7fa383ecc0b5f16f290918f 100644
--- a/aidge_export_arm_cortexm/memory.py
+++ b/aidge_export_arm_cortexm/memory.py
@@ -29,13 +29,14 @@ MEMORY_INFO_TEMPLATE = ["layer_name", "size", "stride", "length", "count", "cont
 # Default memory management, which can be used for development
 def compute_default_mem_info(scheduler: aidge_core.Scheduler):
     
-    list_forward_nodes = scheduler.get_static_scheduling()
+    list_forward_nodes = scheduler
     mem_info = []
     mem_size = 0
 
     # Exclude Producers and the last layers (because the results are stored outside the export)
     for i, node in enumerate(list_forward_nodes):
-        if node.type() != "Producer":
+        if node.type() != "Producer" and node.type() != "Reshape":
+        # if node.type() != "Producer":
             if len(node.get_children()) != 0:
                 dims = node.get_operator().get_output(0).dims()
                 mem = 1
@@ -76,7 +77,7 @@ def generate_optimized_memory_info(stats_folder: Path,
 
         # Skip memory management for the parameter producers
         if node.type() == "Producer":
-            if node.get_operator().get_attr("Constant"):
+            if node.get_operator().attr.constant:
                 continue
             else:
                 # Input memory management (suppose tensor ends with [:, channel, height, width]))
@@ -96,6 +97,7 @@ def generate_optimized_memory_info(stats_folder: Path,
                 cont_size = tensor.dims()[-1] * tensor.dims()[-2] * tensor.dims()[-3] # Size of input
                 wrap_offset = 0     # No wrapping
                 wrap_size = 0       # No wrapping
+        # elif node.type() != "Reshape":
         else:
             plane = mem_planes[node][0]
 
diff --git a/aidge_export_arm_cortexm/operators.py b/aidge_export_arm_cortexm/operators.py
index eb4603252c17f676b21e96b0d4fe05f9824e683d..d6d6836b296516dee62f9fb77dda389e68cd6a5a 100644
--- a/aidge_export_arm_cortexm/operators.py
+++ b/aidge_export_arm_cortexm/operators.py
@@ -62,8 +62,8 @@ def export_params(name:str,
 ################### Actions ##################
 ##############################################
 
-def set_up_output(name, datatype):
-    return f"{datatype}* {name} = ({datatype}*) mem + {name.upper()}_MEM_CONT_OFFSET;"
+def set_up_output(name, dtype):
+    return f"{dtype}* {name} = ({dtype}*) mem + {name.upper()}_MEM_CONT_OFFSET;"
 
 
 ##############################################
@@ -75,7 +75,7 @@ class Producer_ARMCortexM:
     def __init__(self, node):
         self.name = node.name()
         self.operator = node.get_operator()
-        self.constant = self.operator.get_attr("Constant")
+        self.constant = self.operator.attr.constant
         self.values = np.array(self.operator.get_output(0))
 
     def export(self, export_file:Path, format:str = "NHWC"):
@@ -212,7 +212,7 @@ class ReLU_ARMCortexM(ExportNode):
         self.board = board
         self.library = library
         self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.datatype = aidge2c(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
 
     def export(self, export_folder:Path, list_configs:list):
 
@@ -236,7 +236,7 @@ class ReLU_ARMCortexM(ExportNode):
     def forward(self, list_actions:list):
 
         if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.datatype))
+            list_actions.append(set_up_output(self.name, self.dtype))
 
         if self.library == "aidge":
             list_actions.append(generate_str(
@@ -265,21 +265,22 @@ class Conv_ARMCortexM(ExportNode):
         self.board = board
         self.library = library
         self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.datatype = aidge2c(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
         self.scaling = Scaling()("no_scaling")
         self.activation = "Linear"
 
-        self.kernel = node.get_operator().get_attr("KernelDims")
-        self.stride = node.get_operator().get_attr("StrideDims")
-        self.dilation = node.get_operator().get_attr("DilationDims")
+        self.kernel = node.get_operator().attr.kernel_dims
+        self.stride = node.get_operator().attr.stride_dims
+        self.dilation = node.get_operator().attr.dilation_dims
 
         # No padding with Conv
         # Use PaddedConv to add padding attribute
         self.padding = [0, 0]
 
-        self.nb_channels = node.get_operator().get_attr("InChannels")
-        self.nb_outputs = node.get_operator().get_attr("OutChannels")
-
+        self.nb_channels = node.get_operator().in_channels()
+        self.nb_outputs = node.get_operator().out_channels()
+        if self.inputs[0] is None:
+            raise RuntimeError(f"No input tensor connected to Conv node {node.name()}.")
         if len(self.inputs_dims[0]) == 4:
             # if dims == [batch, nb_channels, height, width]
             # transform to [nb_channels, height, width]
@@ -347,7 +348,7 @@ class Conv_ARMCortexM(ExportNode):
     def forward(self, list_actions:list):
 
         if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.datatype))
+            list_actions.append(set_up_output(self.name, self.dtype))
 
         if self.library == "aidge":
             list_actions.append(generate_str(
@@ -388,17 +389,17 @@ class PaddedConv_ARMCortexM(Conv_ARMCortexM):
         self.board = board
         self.library = library
         self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.datatype = aidge2c(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
         self.scaling = Scaling()("no_scaling")
         self.activation = "Linear"
 
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad":
-                self.padding = n.get_operator().get_attr("BeginEndBorders")
+                self.padding = n.get_operator().attr.begin_end_borders
             if n.type() == "Conv":
-                self.kernel = n.get_operator().get_attr("KernelDims")
-                self.stride = n.get_operator().get_attr("StrideDims")
-                self.dilation = n.get_operator().get_attr("DilationDims")
+                self.kernel = n.get_operator().attr.kernel_dims
+                self.stride = n.get_operator().attr.stride_dims
+                self.dilation = n.get_operator().attr.dilation_dims
 
         if len(self.inputs_dims[0]) == 4:
             # if dims == [batch, nb_channels, height, width]
@@ -416,23 +417,23 @@ class ConvReluScaling_ARMCortexM(Conv_ARMCortexM):
      def __init__(self, node, board, library):
         super(Conv_ARMCortexM, self).__init__(node, board, library)
 
-        if self.operator.has_attr("BeginEndBorders"):
-            self.padding = self.operator.get_attr("BeginEndBorders")
+        if self.operator.has_attr("begin_end_borders"):
+            self.padding = self.operator.attr.begin_end_borders
 
         self.activation = "Rectifier"
 
-        # Should do this line but there is a bug while changing the datatype of generic operator...
-        # self.datatype = aidge2c(node.get_operator().get_output(0).dtype())
+        # Should do this line but there is a bug while changing the dtype of generic operator...
+        # self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
         # Do this instead
-        if self.operator.get_attr("quantizedNbBits") == 8:
-            if self.operator.get_attr("isOutputUnsigned"):
-                self.datatype = aidge2c(aidge_core.DataType.UInt8)
+        if self.operator.attr.quantized_nb_bits == 8:
+            if self.operator.attr.is_output_unsigned:
+                self.dtype = aidge2c(aidge_core.dtype.uint8)
             else:
-                self.datatype = aidge2c(aidge_core.DataType.Int8)
+                self.dtype = aidge2c(aidge_core.dtype.int8)
 
         # Impose Single Shift (perhaps change it to have a more modular system)
-        self.scaling = Scaling(self.operator.get_attr("scalingFactor"),
-                               self.operator.get_attr("quantizedNbBits"))("floating_point")
+        self.scaling = Scaling(self.operator.attr.scaling_factor,
+                               self.operator.attr.quantized_nb_bits)("floating_point")
 
 
 class Pooling_ARMCortexM(ExportNode):
@@ -442,12 +443,12 @@ class Pooling_ARMCortexM(ExportNode):
         self.board = board
         self.library = library
         self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.datatype = aidge2c(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
         self.pool_type = "None"
         self.activation = "Linear"
 
-        self.kernel = node.get_operator().get_attr("KernelDims")
-        self.stride = node.get_operator().get_attr("StrideDims")
+        self.kernel = node.get_operator().attr.kernel_dims
+        self.stride = node.get_operator().attr.stride_dims
 
         # No padding with MaxPooling or AvgPooling
         # Use PaddedMaxPooling/PaddedAvgPooling to add padding attribute
@@ -512,7 +513,7 @@ class Pooling_ARMCortexM(ExportNode):
 
     def forward(self, list_actions:list):
         if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.datatype))
+            list_actions.append(set_up_output(self.name, self.dtype))
 
         if self.library == "aidge":
 
@@ -566,31 +567,36 @@ class FC_ARMCortexM(ExportNode):
         self.board = board
         self.library = library
         self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
-        self.datatype = aidge2c(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge2c(node.get_operator().get_output(0).dtype())
         self.scaling = Scaling()("no_scaling")
         self.activation = "Linear"
 
-        if len(self.inputs_dims[0]) == 4:
-            # if dims == [batch, nb_channels, height, width]
-            # transform to [nb_channels, height, width]
-            self.inputs_dims[0] = self.inputs_dims[0][1:]
+        # if len(self.inputs_dims[0]) == 4:
+        #     # if dims == [batch, nb_channels, height, width]
+        #     # transform to [nb_channels, height, width]
+        #     self.inputs_dims[0] = self.inputs_dims[0][1:]
+
+        #     # It also means that we need to change the dataformat of the weights
+        #     weights = self.producers[0].values
+        #     if len(weights.shape) == 2:
+        #         weights = weights.reshape(weights.shape[0], weights.shape[1], 1, 1)
+
+        if len(self.inputs_dims[0]) == 3:
+            # if dims == [nb_channels, height, width]
+            # transform to [batch, nb_channels, height, width]
+            self.inputs_dims[0] = [1, self.inputs_dims[0][0], self.inputs_dims[0][1], self.inputs_dims[0][2]]
 
-            # It also means that we need to change the dataformat of the weights
-            weights = self.producers[0].values
-            if len(weights.shape) == 2:
-                weights = weights.reshape(weights.shape[0], weights.shape[1], 1, 1)
 
         elif len(self.inputs_dims[0]) == 2:
             # if dims == [batch, nb_channels]
-            # transform to [nb_channels, 1, 1]
-            self.inputs_dims[0] = [self.inputs_dims[0][1], 1, 1]
+            # transform to [batch,nb_channels, 1, 1]
+            self.inputs_dims[0] = [self.inputs_dims[0][0], self.inputs_dims[0][1], 1, 1]
 
 
-        if len(self.outputs_dims[0]) == 2:
-            # if dims == [batch, nb_outputs]
-            # transform to [nb_outputs, 1, 1]
-            self.outputs_dims[0] = [self.outputs_dims[0][1], 1, 1]
-
+        # if len(self.outputs_dims[0]) == 2:
+        #     # if dims == [batch, nb_outputs]
+        #     # transform to [nb_outputs, 1, 1]
+        #     self.outputs_dims[0] = [self.outputs_dims[0][1], 1, 1]
 
     def export(self, export_folder:Path, list_configs:list):
 
@@ -621,10 +627,11 @@ class FC_ARMCortexM(ExportNode):
                 str(export_folder / "layers" / f"{self.name}.h"),
                 str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "fullyconnected.jinja"),
                 name=self.name,
-                nb_channels=self.inputs_dims[0][0],
-                channel_height=self.inputs_dims[0][1],
-                channel_width=self.inputs_dims[0][2],
-                nb_outputs=self.outputs_dims[0][0])
+                nb_batch=self.inputs_dims[0][0],
+                nb_channels=self.inputs_dims[0][1],
+                channel_height=self.inputs_dims[0][2],
+                channel_width=self.inputs_dims[0][3],
+                nb_outputs=self.outputs_dims[0][1])
 
         elif self.library == "n2d2":
 
@@ -642,7 +649,7 @@ class FC_ARMCortexM(ExportNode):
 
     def forward(self, list_actions:list):
         if not self.is_last:
-            list_actions.append(set_up_output(self.name, self.datatype))
+            list_actions.append(set_up_output(self.name, self.dtype))
 
         if self.library == "aidge":
             list_actions.append(generate_str(
@@ -679,15 +686,15 @@ class FCScaling_ARMCortexM(FC_ARMCortexM):
         # Should do this line but there is a bug while changing the datatype of generic operator...
         # self.datatype = aidge2c(node.get_operator().get_output(0).dtype())
         # Do this instead
-        if self.operator.get_attr("quantizedNbBits") == 8:
-            if self.operator.get_attr("isOutputUnsigned"):
-                self.datatype = aidge2c(aidge_core.DataType.UInt8)
+        if self.operator.attr.quantized_nb_bits == 8:
+            if self.operator.attr.is_output_unsigned:
+                self.dtype = aidge2c(aidge_core.dtype.uint8)
             else:
-                self.datatype = aidge2c(aidge_core.DataType.Int8)
+                self.dtype = aidge2c(aidge_core.dtype.int8)
 
         # Impose Single Shift (perhaps change it to have a more modular system)
-        self.scaling = Scaling(self.operator.get_attr("scalingFactor"),
-                               self.operator.get_attr("quantizedNbBits"))("floating_point")
+        self.scaling = Scaling(self.operator.attr.scaling_factor,
+                               self.operator.attr.quantized_nb_bits)("floating_point")
 
 
 @operator_register("FcReluScaling")
@@ -696,3 +703,536 @@ class FCReluScaling_ARMCortexM(FCScaling_ARMCortexM):
         super(FCScaling_ARMCortexM, self).__init__(node, board, library)
 
         self.activation = "Rectifier"
+
+
+@operator_register("Add")
+class Add_ARMCortexM(ExportNode):
+    def __init__(self, node, board, library):
+
+        super().__init__(node)
+
+        self.producers = []
+
+        for i in range(0, len(node.inputs())):
+            if node.input(i)[0].type() == "Producer" and node.input(i)[0].get_operator().attr.constant:
+                producer = node.input(i)[0]
+                self.producers.append(Producer_ARMCortexM(producer))
+
+        self.board = board
+        self.library = library
+        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+
+
+
+    def export(self, export_folder:Path,list_configs:list):
+        for i in range(len(self.producers)):
+            self.producers[i].export(export_folder / "parameters" / f"{self.producers[i].name}.h")
+            list_configs.append(f"parameters/{self.producers[i].name}.h")
+
+        list_configs.append(f"layers/{self.name}.h")
+
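+        # Export the dimensions of both inputs and of the output as int32 arrays
+        # so the broadcast-aware add kernel can index them at run time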
+        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
+        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
+
+        export_params(f"{self.inputs[1].name()}_DIMS", np.array(self.inputs_dims[1],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[1].name()}_DIMS.h")
+        list_configs.append(f"dimensions/{self.inputs[1].name()}_DIMS.h")
+
+        export_params(f"{self.name}_DIMS", np.array(self.outputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.name}_DIMS.h")
+        list_configs.append(f"dimensions/{self.name}_DIMS.h")
+
+        if self.library == "aidge":
+            if self.dataformat == "float32":
+                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "ElemWise" / "Add" / "aidge_add_float32.c"),
+                         str(export_folder / "src" / "kernels"))
+                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
+                         str(Path(export_folder) / "include"))
+
+                generate_file(
+                    str(export_folder / "layers" / f"{self.name}.h"),
+                    str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "elemwise.jinja"),
+                    name=self.name,
+                    nb_inputs=np.prod(self.inputs_dims[0]),
+                    nb_outputs=np.prod(self.outputs_dims[0]),
+                    input_dims=self.inputs_dims,
+                    output_dims=self.outputs_dims,
+                    elemwise_op="\"ADD\"")
+
+
+        return list_configs
+
+    def forward(self, list_actions:list):
+        if not self.is_last:
+            list_actions.append(set_up_output(self.name, self.dtype))
+
+        if self.library == "aidge":
+
+            list_actions.append(generate_str(
+                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "elemwise.jinja"),
+                name=self.name,
+                elemwise_type = "add",
+                dataformat=self.dataformat,
+                inputa_name=self.inputs[0].name(),
+                inputb_name=self.inputs[1].name(),
+                output_name=self.name))
+
+
+        return list_actions
+
+@operator_register("Mul")
+class Mul_ARMCortexM(ExportNode):
+    def __init__(self, node, board, library):
+
+        super().__init__(node)
+
+        self.producers = []
+
+        for i in range(0, len(node.inputs())):
+            if node.input(i)[0].type() == "Producer" and node.input(i)[0].get_operator().attr.constant:
+                producer = node.input(i)[0]
+                self.producers.append(Producer_ARMCortexM(producer))
+
+        self.board = board
+        self.library = library
+        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+
+
+    def export(self, export_folder:Path,list_configs:list):
+        for i in range(len(self.producers)):
+            self.producers[i].export(export_folder / "parameters" / f"{self.producers[i].name}.h")
+            list_configs.append(f"parameters/{self.producers[i].name}.h")
+
+        list_configs.append(f"layers/{self.name}.h")
+
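+        # Export the dimensions of both inputs and of the output as int32 arrays
+        # so the broadcast-aware mul kernel can index them at run time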
+        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
+        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
+
+        export_params(f"{self.inputs[1].name()}_DIMS", np.array(self.inputs_dims[1],dtype=np.int32),export_folder / "dimensions" / f"{self.inputs[1].name()}_DIMS.h")
+        list_configs.append(f"dimensions/{self.inputs[1].name()}_DIMS.h")
+
+        export_params(f"{self.name}_DIMS", np.array(self.outputs_dims[0],dtype=np.int32),export_folder / "dimensions" / f"{self.name}_DIMS.h")
+        list_configs.append(f"dimensions/{self.name}_DIMS.h")
+
+        if self.library == "aidge":
+            if self.dataformat == "float32":
+                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "ElemWise" / "Mul" / "aidge_mul_float32.c"),
+                         str(export_folder / "src" / "kernels"))
+                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
+                         str(Path(export_folder) / "include"))
+
+                generate_file(
+                    str(export_folder / "layers" / f"{self.name}.h"),
+                    str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "elemwise.jinja"),
+                    name=self.name,
+                    nb_inputs=np.prod(self.inputs_dims[0]),
+                    nb_outputs=np.prod(self.outputs_dims[0]),
+                    input_dims=self.inputs_dims,
+                    output_dims=self.outputs_dims,
+                    elemwise_op="\"MUL\"")
+
+
+        return list_configs
+
+    def forward(self, list_actions:list):
+        if not self.is_last:
+            list_actions.append(set_up_output(self.name, self.dtype))
+
+        if self.library == "aidge":
+
+            list_actions.append(generate_str(
+                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "elemwise.jinja"),
+                name=self.name,
+                elemwise_type="mul",
+                dataformat=self.dataformat,
+                inputa_name=self.inputs[0].name(),
+                inputb_name=self.inputs[1].name(),
+                output_name=self.name))
+
+
+        return list_actions
+
+@operator_register("Softmax")
+class Softmax_ARMCortexM(ExportNode):
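+    """Export node for the Softmax activation, applied along the operator's axis attribute."""
+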
+    def __init__(self, node, board, library):
+
+        super().__init__(node)
+        self.board = board
+        self.library = library
+        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+        self.axis = node.get_operator().attr.axis
+
+
+    def export(self, export_folder:Path, list_configs:list):
+
+        # Add to config list the include of configurations
+        list_configs.append(f"layers/{self.name}.h")
+
+        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0], dtype=np.int32), export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
+        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
+
+        if self.library == "aidge":
+            if self.dataformat == "float32":
+                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Activation" / "Softmax" / "aidge_softmax_chw_float32.c"),
+                         str(export_folder / "src" / "kernels"))
+
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "activation.jinja"),
+                name=self.name,
+                activation_type="\"SOFTMAX\"",
+                nb_inputs=np.prod(self.inputs_dims[0]),
+                nb_outputs=np.prod(self.outputs_dims[0]),
+                axis=self.axis,
+                input_dims=self.inputs_dims[0])
+
+        return list_configs
+
+    def forward(self, list_actions:list):
+
+        if not self.is_last:
+            list_actions.append(set_up_output(self.name, self.dtype))
+
+        if self.library == "aidge":
+            list_actions.append(generate_str(
+                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "activation_chw.jinja"),
+                name=self.name,
+                activation_type="softmax",
+                dataformat=self.dataformat,
+                input_name=self.inputs[0].name(),
+                output_name=self.name
+            ))
+
+        return list_actions
+
+@operator_register("BatchNorm")
+class BatchNorm2D_ARMCortexM(ExportNode):
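+    """Export node for 2D batch normalisation; scale, bias, mean and variance come from the Producer inputs."""
+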
+    def __init__(self, node, board, library):
+
+        super().__init__(node)
+        self.board = board
+        self.library = library
+        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+        self.epsilon = node.get_operator().attr.epsilon
+
+
+        self.producers = []
+
+        for i in range(len(node.inputs())):
+            if node.input(i)[0].type() == "Producer":
+                producer = node.input(i)[0]
+                self.producers.append(Producer_ARMCortexM(producer))
+
+    def export(self, export_folder:Path, list_configs:list):
+        for producer in self.producers:
+            producer.export(export_folder / "parameters" / f"{producer.name}.h")
+            list_configs.append(f"parameters/{producer.name}.h")
+
+        list_configs.append(f"layers/{self.name}.h")
+
+        if self.library == "aidge":
+            if self.dataformat == "float32":
+                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "BatchNorm" / "aidge_batchnorm2d_chw_float32.c"),
+                         str(export_folder / "src" / "kernels"))
+
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "batchnorm2d.jinja"),
+                name=self.name,
+                epsilon=self.epsilon,
+                input_dims=self.inputs_dims[0])
+
+        return list_configs
+
+    def forward(self, list_actions:list):
+        if not self.is_last:
+            list_actions.append(set_up_output(self.name, self.dtype))
+
+        if self.library == "aidge":
+
+            list_actions.append(generate_str(
+                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "batchnorm2d.jinja"),
+                name=self.name,
+                dataformat=self.dataformat,
+                input_name=self.inputs[0].name(),
+                running_mean_name=self.inputs[3].name(),
+                running_var_name=self.inputs[4].name(),
+                weight_name=self.inputs[1].name(),
+                bias_name=self.inputs[2].name(),
+                output_name=self.name
+            ))
+        return list_actions
+
+@operator_register("Sigmoid")
+class Sigmoid_ARMCortexM(ExportNode):
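+    """Export node for the Sigmoid activation."""
+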
+    def __init__(self, node, board, library):
+        super().__init__(node)
+
+        self.board = board
+        self.library = library
+        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+
+
+    def export(self, export_folder:Path, list_configs:list):
+
+        list_configs.append(f"layers/{self.name}.h")
+
+        if self.library == "aidge":
+            if self.dataformat == "float32":
+                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Activation" / "Sigmoid" / "aidge_sigmoid_float32.c"),
+                         str(export_folder / "src" / "kernels"))
+
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "activation.jinja"),
+                name=self.name,
+                activation_type="\"SIGMOID\"",
+                nb_inputs=np.prod(self.inputs_dims[0]),
+                nb_outputs=np.prod(self.outputs_dims[0]))
+
+        return list_configs
+
+    def forward(self, list_actions:list):
+
+        if not self.is_last:
+            list_actions.append(set_up_output(self.name, self.dtype))
+
+        if self.library == "aidge":
+            list_actions.append(generate_str(
+                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "activation.jinja"),
+                name=self.name,
+                activation_type="sigmoid",
+                dataformat=self.dataformat,
+                input_name=self.inputs[0].name(),
+                output_name=self.name
+            ))
+
+        return list_actions
+
+@operator_register("Reshape")
+class Reshape_ARMCortexM(ExportNode):
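+    """Export node for the Reshape operator."""
+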
+    def __init__(self, node, board, library):
+        super().__init__(node)
+
+        self.board = board
+        self.library = library
+        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+
+
+    def export(self, export_folder:Path, list_configs:list):
+
+        list_configs.append(f"layers/{self.name}.h")
+
+        if self.library == "aidge":
+            if self.dataformat == "float32":
+                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Reshape" / "aidge_reshape_chw_float32.c"),
+                         str(export_folder / "src" / "kernels"))
+
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "reshape.jinja"),
+                name=self.name,
+                nb_inputs=np.prod(self.inputs_dims[0]),
+                nb_outputs=np.prod(self.outputs_dims[0]))
+
+        return list_configs
+
+    def forward(self, list_actions:list):
+
+        if not self.is_last:
+            list_actions.append(set_up_output(self.name, self.dtype))
+
+        if self.library == "aidge":
+            list_actions.append(generate_str(
+                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "reshape.jinja"),
+                name=self.name,
+                dataformat=self.dataformat,
+                input_name=self.inputs[0].name(),
+                output_name=self.name,
+            ))
+
+        return list_actions
+
+@operator_register("MatMul")
+class Matmul_ARMCortexM(ExportNode):
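+    """Export node for the MatMul operator."""
+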
+    def __init__(self, node, board, library):
+        super().__init__(node)
+        self.producers = []
+
+        for i in range(len(node.inputs())):
+            if node.input(i)[0].type() == "Producer" and node.input(i)[0].get_operator().attr.constant:
+                producer = node.input(i)[0]
+                self.producers.append(Producer_ARMCortexM(producer))
+
+        self.board = board
+        self.library = library
+        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+
+
+    def export(self, export_folder:Path, list_configs:list):
+        for producer in self.producers:
+            producer.export(export_folder / "parameters" / f"{producer.name}.h")
+            list_configs.append(f"parameters/{producer.name}.h")
+
+        list_configs.append(f"layers/{self.name}.h")
+
+        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0], dtype=np.int32), export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
+        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
+
+        export_params(f"{self.inputs[1].name()}_DIMS", np.array(self.inputs_dims[1], dtype=np.int32), export_folder / "dimensions" / f"{self.inputs[1].name()}_DIMS.h")
+        list_configs.append(f"dimensions/{self.inputs[1].name()}_DIMS.h")
+
+        export_params(f"{self.name}_DIMS", np.array(self.outputs_dims[0], dtype=np.int32), export_folder / "dimensions" / f"{self.name}_DIMS.h")
+        list_configs.append(f"dimensions/{self.name}_DIMS.h")
+
+        if self.library == "aidge":
+            if self.dataformat == "float32":
+                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "MatMul" / "aidge_matmul_chw_float32.c"),
+                         str(export_folder / "src" / "kernels"))
+                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
+                         str(Path(export_folder) / "include"))
+
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "matmul.jinja"),
+                name=self.name,
+                input_dims=self.inputs_dims,
+                output_dims=self.outputs_dims)
+
+        return list_configs
+
+    def forward(self, list_actions:list):
+
+        if not self.is_last:
+            list_actions.append(set_up_output(self.name, self.dtype))
+
+        if self.library == "aidge":
+            list_actions.append(generate_str(
+                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "matmul.jinja"),
+                name=self.name,
+                dataformat=self.dataformat,
+                inputa_name=self.inputs[0].name(),
+                inputb_name=self.inputs[1].name(),
+                output_name=self.name))
+
+        return list_actions
+
+@operator_register("Gather")
+class Gather_ARMCortexM(ExportNode):
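+    """Export node for the Gather operator; indices and axis are read from the node attributes."""
+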
+    def __init__(self, node, board, library):
+        super().__init__(node)
+
+        self.board = board
+        self.library = library
+        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+        self.indices = node.get_operator().attr.indices
+        self.axis = node.get_operator().attr.axis
+
+    def export(self, export_folder:Path, list_configs:list):
+
+        list_configs.append(f"layers/{self.name}.h")
+
+        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0], dtype=np.int32), export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
+        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
+
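+        # Export the gather indices as an int32 C array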
+        export_params(f"{self.name}_INDEXES", np.array(self.indices, dtype=np.int32), export_folder / "dimensions" / f"{self.name}_INDEXES.h")
+        list_configs.append(f"dimensions/{self.name}_INDEXES.h")
+
+
+        if self.library == "aidge":
+            if self.dataformat == "float32":
+                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Transform" / "Gather" / "aidge_gather_chw_float32.c"),
+                         str(export_folder / "src" / "kernels"))
+
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "gather.jinja"),
+                name=self.name,
+                axis=self.axis,
+                indices=self.indices,
+                input_dims=self.inputs_dims[0],
+                nb_outputs=np.prod(self.outputs_dims[0])
+            )
+
+        return list_configs
+
+    def forward(self, list_actions:list):
+
+        if not self.is_last:
+            list_actions.append(set_up_output(self.name, self.dtype))
+
+        if self.library == "aidge":
+            list_actions.append(generate_str(
+                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "gather.jinja"),
+                name=self.name,
+                dataformat=self.dataformat,
+                input_name=self.inputs[0].name(),
+                output_name=self.name
+            ))
+
+        return list_actions
+
+@operator_register("Transpose")
+class Transpose_ARMCortexM(ExportNode):
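+    """Export node for the Transpose operator; the permutation is read from the output_dims_order attribute."""
+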
+    def __init__(self, node, board, library):
+        super().__init__(node)
+
+        self.board = board
+        self.library = library
+        self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
+        self.dtype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+        self.perm = node.get_operator().attr.output_dims_order
+
+    def export(self, export_folder:Path, list_configs:list):
+
+        list_configs.append(f"layers/{self.name}.h")
+
+        export_params(f"{self.inputs[0].name()}_DIMS", np.array(self.inputs_dims[0], dtype=np.int32), export_folder / "dimensions" / f"{self.inputs[0].name()}_DIMS.h")
+        list_configs.append(f"dimensions/{self.inputs[0].name()}_DIMS.h")
+
+        export_params(f"{self.name}_PERMUTATIONS", np.array(self.perm, dtype=np.int32), export_folder / "dimensions" / f"{self.name}_PERMUTATIONS.h")
+        list_configs.append(f"dimensions/{self.name}_PERMUTATIONS.h")
+
+        export_params(f"{self.name}_DIMS", np.array(self.outputs_dims[0], dtype=np.int32), export_folder / "dimensions" / f"{self.name}_DIMS.h")
+        list_configs.append(f"dimensions/{self.name}_DIMS.h")
+
+        if self.library == "aidge":
+            if self.dataformat == "float32":
+                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Transform" / "Transpose" / "aidge_transpose_chw_float32.c"),
+                         str(export_folder / "src" / "kernels"))
+
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "transpose.jinja"),
+                name=self.name,
+                perm=self.perm,
+                input_dims=self.inputs_dims[0],
+                output_dims=self.outputs_dims[0],
+                nb_outputs=np.prod(self.outputs_dims[0])
+            )
+
+        return list_configs
+
+    def forward(self, list_actions:list):
+
+        if not self.is_last:
+            list_actions.append(set_up_output(self.name, self.dtype))
+
+        if self.library == "aidge":
+            list_actions.append(generate_str(
+                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "transpose.jinja"),
+                name=self.name,
+                dataformat=self.dataformat,
+                input_name=self.inputs[0].name(),
+                output_name=self.name
+                ))
+
+        return list_actions
diff --git a/aidge_export_arm_cortexm/operators_old.py b/aidge_export_arm_cortexm/operators_old.py
index 04a090b65c56a0a503618c80480e51ac4fbd6550..3440b248b1b719f4e5573d5c4dcc9df0b450122c 100644
--- a/aidge_export_arm_cortexm/operators_old.py
+++ b/aidge_export_arm_cortexm/operators_old.py
@@ -403,9 +403,9 @@ class Slice(ExportNode):
 
     def __init__(self, node, board, dataformat, library):
 
-        self.axes = node.get_operator().get_attr("axes")
-        self.starts = node.get_operator().get_attr("starts")
-        self.ends = node.get_operator().get_attr("ends")
+        self.axes = node.get_operator().attr.axes
+        self.starts = node.get_operator().attr.starts
+        self.ends = node.get_operator().attr.ends
 
         # Compute output dims
         out_dims = [self.ends[x-1] - self.starts[x-1] for x in self.axes]
@@ -460,7 +460,7 @@ class Concat(ExportNode):
 
     def __init__(self, node, board, dataformat, library):
 
-        self.axis = node.get_operator().get_attr("axis")
+        self.axis = node.get_operator().attr.axis
         out_dims = node.get_operator().get_input(0).dims()
 
         out_dims[self.axis - 1] = 0
diff --git a/aidge_export_arm_cortexm/templates/network/network_forward.jinja b/aidge_export_arm_cortexm/templates/network/network_forward.jinja
index b00e42f813066505cf03dd9f324a4ac418e45818..bde5553020d1a36f225a1402172715a7446c4496 100644
--- a/aidge_export_arm_cortexm/templates/network/network_forward.jinja
+++ b/aidge_export_arm_cortexm/templates/network/network_forward.jinja
@@ -12,7 +12,11 @@
 
 {# mem has the datatype of the firt input #}
 {#- Change here to improve it -#}
+{% if inputs[0][0] %}
 static {{inputs[0][0]}} mem[MEMORY_SIZE];
+{% else %}
+static float mem[MEMORY_SIZE];
+{% endif %}
 
 {# Forward function #}
 {#- Support multiple inputs with different datatypes and multiple outputs with different datatypes -#}
diff --git a/aidge_export_arm_cortexm/utils/converter.py b/aidge_export_arm_cortexm/utils/converter.py
index 08b8599fd5c8f77e9a862401a87b7251773d9ba1..3bc2f392b9f48b96972f3ff744bbba3bf945ca13 100644
--- a/aidge_export_arm_cortexm/utils/converter.py
+++ b/aidge_export_arm_cortexm/utils/converter.py
@@ -22,34 +22,34 @@ def numpy_dtype2ctype(dtype):
 
 
 def aidge_datatype2ctype(datatype):
-    if datatype == aidge_core.DataType.Int8:
+    if datatype == aidge_core.dtype.int8:
         return "int8_t"
-    elif datatype == aidge_core.DataType.UInt8:
+    elif datatype == aidge_core.dtype.uint8:
         return "uint8_t"
-    elif datatype == aidge_core.DataType.Int32:
+    elif datatype == aidge_core.dtype.int32:
         return "int32_t"
-    elif datatype == aidge_core.DataType.Int64:
+    elif datatype == aidge_core.dtype.int64:
         return "int64_t"
-    elif datatype == aidge_core.DataType.Float32:
+    elif datatype == aidge_core.dtype.float32:
         return "float"
-    elif datatype == aidge_core.DataType.Float64:
+    elif datatype == aidge_core.dtype.float64:
         return "double"
     # Add more dtype mappings as needed
     else:
-        raise ValueError(f"Unsupported {datatype} aidge datatype")
+        raise ValueError(f"Unsupported {datatype} aidge dtype")
 
 
 def aidge_datatype2dataformat(datatype):
-    if datatype == aidge_core.DataType.Int8:
+    if datatype == aidge_core.dtype.int8:
         return "int8"
-    elif datatype == aidge_core.DataType.Int32:
+    elif datatype == aidge_core.dtype.int32:
         return "int32"
-    elif datatype == aidge_core.DataType.Int64:
+    elif datatype == aidge_core.dtype.int64:
         return "int64"
-    elif datatype == aidge_core.DataType.Float32:
+    elif datatype == aidge_core.dtype.float32:
         return "float32"
-    elif datatype == aidge_core.DataType.Float64:
+    elif datatype == aidge_core.dtype.float64:
         return "float64"
     # Add more dtype mappings as needed
     else:
-        raise ValueError(f"Unsupported {datatype} aidge datatype")
+        raise ValueError(f"Unsupported {datatype} aidge dtype")
diff --git a/aidge_export_arm_cortexm/utils/generation.py b/aidge_export_arm_cortexm/utils/generation.py
index 793e8d11cf17b11b23f807a2001d1241220eacc3..b80ffbf7b1ac8cdb88aebbd9bb24037e6a4d9b92 100644
--- a/aidge_export_arm_cortexm/utils/generation.py
+++ b/aidge_export_arm_cortexm/utils/generation.py
@@ -3,10 +3,11 @@ import os
 import shutil
 from jinja2 import Environment, FileSystemLoader
 
-
 def get_functions_from_c_file(file_path):
     functions = []
     pattern = r'\w+\s+(\w+)\s*\(([^)]*)\)\s*{'
+    keyword = ['else', 'for', 'if', 'while', 'do']
+
 
     with open(file_path, 'r') as file:
         file_content = file.read()
@@ -14,6 +15,8 @@ def get_functions_from_c_file(file_path):
     matches = re.findall(pattern, file_content)
     for match in matches:
         function_name = match[0]
+        if function_name in keyword:
+            continue
         arguments = match[1].split(',')
         arguments = [arg.strip() for arg in arguments]