diff --git a/aidge_export_arm_cortexm/operators.py b/aidge_export_arm_cortexm/operators.py
index d1f170179b663e85646905ea29f94083a0b87a9b..dbcfc096dceae1144564579712791c6e4223d239 100644
--- a/aidge_export_arm_cortexm/operators.py
+++ b/aidge_export_arm_cortexm/operators.py
@@ -1,9 +1,12 @@
 import os
+import math
 import shutil
 import numpy as np
 from pathlib import Path
 from jinja2 import Environment, FileSystemLoader
+from typing import Tuple, List, Union, Dict
 
+import aidge_core
 from aidge_core import ExportNode
 from aidge_core.export.code_generation import *
 from aidge_export_arm_cortexm.utils import ROOT, operator_register
@@ -58,7 +61,7 @@ def export_params(name:str,
 ##############################################
 
 def set_up_output(name, datatype):
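+    # e.g. (illustrative) set_up_output("conv1", "float") renders:
+    #   float* conv1 = (float*) mem + CONV1_MEM_CONT_OFFSET;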
-    return f"{datatype}* {name} = ({datatype}*) mem + {name.upper()}_OFFSET;"
+    return f"{datatype}* {name} = ({datatype}*) mem + {name.upper()}_MEM_CONT_OFFSET;"
 
 
 ##############################################
@@ -66,36 +69,137 @@ def set_up_output(name, datatype):
 ##############################################
 
 
-@operator_register("Producer")
-class Producer_ARMCortexM(ExportNode):
-
-    def __init__(self, node, board, library):
-        super().__init__(node)
+class Producer_ARMCortexM:
+    def __init__(self, node):
+        self.name = node.name()
+        self.operator = node.get_operator()
         self.constant = self.operator.get_attr("Constant")
         self.values = np.array(self.operator.get_output(0))
-        self.board = board
-        self.library = library
-
-        if len(self.values.shape) == 4:
-            self.values = np.transpose(self.values, (0, 2, 3, 1))
 
-    def export(self, export_folder:Path, list_configs:list):
-
-        # If not constant, it is a dataprovider 
-        # and not a parameter provider
-        if (self.constant):
-            list_configs.append(f"parameters/{self.name}.h")
-
-            # Export in HWC
-            export_params(self.name,
-                          self.values.reshape(-1),
-                          str(export_folder / "parameters" / f"{self.name}.h"))
-
-        return list_configs
+    def export(self, export_file:Path, format:str = "NHWC"):
+
+        if len(self.values.shape) == 4:
+            # Assume the original data format is NCHW
+            if format == "NCHW":
+                export_params(self.name, 
+                              self.values.reshape(-1), 
+                              str(export_file))
+            elif format == "NHWC":
+                export_params(self.name, 
+                              np.transpose(self.values, (0, 2, 3, 1)).reshape(-1), 
+                              str(export_file))
+            else:
+                raise RuntimeError(f"Unsupported export format: {format}.")
+        else:
+            export_params(self.name, 
+                          self.values.reshape(-1), 
+                          str(export_file))
+            
+
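+# Illustrative usage of Producer_ARMCortexM (with a hypothetical aidge `node`):
+#   producer = Producer_ARMCortexM(node)
+#   producer.export(Path("export/parameters") / f"{producer.name}.h", format="NHWC")
+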
+class Scaling:
+    class ScalingMode:
+        FLOAT_MULT32 = 0
+        FIXED_MULT16 = 1
+        FIXED_MULT32 = 2
+        SINGLE_SHIFT = 3
+        DOUBLE_SHIFT = 4
+
+    def __init__(self, scaling_factor=0.0, nb_bits=8) -> None:
+        self.scaling_factor = scaling_factor
+        self.nb_bits = nb_bits
+
+    def approximate_fixed_point_scaling(self, mode: int, scaling: float) -> Tuple[int, int]:
+        """Calculate fixed point factor from floating point factor"""
+
+        limit = (2**15 - 1) if mode == Scaling.ScalingMode.FIXED_MULT16 else (2**31 - 1)
+
+        if scaling >= limit:
+            if mode == Scaling.ScalingMode.FIXED_MULT16:
+                print(f"Scaling ({scaling}) doesn't fit in FIXED_MULT16. Falling back to FIXED_MULT32.")
+                mode = Scaling.ScalingMode.FIXED_MULT32
+                return self.approximate_fixed_point_scaling(mode, scaling)
+            else:
+                raise RuntimeError(f"Scaling ({scaling}) doesn't fit in FIXED_MULT32.")
+
+        max_nb_fractional_bits = 50
+        nb_fractional_bits = min(math.floor(math.log(limit / scaling) / math.log(2.0)), max_nb_fractional_bits)
+
+        scaling_fixed_point = round(scaling * (1 << nb_fractional_bits))
+        return nb_fractional_bits, scaling_fixed_point
+
+    def approximate_shift_scaling(self, scaling: float, nb_divisions: int) -> Tuple[List[int], float]:
+        """Calculate single shift factor from floating point factor"""
+
+        ROUNDING_THRESHOLD = 0.98
+
+        assert nb_divisions > 0
+        assert scaling <= 1.0
+
+        precision = 0.0
+        power_of_2_divs = [0] * nb_divisions
+
+        for i_div in range(nb_divisions):
+            if precision == 1.0:
+                power_of_2_divs[i_div - 1] += 1
+                power_of_2_divs[i_div] = power_of_2_divs[i_div - 1]
+            else:
+                exponent = math.ceil(math.log2(1.0 / (scaling * (1.0 - precision))))
+                precision += 1.0 / (scaling * 2 ** exponent)
+                power_of_2_divs[i_div] = exponent
+
+        assert precision <= 1.0
+
+        if precision >= ROUNDING_THRESHOLD:
+            precision = 1.0
+        elif precision < 1.0:
+            precision += 1.0 / (scaling * 2 ** power_of_2_divs[-1])
+            power_of_2_divs[-1] -= 1
+
+        assert precision >= 1.0
+
+        return power_of_2_divs, precision
+    
 
-    def forward(self, list_actions:list):
-        # A Producer does nothing during forward
-        return list_actions
+    def __call__(self, mode:str) -> dict:
+        """Get dictionnary of scale values in function of the mode
+        Possible modes:
+        - no_scaling
+        - floating_point
+        - fixed_point (16 or 32 bits)
+        - single_shift
+        - double_shift
+        
+        """
+        
+        if mode == "floating_point":
+            self.scaling = {"scaling_type": "floating_point",
+                            "scaling_value": self.scaling_factor}
+        elif mode == "fixed_point":
+            if self.nb_bits == 16:
+                nb_fractional_bits, scaling_fixed_point = self.approximate_fixed_point_scaling(Scaling.ScalingMode.FIXED_MULT16, self.scaling_factor)
+            else:
+                nb_fractional_bits, scaling_fixed_point = self.approximate_fixed_point_scaling(Scaling.ScalingMode.FIXED_MULT32, self.scaling_factor)
+
+            self.scaling = {"scaling_type": "fixed_point",
+                            "scaling_value": scaling_fixed_point,
+                            "fractional_bits": nb_fractional_bits}
+
+        elif mode == "single_shift":
+            shift_value, _ = self.approximate_shift_scaling(self.scaling_factor, 1)
+
+            self.scaling = {"scaling_type": "single_shift",
+                            "shift_value": shift_value[0]}
+            
+        elif mode == "double_shift":
+            shift_value, _ = self.approximate_shift_scaling(self.scaling_factor, 2)
+
+            self.scaling = {"scaling_type": "double_shift",
+                            "shift_value_0": shift_value[0],
+                            "shift_value_1": shift_value[1]}
+        else:
+            self.scaling = {"scaling_type": "no_scaling"}
+
+        return self.scaling
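+
+    # Illustrative usage: an 8-bit scaling factor of 1/128 maps to a single
+    # right shift of 7 (2**-7 == 1/128):
+    #   Scaling(scaling_factor=0.0078125, nb_bits=8)("single_shift")
+    #   # -> {"scaling_type": "single_shift", "shift_value": 7}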
     
 
 @operator_register("ReLU")
@@ -110,19 +214,20 @@ class ReLU_ARMCortexM(ExportNode):
 
     def export(self, export_folder:Path, list_configs:list):
 
+        list_configs.append(f"layers/{self.name}.h")
+
         if self.library == "aidge":
             if self.dataformat == "float32":
                 copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Activation" / "Relu" / "aidge_relu_float32.c"),
                          str(export_folder / "src" / "kernels"))
 
-        list_configs.append(f"layers/{self.name}.h")
-        generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
-            str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "activation.jinja"),
-            name=self.name,
-            activation_type="\"RELU\"",
-            nb_inputs=np.prod(self.inputs_dims[0]),
-            nb_outputs=np.prod(self.outputs_dims[0]))
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "activation.jinja"),
+                name=self.name,
+                activation_type="\"RELU\"",
+                nb_inputs=np.prod(self.inputs_dims[0]),
+                nb_outputs=np.prod(self.outputs_dims[0]))
 
         return list_configs
 
@@ -131,14 +236,16 @@ class ReLU_ARMCortexM(ExportNode):
         if not self.is_last:
             list_actions.append(set_up_output(self.name, self.datatype))
 
-        list_actions.append(generate_str(
-            str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "activation.jinja"),
-            name=self.name,
-            activation_type="relu",
-            dataformat=self.dataformat,
-            input_name=self.inputs[0].name(),
-            output_name=self.name
-        ))
+        if self.library == "aidge":
+            list_actions.append(generate_str(
+                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "activation.jinja"),
+                name=self.name,
+                activation_type="relu",
+                dataformat=self.dataformat,
+                input_name=self.inputs[0].name(),
+                output_name=self.name
+            ))
+
         return list_actions
 
 
@@ -147,10 +254,18 @@ class Conv_ARMCortexM(ExportNode):
     def __init__(self, node, board, library):
         super().__init__(node)
 
+        self.producers = []
+        # Exclude first input which is a real input
+        for i in range(1, len(node.inputs())):
+            producer = node.input(i)[0]
+            self.producers.append(Producer_ARMCortexM(producer))
+
         self.board = board
         self.library = library
         self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
         self.datatype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+        self.scaling = Scaling()("no_scaling")
+        self.activation = "Linear"
 
         self.kernel = node.get_operator().get_attr("KernelDims")
         self.stride = node.get_operator().get_attr("StrideDims")
@@ -176,6 +291,21 @@ class Conv_ARMCortexM(ExportNode):
 
     def export(self, export_folder:Path, list_configs:list):
 
+        # Export weights to NHWC format
+        self.producers[0].export(export_folder / "parameters" / f"{self.producers[0].name}.h")
+        list_configs.append(f"parameters/{self.producers[0].name}.h")
+
+        # Export biases
+        if (len(self.producers) > 1):
+            # Convert the biases to int32
+            if self.dataformat != "float32":
+                self.producers[1].values = self.producers[1].values.astype(np.int32)
+            
+            self.producers[1].export(export_folder / "parameters" / f"{self.producers[1].name}.h")
+            list_configs.append(f"parameters/{self.producers[1].name}.h")
+
+        list_configs.append(f"layers/{self.name}.h")
+
         if self.library == "aidge":
             if self.dataformat == "float32":
                 copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "aidge_conv2d_hwc_float32.c"),
@@ -183,34 +313,62 @@ class Conv_ARMCortexM(ExportNode):
                 copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
                          str(Path(export_folder) / "include"))
 
-        list_configs.append(f"layers/{self.name}.h")
-        generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
-            str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "convolution.jinja"),
-            name=self.name,
-            input_dims=self.inputs_dims[0],
-            output_dims=self.outputs_dims[0],
-            kernel=self.kernel,
-            stride=self.stride,
-            padding=self.padding,
-            dilation=self.dilation)
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "convolution.jinja"),
+                name=self.name,
+                input_dims=self.inputs_dims[0],
+                output_dims=self.outputs_dims[0],
+                kernel=self.kernel,
+                stride=self.stride,
+                padding=self.padding,
+                dilation=self.dilation)
+            
+        elif self.library == "n2d2":
+            # Export configuration file
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_N2D2" / "templates" / "configuration" / "conv_config.jinja"),
+                name=self.name,
+                input_dims=self.inputs_dims[0],
+                output_dims=self.outputs_dims[0],
+                kernel=self.kernel,
+                stride=self.stride,
+                padding=self.padding,
+                dilation=self.dilation,
+                activation=self.activation,
+                **self.scaling)
 
         return list_configs
 
+
     def forward(self, list_actions:list):
 
         if not self.is_last:
             list_actions.append(set_up_output(self.name, self.datatype))
 
-        list_actions.append(generate_str(
-            str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "convolution.jinja"),
-            name=self.name,
-            dataformat=self.dataformat,
-            input_name=self.inputs[0].name(),
-            output_name=self.name,
-            weight_name=self.inputs[1].name(),
-            bias_name=self.inputs[2].name()
-        ))
+        if self.library == "aidge":
+            list_actions.append(generate_str(
+                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "convolution.jinja"),
+                name=self.name,
+                dataformat=self.dataformat,
+                input_name=self.inputs[0].name(),
+                output_name=self.name,
+                weight_name=self.inputs[1].name(),
+                bias_name=self.inputs[2].name()
+            ))
+
+        elif self.library == "n2d2":
+            list_actions.append(generate_str(
+                str(ROOT / "_N2D2" / "templates" / "kernel" / "conv_kernel.jinja"),
+                name=self.name,
+                parent_name=self.inputs[0].name(),
+                inputs_name=self.inputs[0].name(),
+                weights_name=self.inputs[1].name(),
+                biases_name=self.inputs[2].name(),
+                outputs_name=self.name
+            ))
+            
         return list_actions
    
 
@@ -219,11 +377,18 @@ class PaddedConv_ARMCortexM(Conv_ARMCortexM):
     def __init__(self, node, board, library):
         ExportNode.__init__(self, node)
 
+        self.producers = []
+        # Skip the first input (the data input); the remaining inputs are parameter producers
+        for i in range(1, len(node.inputs())):
+            producer = node.input(i)[0]
+            self.producers.append(Producer_ARMCortexM(producer))
+
         self.board = board
         self.library = library
         self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
         self.datatype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
-
+        self.scaling = Scaling()("no_scaling")
+        self.activation = "Linear"
 
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad":
@@ -241,11 +406,34 @@ class PaddedConv_ARMCortexM(Conv_ARMCortexM):
         if len(self.outputs_dims[0]) == 4:
             # if dims == [batch, nb_outputs]
             # transform to [nb_outputs, 1, 1]
             self.outputs_dims[0] = self.outputs_dims[0][1:]
 
 
-@operator_register("MaxPooling")
-class MaxPool_ARMCortexM(ExportNode):
+@operator_register("ConvReluScaling")
+class ConvReluScaling_ARMCortexM(Conv_ARMCortexM):
+    def __init__(self, node, board, library):
+        super().__init__(node, board, library)
+
+        if self.operator.has_attr("BeginEndBorders"):
+            self.padding = self.operator.get_attr("BeginEndBorders")
+
+        self.activation = "Rectifier"
+
+        # Ideally the datatype would come from the output tensor:
+        #   self.datatype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+        # but changing the datatype of a generic operator is currently buggy,
+        # so derive it from the quantization attributes instead
+        if self.operator.get_attr("quantizedNbBits") == 8:
+            if self.operator.get_attr("isOutputUnsigned"):
+                self.datatype = aidge_datatype2ctype(aidge_core.DataType.UInt8)
+            else:
+                self.datatype = aidge_datatype2ctype(aidge_core.DataType.Int8)
+
+        # Use floating-point scaling here (TODO: make the scaling mode configurable)
+        self.scaling = Scaling(self.operator.get_attr("scalingFactor"),
+                               self.operator.get_attr("quantizedNbBits"))("floating_point")
+    
+
+class Pooling_ARMCortexM(ExportNode):
     def __init__(self, node, board, library):
         super().__init__(node)
 
@@ -253,12 +441,14 @@ class MaxPool_ARMCortexM(ExportNode):
         self.library = library
         self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
         self.datatype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+        self.pool_type = "None"
+        self.activation = "Linear"
 
         self.kernel = node.get_operator().get_attr("KernelDims")
         self.stride = node.get_operator().get_attr("StrideDims")
 
-        # No padding with MaxPooling
-        # Use PaddedMaxPooling to add padding attribute
+        # No padding with MaxPooling or AvgPooling
+        # Use PaddedMaxPooling/PaddedAvgPooling to add padding attribute
         self.padding = [0, 0]
 
         if len(self.inputs_dims[0]) == 4:
@@ -274,6 +464,9 @@ class MaxPool_ARMCortexM(ExportNode):
 
     def export(self, export_folder:Path, list_configs:list):
 
+        # Add the layer configuration header to the list of includes
+        list_configs.append(f"layers/{self.name}.h")
+
         if self.library == "aidge":
             if self.dataformat == "float32":
                 copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "Pooling" / "aidge_maxpool2d_float32.c"),
@@ -281,56 +474,116 @@ class MaxPool_ARMCortexM(ExportNode):
                 copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "SupportFunctions" / "aidge_supportfunctions.h"),
                          str(Path(export_folder) / "include"))
 
-        list_configs.append(f"layers/{self.name}.h")
+            # Export configuration file 
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "pooling.jinja"),
+                name=self.name,
+                input_dims=self.inputs_dims[0],
+                output_dims=self.outputs_dims[0],
+                kernel=self.kernel,
+                stride=self.stride,
+                padding=self.padding,
+                pool_type=self.pool_type)
+
+        elif self.library == "n2d2":
+
+            # No kernel source to copy for the n2d2 library
+
+            # Export configuration file
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_N2D2" / "templates" / "configuration" / "pool_config.jinja"),
+                name=self.name,
+                input_dims=self.inputs_dims[0],
+                output_dims=self.outputs_dims[0],
+                kernel=self.kernel,
+                stride=self.stride,
+                padding=self.padding,
+                pool_type=self.pool_type,
+                activation=self.activation)
 
-        generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
-            str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "pooling.jinja"),
-            name=self.name,
-            input_dims=self.inputs_dims[0],
-            output_dims=self.outputs_dims[0],
-            kernel=self.kernel,
-            stride=self.stride,
-            padding=self.padding,
-            pool_type="Max")
 
         return list_configs
 
-    def forward(self, list_actions:list):
 
+    def forward(self, list_actions:list):
         if not self.is_last:
             list_actions.append(set_up_output(self.name, self.datatype))
 
-        list_actions.append(generate_str(
-            str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "pooling.jinja"),
-            name=self.name,
-            dataformat=self.dataformat,
-            pool_type="max",
-            input_name=self.inputs[0].name(),
-            output_name=self.name
-        ))
-        return list_actions  
+        if self.library == "aidge":
+
+            list_actions.append(generate_str(
+                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "pooling.jinja"),
+                name=self.name,
+                dataformat=self.dataformat,
+                pool_type=self.pool_type.lower(),
+                input_name=self.inputs[0].name(),
+                output_name=self.name
+            ))
+
+        elif self.library == "n2d2":
+
+            list_actions.append(generate_str(
+                str(ROOT / "_N2D2" / "templates" / "kernel" / "pool_kernel.jinja"),
+                name=self.name,
+                parent_name=self.inputs[0].name(),
+                inputs_name=self.inputs[0].name(),
+                outputs_name=self.name
+            ))
+
+        return list_actions
+
+
+@operator_register("MaxPooling")
+class MaxPooling_ARMCortexM(Pooling_ARMCortexM):
+    def __init__(self, node, board, library):
+        super().__init__(node, board, library)
+        self.pool_type = "Max"
+
+
+@operator_register("AvgPooling")
+class AvgPooling_ARMCortexM(Pooling_ARMCortexM):
+    def __init__(self, node, board, library):
+        super().__init__(node, board, library)
+        self.pool_type = "Avg"
 
 
 @operator_register("FC")
-class Fc_ARMCortexM(ExportNode):
+class FC_ARMCortexM(ExportNode):
     def __init__(self, node, board, library):
         super().__init__(node)
 
+        self.producers = []
+        # Skip the first input (the data input); the remaining inputs are parameter producers
+        for i in range(1, len(node.inputs())):
+            producer = node.input(i)[0]
+            self.producers.append(Producer_ARMCortexM(producer))
+
         self.board = board
         self.library = library
         self.dataformat = aidge_datatype2dataformat(node.get_operator().get_output(0).dtype())
         self.datatype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+        self.scaling = Scaling()("no_scaling")
+        self.activation = "Linear"
 
         if len(self.inputs_dims[0]) == 4:
             # if dims == [batch, nb_channels, height, width]
             # transform to [nb_channels, height, width]
             self.inputs_dims[0] = self.inputs_dims[0][1:]
+            
+            # The weights must also be reshaped to 4D so that the producer's
+            # NHWC export transposes them consistently with the input layout
+            weights = self.producers[0].values
+            if len(weights.shape) == 2:
+                self.producers[0].values = weights.reshape(weights.shape[0], *self.inputs_dims[0])
+
         elif len(self.inputs_dims[0]) == 2:
             # if dims == [batch, nb_channels]
             # transform to [nb_channels, 1, 1]
             self.inputs_dims[0] = [self.inputs_dims[0][1], 1, 1]
 
+
         if len(self.outputs_dims[0]) == 2:
             # if dims == [batch, nb_outputs]
             # transform to [nb_outputs, 1, 1]
@@ -339,24 +592,49 @@ class Fc_ARMCortexM(ExportNode):
 
     def export(self, export_folder:Path, list_configs:list):
 
+        # Export weights to NHWC format
+        self.producers[0].export(export_folder / "parameters" / f"{self.producers[0].name}.h")
+        list_configs.append(f"parameters/{self.producers[0].name}.h")
+
+        # Export biases
+        if (len(self.producers) > 1):
+            # Convert the biases to int32
+            if self.dataformat != "float32":
+                self.producers[1].values = self.producers[1].values.astype(np.int32)
+
+            self.producers[1].export(export_folder / "parameters" / f"{self.producers[1].name}.h")
+            list_configs.append(f"parameters/{self.producers[1].name}.h")
+
+        # Add the layer configuration header to the list of includes
+        list_configs.append(f"layers/{self.name}.h")
+
         if self.library == "aidge":
             if self.dataformat == "float32":
                 # Take this kernel for now to avoid bad transpose weights (see aidge_export_cpp)
-                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "FullyConnected" / "aidge_fc_chw_float32.c"),
+                copyfile(str(ROOT / "_Aidge_Arm" / "kernels" / "FullyConnected" / "aidge_fc_float32.c"),
                          str(export_folder / "src" / "kernels"))
 
-        # Add to config list the include of configurations
-        list_configs.append(f"layers/{self.name}.h")
-
-        # Export configuration file
-        generate_file(
-            str(export_folder / "layers" / f"{self.name}.h"),
-            str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "fullyconnected.jinja"),
-            name=self.name,
-            nb_channels=self.inputs_dims[0][0],
-            channel_height=self.inputs_dims[0][1],
-            channel_width=self.inputs_dims[0][2],
-            nb_outputs=self.outputs_dims[0][0])
+            # Export configuration file
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "fullyconnected.jinja"),
+                name=self.name,
+                nb_channels=self.inputs_dims[0][0],
+                channel_height=self.inputs_dims[0][1],
+                channel_width=self.inputs_dims[0][2],
+                nb_outputs=self.outputs_dims[0][0])
+            
+        elif self.library == "n2d2":
+
+            # Export configuration file
+            generate_file(
+                str(export_folder / "layers" / f"{self.name}.h"),
+                str(ROOT / "_N2D2" / "templates" / "configuration" / "fc_config.jinja"),
+                name=self.name,
+                input_dims=self.inputs_dims[0],
+                output_dims=self.outputs_dims[0],
+                activation=self.activation,
+                **self.scaling)
         
         return list_configs
 
@@ -364,13 +642,55 @@ class Fc_ARMCortexM(ExportNode):
         if not self.is_last:
             list_actions.append(set_up_output(self.name, self.datatype))
 
-        list_actions.append(generate_str(
-            str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "fc_chw.jinja"),
+        if self.library == "aidge":
+            list_actions.append(generate_str(
+                str(ROOT / "_Aidge_Arm" / "templates" / "kernel" / "fullyconnected.jinja"),
+                name=self.name,
+                dataformat=self.dataformat,
+                input_name=self.inputs[0].name(),
+                weight_name=self.inputs[1].name(),
+                bias_name=self.inputs[2].name(),
+                output_name=self.name
+            ))
+
+        elif self.library == "n2d2":
+            list_actions.append(generate_str(
+            str(ROOT / "_N2D2" / "templates" / "kernel" / "fc_kernel.jinja"),
             name=self.name,
-            dataformat=self.dataformat,
-            input_name=self.inputs[0].name(),
-            weight_name=self.inputs[1].name(),
-            bias_name=self.inputs[2].name(),
-            output_name=self.name
+            parent_name=self.inputs[0].name(),
+            inputs_name=self.inputs[0].name(),
+            weights_name=self.inputs[1].name(),
+            biases_name=self.inputs[2].name(),
+            outputs_name=self.name
         ))
+
+
         return list_actions
+    
+
+@operator_register("FcScaling")
+class FCScaling_ARMCortexM(FC_ARMCortexM):
+
+    def __init__(self, node, board, library):
+        super().__init__(node, board, library)
+
+        # Ideally the datatype would come from the output tensor:
+        #   self.datatype = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
+        # but changing the datatype of a generic operator is currently buggy,
+        # so derive it from the quantization attributes instead
+        if self.operator.get_attr("quantizedNbBits") == 8:
+            if self.operator.get_attr("isOutputUnsigned"):
+                self.datatype = aidge_datatype2ctype(aidge_core.DataType.UInt8)
+            else:
+                self.datatype = aidge_datatype2ctype(aidge_core.DataType.Int8)
+
+        # Use floating-point scaling here (TODO: make the scaling mode configurable)
+        self.scaling = Scaling(self.operator.get_attr("scalingFactor"),
+                               self.operator.get_attr("quantizedNbBits"))("floating_point")
+
+
+@operator_register("FcReluScaling")
+class FCReluScaling_ARMCortexM(FCScaling_ARMCortexM):
+    def __init__(self, node, board, library):
+        super().__init__(node, board, library)
+
+        self.activation = "Rectifier"