diff --git a/aidge_export_arm_cortexm/operators.py b/aidge_export_arm_cortexm/operators.py
index 64eb57c5cb6d0780e33d3986b481384b484d4b2e..a5fd1b2ab7245dbaca27213deae91aa19a3351ef 100644
--- a/aidge_export_arm_cortexm/operators.py
+++ b/aidge_export_arm_cortexm/operators.py
@@ -5,12 +5,15 @@ from pathlib import Path
 from typing import Tuple, List
 
 import aidge_core
+import aidge_backend_cpu  # side-effect import: registers the CPU backend implementations
 from aidge_core.export_utils import ExportNode, ExportNodeCpp
 from aidge_core.export_utils.code_generation import *
 from aidge_export_arm_cortexm.utils import ROOT
 from aidge_export_arm_cortexm.utils.converter import numpy_dtype2ctype
 from aidge_export_arm_cortexm.utils.generation import *
 from aidge_export_arm_cortexm.export_registry import ExportLibAidgeARM
+from aidge_export_arm_cortexm.data_conversion import datatype_converter_aidge2arm
 
 ##############################################
 ############## Export functions ##############
@@ -18,6 +21,7 @@ from aidge_export_arm_cortexm.export_registry import ExportLibAidgeARM
 
 def export_params(name:str,
                   array: np.ndarray,
+                  type_str: str,
                   filepath:str):
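+    """Write ``array`` to ``filepath`` as a C array named ``name``.
+
+    ``type_str`` is the C data type used for the values; it is forwarded to
+    the template as ``data_t``.
+    """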
 
     # Get directory name of the file
@@ -31,21 +35,66 @@ def export_params(name:str,
         filepath,
         str(ROOT / "templates" / "data" / "parameters.jinja"),
         name = name,
-        data_t = numpy_dtype2ctype(array.dtype),
-        values = array.tolist()
+        data_t = type_str,
+        values = array.tolist(),
     )
 
+def export_params_from_tensor(name:str,
+                  tensor: aidge_core.Tensor,
+                  type_str: str,
+                  filepath:str):
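+    """Write ``tensor`` to ``filepath`` as a flat C array named ``name``.
+
+    Same as :func:`export_params`, but takes an ``aidge_core.Tensor``
+    directly and flattens it before export.
+    """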
+
+    # Get directory name of the file
+    dirname = os.path.dirname(filepath)
+
+    # If directory doesn't exist, create it
+    if not os.path.exists(dirname):
+        os.makedirs(dirname)
+
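+    # Flatten to 1-D so the template emits a single flat C array.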
+    array = np.array(tensor).reshape(-1)
+
+    generate_file(
+        filepath,
+        str(ROOT / "templates" / "data" / "parameters.jinja"),
+        name = name,
+        data_t = type_str,
+        values = array.tolist(),
+    )
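+
+# Usage sketch (hypothetical names):
+#   export_params_from_tensor("conv1_weights", weight_tensor, "int8_t",
+#                             "export/include/parameters/conv1_weights.hpp")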
 
 ##############################################
 ################### Actions ##################
 ##############################################
 
+@ExportLibAidgeARM.register("Producer", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.dual_int4)))
+class ProducerDualInt4_ARMCortexM(ExportNode):
+
+    def __init__(self, node, mem_info, conversion_map = datatype_converter_aidge2arm):
+        super().__init__(node, mem_info, conversion_map)
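+
+        # dual_int4 Producer: the output tensor packs two 4-bit values per
+        # byte, so it is exported as-is, flattened to a 1-D array.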
+        weights = self.operator.get_output(0)
+        self.values = np.array(weights).reshape(-1)
+
+    def export(self, export_folder: Path):
+        header_path = f"include/parameters/{self.attributes['name']}.hpp"
+        export_params(
+            name = self.attributes['out_name'][0],
+            array = self.values,
+            type_str = self.attributes["out_cdtype"][0],
+            filepath = str(export_folder / header_path))
+        return [header_path]
+
+    def forward(self):
+        # A Producer does nothing during forward
+        return []
+
 
 @ExportLibAidgeARM.register("Producer", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
 class Producer_ARMCortexM(ExportNode):
 
-    def __init__(self, node, mem_info):
-        super().__init__(node, mem_info)
+    def __init__(self, node, mem_info, conversion_map = datatype_converter_aidge2arm):
+        super().__init__(node, mem_info, conversion_map)
         self.values = np.array(self.operator.get_output(0))
         if len(self.values.shape) == 4:  # Note: export in HWC
             self.values = np.transpose(self.values, (0, 2, 3, 1))
@@ -69,9 +118,10 @@ class Producer_ARMCortexM(ExportNode):
     def export(self, export_folder: Path):
         header_path = f"include/parameters/{self.attributes['name']}.hpp"
         export_params(
-            self.attributes['out_name'][0],
-            self.values.reshape(-1),
-            str(export_folder / header_path))
+            name = self.attributes['out_name'][0],
+            array = self.values.reshape(-1),
+            type_str = self.attributes["out_cdtype"][0],
+            filepath = str(export_folder / header_path))
         return [header_path]
 
     def forward(self):
@@ -224,6 +274,207 @@ class Conv_ARMCortexM(ExportNodeCpp):
             str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "Conv.hpp")
         ]
 
+@ExportLibAidgeARM.register_generic("ArmPadConv2D", aidge_core.ImplSpec([
+                                                                aidge_core.IOSpec(aidge_core.dtype.any),            # Input[0] : Input Spec
+                                                                aidge_core.IOSpec(aidge_core.dtype.dual_int4),      # Input[1] : Weight Spec
+                                                                aidge_core.IOSpec(aidge_core.dtype.int32)           # Input[2] : Bias Spec
+                                                            ],
+                                                            [
+                                                                aidge_core.IOSpec(aidge_core.dtype.any)       # Output[0] : Output spec
+                                                            ]))
+class PadConvScaling_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, conversion_map = datatype_converter_aidge2arm):
+        super().__init__(node, mem_info, conversion_map)
+
+        self.attributes["activation"] = "Linear"
+
+        self.attributes["padding"] = [0, 0]
+        if self.operator.attr.has_attr("Pad2D_0"):
+            self.attributes["padding"] = self.operator.attr.get_attr("Pad2D_0").get_attr("begin_end_borders")
+
+        self.attributes["kernel_dims"] = self.operator.attr.get_attr("Conv2D_0").get_attr("kernel_dims")
+        self.attributes["stride_dims"] = self.operator.attr.get_attr("Conv2D_0").get_attr("stride_dims")
+        self.attributes["dilation_dims"] = self.operator.attr.get_attr("Conv2D_0").get_attr("dilation_dims")
+
+        # Correct "in_chan" and "out_chan" that were taken from the compacted tensor
+        self.attributes["in_chan"][0] = self.attributes["in_channels"]
+        self.attributes["out_chan"][0] = self.attributes["out_channels"]
+
+        if self.operator.attr.has_attr("ReLU_0"):
+            self.attributes["activation"] = "Rectifier"
+
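+        # Export the node's scaling factor as floating-point rescaling parameters.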
+        if self.operator.attr.has_attr("scaling_factor"):
+            scaling_factor = self.operator.attr.scaling_factor
+            self.attributes.update(Scaling(scaling_factor = scaling_factor)("floating_point"))
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "custom_conv_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "CustomConv.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "aidge_supportfunctions.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "Macs.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "nn_scaling_functions.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "subkernels_functions.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "swar_arm_acle.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "typedefs.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "utils.hpp")
+        ]
+
+
+@ExportLibAidgeARM.register_generic("ArmConv2D", aidge_core.ImplSpec([
+                                                                aidge_core.IOSpec(aidge_core.dtype.any),            # Input[0] : Input Spec
+                                                                aidge_core.IOSpec(aidge_core.dtype.dual_int4),      # Input[1] : Weight Spec
+                                                                aidge_core.IOSpec(aidge_core.dtype.int32)           # Input[2] : Bias Spec
+                                                            ],
+                                                            [
+                                                                aidge_core.IOSpec(aidge_core.dtype.any)       # Output[0] : Output spec
+                                                            ]))
+class ConvScaling_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, conversion_map = datatype_converter_aidge2arm):
+        super().__init__(node, mem_info, conversion_map)
+
+        self.attributes["activation"] = "Linear"
+
+        self.attributes["padding"] = [0, 0]
+        if self.operator.attr.has_attr("Pad2D_0"):
+            self.attributes["padding"] = self.operator.attr.get_attr("Pad2D_0").get_attr("begin_end_borders")
+
+        self.attributes["kernel_dims"] = self.operator.attr.get_attr("Conv2D_0").get_attr("kernel_dims")
+        self.attributes["stride_dims"] = self.operator.attr.get_attr("Conv2D_0").get_attr("stride_dims")
+        self.attributes["dilation_dims"] = self.operator.attr.get_attr("Conv2D_0").get_attr("dilation_dims")
+
+        # Correct "in_chan" and "out_chan" that were taken from the compacted tensor
+        self.attributes["in_chan"][0] = self.attributes["in_channels"]
+        self.attributes["out_chan"][0] = self.attributes["out_channels"]
+
+        if self.operator.attr.has_attr("ReLU_0"):
+            self.attributes["activation"] = "Rectifier"
+
+        if self.operator.attr.has_attr("scaling_factor"):
+            scaling_factor = self.operator.attr.scaling_factor
+            self.attributes.update(Scaling(scaling_factor = scaling_factor)("floating_point"))
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "custom_conv_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "CustomConv.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "aidge_supportfunctions.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "Macs.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "nn_scaling_functions.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "subkernels_functions.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "swar_arm_acle.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "typedefs.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "utils.hpp")
+        ]
+
+
+@ExportLibAidgeARM.register_generic("ArmFC", aidge_core.ImplSpec([
+                                                                aidge_core.IOSpec(aidge_core.dtype.any),            # Input[0] : Input Spec
+                                                                aidge_core.IOSpec(aidge_core.dtype.dual_int4),      # Input[1] : Weight Spec
+                                                                aidge_core.IOSpec(aidge_core.dtype.int32)           # Input[2] : Bias Spec
+                                                            ],
+                                                            [
+                                                                aidge_core.IOSpec(aidge_core.dtype.any)       # Output[0] : Output spec
+                                                            ]))
+class FCScaling_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, conversion_map = datatype_converter_aidge2arm):
+        super().__init__(node, mem_info, conversion_map)
+        self.attributes["activation"] = "Linear"
+
+        # Correct "in_chan" and "out_chan" that were taken from the compacted tensor
+        self.attributes["in_chan"][0] = self.attributes["in_channels"]
+        self.attributes["out_chan"][0] = self.attributes["out_channels"]
+
+        if self.operator.attr.has_attr("ReLU_0"):
+            self.attributes["activation"] = "Rectifier"
+
+        if self.operator.attr.has_attr("scaling_factor"):
+            scaling_factor = self.operator.attr.scaling_factor
+            self.attributes.update(Scaling(scaling_factor = scaling_factor)("floating_point"))
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "fc_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "custom_fc_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "FullyConnected" / "CustomFc.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "aidge_supportfunctions.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "Macs.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "nn_scaling_functions.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "subkernels_functions.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "swar_arm_acle.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "typedefs.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "utils.hpp")
+        ]
+
+@ExportLibAidgeARM.register("MaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class CustomPooling_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, conversion_map = datatype_converter_aidge2arm):
+        super().__init__(node, mem_info, conversion_map)
+
+        self.attributes["activation"] = "Linear"
+        self.attributes["pool_type"] = "Max"
+        # No padding with MaxPooling or AvgPooling
+        # Use PaddedMaxPooling/PaddedAvgPooling to add padding attribute
+        self.attributes["padding"] = [0, 0]
+
+        self.attributes["kernel_dims"] = self.operator.attr.kernel_dims
+        self.attributes["stride_dims"] = self.operator.attr.stride_dims
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "pool_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "custom_pool_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Pooling" / "CustomPooling.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "aidge_supportfunctions.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "Macs.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "subkernels_functions.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "typedefs.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "utils.hpp")
+        ]
+
+
+# Registered via the full ImplSpec constructor: inputs (list of IOSpec), outputs (list of IOSpec), and optional DynamicAttributes.
+@ExportLibAidgeARM.register("Conv2D", aidge_core.ImplSpec(  [
+                                                                aidge_core.IOSpec(aidge_core.dtype.any),    # Input[0] : Input Spec
+                                                                aidge_core.IOSpec(aidge_core.dtype.int4),   # Input[1] : Weight Spec
+                                                                aidge_core.IOSpec(aidge_core.dtype.any)     # Input[2] : Bias Spec
+                                                            ],
+                                                            [
+                                                                aidge_core.IOSpec(aidge_core.dtype.int4) # Output[0] : Output spec
+                                                            ]))
+class ConvInt4_ARMCortexM(ExportNodeCpp):
+    def __init__(self, node, mem_info, conversion_map = datatype_converter_aidge2arm):
+        super().__init__(node, mem_info, conversion_map)
+        self.attributes["activation"] = "Linear"
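+        # Scaling()("no_scaling") yields the identity (no rescaling) attribute set.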
+        self.attributes.update(Scaling()("no_scaling"))
+        # No padding with Conv
+        # Use PaddedConv to add padding attribute
+        self.attributes["padding"] = [0, 0]
+
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "custom_conv_kernel.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "CustomConv.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "aidge_supportfunctions.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "Macs.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "nn_scaling_functions.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "subkernels_functions.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "swar_arm_acle.h"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "typedefs.hpp"),
+            str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "utils.hpp")
+        ]
+
 @ExportLibAidgeARM.register("ConvDepthWise2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class ConvDW_ARMCortexM(ExportNodeCpp):
     def __init__(self, node, mem_info):