diff --git a/aidge_export_cpp/export_utils.py b/aidge_export_cpp/export_utils.py
index 9ce15f5ec9ca3797a1a885c648b1c64160fe0010..1ef9d992cf12d6deb76166fb5680dd890b61f1cd 100644
--- a/aidge_export_cpp/export_utils.py
+++ b/aidge_export_cpp/export_utils.py
@@ -11,48 +11,56 @@ def cpp_fuse_to_metaops(graph_view: aidge_core.GraphView):
                        ordered input/output data within the computational graph.
     """
 
-    cpp_recipes = OrderedDict({
-        "Quantizer":      "BitShift#->Clip; BitShift#<-Mul?",    # Scaling node created by the quantization
-        "CppFc":          "FC->Quantizer?->ReLU?",
-        "CppConv":        "Conv2D#->Quantizer?->ReLU?; Conv2D#<-Pad2D?",
-        "CppPool":        "(MaxPooling2D#|AvgPooling2D#|GlobalAveragePooling#)->ReLU?; (MaxPooling2D#|AvgPooling2D#|GlobalAveragePooling#)<-Pad2D?",
-        "CppElemWise":    "(Add|Mul|Sub)->Quantizer?->ReLU?",
-        "CppActivation":  "ReLU"
-    })
-
     # cpp_recipes = OrderedDict({
-    #     # FC
-    #     "FcReLU":         "FC->ReLU",
-
-    #     # Conv
-    #     "PadConv":        "Conv2D<-Pad2D",
-    #     "ConvReLU":       "Conv2D->ReLU",
-    #     "PadConvReLU":    "PadConv->ReLU",
-
-    #     # Max Pooling
-    #     "PadMaxPool":     "MaxPooling2D<-Pad2D",
-    #     "MaxPoolReLU":    "MaxPooling2D->ReLU",
-    #     "PadMaxPoolReLU": "PadMaxPool->ReLU",
-
-    #     # Average Pooling
-    #     "PadAvgPool":     "AvgPooling2D<-Pad2D",
-    #     "AvgPoolReLU":    "AvgPooling2D->ReLU",
-    #     "PadAvgPoolReLU": "PadAvgPool->ReLU",
-
-    #     # Global Average Pooling
-    #     "PadGlobalAvgPool":     "GlobalAveragePooling2D<-Pad2D",
-    #     "GlobalAvgPoolReLU":    "GlobalAveragePooling2D->ReLU",
-    #     "PadGlobalAvgPoolReLU": "PadGlobalAveragePool->ReLU",
-
-    #     # ElemWise
-    #     "AddReLU":    "Add->ReLU",
-    #     "SubReLU":    "Sub->ReLU",
-    #     "MulReLU":    "Mul->ReLU"
+    #     "Quantizer":      "BitShift#->Clip; BitShift#<-Mul?",    # Scaling node created by the quantization
+    #     "CppFc":          "FC->Quantizer?->ReLU?",
+    #     "CppConv":        "Conv2D#->Quantizer?->ReLU?; Conv2D#<-Pad2D?",
+    #     "CppPool":        "(MaxPooling2D#|AvgPooling2D#|GlobalAveragePooling#)->ReLU?; (MaxPooling2D#|AvgPooling2D#|GlobalAveragePooling#)<-Pad2D?",
+    #     "CppElemWise":    "(Add|Mul|Sub)->Quantizer?->ReLU?",
+    #     "CppActivation":  "ReLU"
     # })
 
-    # Fuse Quantizers
-    # aidge_core.fuse_to_metaops(
-    #     graph_view, cpp_recipes["Quantizer"], "Quantizer")
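+    # Recipe query syntax (as assumed from the Aidge graph-matching DSL):
+    # "->" follows a child node, "<-" a parent node, "|" lists alternatives,
+    # "?" marks an optional node and "#" anchors the same node across the
+    # ";"-separated sub-queries.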
+    cpp_recipes = OrderedDict({
+        # Quantization
+        "Quantizer":      "BitShift#->Clip; BitShift#<-Mul?",
+
+        # FC
+        "QFC":            "FC->Quantizer",
+        "FCAct":          "(FC|QFC)->(ReLU|LeakyReLU)",
+
+        # Conv
+        "QConv":          "Conv2D->Quantizer",
+        "PadConv":        "(QConv|Conv2D)<-Pad2D",
+        "ConvAct":        "(QConv|Conv2D)->(ReLU|LeakyReLU)",
+        "PadConvAct":     "PadConv->(ReLU|LeakyReLU)",
+
+        # Max Pooling
+        "PadMaxPool":     "MaxPooling2D<-Pad2D",
+        "MaxPoolAct":     "MaxPooling2D->(ReLU|LeakyReLU)",
+        "PadMaxPoolAct":  "PadMaxPool->(ReLU|LeakyReLU)",
+
+        # Average Pooling
+        "PadAvgPool":     "AvgPooling2D<-Pad2D",
+        "AvgPoolAct":     "AvgPooling2D->(ReLU|LeakyReLU)",
+        "PadAvgPoolAct":  "PadAvgPool->(ReLU|LeakyReLU)",
+
+        # Global Average Pooling
+        "PadGlobalAvgPool":     "GlobalAveragePooling2D<-Pad2D",
+        "GlobalAvgPoolAct":     "GlobalAveragePooling2D->(ReLU|LeakyReLU)",
+        "PadGlobalAvgPoolAct":  "PadGlobalAveragePool->(ReLU|LeakyReLU)",
+
+        # ElemWise
+        "QAdd":      "Add->Quantizer",
+        "QSub":      "Sub->Quantizer",
+        "QMul":      "Mul->Quantizer",
+        "AddAct":    "(QAdd|Add)->(ReLU|LeakyReLU)",
+        "SubAct":    "(QSub|Sub)->(ReLU|LeakyReLU)",
+        "MulAct":    "(QMul|Mul)->(ReLU|LeakyReLU)",
+
+        # Activation
+        "QReLU":        "ReLU->Quantizer",
+        "QLeakyReLU":   "LeakyReLU->Quantizer",
+    })
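+    # Note: the recipes are applied in insertion order, so the "Quantizer"
+    # metaop must be fused first since the following recipes refer to it.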
 
     for node, recipe in cpp_recipes.items():
         aidge_core.fuse_to_metaops(graph_view, recipe, node)
@@ -90,7 +98,8 @@ def set_nodes_names(scheduler):
             node_it += 1
 
             # Set producers names
-            if node_type in ["CppFc", "CppConv"]:
+            if node_type in ["QConv", "PadConv", "ConvAct", "PadConvAct",
+                             "QFC", "FCAct"]:
                 # nb_parents = len(node.get_parents())
                 node.get_parent(1).set_name(node.name() + "_weights")
                 if node.get_parent(2) is not None:
@@ -174,23 +183,33 @@ def exclude_unwanted_producers(model):
                     node.attributes().ignore = True
                     break
 
-def set_scaling_attributes(export_node: aidge_core.export_utils.ExportNode, QNode: aidge_core.Node):
+
+def set_scaling_attributes(export_node: aidge_core.export_utils.ExportNode, node: aidge_core.Node):
     """
-    Set shift and coef attributes of the given export node.
+    Recursively search the given node for a Quantizer node, then set the
+    shift and coef attributes of the given export node.
     [TODO] Should be moved into aidge_core.ExportNode
 
     :param export_node: An instance of :py:class:`aidge_core.export_utils.ExportNode` to set the scaling
                         attributes needed for a quantized export. 
     :type export_node: aidge_core.export_utils.ExportNode
-    :param QNode: Quantizer node holding the shift and coef values. 
-    :type QNode: aidge_core.Node
+    :param node: Node which may hold a Quantizer node. 
+    :type node: aidge_core.Node
     """
 
-    for node in QNode.get_operator().get_micro_graph().get_nodes():
-        if node.type() == "BitShift":
-            export_node.attributes["shift_value"] = node.get_operator().get_input(1)[0]
-        elif node.type() == "Mul":
-            export_node.attributes["coef_value"] = node.get_operator().get_input(1)[0]
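+    # Base case: the node is the fused Quantizer itself, read the shift and
+    # coef values from the second input of its internal BitShift / Mul nodes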
+    if node.type() == "Quantizer":
+        for n in node.get_operator().get_micro_graph().get_nodes():
+            if n.type() == "BitShift":
+                export_node.attributes["shift_value"] = n.get_operator().get_input(1)[0]
+            elif n.type() == "Mul":
+                export_node.attributes["coef_value"] = n.get_operator().get_input(1)[0]
+
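+    # Recursive case: the node is a meta-operator (e.g. QConv, FCAct),
+    # search its micro-graph for a nested Quantizer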
+    elif isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
+        for n in node.get_operator().get_micro_graph().get_nodes():
+            set_scaling_attributes(export_node, n)
+
             
 def normalize(array):
     """
diff --git a/aidge_export_cpp/operators/CppActivation.py b/aidge_export_cpp/operators/Activation.py
similarity index 79%
rename from aidge_export_cpp/operators/CppActivation.py
rename to aidge_export_cpp/operators/Activation.py
index 7d8193906dd00a844b8606d8217670046f5843f4..8bb52f9a6eb4450bdd6a2b60f5b07f2068bf42e9 100644
--- a/aidge_export_cpp/operators/CppActivation.py
+++ b/aidge_export_cpp/operators/Activation.py
@@ -2,13 +2,13 @@ import aidge_core
 from aidge_core.export_utils import ExportNodeCpp
 from aidge_export_cpp import ROOT, ExportLibCpp, set_scaling_attributes
 
-@ExportLibCpp.register_metaop("CppActivation", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
-class CppActivation(ExportNodeCpp):
+@ExportLibCpp.register("ReLU", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class ReLU(ExportNodeCpp):
     def __init__(self, node, mem_info):
         super().__init__(node, mem_info)
 
         # Initialize kernel attributes
-        self.attributes["activation"] = "Linear"
+        self.attributes["activation"] = "Rectifier"
         self.attributes["aidge_cmp"] = node.attributes().has_attr("aidge_cmp")
 
         ## Scaling
@@ -16,19 +16,6 @@ class CppActivation(ExportNodeCpp):
         self.attributes["shift_value"] = 0
         self.attributes["coef_value"] = 1
 
-        # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "ReLU":
-                self.attributes["activation"] = "Rectifier"
-            elif n.type() == "Quantizer":
-                set_scaling_attributes(self, n)
-
-        ## Set the scaling type
-        if self.attributes["coef_value"] != 1:
-            self.attributes["rescaling"] = "FixedPointScaling"
-        elif self.attributes["shift_value"] != 0:
-            self.attributes["rescaling"] = "SingleShiftScaling"
-
         # Template for layer configutation file generation
         self.config_template = str(ROOT / "templates" / "configuration" / "activation_config.jinja")
         
@@ -45,4 +32,20 @@ class CppActivation(ExportNodeCpp):
         if self.attributes["aidge_cmp"]:
             self.include_list.append("network/utils.hpp")   # aidge_cmp function
             self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
-        
\ No newline at end of file
+
+
+@ExportLibCpp.register_metaop("QReLU", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class QReLU(ReLU):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Browse the metaop to update kernel attributes
+        for n in node.get_operator().get_micro_graph().get_nodes():
+            if n.type() == "Quantizer":
+                set_scaling_attributes(self, n)
+
+        # Update the scaling type
+        if self.attributes["coef_value"] != 1:
+            self.attributes["rescaling"] = "FixedPointScaling"
+        elif self.attributes["shift_value"] != 0:
+            self.attributes["rescaling"] = "SingleShiftScaling"
diff --git a/aidge_export_cpp/operators/CppBatchNorm.py b/aidge_export_cpp/operators/BatchNorm.py
similarity index 96%
rename from aidge_export_cpp/operators/CppBatchNorm.py
rename to aidge_export_cpp/operators/BatchNorm.py
index 091dc76c248fd733bdaf8f51754e375182927bf6..b0f5a16f195fb27846db9a8727b3804d84520d12 100644
--- a/aidge_export_cpp/operators/CppBatchNorm.py
+++ b/aidge_export_cpp/operators/BatchNorm.py
@@ -4,7 +4,7 @@ from aidge_export_cpp import ROOT
 from aidge_export_cpp import ExportLibCpp
 
 @ExportLibCpp.register("BatchNorm2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
-class CppBatchNorm(ExportNodeCpp):
+class BatchNorm(ExportNodeCpp):
     def __init__(self, node, mem_info):
         super().__init__(node, mem_info)
 
@@ -30,4 +30,5 @@ class CppBatchNorm(ExportNodeCpp):
         # Include aidge outputs within the fwd file
         if self.attributes["aidge_cmp"]:
             self.include_list.append("network/utils.hpp")   # aidge_cmp function
-            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
\ No newline at end of file
+            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
+            
\ No newline at end of file
diff --git a/aidge_export_cpp/operators/CppConcat.py b/aidge_export_cpp/operators/Concat.py
similarity index 98%
rename from aidge_export_cpp/operators/CppConcat.py
rename to aidge_export_cpp/operators/Concat.py
index b0c91e2fe5d0530a2ac5415551bbf29273da3541..ea65f8d3cd4debc01b388b71086620e6ba7b3d0b 100644
--- a/aidge_export_cpp/operators/CppConcat.py
+++ b/aidge_export_cpp/operators/Concat.py
@@ -5,7 +5,7 @@ from aidge_export_cpp import ROOT
 from aidge_export_cpp import ExportLibCpp
 
 @ExportLibCpp.register("Concat", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
-class CppConcat(ExportNodeCpp):
+class Concat(ExportNodeCpp):
     def __init__(self, node, mem_info):
         super().__init__(node, mem_info)
 
diff --git a/aidge_export_cpp/operators/Conv.py b/aidge_export_cpp/operators/Conv.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2562f79c2e8428efe7a23ec6290515827eeb48a
--- /dev/null
+++ b/aidge_export_cpp/operators/Conv.py
@@ -0,0 +1,92 @@
+import aidge_core
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_export_cpp import ROOT, ExportLibCpp, set_scaling_attributes
+
+@ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class Conv(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Initialize kernel attributes
+        self.attributes["padding"] = [0, 0, 0, 0]
+        self.attributes["activation"] = "Linear"
+        self.attributes["aidge_cmp"] = node.attributes().has_attr("aidge_cmp")
+
+        ## Scaling
+        self.attributes["rescaling"] = "NoScaling"
+        self.attributes["shift_value"] = 0
+
+        # Browse the metaop to update kernel attributes
+        self.get_conv_attributes(node)
+
+        # Template for layer configutation file generation
+        self.config_template = str(ROOT / "templates" / "configuration" / "convolution_config.jinja")
+        
+        # Template layer call function generation within the forward file
+        self.forward_template = str(ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja")
+        
+        # Files to include within the generated forward.cpp file
+        self.include_list = []
+        
+        # Path to the kernel(s) files to copy
+        self.add_kernel_to_copy(ROOT / "kernels" / "convolution.hpp")
+        self.add_kernel_to_copy(ROOT / "static" / "macs.hpp", "include/network", fwd_include=False)
+        
+        # Include aidge outputs within the fwd file
+        if self.attributes["aidge_cmp"]:
+            self.include_list.append("network/utils.hpp")   # aidge_cmp function
+            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
+    
+    def get_conv_attributes(self, node):
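+        # Recurse through the meta-operator's micro-graph until the underlying Conv2D is found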
+        if isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
+            for n in node.get_operator().get_micro_graph().get_nodes():
+                self.get_conv_attributes(n)
+
+        elif node.type() == "Conv2D":
+            self.attributes["kernel_dims"] = node.get_operator().attr.kernel_dims
+            self.attributes["stride_dims"] = node.get_operator().attr.stride_dims
+            self.attributes["dilation_dims"] = node.get_operator().attr.dilation_dims
+
+
+@ExportLibCpp.register_metaop("QConv", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class QConv(Conv):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Look for Quantizer node and set shift and coef export node attributes
+        set_scaling_attributes(self, node)
+
+        ## Set the scaling type
+        if self.attributes["shift_value"] != 0:
+            self.attributes["rescaling"] = "SingleShiftScaling"
+
+
+@ExportLibCpp.register_metaop("PadConv", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class PadConv(QConv):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Browse the metaop to update kernel attributes
+        for n in node.get_operator().get_micro_graph().get_nodes():
+            if n.type() == "Pad2D":
+                self.attributes["padding"] = n.get_operator().attr.begin_end_borders
+
+
+@ExportLibCpp.register_metaop("ConvAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class ConvAct(QConv):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Browse the metaop to update kernel attributes
+        for n in node.get_operator().get_micro_graph().get_nodes():
+            if n.type() == "ReLU":
+                self.attributes["activation"] = "Rectifier"
+            elif n.type() == "LeakyReLU":
+                aidge_core.Log.fatal(f"{n.type()} activation is not yet supported.")
+                # TODO: unsupported activations should not have to be checked manually for each operator
+
+
+@ExportLibCpp.register_metaop("PadConvAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class PadConvAct(PadConv, ConvAct):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
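+        # super().__init__() follows the MRO (PadConv, then ConvAct, then QConv,
+        # then Conv), so padding, activation and scaling attributes are all set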
diff --git a/aidge_export_cpp/operators/CppConv.py b/aidge_export_cpp/operators/CppConv.py
deleted file mode 100644
index 8cc28d452ba6b9cdd3b8875e93f66f69521f2994..0000000000000000000000000000000000000000
--- a/aidge_export_cpp/operators/CppConv.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import aidge_core
-from aidge_core.export_utils import ExportNodeCpp
-from aidge_export_cpp import ROOT, ExportLibCpp, set_scaling_attributes
-
-@ExportLibCpp.register_metaop("CppConv", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
-class CppConv(ExportNodeCpp):
-    def __init__(self, node, mem_info):
-        super().__init__(node, mem_info)
-
-        # Initialize kernel attributes
-        self.attributes["padding"] = [0, 0, 0, 0]
-        self.attributes["activation"] = "Linear"
-        self.attributes["aidge_cmp"] = node.attributes().has_attr("aidge_cmp")
-
-        ## Scaling
-        self.attributes["rescaling"] = "NoScaling"
-        self.attributes["shift_value"] = 0
-
-        # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "ReLU":
-                self.attributes["activation"] = "Rectifier"
-            elif n.type() == "Pad2D":
-                self.attributes["padding"] = n.get_operator().attr.begin_end_borders
-            elif n.type() == "Conv2D":
-                self.attributes["kernel_dims"] = n.get_operator().attr.kernel_dims
-                self.attributes["stride_dims"] = n.get_operator().attr.stride_dims
-                self.attributes["dilation_dims"] = n.get_operator().attr.dilation_dims
-            elif n.type() == "Quantizer":
-                set_scaling_attributes(self, n)
-
-        ## Set the scaling type
-        if self.attributes["shift_value"] != 0:
-            self.attributes["rescaling"] = "SingleShiftScaling"
-
-        # Template for layer configutation file generation
-        self.config_template = str(ROOT / "templates" / "configuration" / "convolution_config.jinja")
-        
-        # Template layer call function generation within the forward file
-        self.forward_template = str(ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja")
-        
-        # Files to include within the generated forward.cpp file
-        self.include_list = []
-        
-        # Path to the kernel(s) files to copy
-        self.add_kernel_to_copy(ROOT / "kernels" / "convolution.hpp")
-        self.add_kernel_to_copy(ROOT / "static" / "macs.hpp", "include/network", fwd_include=False)
-        
-        # Include aidge outputs within the fwd file
-        if self.attributes["aidge_cmp"]:
-            self.include_list.append("network/utils.hpp")   # aidge_cmp function
-            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
\ No newline at end of file
diff --git a/aidge_export_cpp/operators/CppElemWise.py b/aidge_export_cpp/operators/CppElemWise.py
deleted file mode 100644
index fd4bb265696c6596f758b727bd46145b18a20526..0000000000000000000000000000000000000000
--- a/aidge_export_cpp/operators/CppElemWise.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import aidge_core
-from aidge_core.export_utils import ExportNodeCpp
-from aidge_export_cpp import ROOT, ExportLibCpp, set_scaling_attributes
-
-@ExportLibCpp.register_metaop("CppElemWise", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
-class CppElemWise(ExportNodeCpp):
-    def __init__(self, node, mem_info):
-        super().__init__(node, mem_info)
-
-        # Initialize kernel attributes
-        self.attributes["activation"] = "Linear"
-        self.attributes["aidge_cmp"] = node.attributes().has_attr("aidge_cmp")
-
-        ## Scaling
-        self.attributes["rescaling"] = "NoScaling"
-        self.attributes["shift_value"] = 0
-        self.attributes["coef_value"] = 1
-
-        # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "ReLU":
-                self.attributes["activation"] = "Rectifier"
-            elif n.type() in ["Add", "Mul", "Sub"]:
-                self.attributes["elemwise_op"] = n.type()
-            elif n.type() == "Quantizer":
-                set_scaling_attributes(self, n)
-
-        ## Set the scaling type
-        if self.attributes["coef_value"] != 1:
-            self.attributes["rescaling"] = "FixedPointScaling"
-        elif self.attributes["shift_value"] != 0:
-            self.attributes["rescaling"] = "SingleShiftScaling"
-
-        # Template for layer configutation file generation
-        self.config_template = str(ROOT / "templates" / "configuration" / "elemwise_config.jinja")
-
-        # Template layer call function generation within the forward file
-        self.forward_template = str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja")
-
-        # Files to include within the generated forward.cpp file
-        self.include_list = []
-
-        # Path to the kernel(s) files to copy
-        self.add_kernel_to_copy(ROOT / "kernels" / "elemwise.hpp")
-        
-        # Include aidge outputs within the fwd file
-        if self.attributes["aidge_cmp"]:
-            self.include_list.append("network/utils.hpp")   # aidge_cmp function
-            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
\ No newline at end of file
diff --git a/aidge_export_cpp/operators/CppPool.py b/aidge_export_cpp/operators/CppPool.py
deleted file mode 100644
index 54a4cbbe7c7d03a2abd4cd3a71073710b2495915..0000000000000000000000000000000000000000
--- a/aidge_export_cpp/operators/CppPool.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import aidge_core
-from aidge_core.export_utils import ExportNodeCpp
-from aidge_export_cpp import ROOT
-from aidge_export_cpp import ExportLibCpp
-
-@ExportLibCpp.register_metaop("CppPool", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
-class CppPool(ExportNodeCpp):
-    def __init__(self, node, mem_info):
-        super().__init__(node, mem_info)
-
-        # Initialize kernel attributes
-        self.attributes["stride_dims"] = [1, 1]
-        self.attributes["padding"] = [0, 0, 0, 0]
-        self.attributes["pool_type"] = "Max"
-        self.attributes["activation"] = "Linear"
-        self.attributes["aidge_cmp"] = node.attributes().has_attr("aidge_cmp")
-
-        # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "ReLU":
-                self.attributes["activation"] = "Rectifier"
-            elif n.type() == "Pad2D":
-                self.attributes["padding"] = n.get_operator().attr.begin_end_borders
-            elif n.type() == "GlobalAveragePooling":
-                self.attributes["pool_type"] = "Average"
-                self.attributes["kernel_dims"] = [self.attributes["in_width"][0], self.attributes["in_height"][0]]
-            elif n.type() == "MaxPooling2D":
-                self.attributes["pool_type"] = "Max"
-                self.attributes["kernel_dims"] = n.get_operator().attr.kernel_dims
-                self.attributes["stride_dims"] = n.get_operator().attr.stride_dims
-            elif n.type() == "AvgPooling2D":
-                self.attributes["pool_type"] = "Average"
-                self.attributes["kernel_dims"] = n.get_operator().attr.kernel_dims
-                self.attributes["stride_dims"] = n.get_operator().attr.stride_dims
-
-        # Template for layer configutation file generation
-        self.config_template = str(ROOT / "templates" / "configuration" / "pooling_config.jinja")
-        
-        # Template layer call function generation within the forward file
-        self.forward_template = str(ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja")
-        
-        # Files to include within the generated forward.cpp file
-        self.include_list = []
-        
-        # Path to the kernel(s) files to copy
-        self.add_kernel_to_copy(ROOT / "kernels" / "pooling.hpp")
-
-        # Include aidge outputs within the fwd file
-        if self.attributes["aidge_cmp"]:
-            self.include_list.append("network/utils.hpp")   # aidge_cmp function
-            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
\ No newline at end of file
diff --git a/aidge_export_cpp/operators/ElemWise.py b/aidge_export_cpp/operators/ElemWise.py
new file mode 100644
index 0000000000000000000000000000000000000000..c27c351451aeff90203ea1481fe2fbd5318ce065
--- /dev/null
+++ b/aidge_export_cpp/operators/ElemWise.py
@@ -0,0 +1,130 @@
+import aidge_core
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_export_cpp import ROOT, ExportLibCpp, set_scaling_attributes
+
+class ElemWise(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Initialize kernel attributes
+        self.attributes["activation"] = "Linear"
+        self.attributes["aidge_cmp"] = node.attributes().has_attr("aidge_cmp")
+
+        ## Scaling
+        self.attributes["rescaling"] = "NoScaling"
+        self.attributes["shift_value"] = 0
+        self.attributes["coef_value"] = 1
+
+        # Template for layer configutation file generation
+        self.config_template = str(ROOT / "templates" / "configuration" / "elemwise_config.jinja")
+
+        # Template layer call function generation within the forward file
+        self.forward_template = str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja")
+
+        # Files to include within the generated forward.cpp file
+        self.include_list = []
+
+        # Path to the kernel(s) files to copy
+        self.add_kernel_to_copy(ROOT / "kernels" / "elemwise.hpp")
+        
+        # Include aidge outputs within the fwd file
+        if self.attributes["aidge_cmp"]:
+            self.include_list.append("network/utils.hpp")   # aidge_cmp function
+            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
+
+
+class QElemWise(ElemWise):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Look for a Quantizer node (possibly nested inside a child metaop)
+        # and set the shift / coef export attributes accordingly
+        set_scaling_attributes(self, node)
+
+        ## Set the scaling type
+        if self.attributes["coef_value"] != 1:
+            self.attributes["rescaling"] = "FixedPointScaling"
+        elif self.attributes["shift_value"] != 0:
+            self.attributes["rescaling"] = "SingleShiftScaling"
+
+
+@ExportLibCpp.register("Add", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class Add(ElemWise):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["elemwise_op"] = "Add"
+
+
+@ExportLibCpp.register_metaop("QAdd", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class QAdd(QElemWise, Add):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
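+        # Cooperative __init__: QElemWise sets the scaling attributes, Add sets "elemwise_op"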
+
+
+@ExportLibCpp.register_metaop("AddAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class AddAct(QAdd):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Browse the metaop to update kernel attributes
+        for n in node.get_operator().get_micro_graph().get_nodes():
+            if n.type() == "ReLU":
+                self.attributes["activation"] = "Rectifier"
+            elif n.type() == "LeakyReLU":
+                aidge_core.Log.fatal(f"{n.type()} activation is not yet supported.")
+                # TODO: unsupported activations should not have to be checked manually for each operator
+
+
+@ExportLibCpp.register("Sub", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class Sub(ElemWise):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["elemwise_op"] = "Sub"
+
+
+@ExportLibCpp.register_metaop("QSub", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class QSub(QElemWise, Sub):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+
+@ExportLibCpp.register_metaop("SubAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class SubAct(QSub):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Browse the metaop to update kernel attributes
+        for n in node.get_operator().get_micro_graph().get_nodes():
+            if n.type() == "ReLU":
+                self.attributes["activation"] = "Rectifier"
+            elif n.type() == "LeakyReLU":
+                aidge_core.Log.fatal(f"{n.type()} activation is not yet supported.")
+                # TODO: unsupported activations should not have to be checked manually for each operator
+
+
+@ExportLibCpp.register("Mul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class Mul(ElemWise):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["elemwise_op"] = "Mul"
+
+
+@ExportLibCpp.register_metaop("QMul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class QMul(QElemWise, Mul):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+
+@ExportLibCpp.register_metaop("MulAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class MulAct(QMul):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Browse the metaop to update kernel attributes
+        for n in node.get_operator().get_micro_graph().get_nodes():
+            if n.type() == "ReLU":
+                self.attributes["activation"] = "Rectifier"
+            elif n.type() == "LeakyReLU":
+                aidge_core.Log.fatal(f"{n.type()} activation is not yet supported.")
+                # TODO: unsupported activations should not have to be checked manually for each operator
\ No newline at end of file
diff --git a/aidge_export_cpp/operators/CppFc.py b/aidge_export_cpp/operators/Fc.py
similarity index 68%
rename from aidge_export_cpp/operators/CppFc.py
rename to aidge_export_cpp/operators/Fc.py
index 8b10d914d1f15453c73bb323bf2e767e41e3ad69..05eff821467478a22fc9d7885b78e6ff2c3f6e71 100644
--- a/aidge_export_cpp/operators/CppFc.py
+++ b/aidge_export_cpp/operators/Fc.py
@@ -2,35 +2,19 @@ import aidge_core
 from aidge_core.export_utils import ExportNodeCpp
 from aidge_export_cpp import ROOT, ExportLibCpp, set_scaling_attributes
 
-@ExportLibCpp.register_metaop("CppFc", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
-class CppFc(ExportNodeCpp):
+@ExportLibCpp.register("FC", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class FC(ExportNodeCpp):
     def __init__(self, node, mem_info):
         super().__init__(node, mem_info)
 
         # Initialize kernel attributes
         self.attributes["activation"] = "Linear"
-        self.attributes["rescaling"] = "NoScaling"
         self.attributes["aidge_cmp"] = node.attributes().has_attr("aidge_cmp")
 
         ## Scaling
         self.attributes["rescaling"] = "NoScaling"
         self.attributes["shift_value"] = 0
 
-        ## Scaling
-        self.attributes["rescaling"] = "NoScaling"
-        self.attributes["shift_value"] = 0
-
-        # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "ReLU":
-                self.attributes["activation"] = "Rectifier"
-            elif n.type() == "Quantizer":
-                set_scaling_attributes(self, n)
-
-        ## Set the scaling type
-        if self.attributes["shift_value"] != 0:
-            self.attributes["rescaling"] = "SingleShiftScaling"
-
         # Template for layer configutation file generation
         self.config_template = str(ROOT / "templates" / "configuration" / "fullyconnected_config.jinja")
         
@@ -47,4 +31,33 @@ class CppFc(ExportNodeCpp):
         # Include aidge outputs within the fwd file
         if self.attributes["aidge_cmp"]:
             self.include_list.append("network/utils.hpp")   # aidge_cmp function
-            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
\ No newline at end of file
+            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
+
+
+@ExportLibCpp.register_metaop("QFC", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class QFC(FC):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Look for a Quantizer node (possibly nested inside a child metaop)
+        # and set the shift / coef export attributes accordingly
+        set_scaling_attributes(self, node)
+
+        ## Set the scaling type
+        if self.attributes["shift_value"] != 0:
+            self.attributes["rescaling"] = "SingleShiftScaling"
+
+
+@ExportLibCpp.register_metaop("FCAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class FCAct(QFC):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Browse the metaop to update kernel attributes
+        for n in node.get_operator().get_micro_graph().get_nodes():
+            if n.type() == "ReLU":
+                self.attributes["activation"] = "Rectifier"
+            elif n.type() == "LeakyReLU":
+                aidge_core.Log.fatal(f"{n.type()} activation is not yet supported.")
+                # TODO: unsupported activations should not have to be checked manually for each operator
diff --git a/aidge_export_cpp/operators/CppPad.py b/aidge_export_cpp/operators/Pad.py
similarity index 100%
rename from aidge_export_cpp/operators/CppPad.py
rename to aidge_export_cpp/operators/Pad.py
diff --git a/aidge_export_cpp/operators/Pool.py b/aidge_export_cpp/operators/Pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..558ec1bd522fb0005c1d47fcac7ca84c4992fd7e
--- /dev/null
+++ b/aidge_export_cpp/operators/Pool.py
@@ -0,0 +1,165 @@
+import aidge_core
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_export_cpp import ROOT
+from aidge_export_cpp import ExportLibCpp
+
+class Pool(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Initialize kernel attributes
+        self.attributes["stride_dims"] = [1, 1]
+        self.attributes["padding"] = [0, 0, 0, 0]
+        self.attributes["pool_type"] = "Max"
+        self.attributes["activation"] = "Linear"
+        self.attributes["aidge_cmp"] = node.attributes().has_attr("aidge_cmp")
+
+        # Template for layer configutation file generation
+        self.config_template = str(ROOT / "templates" / "configuration" / "pooling_config.jinja")
+        
+        # Template layer call function generation within the forward file
+        self.forward_template = str(ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja")
+        
+        # Files to include within the generated forward.cpp file
+        self.include_list = []
+        
+        # Path to the kernel(s) files to copy
+        self.add_kernel_to_copy(ROOT / "kernels" / "pooling.hpp")
+
+        # Include aidge outputs within the fwd file
+        if self.attributes["aidge_cmp"]:
+            self.include_list.append("network/utils.hpp")   # aidge_cmp function
+            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
+
+
+class PadPool(Pool):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Browse the metaop to update kernel attributes
+        for n in node.get_operator().get_micro_graph().get_nodes():
+            if n.type() == "Pad2D":
+                self.attributes["padding"] = n.get_operator().attr.begin_end_borders
+
+
+class PoolAct(Pool):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Browse the metaop to update kernel attributes
+        for n in node.get_operator().get_micro_graph().get_nodes():
+            if n.type() == "ReLU":
+                self.attributes["activation"] = "Rectifier"
+            elif n.type() == "LeakyReLU":
+                aidge_core.Log.fatal(f"{n.type()} activation is not yet supported.")
+                # TODO : Should not be checked manually for each activation
+
+
+@ExportLibCpp.register("MaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class MaxPool(Pool):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Browse the metaop to update kernel attributes
+        self.get_maxpool_attributes(node)
+
+    def get_maxpool_attributes(self, node):
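+        # Recurse through the meta-operator's micro-graph until the underlying MaxPooling2D is found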
+        if isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
+            for n in node.get_operator().get_micro_graph().get_nodes():
+                self.get_maxpool_attributes(n)
+
+        elif node.type() == "MaxPooling2D":
+            self.attributes["pool_type"] = "Max"
+            self.attributes["kernel_dims"] = node.get_operator().attr.kernel_dims
+            self.attributes["stride_dims"] = node.get_operator().attr.stride_dims
+
+
+@ExportLibCpp.register_metaop("PadMaxPool", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class PadMaxPool(MaxPool, PadPool):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
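+        # super().__init__() follows the MRO (MaxPool, then PadPool), so both
+        # the pooling dims and the Pad2D borders are collected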
+
+
+@ExportLibCpp.register_metaop("MaxPoolAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class MaxPoolAct(MaxPool, PoolAct):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+
+@ExportLibCpp.register_metaop("PadMaxPoolAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class PadMaxPoolAct(PadMaxPool, MaxPoolAct):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+
+@ExportLibCpp.register("AvgPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class AvgPool(Pool):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Browse the metaop to update kernel attributes
+        self.get_avgpool_attributes(node)
+
+    def get_avgpool_attributes(self, node):
+        if isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
+            for n in node.get_operator().get_micro_graph().get_nodes():
+                self.get_avgpool_attributes(n)
+
+        elif node.type() == "AvgPooling2D":
+            self.attributes["pool_type"] = "Average"
+            self.attributes["kernel_dims"] = node.get_operator().attr.kernel_dims
+            self.attributes["stride_dims"] = node.get_operator().attr.stride_dims
+
+
+@ExportLibCpp.register_metaop("PadAvgPool", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class PadAvgPool(AvgPool, PadPool):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+
+@ExportLibCpp.register_metaop("AvgPoolAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class AvgPoolAct(AvgPool, PoolAct):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+
+@ExportLibCpp.register_metaop("PadAvgPoolAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class PadAvgPoolAct(PadAvgPool, AvgPoolAct):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+
+@ExportLibCpp.register("GlobalAveragePooling", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class GlobalAvgPool(Pool):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+        # Browse the metaop to update kernel attributes
+        self.get_globalavgpool_attributes(node)
+
+    def get_globalavgpool_attributes(self, node):
+        if isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
+            for n in node.get_operator().get_micro_graph().get_nodes():
+                self.get_globalavgpool_attributes(n)
+
+        elif node.type() == "GlobalAvgPooling":
+            self.attributes["pool_type"] = "Average"
+            self.attributes["kernel_dims"] = [self.attributes["in_width"][0], self.attributes["in_height"][0]]
+
+
+@ExportLibCpp.register_metaop("PadGlobalAvgPool", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class PadGlobalAvgPool(GlobalAvgPool, PadPool):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+
+@ExportLibCpp.register_metaop("GlobalAvgPoolAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class GlobalAvgPoolAct(GlobalAvgPool, PoolAct):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+
+
+@ExportLibCpp.register_metaop("PadGlobalAvgPoolAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
+class PadGlobalAvgPoolAct(PadGlobalAvgPool, GlobalAvgPoolAct):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
\ No newline at end of file
diff --git a/aidge_export_cpp/operators/CppRescaling.py b/aidge_export_cpp/operators/Quantizer.py
similarity index 78%
rename from aidge_export_cpp/operators/CppRescaling.py
rename to aidge_export_cpp/operators/Quantizer.py
index 69c1ec50b8b23e499953b21c4b40af46bf60b641..8a6c1ae824b60682286cced7cdc2300cef5a1ba0 100644
--- a/aidge_export_cpp/operators/CppRescaling.py
+++ b/aidge_export_cpp/operators/Quantizer.py
@@ -17,6 +17,9 @@ class CppRescaling(ExportNodeCpp):
         for n in node.get_operator().get_micro_graph().get_nodes():
             if n.type() == "ReLU":
                 self.attributes["activation"] = "Rectifier"
+            elif n.type() == "LeakyReLU":
+                aidge_core.Log.fatal(f"{n.type()} activation is not yet supported.")
+                # TODO: unsupported activations should not have to be checked manually for each operator
         
         # Set scaling attributes
         set_scaling_attributes(self, node)
@@ -39,7 +42,7 @@ class CppRescaling(ExportNodeCpp):
         # Path to the kernel(s) files to copy
         self.add_kernel_to_copy(ROOT / "kernels" / "rescaling.hpp")
         
-#        # Include aidge outputs within the fwd file
-#        if self.attributes["aidge_cmp"]:
-#            self.include_list.append("network/utils.hpp")   # aidge_cmp function
-#            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
\ No newline at end of file
+        # Include aidge outputs within the fwd file
+        if self.attributes["aidge_cmp"]:
+            self.include_list.append("network/utils.hpp")   # aidge_cmp function
+            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
\ No newline at end of file