diff --git a/aidge_export_cpp/export_utils.py b/aidge_export_cpp/export_utils.py
index c82c64318b39b799c3ad78bc45a35d5a8b123e78..b8f9eaf74d1ec604e1b837b33e77bd0e60c3ddba 100644
--- a/aidge_export_cpp/export_utils.py
+++ b/aidge_export_cpp/export_utils.py
@@ -2,6 +2,7 @@ import os
 from collections import OrderedDict
 
 import aidge_core
+from aidge_core.export_utils import get_node
 
 def cpp_fuse_to_metaops(graph_view: aidge_core.GraphView):
     """ 
@@ -197,17 +198,15 @@ def set_scaling_attributes(export_node: aidge_core.export_utils.ExportNode, node
     :type node: aidge_core.Node
     """
 
-    if node.type() == "Quantizer":
-        for n in node.get_operator().get_micro_graph().get_nodes():
+    quantizer_node = get_node(node, "Quantizer")
+
+    if quantizer_node is not None:
+        for n in quantizer_node.get_operator().get_micro_graph().get_nodes():
             if n.type() == "BitShift":
                 export_node.attributes["shift_value"] = n.get_operator().get_input(1)[0]
             elif n.type() == "Mul":
                 export_node.attributes["coef_value"] = n.get_operator().get_input(1)[0]
 
-    elif isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            set_scaling_attributes(export_node, n)
-
       
             
 def normalize(array):
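[Review note] The refactor above hinges on get_node from aidge_core.export_utils, which this patch imports but does not define. A minimal sketch of its presumed behaviour, mirroring the recursive MetaOperatorOp walk that the old code performed inline (illustrative only, not the actual aidge_core implementation):

```python
import aidge_core

def get_node_sketch(node: aidge_core.Node, node_type: str):
    """Illustrative stand-in for aidge_core.export_utils.get_node.

    Returns `node` itself when its type matches, otherwise recursively
    searches the micrograph of a MetaOperatorOp; returns None if absent.
    """
    if node.type() == node_type:
        return node
    if isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
        for n in node.get_operator().get_micro_graph().get_nodes():
            found = get_node_sketch(n, node_type)
            if found is not None:
                return found
    return None
```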
diff --git a/aidge_export_cpp/operators/Activation.py b/aidge_export_cpp/operators/Activation.py
index 8bb52f9a6eb4450bdd6a2b60f5b07f2068bf42e9..55e7e19425e0a5b61790b58a2d36a8f233f75228 100644
--- a/aidge_export_cpp/operators/Activation.py
+++ b/aidge_export_cpp/operators/Activation.py
@@ -40,9 +40,7 @@ class QReLU(ReLU):
         super().__init__(node, mem_info)
 
         # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "Quantizer":
-                set_scaling_attributes(self, n)
+        set_scaling_attributes(self, node)
 
         # Update the scaling type
         if self.attributes["coef_value"] != 1:
diff --git a/aidge_export_cpp/operators/Conv.py b/aidge_export_cpp/operators/Conv.py
index e2562f79c2e8428efe7a23ec6290515827eeb48a..4207184b6417f96064f5f8c1f6dcca38df07c13a 100644
--- a/aidge_export_cpp/operators/Conv.py
+++ b/aidge_export_cpp/operators/Conv.py
@@ -1,5 +1,5 @@
 import aidge_core
-from aidge_core.export_utils import ExportNodeCpp
+from aidge_core.export_utils import ExportNodeCpp, get_node
 from aidge_export_cpp import ROOT, ExportLibCpp, set_scaling_attributes
 
 @ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
@@ -17,7 +17,10 @@ class Conv(ExportNodeCpp):
         self.attributes["shift_value"] = 0
 
         # Browse the metaop to update kernel attributes
-        self.get_conv_attributes(node)
+        conv_node = get_node(node, "Conv2D")
+        self.attributes["kernel_dims"] = conv_node.get_operator().attr.kernel_dims
+        self.attributes["stride_dims"] = conv_node.get_operator().attr.stride_dims
+        self.attributes["dilation_dims"] = conv_node.get_operator().attr.dilation_dims
 
         # Template for layer configuration file generation
         self.config_template = str(ROOT / "templates" / "configuration" / "convolution_config.jinja")
@@ -35,17 +38,7 @@ class Conv(ExportNodeCpp):
         # Include aidge outputs within the fwd file
         if self.attributes["aidge_cmp"]:
             self.include_list.append("network/utils.hpp")   # aidge_cmp function
-            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp")
-    
-    def get_conv_attributes(self, node):
-        if isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
-            for n in node.get_operator().get_micro_graph().get_nodes():
-                self.get_conv_attributes(n)
-
-        elif node.type() == "Conv2D":
-            self.attributes["kernel_dims"] = node.get_operator().attr.kernel_dims
-            self.attributes["stride_dims"] = node.get_operator().attr.stride_dims
-            self.attributes["dilation_dims"] = node.get_operator().attr.dilation_dims
+            self.include_list.append("data/aidge_outputs/" + node.name() + ".hpp") 
 
 
 @ExportLibCpp.register_metaop("QConv", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
@@ -67,9 +60,8 @@ class PadConv(QConv):
         super().__init__(node, mem_info)
 
         # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "Pad2D":
-                self.attributes["padding"] = n.get_operator().attr.begin_end_borders
+        pad_node = get_node(node, "Pad2D")
+        self.attributes["padding"] = pad_node.get_operator().attr.begin_end_borders
 
 
 @ExportLibCpp.register_metaop("ConvAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
@@ -78,13 +70,11 @@ class ConvAct(QConv):
         super().__init__(node, mem_info)
 
         # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "ReLU":
-                self.attributes["activation"] = "Rectifier"
-            elif n.type() == "LeakyReLU":
-                aidge_core.Log.fatal(f"{n.type()} activation is not yet supported.")
-                # TODO : Should not be checked manually for each activation
-
+        if get_node(node, "ReLU") is not None:
+            self.attributes["activation"] = "Rectifier"
+        elif get_node(node, "LeakyReLU") is not None:
+            aidge_core.Log.fatal(f"{node.type()} activation is not yet supported.")
+            # TODO : Should not be checked manually for each activation     
 
 @ExportLibCpp.register_metaop("PadConvAct", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
 class PadConvAct(PadConv, ConvAct):
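[Review note] PadConvAct(PadConv, ConvAct) composes the two __init__ bodies above through cooperative inheritance: Python's MRO runs QConv's setup once, then ConvAct's, then PadConv's. A standalone illustration of that resolution order (stand-in classes, no aidge dependency):

```python
class QConv:
    def __init__(self):
        print("QConv: scaling attributes")

class PadConv(QConv):
    def __init__(self):
        super().__init__()
        print("PadConv: padding attribute")

class ConvAct(QConv):
    def __init__(self):
        super().__init__()
        print("ConvAct: activation attribute")

class PadConvAct(PadConv, ConvAct):
    pass

PadConvAct()   # prints QConv, then ConvAct, then PadConv: each base runs once
```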
diff --git a/aidge_export_cpp/operators/ElemWise.py b/aidge_export_cpp/operators/ElemWise.py
index c27c351451aeff90203ea1481fe2fbd5318ce065..6a9ee7e4d2241ed424262f6af9d401aa3aaf52ba 100644
--- a/aidge_export_cpp/operators/ElemWise.py
+++ b/aidge_export_cpp/operators/ElemWise.py
@@ -1,5 +1,5 @@
 import aidge_core
-from aidge_core.export_utils import ExportNodeCpp
+from aidge_core.export_utils import ExportNodeCpp, get_node
 from aidge_export_cpp import ROOT, ExportLibCpp, set_scaling_attributes
 
 class ElemWise(ExportNodeCpp):
@@ -38,9 +38,7 @@ class QElemWise(ElemWise):
         super().__init__(node, mem_info)
 
         # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "Quantizer":
-                set_scaling_attributes(self, n)
+        set_scaling_attributes(self, node)
 
         ## Set the scaling type
         if self.attributes["coef_value"] != 1:
@@ -68,12 +66,11 @@ class AddAct(QAdd):
         super().__init__(node, mem_info)
 
         # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "ReLU":
-                self.attributes["activation"] = "Rectifier"
-            elif n.type() == "LeakyReLU":
-                aidge_core.Log.fatal(f"{n.type()} activation is not yet supported.")
-                # TODO : Should not be checked manually for each activation
+        if get_node(node, "ReLU") is not None:
+            self.attributes["activation"] = "Rectifier"
+        elif get_node(node, "LeakyReLU") is not None:
+            aidge_core.Log.fatal(f"{node.type()} activation is not yet supported.")
+            # TODO : Should not be checked manually for each activation    
 
 
 @ExportLibCpp.register("Sub", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
@@ -95,12 +92,11 @@ class SubAct(QSub):
         super().__init__(node, mem_info)
 
         # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "ReLU":
-                self.attributes["activation"] = "Rectifier"
-            elif n.type() == "LeakyReLU":
-                aidge_core.Log.fatal(f"{n.type()} activation is not yet supported.")
-                # TODO : Should not be checked manually for each activation
+        if get_node(node, "ReLU") is not None:
+            self.attributes["activation"] = "Rectifier"
+        elif get_node(node, "LeakyReLU") is not None:
+            aidge_core.Log.fatal(f"{node.type()} activation is not yet supported.")
+            # TODO : Should not be checked manually for each activation    
 
 
 @ExportLibCpp.register("Mul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
@@ -122,9 +118,8 @@ class MulAct(QMul):
         super().__init__(node, mem_info)
 
         # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "ReLU":
-                self.attributes["activation"] = "Rectifier"
-            elif n.type() == "LeakyReLU":
-                aidge_core.Log.fatal(f"{n.type()} activation is not yet supported.")
-                # TODO : Should not be checked manually for each activation
\ No newline at end of file
+        if get_node(node, "ReLU") is not None:
+            self.attributes["activation"] = "Rectifier"
+        elif get_node(node, "LeakyReLU") is not None:
+            aidge_core.Log.fatal(f"{node.type()} activation is not yet supported.")
+            # TODO : Should not be checked manually for each activation    
\ No newline at end of file
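[Review note] The same ReLU/LeakyReLU branch is now duplicated across ConvAct, AddAct, SubAct, MulAct, PoolAct and CppRescaling; the recurring TODO asks for exactly one shared check. A possible consolidation, sketched with hypothetical names (not part of this patch):

```python
import aidge_core
from aidge_core.export_utils import get_node

SUPPORTED_ACTIVATIONS = {"ReLU": "Rectifier"}   # activation type -> kernel name
UNSUPPORTED_ACTIVATIONS = ("LeakyReLU",)

def set_activation_attributes(export_node, node):
    """Detect the fused activation once instead of per operator class."""
    for act_type, kernel_name in SUPPORTED_ACTIVATIONS.items():
        if get_node(node, act_type) is not None:
            export_node.attributes["activation"] = kernel_name
            return
    for act_type in UNSUPPORTED_ACTIVATIONS:
        if get_node(node, act_type) is not None:
            aidge_core.Log.fatal(f"{act_type} activation is not yet supported.")
```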
diff --git a/aidge_export_cpp/operators/Fc.py b/aidge_export_cpp/operators/Fc.py
index 05eff821467478a22fc9d7885b78e6ff2c3f6e71..b83754be36bc8f79dc981cbeb41bb9a86aa4e50a 100644
--- a/aidge_export_cpp/operators/Fc.py
+++ b/aidge_export_cpp/operators/Fc.py
@@ -40,9 +40,7 @@ class QFC(FC):
         super().__init__(node, mem_info)
 
         # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "Quantizer":
-                set_scaling_attributes(self, n)
+        set_scaling_attributes(self, node)
 
         ## Set the scaling type
         if self.attributes["shift_value"] != 0:
diff --git a/aidge_export_cpp/operators/Pool.py b/aidge_export_cpp/operators/Pool.py
index 558ec1bd522fb0005c1d47fcac7ca84c4992fd7e..5b483bdeb8b634391f8edaa9340a901a100d4aad 100644
--- a/aidge_export_cpp/operators/Pool.py
+++ b/aidge_export_cpp/operators/Pool.py
@@ -1,5 +1,5 @@
 import aidge_core
-from aidge_core.export_utils import ExportNodeCpp
+from aidge_core.export_utils import ExportNodeCpp, get_node
 from aidge_export_cpp import ROOT
 from aidge_export_cpp import ExportLibCpp
 
@@ -37,9 +37,8 @@ class PadPool(Pool):
         super().__init__(node, mem_info)
 
         # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "Pad2D":
-                self.attributes["padding"] = n.get_operator().attr.begin_end_borders
+        pad_node = get_node(node, "Pad2D")
+        self.attributes["padding"] = pad_node.get_operator().attr.begin_end_borders
 
 
 class PoolAct(Pool):
@@ -47,12 +46,11 @@ class PoolAct(Pool):
         super().__init__(node, mem_info)
 
         # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "ReLU":
-                self.attributes["activation"] = "Rectifier"
-            elif n.type() == "LeakyReLU":
-                aidge_core.Log.fatal(f"{n.type()} activation is not yet supported.")
-                # TODO : Should not be checked manually for each activation
+        if get_node(node, "ReLU") is not None:
+            self.attributes["activation"] = "Rectifier"
+        elif get_node(node, "LeakyReLU") is not None:
+            aidge_core.Log.fatal(f"{node.type()} activation is not yet supported.")
+            # TODO : Should not be checked manually for each activation    
 
 
 @ExportLibCpp.register("MaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
@@ -61,17 +59,10 @@ class MaxPool(Pool):
         super().__init__(node, mem_info)
 
         # Browse the metaop to update kernel attributes
-        self.get_maxpool_attributes(node)
-
-    def get_maxpool_attributes(self, node):
-        if isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
-            for n in node.get_operator().get_micro_graph().get_nodes():
-                self.get_maxpool_attributes(n)
-
-        elif node.type() == "MaxPooling2D":
-            self.attributes["pool_type"] = "Max"
-            self.attributes["kernel_dims"] = node.get_operator().attr.kernel_dims
-            self.attributes["stride_dims"] = node.get_operator().attr.stride_dims
+        PoolNode = get_node(node, "MaxPooling2D")
+        self.attributes["pool_type"] = "Max"
+        self.attributes["kernel_dims"] = PoolNode.get_operator().attr.kernel_dims
+        self.attributes["stride_dims"] = PoolNode.get_operator().attr.stride_dims
 
 
 @ExportLibCpp.register_metaop("PadMaxPool", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
@@ -98,17 +89,10 @@ class AvgPool(Pool):
         super().__init__(node, mem_info)
 
         # Browse the metaop to update kernel attributes
-        self.get_avgpool_attributes(node)
-
-    def get_avgpool_attributes(self, node):
-        if isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
-            for n in node.get_operator().get_micro_graph().get_nodes():
-                self.get_avgpool_attributes(n)
-
-        elif node.type() == "AvgPooling2D":
-            self.attributes["pool_type"] = "Average"
-            self.attributes["kernel_dims"] = node.get_operator().attr.kernel_dims
-            self.attributes["stride_dims"] = node.get_operator().attr.stride_dims
+        PoolNode = get_node(node, "AvgPooling2D")
+        self.attributes["pool_type"] = "Average"
+        self.attributes["kernel_dims"] = PoolNode.get_operator().attr.kernel_dims
+        self.attributes["stride_dims"] = PoolNode.get_operator().attr.stride_dims
 
 
 @ExportLibCpp.register_metaop("PadAvgPool", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
@@ -134,17 +118,8 @@ class GlobalAvgPool(Pool):
     def __init__(self, node, mem_info):
         super().__init__(node, mem_info)
 
-        # Browse the metaop to update kernel attributes
-        self.get_globalavgpool_attributes(node)
-
-    def get_globalavgpool_attributes(self, node):
-        if isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
-            for n in node.get_operator().get_micro_graph().get_nodes():
-                self.get_globalavgpool_attributes(n)
-
-        elif node.type() == "GlobalAvgPooling":
-            self.attributes["pool_type"] = "Average"
-            self.attributes["kernel_dims"] = [self.attributes["in_width"][0], self.attributes["in_height"][0]]
+        self.attributes["pool_type"] = "Average"
+        self.attributes["kernel_dims"] = [self.attributes["in_width"][0], self.attributes["in_height"][0]]
 
 
 @ExportLibCpp.register_metaop("PadGlobalAvgPool", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
diff --git a/aidge_export_cpp/operators/Quantizer.py b/aidge_export_cpp/operators/Quantizer.py
index 8a6c1ae824b60682286cced7cdc2300cef5a1ba0..cadc3bf6d77ee91e16e864ec90af49da74820b7c 100644
--- a/aidge_export_cpp/operators/Quantizer.py
+++ b/aidge_export_cpp/operators/Quantizer.py
@@ -1,5 +1,5 @@
 import aidge_core
-from aidge_core.export_utils import ExportNodeCpp
+from aidge_core.export_utils import ExportNodeCpp, get_node
 from aidge_export_cpp import ROOT, ExportLibCpp, set_scaling_attributes
 
 @ExportLibCpp.register_metaop("Quantizer", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
@@ -12,14 +12,14 @@ class CppRescaling(ExportNodeCpp):
         self.attributes["rescaling"] = "NoScaling"
         self.attributes["shift_value"] = 0
         self.attributes["coef_value"] = 1
+        self.attributes["aidge_cmp"] = node.attributes().has_attr("aidge_cmp")
 
         # Browse the metaop to update kernel attributes
-        for n in node.get_operator().get_micro_graph().get_nodes():
-            if n.type() == "ReLU":
-                self.attributes["activation"] = "Rectifier"
-            elif n.type() == "LeakyReLU":
-                aidge_core.Log.fatal(f"{n.type()} activation is not yet supported.")
-                # TODO : Should not be checked manually for each activation
+        if get_node(node, "ReLU") is not None:
+            self.attributes["activation"] = "Rectifier"
+        elif get_node(node, "LeakyReLU") is not None:
+            aidge_core.Log.fatal(f"{node.type()} activation is not yet supported.")
+            # TODO : Should not be checked manually for each activation     
         
         # Set scaling attributes
         set_scaling_attributes(self, node)
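[Review note] Call sites now hand the fused metaop node straight to set_scaling_attributes, which locates the inner Quantizer itself and leaves the defaults untouched when none is present. A minimal usage sketch (wrapper name hypothetical; the 0/1 defaults follow CppRescaling above):

```python
from aidge_export_cpp import set_scaling_attributes

def quantizer_scaling(export_node, node):
    """Return (shift_value, coef_value) implied by a node's fused Quantizer."""
    export_node.attributes["shift_value"] = 0   # default: no bit-shift
    export_node.attributes["coef_value"] = 1    # default: no multiplicative coef
    set_scaling_attributes(export_node, node)
    return (export_node.attributes["shift_value"],
            export_node.attributes["coef_value"])
```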