diff --git a/aidge_export_cpp/operators.py b/aidge_export_cpp/operators.py
index 8bdc42121976aeb49c0c2a8699b9703a64a87fa8..9654a20d3be3c258c195f6f6f35b706f56ccdda7 100644
--- a/aidge_export_cpp/operators.py
+++ b/aidge_export_cpp/operators.py
@@ -37,8 +37,8 @@ def export_params(name: str,
 
 @ExportLibCpp.register("Producer", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
 class ProducerCPP(ExportNode):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.values = np.array(self.operator.get_output(0))
 
         if len(self.values.shape) == 4:  # Note: export in HWC
@@ -59,14 +59,14 @@ class ProducerCPP(ExportNode):
 # TODO: find a way to remove this dummy ExportNode
 @ExportLibCpp.register("Pad2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
 class Pad_ARMCortexM(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
+    def __init__(self, node, mem_info):
         raise NotImplementedError("Pad2D nodes are not implemented")
 
 
 @ExportLibCpp.register("ReLU", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class ReLUCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["activation"] = "Rectifier"
         self.attributes["rescaling"] = "NoScaling"
         self.config_template = str(
@@ -81,8 +81,8 @@ class ReLUCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class ConvCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         # No padding with Conv
         # Use PaddedConv to add padding attribute
         self.attributes["padding"] = [0, 0]
@@ -102,8 +102,8 @@ class ConvCPP(ExportNodeCpp):
 
 @ExportLibCpp.register_metaop("PaddedConv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class PaddedConvCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         # TODO find a way to retrieve attr for meta op
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad2D":
@@ -132,8 +132,8 @@ class PaddedConvCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("Add", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class AddCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["elemwise_op"] = "Add"
         self.attributes["activation"] = "Linear"
         self.attributes["rescaling"] = "NoScaling"
@@ -150,8 +150,8 @@ class AddCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("Sub", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class SubCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["elemwise_op"] = "Sub"
         self.attributes["activation"] = "Linear"
         self.attributes["rescaling"] = "NoScaling"
@@ -169,8 +169,8 @@ class SubCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("Mul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class MulCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["elemwise_op"] = "Mul"
         self.attributes["activation"] = "Linear"
         self.attributes["rescaling"] = "NoScaling"
@@ -187,8 +187,8 @@ class MulCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("MaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class MaxPoolCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
 
         # No padding with MaxPooling
         # Use PaddedMaxPooling to add padding attribute
@@ -210,8 +210,8 @@ class MaxPoolCPP(ExportNodeCpp):
 
 @ExportLibCpp.register_metaop("PaddedMaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class PaddedMaxPoolCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad2D":
                 self.attributes["padding"] = n.get_operator(
@@ -238,8 +238,8 @@ class PaddedMaxPoolCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("GlobalAveragePooling", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class GlobalAveragePoolCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
 
         self.attributes["stride_dims"] = [1, 1]
         # No padding with GlobalAveragePooling
@@ -265,8 +265,8 @@ class GlobalAveragePoolCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("FC", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class FcCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["activation"] = "Linear"
         self.attributes["rescaling"] = "NoScaling"
         self.config_template = str(
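
Note on the signature change above: every export node constructor in operators.py now takes only `(node, mem_info)`; the `is_input` / `is_output` flags are dropped from both the signature and the `super().__init__` call. A minimal sketch of registering a new operator export under the updated signature follows; the `Softmax` operator name, the attribute values, and the import paths are illustrative assumptions, not part of this diff.

```python
import aidge_core
from aidge_core.export_utils import ExportNodeCpp  # assumed import path
from aidge_export_cpp import ExportLibCpp          # assumed import path

@ExportLibCpp.register("Softmax", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class SoftmaxCPP(ExportNodeCpp):
    def __init__(self, node, mem_info):
        # Updated signature: is_input / is_output are no longer passed through.
        super().__init__(node, mem_info)
        self.attributes["activation"] = "Linear"
        self.attributes["rescaling"] = "NoScaling"
        # config_template, forward_template and kernel paths are omitted here;
        # they would follow the same pattern as the other nodes in this file.
```
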
diff --git a/aidge_export_cpp/templates/kernel_forward/_mem_offset.jinja b/aidge_export_cpp/templates/kernel_forward/_mem_offset.jinja
index 4f44773ae901606c0ace5fe9af39099acf722498..b85aae8f6cde13a9314b2ffef231f5dfbe416883 100644
--- a/aidge_export_cpp/templates/kernel_forward/_mem_offset.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/_mem_offset.jinja
@@ -1,3 +1,6 @@
+{% filter indent(width=4, first=False) %}
+
 {% for outidx in range(nb_out) -%}
 {{out_cdtype[outidx]}}* {{out_name[outidx]}} = ({{out_cdtype[outidx]}}*) mem + {{out_name[outidx]|upper}}_OFFSET;
 {% endfor %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja b/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja
index 61c6ba501f96336f79a065ae1a934b9ad1a1941b..9a39495e268361a16ee5215ecb15c3b3b9bd9479 100644
--- a/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja
@@ -1,6 +1,7 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 activation_forward<{{name|upper}}_NB_DATA,
                    {{name|upper}}_ACTIVATION>
                    ({{in_name[0]}}, {{out_name[0]}}, {{name|upper}}_RESCALING);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja b/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja
index 2714aa9b66c360b12bd67e14ae348c798dadbf1f..5a759b839cd0b04b3b82f8ca4cb8dd1b0201f4f7 100644
--- a/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja
@@ -1,8 +1,9 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 batchnorm_forward<{{ out_name[0]|upper }}_NB_OUTPUTS,
                   {{ out_name[0]|upper }}_OUT_HEIGHT,
                   {{ out_name[0]|upper }}_OUT_WIDTH,
                   {{name|upper}}_ACTIVATION>
                   ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{in_name[3]}}, {{in_name[4]}}, {{name|upper}}_EPSILON);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja b/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja
index 87d94337957a60517dd38c69ea5c59304ceb564d..421013b9590dabe6ee0ac12f969494913414a530 100644
--- a/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja
@@ -1,5 +1,5 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 convolution_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                     {{ in_name[0]|upper }}_IN_HEIGHT,
                     {{ in_name[0]|upper }}_IN_WIDTH,
@@ -17,3 +17,4 @@ convolution_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                     {{name|upper}}_ACTIVATION>
                     ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{name|upper}}_RESCALING);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja b/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja
index 9ccc67ecc078c33688924a2b986b1af0617bd561..f60d163dcbfd6eff75e6b66c37bc5e57cf2cfca9 100644
--- a/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja
@@ -1,7 +1,8 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 elemwise_forward<{{name|upper}}_NB_ELTS,
                  {{name|upper}}_ELEM_OP,
                  {{name|upper}}_ACTIVATION>
                  ({{out_name[0]}}, {{name|upper}}_RESCALING, {{in_name[0]}}, {{in_name[1]}});
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja b/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja
index be4665700254eed32c28f430f15ad86f15794136..cac97de22b20c4c8e0953e0d6cb2f40a18d0cb30 100644
--- a/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja
@@ -1,5 +1,5 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 fullyconnected_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                        {{ in_name[0]|upper }}_IN_HEIGHT,
                        {{ in_name[0]|upper }}_IN_WIDTH,
@@ -9,3 +9,4 @@ fullyconnected_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                        {{name|upper}}_ACTIVATION>
                        ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{name|upper}}_RESCALING);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja b/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja
index bbd514cc6a8df679182ee6f3305bd9d8a5c00e32..591fafeec996f9b7dc8f52a779cda5eea8a53eae 100644
--- a/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja
@@ -1,5 +1,6 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 leakyrelu_forward<{{name|upper}}_NB_DATA>
                    ({{input_name}}, {{output_name}}, {{name|upper}}_ALPHA);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja b/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja
index b4312f7514bdde3b26886caabfc5191fae174a33..c730923cfc4f8b534cab85a82b4fce5161a528de 100644
--- a/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja
@@ -1,5 +1,5 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 pooling_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                 {{ in_name[0]|upper }}_IN_HEIGHT,
                 {{ in_name[0]|upper }}_IN_WIDTH,
@@ -16,3 +16,4 @@ pooling_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                 {{name|upper}}_ACTIVATION>
                 ({{in_name[0]}}, {{out_name[0]}});
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
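
For reference, the `{% filter indent(width=4, first=False) %}` / `{% endfilter %}` pair added to each kernel_forward template indents every rendered line except the first by four spaces, so the emitted kernel call lines up inside the generated forward function body instead of starting at column zero. Below is a small standalone sketch of the filter's effect using plain jinja2, with an illustrative template string and variable names (not the real templates).

```python
from jinja2 import Template

# Illustrative stand-in for a kernel_forward template; the real templates
# also pull in _mem_offset.jinja and _save_outputs.jinja.
tpl = Template(
    "{% filter indent(width=4, first=False) %}\n"
    "{{ out_cdtype }}* {{ out_name }} = ({{ out_cdtype }}*) mem + {{ out_name|upper }}_OFFSET;\n"
    "activation_forward<{{ name|upper }}_NB_DATA,\n"
    "                   {{ name|upper }}_ACTIVATION>\n"
    "                   ({{ in_name }}, {{ out_name }}, {{ name|upper }}_RESCALING);\n"
    "{% endfilter %}"
)

print(tpl.render(name="relu0", out_cdtype="float",
                 in_name="conv0_output", out_name="relu0_output"))
# Every line after the first is prefixed with four spaces, matching the
# indentation of the forward() body in the generated C++ file.
```
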