From d6cf46746df4ed1f091a06eed69e15fc561e3476 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Fri, 11 Oct 2024 12:00:06 +0000
Subject: [PATCH] Remove is_input and is_output from node_export.

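The is_input and is_output arguments are dropped from every export node
wrapper in aidge_export_cpp/operators.py: each __init__ now takes only
(node, mem_info) and forwards exactly that to super().__init__().

The forward kernel templates are adjusted at the same time: each of the
touched templates (activation, batchnorm, convolution, elemwise,
fullyconnected, leakyrelu, pooling) is wrapped in a Jinja
{% filter indent(width=4, first=False) %} ... {% endfilter %} block,
_mem_offset.jinja gets the same wrapper, and the blank line that used to
follow the _mem_offset include is removed.

Illustrative call site (hypothetical, assuming the usual export flow in
which the scheduler supplies the node and its memory information):

    # before this patch
    op = ReLUCPP(node, mem_info, is_input, is_output)
    # after this patch
    op = ReLUCPP(node, mem_info)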
---
 aidge_export_cpp/operators.py                 | 46 +++++++++----------
 .../kernel_forward/_mem_offset.jinja          |  3 ++
 .../kernel_forward/activation_forward.jinja   |  3 +-
 .../kernel_forward/batchnorm_forward.jinja    |  3 +-
 .../kernel_forward/convolution_forward.jinja  |  3 +-
 .../kernel_forward/elemwise_forward.jinja     |  3 +-
 .../fullyconnected_forward.jinja              |  3 +-
 .../kernel_forward/leakyrelu_forward.jinja    |  3 +-
 .../kernel_forward/pooling_forward.jinja      |  3 +-
 9 files changed, 40 insertions(+), 30 deletions(-)

diff --git a/aidge_export_cpp/operators.py b/aidge_export_cpp/operators.py
index 8bdc421..9654a20 100644
--- a/aidge_export_cpp/operators.py
+++ b/aidge_export_cpp/operators.py
@@ -37,8 +37,8 @@ def export_params(name: str,
 
 @ExportLibCpp.register("Producer", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
 class ProducerCPP(ExportNode):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.values = np.array(self.operator.get_output(0))
 
         if len(self.values.shape) == 4:  # Note: export in HWC
@@ -59,14 +59,14 @@ class ProducerCPP(ExportNode):
 # TODO : find a way to remove this dummy exportnode
 @ExportLibCpp.register("Pad2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
 class Pad_ARMCortexM(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
+    def __init__(self, node, mem_info):
         raise NotImplementedError("Pad2D nodes are not implemented")
 
 
 @ExportLibCpp.register("ReLU", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class ReLUCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["activation"] = "Rectifier"
         self.attributes["rescaling"] = "NoScaling"
         self.config_template = str(
@@ -81,8 +81,8 @@ class ReLUCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class ConvCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         # No padding with Conv
         # Use PaddedConv to add padding attribute
         self.attributes["padding"] = [0, 0]
@@ -102,8 +102,8 @@ class ConvCPP(ExportNodeCpp):
 
 @ExportLibCpp.register_metaop("PaddedConv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class PaddedConvCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         # TODO find a way to retrieve attr for meta op
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad2D":
@@ -132,8 +132,8 @@ class PaddedConvCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("Add", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class AddCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["elemwise_op"] = "Add"
         self.attributes["activation"] = "Linear"
         self.attributes["rescaling"] = "NoScaling"
@@ -150,8 +150,8 @@ class AddCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("Sub", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class SubCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["elemwise_op"] = "Sub"
         self.attributes["activation"] = "Linear"
         self.attributes["rescaling"] = "NoScaling"
@@ -169,8 +169,8 @@ class SubCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("Mul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class MulCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["elemwise_op"] = "Mul"
         self.attributes["activation"] = "Linear"
         self.attributes["rescaling"] = "NoScaling"
@@ -187,8 +187,8 @@ class MulCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("MaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class MaxPoolCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
 
         # No padding with MaxPooling
         # Use PaddedMaxPooling to add padding attribute
@@ -210,8 +210,8 @@ class MaxPoolCPP(ExportNodeCpp):
 
 @ExportLibCpp.register_metaop("PaddedMaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class PaddedMaxPoolCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad2D":
                 self.attributes["padding"] = n.get_operator(
@@ -238,8 +238,8 @@ class PaddedMaxPoolCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("GlobalAveragePooling", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class GlobalAveragePoolCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
 
         self.attributes["stride_dims"] = [1, 1]
         # No padding with MaxPooling
@@ -265,8 +265,8 @@ class GlobalAveragePoolCPP(ExportNodeCpp):
 
 @ExportLibCpp.register("FC", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class FcCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["activation"] = "Linear"
         self.attributes["rescaling"] = "NoScaling"
         self.config_template = str(
diff --git a/aidge_export_cpp/templates/kernel_forward/_mem_offset.jinja b/aidge_export_cpp/templates/kernel_forward/_mem_offset.jinja
index 4f44773..b85aae8 100644
--- a/aidge_export_cpp/templates/kernel_forward/_mem_offset.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/_mem_offset.jinja
@@ -1,3 +1,6 @@
+{% filter indent(width=4, first=False) %}
+
 {% for outidx in range(nb_out) -%}
 {{out_cdtype[outidx]}}* {{out_name[outidx]}} = ({{out_cdtype[outidx]}}*) mem + {{out_name[outidx]|upper}}_OFFSET;
 {% endfor %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja b/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja
index 61c6ba5..9a39495 100644
--- a/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/activation_forward.jinja
@@ -1,6 +1,7 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 activation_forward<{{name|upper}}_NB_DATA,
                    {{name|upper}}_ACTIVATION>
                    ({{in_name[0]}}, {{out_name[0]}}, {{name|upper}}_RESCALING);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja b/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja
index 2714aa9..5a759b8 100644
--- a/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/batchnorm_forward.jinja
@@ -1,8 +1,9 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 batchnorm_forward<{{ out_name[0]|upper }}_NB_OUTPUTS,
                   {{ out_name[0]|upper }}_OUT_HEIGHT,
                   {{ out_name[0]|upper }}_OUT_WIDTH,
                   {{name|upper}}_ACTIVATION>
                   ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{in_name[3]}}, {{in_name[4]}}, {{name|upper}}_EPSILON);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja b/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja
index 87d9433..421013b 100644
--- a/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/convolution_forward.jinja
@@ -1,5 +1,5 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 convolution_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                     {{ in_name[0]|upper }}_IN_HEIGHT,
                     {{ in_name[0]|upper }}_IN_WIDTH,
@@ -17,3 +17,4 @@ convolution_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                     {{name|upper}}_ACTIVATION>
                     ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{name|upper}}_RESCALING);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja b/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja
index 9ccc67e..f60d163 100644
--- a/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/elemwise_forward.jinja
@@ -1,7 +1,8 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 elemwise_forward<{{name|upper}}_NB_ELTS,
                  {{name|upper}}_ELEM_OP,
                  {{name|upper}}_ACTIVATION>
                  ({{out_name[0]}}, {{name|upper}}_RESCALING, {{in_name[0]}}, {{in_name[1]}});
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja b/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja
index be46657..cac97de 100644
--- a/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/fullyconnected_forward.jinja
@@ -1,5 +1,5 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 fullyconnected_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                        {{ in_name[0]|upper }}_IN_HEIGHT,
                        {{ in_name[0]|upper }}_IN_WIDTH,
@@ -9,3 +9,4 @@ fullyconnected_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                        {{name|upper}}_ACTIVATION>
                        ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{name|upper}}_RESCALING);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja b/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja
index bbd514c..591fafe 100644
--- a/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/leakyrelu_forward.jinja
@@ -1,5 +1,6 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 leakyrelu_forward<{{name|upper}}_NB_DATA>
                    ({{input_name}}, {{output_name}}, {{name|upper}}_ALPHA);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja b/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja
index b4312f7..c730923 100644
--- a/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/pooling_forward.jinja
@@ -1,5 +1,5 @@
+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-
 pooling_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                 {{ in_name[0]|upper }}_IN_HEIGHT,
                 {{ in_name[0]|upper }}_IN_WIDTH,
@@ -16,3 +16,4 @@ pooling_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                 {{name|upper}}_ACTIVATION>
                 ({{in_name[0]}}, {{out_name[0]}});
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
-- 
GitLab