From 534f5b3381aa7c1f9b088af1d28e97923de492cf Mon Sep 17 00:00:00 2001
From: Gallas Gaye <gallasko@gmail.com>
Date: Fri, 21 Feb 2025 10:38:36 +0100
Subject: [PATCH] fix: Add Matmul export operator back

The MatMul op had been removed as obsolete; this restores it.
Fixed wrong hyperparameter names in the Jinja templates.
Fixed the implementation in operators.py.
---
 aidge_export_cpp/operators.py                     | 15 +++++++++++++++
 .../templates/configuration/matmul_config.jinja   |  9 ++++++---
 .../templates/kernel_forward/matmul_forward.jinja |  6 +++++-
 3 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/aidge_export_cpp/operators.py b/aidge_export_cpp/operators.py
index 59ce94a..0790877 100644
--- a/aidge_export_cpp/operators.py
+++ b/aidge_export_cpp/operators.py
@@ -107,6 +107,21 @@ class ReshapeCPP(ExportNodeCpp):
             str(ROOT / "kernels" / "reshape.hpp"),
         ]
 
+@ExportLibCpp.register("MatMul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class MatMulCPP(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.attributes["activation"] = "Linear"
+        self.attributes["rescaling"] = "NoScaling"
+        self.config_template = str(
+            ROOT / "templates" / "configuration" / "matmul_config.jinja")
+        self.forward_template = str(
+            ROOT / "templates" / "kernel_forward" / "matmul_forward.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "kernels" / "matmul.hpp"),
+        ]
+
 @ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class ConvCPP(ExportNodeCpp):
     def __init__(self, node, mem_info):
diff --git a/aidge_export_cpp/templates/configuration/matmul_config.jinja b/aidge_export_cpp/templates/configuration/matmul_config.jinja
index fece988..38316f2 100644
--- a/aidge_export_cpp/templates/configuration/matmul_config.jinja
+++ b/aidge_export_cpp/templates/configuration/matmul_config.jinja
@@ -2,10 +2,13 @@
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
 
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
+
 {# For layer configuration -#}
-#define {{ name|upper }}_M {{ inputs_dims[0][0] }}
-#define {{ name|upper }}_K {{ inputs_dims[0][1] }}
-#define {{ name|upper }}_N {{ inputs_dims[1][1] }}
+#define {{ name|upper }}_M {{ in_dims[0][0] }}
+#define {{ name|upper }}_K {{ in_dims[0][1] }}
+#define {{ name|upper }}_N {{ in_dims[1][1] }}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
 static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
 
diff --git a/aidge_export_cpp/templates/kernel_forward/matmul_forward.jinja b/aidge_export_cpp/templates/kernel_forward/matmul_forward.jinja
index ce80ffd..64b3df3 100644
--- a/aidge_export_cpp/templates/kernel_forward/matmul_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/matmul_forward.jinja
@@ -1,5 +1,9 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
 matmul_forward<{{name|upper}}_M,
                {{name|upper}}_K,
                {{name|upper}}_N,
                {{name|upper}}_ACTIVATION>
-               ({{inputs1_name}}, {{inputs2_name}}, {{outputs_name}}, {{name|upper}}_RESCALING);
\ No newline at end of file
+               ({{in_name[0]}}, {{in_name[1]}}, {{out_name[0]}}, {{name|upper}}_RESCALING);
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
-- 
GitLab