Skip to content
Snippets Groups Projects
Commit 534f5b33 authored by Gallas Gaye's avatar Gallas Gaye Committed by Gallas Gaye
Browse files

fix: Add Matmul export operator back

The MatMul op had been removed and was obsolete.
Fixed wrong hyperparameter names in the Jinja templates
Fixed impl in operator.py
parent f1d56404
No related branches found
No related tags found
2 merge requests!39Update 0.2.1 -> 0.3.0,!31Add missing operators for basic onnx model exporting
...@@ -107,6 +107,21 @@ class ReshapeCPP(ExportNodeCpp): ...@@ -107,6 +107,21 @@ class ReshapeCPP(ExportNodeCpp):
str(ROOT / "kernels" / "reshape.hpp"), str(ROOT / "kernels" / "reshape.hpp"),
] ]
@ExportLibCpp.register("MatMul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class MatMulCPP(ExportNodeCpp):
    """C++ export implementation for the MatMul operator (float32 I/O)."""

    def __init__(self, node, mem_info):
        super().__init__(node, mem_info)
        # MatMul has no fused activation or rescaling of its own, but the
        # kernel template expects both attributes — set neutral values.
        self.attributes["activation"] = "Linear"
        self.attributes["rescaling"] = "NoScaling"
        templates_dir = ROOT / "templates"
        self.config_template = str(templates_dir / "configuration" / "matmul_config.jinja")
        self.forward_template = str(templates_dir / "kernel_forward" / "matmul_forward.jinja")
        self.include_list = []
        # Kernel source copied verbatim into the generated export tree.
        self.kernels_to_copy = [
            str(ROOT / "kernels" / "matmul.hpp"),
        ]
@ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32))) @ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class ConvCPP(ExportNodeCpp): class ConvCPP(ExportNodeCpp):
def __init__(self, node, mem_info): def __init__(self, node, mem_info):
......
...@@ -2,10 +2,13 @@ ...@@ -2,10 +2,13 @@
#ifndef {{ name|upper }}_LAYER_H #ifndef {{ name|upper }}_LAYER_H
#define {{ name|upper }}_LAYER_H #define {{ name|upper }}_LAYER_H
{% include "./_def_io.jinja" %}
{% include "./_meminfo.jinja" %}
{# For layer configuration -#} {# For layer configuration -#}
#define {{ name|upper }}_M {{ inputs_dims[0][0] }} #define {{ name|upper }}_M {{ in_dims[0][0] }}
#define {{ name|upper }}_K {{ inputs_dims[0][1] }} #define {{ name|upper }}_K {{ in_dims[0][1] }}
#define {{ name|upper }}_N {{ inputs_dims[1][1] }} #define {{ name|upper }}_N {{ in_dims[1][1] }}
#define {{ name|upper }}_ACTIVATION {{ activation }} #define {{ name|upper }}_ACTIVATION {{ activation }}
static const {{ rescaling }} {{ name|upper }}_RESCALING = {}; static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
......
{% filter indent(width=4, first=False) %}
{% include "./_mem_offset.jinja" %}
matmul_forward<{{name|upper}}_M, matmul_forward<{{name|upper}}_M,
{{name|upper}}_K, {{name|upper}}_K,
{{name|upper}}_N, {{name|upper}}_N,
{{name|upper}}_ACTIVATION> {{name|upper}}_ACTIVATION>
({{inputs1_name}}, {{inputs2_name}}, {{outputs_name}}, {{name|upper}}_RESCALING); ({{in_name[0]}}, {{in_name[1]}}, {{out_name[0]}}, {{name|upper}}_RESCALING);
\ No newline at end of file {% include "./_save_outputs.jinja" %}
{% endfilter %}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment