Skip to content
Snippets Groups Projects
Commit 05fbdfb4 authored by Cyril Moineau's avatar Cyril Moineau
Browse files

Merge branch 'user/cguillon/dev/matmul' into 'dev'

[MatMul] Add implementation of MatMul export as MatMulCPP

See merge request !18
parents 17efa8fb d68ceb49
No related branches found
No related tags found
2 merge requests!18[MatMul] Add implementation of MatMul export as MatMulCPP,!12Feat/release pip
#ifndef __AIDGE_EXPORT_CPP_KERNELS_MATMUL__
#define __AIDGE_EXPORT_CPP_KERNELS_MATMUL__
#include "network/typedefs.hpp"
#include "kernels/activation.hpp"
// Naive row-major GEMM kernel with fused activation:
//   outputs = activation(inputs1 x inputs2)
// where inputs1 is (M x K), inputs2 is (K x N) and outputs is (M x N).
// Each accumulated dot product is passed through
// activation_forward_value together with the rescaling policy.
template<int M,
         int K,
         int N,
         ActivationFunction_T ACTIVATION,
         typename Input_T, typename Output_T,
         typename Rescaling_T>
__attribute__((always_inline)) inline
void matmul_forward (
    const Input_T* __restrict inputs1,
    const Input_T* __restrict inputs2,
    Output_T* __restrict outputs,
    const Rescaling_T& __restrict rescaling)
{
    for (int row = 0; row < M; ++row) {
        // Hoist the row bases: both are loop-invariant for the inner loops.
        const Input_T* const lhsRow = inputs1 + K * row;
        Output_T* const outRow = outputs + N * row;

        for (int col = 0; col < N; ++col) {
            Output_T acc = Output_T(0);

            for (int i = 0; i < K; ++i) {
                acc += lhsRow[i] * inputs2[N * i + col];
            }

            // Second argument (output index) is unused by the activations
            // exercised here, hence the placeholder 0.
            outRow[col] = activation_forward_value<Output_T>(
                acc, 0/*not applicable*/, ACTIVATION, rescaling);
        }
    }
}
#endif // __AIDGE_EXPORT_CPP_KERNELS_MATMUL__
......@@ -453,3 +453,68 @@ class FcCPP(ExportNode):
))
return list_actions
@operator_register("MatMul")
class MatMulCPP(ExportNode):
    """Export node emitting the C++ `matmul_forward` kernel for the aidge MatMul operator.

    Normalizes the two input shapes to 2D (m, k) x (k, n), validates them
    against the recorded output shape, then generates the per-layer
    configuration header and the kernel invocation snippet.
    """

    def __init__(self, node):
        super().__init__(node)

        dims0, dims1, outdims = [tuple(x) for x in [self.inputs_dims[0], self.inputs_dims[1], self.outputs_dims[0]]]

        # TODO: MatMul aidge operator supports N-D multi broadcast dimensions where N > 2
        assert len(dims0) <= 2 and len(dims1) <= 2, (
            f"MatMul export does not yet support dimensions above 2D: inputs shapes are: {dims0}, {dims1}")

        # Cast to at least 1D
        # Note that from MatMul::forwardDims(), scalar inputs are supported
        # which is actually more general than np.matmul
        dims0 = dims0 if len(dims0) >= 1 else (1, 1)
        dims1 = dims1 if len(dims1) >= 1 else (1, 1)

        # Cast to at least 2D: a lone vector is a row on the left-hand side
        # and a column on the right-hand side, matching np.matmul promotion.
        dims0 = dims0 if len(dims0) >= 2 else (1, dims0[0])
        dims1 = dims1 if len(dims1) >= 2 else (dims1[0], 1)

        assert dims0[1] == dims1[0], (
            f"MatMul input dimensions do not match, expected (m, k), (k, n): inputs shapes are: {dims0}, {dims1}")

        outdims = outdims if len(outdims) > 0 else (1, 1)
        assert outdims == (dims0[0], dims1[1]), (
            f"MatMul output dimensions do not match, expected (m, n) for inputs (m, k) (k, n): output shape is: {outdims}, inputs shapes are: {dims0}, {dims1}")

        # Normalized 2D shapes consumed by the configuration template.
        self.matmul_inputs_dims = dims0, dims1
        self.matmul_output_dims = outdims

    def export(self, export_folder:Path, list_configs:list):
        """Copy the kernel headers into the export tree and generate the layer config.

        Returns the (mutated) list of configuration includes.
        """
        # copyfile requires a full destination *file* path, not a directory:
        # passing the "kernels" directory raises IsADirectoryError.
        copyfile(str(ROOT / "kernels" / "matmul.hpp"),
                 str(export_folder / "include" / "kernels" / "matmul.hpp"))
        copyfile(str(ROOT / "kernels" / "activation.hpp"),
                 str(export_folder / "include" / "kernels" / "activation.hpp"))

        # Add to config list the include of configurations
        list_configs.append("kernels/matmul.hpp")
        list_configs.append(f"layers/{self.name}.h")

        # Export configuration file
        generate_file(
            str(export_folder / "layers" / f"{self.name}.h"),
            str(ROOT / "templates" / "configuration" / "matmul_config.jinja"),
            name=self.name,
            inputs_dims=self.matmul_inputs_dims,
            output_dims=self.matmul_output_dims,
            # No fused activation/rescaling for a plain MatMul.
            activation="Linear",
            rescaling="NoScaling",
        )

        return list_configs

    def forward(self, list_actions:list):
        """Append the C++ kernel-call snippet for this layer to `list_actions`."""
        if not self.is_last:
            list_actions.append(set_up_output(self.name, "float"))

        list_actions.append(generate_str(
            str(ROOT / "templates" / "kernel_forward" / "matmul_forward.jinja"),
            name=self.name,
            # Fall back to synthesized names when a producer node is absent.
            inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
            inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
            outputs_name=self.name
        ))
        return list_actions
{#- Per-layer configuration header for matmul_forward: exposes the
    (M, K, N) row-major GEMM dimensions, the activation enumerator and
    the rescaling object consumed by the kernel-call template. -#}
#ifndef {{ name|upper }}_LAYER_H
#define {{ name|upper }}_LAYER_H

{# M = rows of input 0, K = shared dim, N = cols of input 1 -#}
#define {{ name|upper }}_M {{ inputs_dims[0][0] }}
#define {{ name|upper }}_K {{ inputs_dims[0][1] }}
#define {{ name|upper }}_N {{ inputs_dims[1][1] }}
#define {{ name|upper }}_ACTIVATION {{ activation }}
static const {{ rescaling }} {{ name|upper }}_RESCALING = {};

{#- Calculate sizes #}

#endif /* {{ name|upper }}_LAYER_H */
{#- Invocation snippet for the matmul_forward kernel; template arguments
    reference the macros from the generated per-layer config header.
    Whitespace-trimmed comment: rendered output is unchanged. -#}
matmul_forward<{{name|upper}}_M,
               {{name|upper}}_K,
               {{name|upper}}_N,
               {{name|upper}}_ACTIVATION>
               ({{inputs1_name}}, {{inputs2_name}}, {{outputs_name}}, {{name|upper}}_RESCALING);
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment