Commit 98cb46ab authored by Cyril Moineau

Split elemwise operators in multiple files.

parent dcbb32b3
Showing 127 additions and 35 deletions
-void aidge_add_float32(float* input_a,
-                       float* input_b,
+void aidge_add_float32(const float* input_a,
+                       const float* input_b,
                        float* output,
-                       int dim_a[],
-                       int dim_b[],
-                       int output_Dim[],
+                       const int dim_a[],
+                       const int dim_b[],
+                       const int output_Dim[],
                        int size_dima,
                        int size_dimb,
                        int size_outputDim,
                        int output_size)
 {
     // Broadcast dims
     int ndim_a[size_outputDim];
     int ndim_b[size_outputDim];
     for (int i= 0; i<size_outputDim; i++){
         int idx = size_outputDim-size_dima;
@@ -96,4 +96,4 @@ void aidge_add_float32(float* input_a,
         }
     }
 }
\ No newline at end of file
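For reference, here is a minimal, hypothetical call-site sketch for the updated add kernel. The buffer values, shapes, and variable names below are illustrative assumptions, not part of this commit; the broadcast handling right-aligns the shorter input shape against the output shape, as the ndim_a/ndim_b padding above suggests.

/* Hypothetical example: add a (2, 3) tensor and a (3,) tensor broadcast over dim 0. */
float a[6]   = {1, 2, 3, 4, 5, 6};   /* input_a, shape (2, 3) */
float b[3]   = {10, 20, 30};         /* input_b, shape (3,)   */
float out[6];                        /* output,  shape (2, 3) */
const int dim_a[]   = {2, 3};
const int dim_b[]   = {3};
const int dim_out[] = {2, 3};

aidge_add_float32(a, b, out,
                  dim_a, dim_b, dim_out,
                  2, 1, 2,    /* size_dima, size_dimb, size_outputDim */
                  6);         /* output_size: total number of output elements */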
-void aidge_mul_float32(float* input_a,
-                       float* input_b,
+void aidge_mul_float32(const float* input_a,
+                       const float* input_b,
                        float* output,
-                       int dim_a[],
-                       int dim_b[],
-                       int output_Dim[],
+                       const int dim_a[],
+                       const int dim_b[],
+                       const int output_Dim[],
                        int size_dima,
                        int size_dimb,
                        int size_outputDim,
                        int output_size)
 {
     // Broadcast dims
     int ndim_a[size_outputDim];
     int ndim_b[size_outputDim];
     for (int i= 0; i<size_outputDim; i++){
         int idx = size_outputDim-size_dima;
@@ -96,4 +96,4 @@ void aidge_mul_float32(float* input_a,
         }
     }
 }
\ No newline at end of file
-void aidge_sub_float32(float* input_a,
-                       float* input_b,
+void aidge_sub_float32(const float* input_a,
+                       const float* input_b,
                        float* output,
                        unsigned int size)
 {
     for (unsigned int i = 0; i < size; ++i) {
         output[i] = input_a[i] - input_b[i];
     }
 }
\ No newline at end of file
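Unlike the add and mul kernels, aidge_sub_float32 does not broadcast: both inputs must already have the output shape, so only the flat element count is passed. A minimal usage sketch with illustrative values:

float a[4] = {4.f, 3.f, 2.f, 1.f};
float b[4] = {1.f, 1.f, 1.f, 1.f};
float out[4];
aidge_sub_float32(a, b, out, 4);   /* out becomes {3, 2, 1, 0} */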
{#- For name header -#}
#ifndef {{ name|upper }}_LAYER_H
#define {{ name|upper }}_LAYER_H
/* ElemWise - add layer */
{% include "./_def_io.jinja" %}
{% include "./_meminfo.jinja" %}
{# For layer configuration -#}
#define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
#define {{name|upper}}_IN_0_NB_DIMS {{ in_dims[0]|length}}
#define {{name|upper}}_IN_1_NB_DIMS {{ in_dims[1]|length}}
#define {{name|upper}}_OUT_0_NB_DIMS {{ out_dims[0]|length}}
static const int {{name|upper}}_IN_0_DIMS[] = { {{ in_dims[0]|join(", ") }} };
static const int {{name|upper}}_IN_1_DIMS[] = { {{ in_dims[1]|join(", ") }} };
static const int {{name|upper}}_OUT_0_DIMS[] = { {{ out_dims[0]|join(", ") }} };
#endif /* {{ name|upper }}_LAYER_H */
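As an illustration of what the configuration template above generates, here is a possible expansion for a node named add0 with in_dims = [[2, 3], [3]], out_dims = [[2, 3]] and out_size = [6]. The node name and shapes are assumptions, and the output of the _def_io.jinja and _meminfo.jinja includes is omitted since those files are not part of this diff.

#ifndef ADD0_LAYER_H
#define ADD0_LAYER_H
/* ElemWise - add layer */

#define ADD0_OUTPUTS_SIZE 6
#define ADD0_IN_0_NB_DIMS 2
#define ADD0_IN_1_NB_DIMS 1
#define ADD0_OUT_0_NB_DIMS 2
static const int ADD0_IN_0_DIMS[] = { 2, 3 };
static const int ADD0_IN_1_DIMS[] = { 3 };
static const int ADD0_OUT_0_DIMS[] = { 2, 3 };
#endif /* ADD0_LAYER_H */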
{#- For name header -#}
#ifndef {{ name|upper }}_LAYER_H
#define {{ name|upper }}_LAYER_H
/* ElemWise - mul layer */
{% include "./_def_io.jinja" %}
{% include "./_meminfo.jinja" %}
{# For layer configuration -#}
#define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
#define {{name|upper}}_IN_0_NB_DIMS {{ in_dims[0]|length}}
#define {{name|upper}}_IN_1_NB_DIMS {{ in_dims[1]|length}}
#define {{name|upper}}_OUT_0_NB_DIMS {{ out_dims[0]|length}}
static const int {{name|upper}}_IN_0_DIMS[] = { {{ in_dims[0]|join(", ") }} };
static const int {{name|upper}}_IN_1_DIMS[] = { {{ in_dims[1]|join(", ") }} };
static const int {{name|upper}}_OUT_0_DIMS[] = { {{ out_dims[0]|join(", ") }} };
#endif /* {{ name|upper }}_LAYER_H */
{#- For name header -#}
#ifndef {{ name|upper }}_LAYER_H
#define {{ name|upper }}_LAYER_H
/* ElemWise - sub layer */
{% include "./_def_io.jinja" %}
{% include "./_meminfo.jinja" %}
{# For layer configuration -#}
#define {{ name|upper }}_INPUTS_SIZE {{ in_size[0] }}
#define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
#define {{in_name[0]|upper}}_NB_DIM {{ in_dims[0]|length}}
#define {{in_name[1]|upper}}_NB_DIM {{ in_dims[1]|length}}
#define {{out_name[0]|upper}}_NB_DIM {{ out_dims[0]|length}}
static const int {{ in_name[0]|upper }}_DIMS[] = { {{ in_dims[0]|join(", ") }} };
static const int {{ in_name[1]|upper }}_DIMS[] = { {{ in_dims[1]|join(", ") }} };
static const int {{ out_name[0]|upper }}_DIMS[] = { {{ out_dims[0]|join(", ") }} };
#endif /* {{ name|upper }}_LAYER_H */
{% if not is_output %}
{{out_cdtype[0]}}* {{out_name[0]}} = ({{out_cdtype[0]}}*) mem + {{out_name[0]|upper}}_OFFSET;
{% endif %}
aidge_add_float32(
{{in_name[0]}},
{{in_name[1]}},
{{out_name[0]}},
{{name|upper}}_IN_0_DIMS,
{{name|upper}}_IN_1_DIMS,
{{name|upper}}_OUT_0_DIMS,
{{name|upper}}_IN_0_NB_DIMS,
{{name|upper}}_IN_1_NB_DIMS,
{{name|upper}}_OUT_0_NB_DIMS,
{{name|upper}}_OUTPUTS_SIZE);
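For the same hypothetical add0 node, the forward-call template above would expand roughly as follows, assuming the node is not a graph output, out_cdtype[0] is float, and the in/out names are add0_input_0, add0_input_1 and add0_output_0. The ADD0_OUTPUT_0_OFFSET macro would come from the _meminfo include, which this diff does not show.

float* add0_output_0 = (float*) mem + ADD0_OUTPUT_0_OFFSET;
aidge_add_float32(
    add0_input_0,
    add0_input_1,
    add0_output_0,
    ADD0_IN_0_DIMS,
    ADD0_IN_1_DIMS,
    ADD0_OUT_0_DIMS,
    ADD0_IN_0_NB_DIMS,
    ADD0_IN_1_NB_DIMS,
    ADD0_OUT_0_NB_DIMS,
    ADD0_OUTPUTS_SIZE);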
aidge_{{elemwise_op|lower}}_{{out_dtype[0]}} ({{in_name[0]}}, {{in_name[1]}}, {{out_name[0]}}, {{in_name[0]}}_DIMS, {{in_name[1]}}_DIMS, {{out_name[0]}}_DIMS,{{in_name[0]|upper}}_NB_DIM,{{in_name[1]|upper}}_NB_DIM,{{out_name[0]|upper}}_NB_DIM, {{name|upper}}_OUTPUTS_SIZE);
{% if not is_output %}
{{out_cdtype[0]}}* {{out_name[0]}} = ({{out_cdtype[0]}}*) mem + {{out_name[0]|upper}}_OFFSET;
{% endif %}
aidge_mul_float32(
{{in_name[0]}},
{{in_name[1]}},
{{out_name[0]}},
{{name|upper}}_IN_0_DIMS,
{{name|upper}}_IN_1_DIMS,
{{name|upper}}_OUT_0_DIMS,
{{name|upper}}_IN_0_NB_DIMS,
{{name|upper}}_IN_1_NB_DIMS,
{{name|upper}}_OUT_0_NB_DIMS,
{{name|upper}}_OUTPUTS_SIZE);
{% if not is_output %}
{{out_cdtype[0]}}* {{out_name[0]}} = ({{out_cdtype[0]}}*) mem + {{out_name[0]|upper}}_OFFSET;
{% endif %}
aidge_sub_float32({{in_name[0]}}, {{in_name[1]}}, {{out_name[0]}}, {{name|upper}}_OUTPUTS_SIZE);
@@ -322,10 +322,9 @@ class FC_ARMCortexM(ExportNodeCpp):
 class Add_ARMCortexM(ExportNodeCpp):
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
-        self.attributes["elemwise_op"] = "\"ADD\""
-        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "elemwise.jinja")
-        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "elemwise.jinja")
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "add.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "add.jinja")
         self.include_list = []
         self.kernels_to_copy = [
             str(ROOT / "_Aidge_Arm" / "kernels" / "Add" / "aidge_add_float32.h"),
@@ -333,13 +332,12 @@ class Add_ARMCortexM(ExportNodeCpp):
         ]
 @ExportLibAidgeARM.register("Sub", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
-class Add_ARMCortexM(ExportNodeCpp):
+class Sub_ARMCortexM(ExportNodeCpp):
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
-        self.attributes["elemwise_op"] = "\"SUB\""
-        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "elemwise.jinja")
-        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "elemwise.jinja")
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "sub.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "sub.jinja")
         self.include_list = []
         self.kernels_to_copy = [
             str(ROOT / "_Aidge_Arm" / "kernels" / "Sub" / "aidge_sub_float32.h"),
@@ -350,10 +348,9 @@ class Add_ARMCortexM(ExportNodeCpp):
 class Mul_ARMCortexM(ExportNodeCpp):
     def __init__(self, node, mem_info, is_input, is_output):
         super().__init__(node, mem_info, is_input, is_output)
-        self.attributes["elemwise_op"] = "\"MUL\""
-        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "elemwise.jinja")
-        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "elemwise.jinja")
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "mul.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "mul.jinja")
         self.include_list = []
         self.kernels_to_copy = [
             str(ROOT / "_Aidge_Arm" / "kernels" / "Mul" / "aidge_mul_float32.h"),
...