Commit fd867582 authored by Maxence Naud

Merge branch 'dev' into 'main'

v0.2.0

See merge request eclipse/aidge/aidge_export_cpp!27
parents 0d5c0581 05400e59
Showing 97 additions and 144 deletions
{#- For name header -#}
#ifndef {{ name|upper }}_LAYER_H
#define {{ name|upper }}_LAYER_H
#include "kernels/rescaling.hpp"
{# For layer configuration -#}
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
#define {{ name|upper }}_ACTIVATION {{ activation }}
static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
{#- Calculate sizes #}
-{%- set weights_size = output_dims[0] * input_dims[0] * input_dims[1] * input_dims[2] %}
+{%- set weights_size = out_chan[0] * in_chan[0] * in_height[0] * in_width[0] %}
#define {{ name|upper }}_WEIGHTS_SIZE {{ weights_size }}
-#define {{ name|upper }}_BIASES_SIZE {{ output_dims[0] }}
+#define {{ name|upper }}_BIASES_SIZE {{ out_chan[0] }}
#endif /* {{ name|upper }}_LAYER_H */
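For orientation, rendering the old (pre-v0.2.0) variant of this template for a hypothetical fully-connected layer named "fc1" (64 input channels of 7x7, 10 outputs) would produce a header along the following lines. The layer name, dimensions, and the activation/rescaling identifiers are illustrative placeholders, not values taken from the repository:

// Hypothetical rendering of the config template above for a layer "fc1".
// All names and numbers are illustrative.
#ifndef FC1_LAYER_H
#define FC1_LAYER_H

#include "kernels/rescaling.hpp"

#define FC1_NB_CHANNELS 64
#define FC1_CHANNELS_HEIGHT 7
#define FC1_CHANNELS_WIDTH 7
#define FC1_NB_OUTPUTS 10
#define FC1_OUTPUTS_HEIGHT 1
#define FC1_OUTPUTS_WIDTH 1

#define FC1_ACTIVATION Linear                   // placeholder activation tag
static const RescalingNone FC1_RESCALING = {};  // placeholder rescaling type

#define FC1_WEIGHTS_SIZE 31360                  // 10 * 64 * 7 * 7
#define FC1_BIASES_SIZE 10

#endif /* FC1_LAYER_H */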
@@ -3,6 +3,8 @@
#define {{ name|upper }}_LAYER_H
{# For layer configuration -#}
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
#define {{ name|upper }}_NB_DATA {{ nb_data }}
#define {{ name|upper }}_ALPHA {{ alpha }}
...
@@ -3,18 +3,14 @@
#define {{ name|upper }}_LAYER_H
{# For layer configuration -#}
-#define {{ name|upper }}_NB_CHANNELS {{ input_dims[0] }}
-#define {{ name|upper }}_CHANNELS_HEIGHT {{ input_dims[1] }}
-#define {{ name|upper }}_CHANNELS_WIDTH {{ input_dims[2] }}
-#define {{ name|upper }}_NB_OUTPUTS {{ output_dims[0] }}
-#define {{ name|upper }}_OUTPUTS_HEIGHT {{ output_dims[1] }}
-#define {{ name|upper }}_OUTPUTS_WIDTH {{ output_dims[2] }}
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
#define {{ name|upper }}_PADDING_X {{ padding[0] }}
-#define {{ name|upper }}_STRIDE_Y {{ stride[1] }}
-#define {{ name|upper }}_STRIDE_X {{ stride[0] }}
-#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel[1] }}
-#define {{ name|upper }}_KERNEL_WIDTH {{ kernel[0] }}
+#define {{ name|upper }}_STRIDE_Y {{ stride_dims[1] }}
+#define {{ name|upper }}_STRIDE_X {{ stride_dims[0] }}
+#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel_dims[1] }}
+#define {{ name|upper }}_KERNEL_WIDTH {{ kernel_dims[0] }}
#define {{ name|upper }}_POOLING_TYPE {{ pool_type }}
#define {{ name|upper }}_ACTIVATION {{ activation }}
...
{% filter indent(width=4, first=False) %}
{% for outidx in range(nb_out) -%}
{{out_cdtype[outidx]}}* {{out_name[outidx]}} = ({{out_cdtype[outidx]}}*) mem + {{out_name[outidx]|upper}}_OFFSET;
{% endfor %}
{% endfilter %}
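The snippet above is what gives each operator its output pointers: the exporter plans every tensor into one statically allocated pool, and each output becomes a typed pointer at a precomputed offset. A minimal self-contained sketch of that pattern, with made-up names, sizes, and offsets; note that, as generated, the cast binds before the addition, so the offset is counted in elements of the casted type:

#include <cstdint>

// Illustrative values; the real MEMORY_SIZE and offsets come from the
// exporter's memory planner (see the mem_info template later in this diff).
#define MEMORY_SIZE 1024
#define CONV1_OUTPUT_OFFSET 0    // in float elements, given the cast below
#define FC1_OUTPUT_OFFSET 128    // 128 floats = 512 bytes into the pool

static int8_t mem[MEMORY_SIZE];

void forward_sketch() {
    // Mirrors the generated code: cast first, then offset, so the
    // pointer advances in units of the target type.
    float* conv1_output = (float*) mem + CONV1_OUTPUT_OFFSET;
    float* fc1_output   = (float*) mem + FC1_OUTPUT_OFFSET;

    conv1_output[0] = 0.0f;  // kernels write their results here
    fc1_output[0]   = 0.0f;
}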
/*
#ifdef SAVE_OUTPUTS
{% for outidx in range(nb_out) -%}
FILE* {{out_name[outidx]|upper}}_STREAM = fopen("outputs/{{out_name[outidx]}}.txt", "w");
saveOutputs<{{out_cdtype[outidx]}}>(
{{out_name[outidx]|upper}}_NB_OUTPUTS,
{{out_name[outidx]|upper}}_OUT_HEIGHT,
{{out_name[outidx]|upper}}_OUT_WIDTH,
{{out_name[outidx]|upper}}_CONT_OFFSET,
{{out_name[outidx]|upper}}_CONT_SIZE,
{{out_name[outidx]|upper}}_WRAP_OFFSET,
{{out_name[outidx]|upper}}_WRAP_SIZE,
{{out_name[outidx]|upper}}_STRIDE,
{{out_name[outidx]}},
{{out_name[outidx]|upper}}_STREAM,
Format::{{out_format[outidx]}});
fclose({{out_name[outidx]|upper}}_STREAM);
{% endfor %}
#endif
*/
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
activation_forward<{{name|upper}}_NB_DATA,
                   {{name|upper}}_ACTIVATION>
-({{input_name}}, {{output_name}}, {{name|upper}}_RESCALING);
\ No newline at end of file
+({{in_name[0]}}, {{out_name[0]}}, {{name|upper}}_RESCALING);
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
-batchnorm_forward<{{name|upper}}_NB_OUTPUTS,
-                  {{name|upper}}_OUTPUTS_HEIGHT,
-                  {{name|upper}}_OUTPUTS_WIDTH,
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+batchnorm_forward<{{ out_name[0]|upper }}_NB_OUTPUTS,
+                  {{ out_name[0]|upper }}_OUT_HEIGHT,
+                  {{ out_name[0]|upper }}_OUT_WIDTH,
                  {{name|upper}}_ACTIVATION>
-({{input_name}}, {{output_name}}, {{biases_name}}, {{variances_name}}, {{means_name}}, {{scales_name}}, {{name|upper}}_EPSILON);
\ No newline at end of file
+({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{in_name[3]}}, {{in_name[4]}}, {{name|upper}}_EPSILON);
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
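This hunk also shows the release's main interface change: kernel templates no longer receive dedicated variables (input_name, biases_name, variances_name, ...) but index generic in_name/out_name arrays, so for batch norm in_name[1] through in_name[4] carry what the old template called biases, variances, means, and scales. Rendered for a hypothetical layer "bn1" (all identifiers illustrative; the macro prefixes depend on the tensor names produced by _def_io.jinja), the generated call would look roughly like:

// Hypothetical rendered output of the batch-norm template above.
batchnorm_forward<BN1_OUTPUT_NB_OUTPUTS,
                  BN1_OUTPUT_OUT_HEIGHT,
                  BN1_OUTPUT_OUT_WIDTH,
                  BN1_ACTIVATION>
    (bn1_input, bn1_output,
     bn1_biases,     // in_name[1]
     bn1_variances,  // in_name[2]
     bn1_means,      // in_name[3]
     bn1_scales,     // in_name[4]
     BN1_EPSILON);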
-convolution_forward<{{name|upper}}_NB_CHANNELS,
-                    {{name|upper}}_CHANNELS_HEIGHT,
-                    {{name|upper}}_CHANNELS_WIDTH,
-                    {{name|upper}}_NB_OUTPUTS,
-                    {{name|upper}}_OUTPUTS_HEIGHT,
-                    {{name|upper}}_OUTPUTS_WIDTH,
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+convolution_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
+                    {{ in_name[0]|upper }}_IN_HEIGHT,
+                    {{ in_name[0]|upper }}_IN_WIDTH,
+                    {{ out_name[0]|upper }}_NB_OUTPUTS,
+                    {{ out_name[0]|upper }}_OUT_HEIGHT,
+                    {{ out_name[0]|upper }}_OUT_WIDTH,
                    {{name|upper}}_PADDING_Y,
                    {{name|upper}}_PADDING_X,
                    {{name|upper}}_STRIDE_Y,
@@ -13,4 +15,6 @@ convolution_forward<{{name|upper}}_NB_CHANNELS,
                    {{name|upper}}_KERNEL_HEIGHT,
                    {{name|upper}}_KERNEL_WIDTH,
                    {{name|upper}}_ACTIVATION>
-({{input_name}}, {{output_name}}, {{weights_name}}, {{biases_name}}, {{name|upper}}_RESCALING);
+({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{name|upper}}_RESCALING);
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
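Rendered, the new convolution call reads its geometry from macros named after the input and output tensors rather than after the layer, and receives weights and biases positionally as in_name[1] and in_name[2]. A sketch with hypothetical names (conv layer "conv1", input tensor "input0", output tensor "conv1_output"):

// Hypothetical rendered output of the convolution template above.
convolution_forward<INPUT0_NB_CHANNELS,
                    INPUT0_IN_HEIGHT,
                    INPUT0_IN_WIDTH,
                    CONV1_OUTPUT_NB_OUTPUTS,
                    CONV1_OUTPUT_OUT_HEIGHT,
                    CONV1_OUTPUT_OUT_WIDTH,
                    CONV1_PADDING_Y,
                    CONV1_PADDING_X,
                    CONV1_STRIDE_Y,
                    // ... parameters collapsed in the diff above ...
                    CONV1_KERNEL_HEIGHT,
                    CONV1_KERNEL_WIDTH,
                    CONV1_ACTIVATION>
    (input0, conv1_output, conv1_weights, conv1_biases, CONV1_RESCALING);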
{% filter indent(width=4, first=False) %}
{% include "./_mem_offset.jinja" %}
elemwise_forward<{{name|upper}}_NB_ELTS,
                 {{name|upper}}_ELEM_OP,
                 {{name|upper}}_ACTIVATION>
-({{output_name}}, {{name|upper}}_RESCALING, {{inputs1_name}}, {{inputs2_name}});
+({{out_name[0]}}, {{name|upper}}_RESCALING, {{in_name[0]}}, {{in_name[1]}});
{% include "./_save_outputs.jinja" %}
{% endfilter %}
-fullyconnected_forward<{{name|upper}}_NB_CHANNELS,
-                       {{name|upper}}_CHANNELS_HEIGHT,
-                       {{name|upper}}_CHANNELS_WIDTH,
-                       {{name|upper}}_NB_OUTPUTS,
-                       {{name|upper}}_OUTPUTS_HEIGHT,
-                       {{name|upper}}_OUTPUTS_WIDTH,
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+fullyconnected_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
+                       {{ in_name[0]|upper }}_IN_HEIGHT,
+                       {{ in_name[0]|upper }}_IN_WIDTH,
+                       {{ out_name[0]|upper }}_NB_OUTPUTS,
+                       {{ out_name[0]|upper }}_OUT_HEIGHT,
+                       {{ out_name[0]|upper }}_OUT_WIDTH,
                       {{name|upper}}_ACTIVATION>
-({{inputs_name}}, {{outputs_name}}, {{weights_name}}, {{biases_name}}, {{name|upper}}_RESCALING);
\ No newline at end of file
+({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{name|upper}}_RESCALING);
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
leakyrelu_forward<{{name|upper}}_NB_DATA>
-({{input_name}}, {{output_name}}, {{name|upper}}_ALPHA);
\ No newline at end of file
+({{input_name}}, {{output_name}}, {{name|upper}}_ALPHA);
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
-pooling_forward<{{name|upper}}_NB_CHANNELS,
-                {{name|upper}}_CHANNELS_HEIGHT,
-                {{name|upper}}_CHANNELS_WIDTH,
-                {{name|upper}}_NB_OUTPUTS,
-                {{name|upper}}_OUTPUTS_HEIGHT,
-                {{name|upper}}_OUTPUTS_WIDTH,
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+pooling_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
+                {{ in_name[0]|upper }}_IN_HEIGHT,
+                {{ in_name[0]|upper }}_IN_WIDTH,
+                {{ out_name[0]|upper }}_NB_OUTPUTS,
+                {{ out_name[0]|upper }}_OUT_HEIGHT,
+                {{ out_name[0]|upper }}_OUT_WIDTH,
                {{name|upper}}_PADDING_Y,
                {{name|upper}}_PADDING_X,
                {{name|upper}}_STRIDE_Y,
@@ -12,4 +14,6 @@ pooling_forward<{{name|upper}}_NB_CHANNELS,
                {{name|upper}}_KERNEL_WIDTH,
                {{name|upper}}_POOLING_TYPE,
                {{name|upper}}_ACTIVATION>
-({{input_name}}, {{output_name}});
\ No newline at end of file
+({{in_name[0]}}, {{out_name[0]}});
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
{#- For name header -#}
#ifndef MEM_INFO_H
#define MEM_INFO_H
#define MEMORY_SIZE {{ mem_size }}
{% for i in range(mem_info|length) -%}
{%- set layer_name = mem_info[i][0] %}
/* {{layer_name}} memory */
{% for j in range(1, mem_info[i]|length) %}
#define {{ layer_name|upper }}_{{ mem_info_legends[j]|upper }} {{ mem_info[i][j] }}
{%- endfor %}
{% endfor %}
#endif /* MEM_INFO_H */
\ No newline at end of file
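As a concrete, made-up example, for a single layer "conv1" this template could expand to something like the following, assuming the planner's legend names match the *_OFFSET / *_SIZE / *_STRIDE / *_CONT_* / *_WRAP_* macros consumed by saveOutputs() earlier in this diff; every number is illustrative:

// Hypothetical rendering of the mem_info template above.
#ifndef MEM_INFO_H
#define MEM_INFO_H

#define MEMORY_SIZE 1024

/* conv1 memory */
#define CONV1_OFFSET 0
#define CONV1_SIZE 512
#define CONV1_STRIDE 32
#define CONV1_CONT_OFFSET 0
#define CONV1_CONT_SIZE 512
#define CONV1_WRAP_OFFSET 0
#define CONV1_WRAP_SIZE 0

#endif /* MEM_INFO_H */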
{#- For name header -#}
#ifndef DNN_HPP
#define DNN_HPP
{#- For libraries #}
{% for lib in libraries %}
#include <{{ lib }}>
{%- endfor %}
{% for func in functions %}
{{ func }}
{% endfor %}
#endif /* DNN_HPP */
\ No newline at end of file
#ifndef ENV_LAYER_H
#define ENV_LAYER_H
#include <stdint.h>
#define ENV_SIZE_X {{ size_x }}
#define ENV_SIZE_Y {{ size_y }}
#define ENV_NB_OUTPUTS {{ nb_outputs }}
#define ENV_DATA_UNSIGNED {{ is_unsigned }}
#define ENV_OUTPUTS_SIZE (ENV_NB_OUTPUTS*ENV_SIZE_X*ENV_SIZE_Y)
#define NETWORK_TARGETS 1
// Output targets network dimension definition:
static unsigned int OUTPUTS_HEIGHT[NETWORK_TARGETS] = {1};
static unsigned int OUTPUTS_WIDTH[NETWORK_TARGETS] = {1};
static unsigned int NB_OUTPUTS[NETWORK_TARGETS] = {1000};
static unsigned int NB_TARGET[NETWORK_TARGETS] = {1000};
static unsigned int OUTPUTS_SIZE[NETWORK_TARGETS] = {(OUTPUTS_WIDTH[0]*OUTPUTS_HEIGHT[0])};
typedef int32_t Target_0_T;
typedef Target_0_T Target_T;
#endif // ENV_LAYER_H
{#- For libraries #}
#include <stdint.h>
#include "network/rescaling.hpp"
// Layer & memory configurations
{%- for header in headers %}
#include "{{ header }}"
{%- endfor %}
{# mem has the datatype of the first input #}
{#- Change here to improve it -#}
static {{mem_ctype}} mem[MEMORY_SIZE];
{# Forward function #}
{#- Support multiple inputs with different datatypes and multiple outputs with different datatypes -#}
void model_forward({% for inp in inputs %}const {{inp[0]}}* {{inp[1]}}, {% endfor %}{% for out in outputs %}{{out[0]}}* {{out[1]}}{{ ", " if not loop.last else "" }}{% endfor %})
{
{%- for action in actions %}
{{ action }}
{%- endfor %}
}
\ No newline at end of file
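A minimal caller for the generated entry point might look like this, assuming a model with a single float input and a single float output; the buffer shapes and the exact signature depend on the exported graph:

#include "dnn.hpp"

// Illustrative shapes; the real ones come from the exported model.
static const float input0[3 * 32 * 32] = {0};
static float output0[10];

int main() {
    // One const pointer per graph input, one pointer per graph output,
    // in graph order, as laid out by the generated model_forward signature.
    model_forward(input0, output0);
    return 0;
}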
import numpy as np
import aidge_core
def numpy_dtype2ctype(dtype):
    if dtype == np.int8:
@@ -17,19 +16,3 @@ def numpy_dtype2ctype(dtype):
    # Add more dtype mappings as needed
    else:
        raise ValueError(f"Unsupported {dtype} dtype")
-def aidge_datatype2ctype(datatype):
-    if datatype == aidge_core.dtype.int8:
-        return "int8_t"
-    elif datatype == aidge_core.dtype.int32:
-        return "int32_t"
-    elif datatype == aidge_core.dtype.int64:
-        return "int64_t"
-    elif datatype == aidge_core.dtype.float32:
-        return "float"
-    elif datatype == aidge_core.dtype.float64:
-        return "double"
-    # Add more dtype mappings as needed
-    else:
-        raise ValueError(f"Unsupported {datatype} aidge datatype")
\ No newline at end of file
0.2.0