Commit 321a39df authored by Wissam Boussella

[Fix] Fix for conv depthwise and softmax, mbnet-v1 now working

parent 14b473c3
@@ -95,8 +95,8 @@ __attribute__((always_inline)) inline void convcellDWPropagate(
     for (int output = 0; output < NB_OUTPUTS; ++output) {
         const int channel = (output * NB_CHANNELS) / NB_OUTPUTS;
-        SUM_T weightedSum = biasses[output];
+        using Sum_T = typename std::conditional<std::is_floating_point<Input_T>::value, float, int32_t>::type;
+        Sum_T weightedSum = biasses[output];
         for (int sy = 0; sy < KERNEL_HEIGHT; ++sy) {
             if ((PADDING_Y != 0
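Note: this change derives the accumulator type from Input_T instead of relying on the fixed SUM_T, so float exports accumulate in float and quantized integer exports accumulate in a wide int32_t. A minimal standalone sketch of the same std::conditional technique (function and variable names are illustrative, not from the export):

    #include <cstdint>
    #include <type_traits>

    template <typename Input_T>
    void accumulate(const Input_T* data, int n) {
        // Select the accumulator type from the input type: float inputs
        // sum in float, narrow integer inputs sum in a wide int32_t.
        using Sum_T = typename std::conditional<
            std::is_floating_point<Input_T>::value, float, int32_t>::type;
        Sum_T sum = 0;
        for (int i = 0; i < n; ++i)
            sum += data[i];  // the narrow input type cannot overflow the sum
        (void)sum;
    }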
@@ -2,16 +2,16 @@
 void aidge_softmax_chw_float32 (float* inputs,
                                 float* outputs,
-                                int inputDims[],
+                                const int inputDims[],
                                 int axis,
-                                unsigned int size_inputDim,
-                                unsigned int size)
+                                const unsigned int size_inputDim,
+                                const unsigned int size)
 {
     axis += (axis >= 0 ) ? 0 : size_inputDim;
     int postAxisElems = 1;
-    for (int i = axis+1; i < size_inputDim; ++i) {
+    for (unsigned int i = axis+1; i < size_inputDim; ++i) {
         postAxisElems *= inputDims[i];
     }
     int preAxisElems = 1;
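Note: besides const-qualifying the read-only parameters, the loop counter now matches its unsigned bound. With int i, the comparison i < size_inputDim mixes signed and unsigned operands, so i is implicitly converted to unsigned and compilers warn under -Wsign-compare. A small sketch of the corrected pattern (names illustrative):

    // Counter type matches the unsigned bound, so the comparison
    // involves no implicit signed-to-unsigned conversion.
    int product_of_dims(const int dims[], unsigned int n) {
        int product = 1;
        for (unsigned int i = 0; i < n; ++i)
            product *= dims[i];
        return product;
    }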
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+#include "typedefs.h"
+#include "nn_scaling_functions.hpp"
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
+// Attributes
+#define {{ name|upper }}_KERNEL_HEIGHT {{ kernel_dims[1] }}
+#define {{ name|upper }}_KERNEL_WIDTH {{ kernel_dims[0] }}
+#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
+#define {{ name|upper }}_PADDING_X {{ padding[0] }}
+#define {{ name|upper }}_STRIDE_Y {{ stride_dims[1] }}
+#define {{ name|upper }}_STRIDE_X {{ stride_dims[0] }}
+#define {{ name|upper }}_DILATION_Y {{ dilation_dims[1] }}
+#define {{ name|upper }}_DILATION_X {{ dilation_dims[0] }}
+{# #define {{ name|upper }}_GROUP {{ group }} #}
+// Activation/Scaling
+#define {{ name|upper }}_ACTIVATION {{ activation }}
+{%- if scaling_type == "floating_point" %}
+static const N2D2_Export::FloatingPointScaling {{ name|upper }}_SCALING = { {{scaling_value}} };
+{%- elif scaling_type == "fixed_point" %}
+static const N2D2_Export::FixedPointScaling<{{scaling_value}}, {{fractional_bits}}> {{ name|upper }}_SCALING;
+{%- elif scaling_type == "single_shift" %}
+static const N2D2_Export::SingleShiftScaling<{{shift_value}}> {{ name|upper }}_SCALING;
+{%- else %}
+static const N2D2_Export::NoScaling {{ name|upper }}_SCALING;
+{%- endif %}
+// Sizes
+#define {{ name|upper }}_WEIGHTS_SIZE {{ out_chan[0] * in_chan[0] * kernel_dims[1] * kernel_dims[0] }}
+#define {{ name|upper }}_BIASES_SIZE {{ out_chan[0] }}
+#define {{ name|upper }}_OUTPUTS_SIZE {{ out_chan[0] * out_height[0] * out_width[0] }}
+#define {{ name|upper }}_CHANNELS_SIZE {{ in_chan[0] * in_height[0] * in_width[0] }}
+#endif /* {{ name|upper }}_LAYER_H */
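For orientation, this new configuration template is rendered once per depthwise layer. A hypothetical node named conv1_dw with a 3x3 kernel, stride 1, padding 1, dilation 1, and floating-point scaling of 1.0 would yield roughly the following excerpt; all names and values here are made up:

    /* Hypothetical rendered excerpt; layer name and values are illustrative. */
    #define CONV1_DW_KERNEL_HEIGHT 3
    #define CONV1_DW_KERNEL_WIDTH  3
    #define CONV1_DW_PADDING_Y     1
    #define CONV1_DW_PADDING_X     1
    #define CONV1_DW_STRIDE_Y      1
    #define CONV1_DW_STRIDE_X      1
    #define CONV1_DW_DILATION_Y    1
    #define CONV1_DW_DILATION_X    1
    static const N2D2_Export::FloatingPointScaling CONV1_DW_SCALING = { 1.0 };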
{% include "./_def_io.jinja" %}
{% include "./_meminfo.jinja" %}
{#- For name header -#}
#ifndef {{ name|upper }}_LAYER_H
#define {{ name|upper }}_LAYER_H
......@@ -5,9 +9,8 @@
{# For layer configuration -#}
#define {{ name|upper }}_INPUTS_SIZE {{ in_size[0] }}
#define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
#define {{ name|upper }}_DIMS {{ in_dims[0] }}
#define {{ name|upper }}_AXIS {{ axis }}
#define {{ name|upper }}_INPUT_DIMS_SIZE {{ in_dims[0]|length}}
static const int {{ name|upper }}_DIMS[] = { {{ in_dims[0] | join(', ') }} };
#endif /* {{ name|upper }}_LAYER_H */
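Note: the removed line pasted in_dims[0], a Python list, straight into a #define, so the macro body rendered as a Python list literal (e.g. [1, 10]) that has no valid use in C. The replacement emits the dimension count plus a genuine C array. A hypothetical rendering for a node named softmax0 with input dims [1, 10] and axis 1 (values made up):

    /* Hypothetical rendered excerpt; layer name and dims are illustrative. */
    #define SOFTMAX0_INPUTS_SIZE 10
    #define SOFTMAX0_OUTPUTS_SIZE 10
    #define SOFTMAX0_AXIS 1
    #define SOFTMAX0_INPUT_DIMS_SIZE 2
    static const int SOFTMAX0_DIMS[] = { 1, 10 };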
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+N2D2_Export::convcellDWPropagate<{{ in_name[0]|upper }}_NB_CHANNELS,
+                                 {{ in_name[0]|upper }}_IN_HEIGHT,
+                                 {{ in_name[0]|upper }}_IN_WIDTH,
+                                 {{ out_name[0]|upper }}_NB_OUTPUTS,
+                                 {{ out_name[0]|upper }}_OUT_HEIGHT,
+                                 {{ out_name[0]|upper }}_OUT_WIDTH,
+                                 {{ name|upper }}_PADDING_Y,
+                                 {{ name|upper }}_PADDING_X,
+                                 {{ name|upper }}_STRIDE_Y,
+                                 {{ name|upper }}_STRIDE_X,
+                                 {{ name|upper }}_KERNEL_HEIGHT,
+                                 {{ name|upper }}_KERNEL_WIDTH,
+                                 {{ name|upper }}_ACTIVATION,
+                                 {{ in_name[0]|upper }}_CONT_OFFSET,
+                                 {{ in_name[0]|upper }}_CONT_SIZE,
+                                 {{ in_name[0]|upper }}_WRAP_OFFSET,
+                                 {{ in_name[0]|upper }}_WRAP_SIZE,
+                                 {{ in_name[0]|upper }}_STRIDE,
+                                 {{ out_name[0]|upper }}_CONT_OFFSET,
+                                 {{ out_name[0]|upper }}_CONT_SIZE,
+                                 {{ out_name[0]|upper }}_WRAP_OFFSET,
+                                 {{ out_name[0]|upper }}_WRAP_SIZE,
+                                 {{ out_name[0]|upper }}_STRIDE>
+    ({{in_name[0]}}, {{out_name[0]}}, {{in_name[2]}}, {{in_name[1]}}, {{ name|upper }}_SCALING);
+{% endfilter %}
@@ -250,8 +250,8 @@ class ConvDW_ARMCortexM(ExportNodeCpp):
         # Use PaddedConv to add padding attribute
         self.attributes["padding"] = [0, 0]
-        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_config.jinja")
-        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "conv_kernel.jinja")
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_dw_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "conv_dw_kernel.jinja")
         self.include_list = []
         self.kernels_to_copy = [
             str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "ConvDW.hpp")
@@ -275,8 +275,8 @@ class PaddedConvDW_ARMCortexM(ExportNodeCpp):
         self.attributes["dilation_dims"] = n.get_operator(
         ).attr.dilation_dims
-        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_config.jinja")
-        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "conv_kernel.jinja")
+        self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_dw_config.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "conv_dw_kernel.jinja")
         self.include_list = []
         self.kernels_to_copy = [
             str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "ConvDW.hpp")