Commit 1044a6b6 authored by Gallas Gaye

feat: Added Pad2d export op

parent 9e41dd6b
2 merge requests: !39 Update 0.2.1 -> 0.3.0, !36 feat: Add missing operators for AIDGE model benchmarking
// kernels/pad.hpp
#ifndef __AIDGE_EXPORT_CPP_KERNELS_PAD2D__
#define __AIDGE_EXPORT_CPP_KERNELS_PAD2D__

#include <cstddef>
#include <cstdint>

#include "network/typedefs.hpp"
#include "kernels/rescaling.hpp"
#include "network/utils.hpp"
#include "kernels/macs.hpp"
#include "kernels/activation.hpp"

// TODO: add border value and border type (Reflect, Constant, Wrap, ...)
// and add the two missing pad values (bottom and right).
template<int NB_CHANNELS,
         int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
         int NB_OUTPUTS,
         int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
         int PADDING_Y, int PADDING_X,
         typename Input_T, typename Output_T>
__attribute__((always_inline)) inline
void convolution_forward(
    const Input_T* __restrict inputs,
    Output_T* __restrict outputs
)
{
    constexpr std::size_t oySize = CHANNELS_HEIGHT + PADDING_Y + PADDING_Y;
    constexpr std::size_t oxSize = CHANNELS_WIDTH + PADDING_X + PADDING_X;

    for (std::uint32_t oy = 0; oy < oySize; ++oy) {
        for (std::uint32_t ox = 0; ox < oxSize; ++ox) {
            for (std::uint32_t ch = 0; ch < NB_CHANNELS; ++ch) {
                // Tensors are assumed to be stored in HWC order, as in the
                // other kernels of this export.
                const std::size_t oOffset = (oy * oxSize + ox) * NB_OUTPUTS + ch;

                if (oy < PADDING_Y || oy >= CHANNELS_HEIGHT + PADDING_Y
                    || ox < PADDING_X || ox >= CHANNELS_WIDTH + PADDING_X)
                {
                    // Border area: constant (zero) padding
                    outputs[oOffset] = Output_T(0);
                }
                else {
                    // Interior area: copy the matching input element
                    const std::size_t iOffset
                        = ((oy - PADDING_Y) * CHANNELS_WIDTH + (ox - PADDING_X)) * NB_CHANNELS + ch;
                    outputs[oOffset] = inputs[iOffset];
                }
            }
        }
    }
}
#endif // __AIDGE_EXPORT_CPP_KERNELS_PAD2D__
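
As a quick sanity check, the kernel can also be instantiated on its own. The snippet below is only an illustrative sketch: the buffer sizes and values are made up, and it assumes the export's headers (network/typedefs.hpp, etc.) are available on the include path.

#include <cstdio>

#include "kernels/pad.hpp"

int main() {
    // 1 channel, 2x3 input, padded by 1 on each side -> 4x5 output
    constexpr int C = 1, H = 2, W = 3, PY = 1, PX = 1;
    const float in[C * H * W] = {1, 2, 3, 4, 5, 6};
    float out[C * (H + 2 * PY) * (W + 2 * PX)] = {};

    convolution_forward<C, H, W, C, H + 2 * PY, W + 2 * PX, PY, PX>(in, out);

    // Prints a 4x5 plane: a border of zeros around the original 2x3 values
    for (int y = 0; y < H + 2 * PY; ++y) {
        for (int x = 0; x < W + 2 * PX; ++x) {
            std::printf("%4.1f ", out[(y * (W + 2 * PX) + x) * C]);
        }
        std::printf("\n");
    }
    return 0;
}
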
@@ -75,7 +75,18 @@ class ProducerCPP(ExportNode):
 @ExportLibCpp.register("Pad2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
 class Pad_ARMCortexM(ExportNodeCpp):
     def __init__(self, node, mem_info):
-        raise NotImplementedError("Pad2D nodes is not implemented")
+        super().__init__(node, mem_info)
+        self.attributes["padding"] = node.get_operator().attr.begin_end_borders
+        self.config_template = str(
+            ROOT / "templates" / "configuration" / "pad_config.jinja")
+        self.forward_template = str(
+            ROOT / "templates" / "kernel_forward" / "pad_forward.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "kernels" / "pad.hpp")
+        ]
 
 @ExportLibCpp.register("ReLU", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
...
{#- templates/configuration/pad_config.jinja -#}
{#- For name header -#}
#ifndef {{ name|upper }}_LAYER_H
#define {{ name|upper }}_LAYER_H
{# For layer configuration -#}
{% include "./_def_io.jinja" %}
{% include "./_meminfo.jinja" %}
#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
#define {{ name|upper }}_PADDING_X {{ padding[0] }}
#endif /* {{ name|upper }}_LAYER_H */
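
For illustration, for a hypothetical layer named pad1 whose first two begin_end_borders values are both 1, this template would expand to roughly the following header (the defines produced by the included _def_io.jinja and _meminfo.jinja sub-templates are elided):

#ifndef PAD1_LAYER_H
#define PAD1_LAYER_H

/* ... I/O and memory defines from _def_io.jinja and _meminfo.jinja ... */

#define PAD1_PADDING_Y 1
#define PAD1_PADDING_X 1

#endif /* PAD1_LAYER_H */
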
{#- templates/kernel_forward/pad_forward.jinja -#}
{% filter indent(width=4, first=False) %}
{% include "./_mem_offset.jinja" %}
convolution_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
{{ in_name[0]|upper }}_IN_HEIGHT,
{{ in_name[0]|upper }}_IN_WIDTH,
{{ out_name[0]|upper }}_NB_OUTPUTS,
{{ out_name[0]|upper }}_OUT_HEIGHT,
{{ out_name[0]|upper }}_OUT_WIDTH,
{{name|upper}}_PADDING_Y,
{{name|upper}}_PADDING_X>
({{in_name[0]}}, {{out_name[0]}});
{% include "./_save_outputs.jinja" %}
{% endfilter %}
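
Similarly, assuming a hypothetical pad1 layer whose input tensor is named conv1_output and whose output tensor is named pad1_output, the forward template renders roughly this call into the generated code (the memory-offset and output-saving snippets are elided):

convolution_forward<CONV1_OUTPUT_NB_CHANNELS,
                    CONV1_OUTPUT_IN_HEIGHT,
                    CONV1_OUTPUT_IN_WIDTH,
                    PAD1_OUTPUT_NB_OUTPUTS,
                    PAD1_OUTPUT_OUT_HEIGHT,
                    PAD1_OUTPUT_OUT_WIDTH,
                    PAD1_PADDING_Y,
                    PAD1_PADDING_X>
    (conv1_output, pad1_output);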