Commit 58c0e326 authored by Christophe Guillon

Merge branch 'new_operator' into 'dev'

Add missing operators for basic onnx model exporting

See merge request eclipse/aidge/aidge_export_cpp!31
parents f3a546b9 e98d3eee
2 merge requests: !39 Update 0.2.1 -> 0.3.0, !31 Add missing operators for basic onnx model exporting
Pipeline #67987 passed
@@ -65,8 +65,8 @@ void convolution_forward(
int oOffset = NB_OUTPUTS * oPos;
// <--
Bias_T weightedSum = biases[output];
// Check if the biases are defined
Bias_T weightedSum = biases ? biases[output] : 0;
for (int sy = 0; sy < KERNEL_HEIGHT; ++sy) {
if ((PADDING_Y != 0
@@ -116,4 +116,45 @@ void convolution_forward(
}
}
// Overload (taking std::nullptr_t as the bias argument) for when no biases are given to the convolution
template<int NB_CHANNELS,
int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
int NB_OUTPUTS,
int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
int PADDING_Y, int PADDING_X,
int STRIDE_Y, int STRIDE_X,
int DILATION_Y, int DILATION_X,
int KERNEL_HEIGHT, int KERNEL_WIDTH,
ActivationFunction_T ACTIVATION,
typename Input_T, typename Output_T,
typename Weight_T,
typename Rescaling_T>
__attribute__((always_inline)) inline
void convolution_forward(
const Input_T* __restrict inputs,
Output_T* __restrict outputs,
const Weight_T* __restrict weights,
std::nullptr_t __restrict,
const Rescaling_T& __restrict rescaling)
{
const float* b = nullptr;
convolution_forward<NB_CHANNELS,
CHANNELS_HEIGHT,
CHANNELS_WIDTH,
NB_OUTPUTS,
OUTPUTS_HEIGHT,
OUTPUTS_WIDTH,
PADDING_Y,
PADDING_X,
STRIDE_Y,
STRIDE_X,
DILATION_Y,
DILATION_X,
KERNEL_HEIGHT,
KERNEL_WIDTH,
ACTIVATION>
(inputs, outputs, weights, b, rescaling);
}
#endif // __AIDGE_EXPORT_CPP_KERNELS_CONVOLUTION__
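// --- Illustrative usage sketch (not part of this merge request) ---
// All names and dimensions below are hypothetical; Linear and NoScaling are the
// activation and rescaling helpers assumed from the export's typedefs/rescaling
// headers. Passing a literal nullptr selects the std::nullptr_t overload above,
// which forwards a null bias pointer so the weighted sum starts at 0 instead of
// reading biases[output].
inline void convolution_no_bias_example(const float* __restrict inputs,
                                        float* __restrict outputs,
                                        const float* __restrict weights,
                                        const NoScaling& rescaling)
{
    convolution_forward<3, 8, 8,   // NB_CHANNELS, CHANNELS_HEIGHT, CHANNELS_WIDTH
                        4, 6, 6,   // NB_OUTPUTS, OUTPUTS_HEIGHT, OUTPUTS_WIDTH
                        0, 0,      // PADDING_Y, PADDING_X
                        1, 1,      // STRIDE_Y, STRIDE_X
                        1, 1,      // DILATION_Y, DILATION_X
                        3, 3,      // KERNEL_HEIGHT, KERNEL_WIDTH
                        Linear>    // ACTIVATION
        (inputs, outputs, weights, nullptr, rescaling);
}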
#ifndef __AIDGE_EXPORT_CPP_KERNELS_RESHAPE__
#define __AIDGE_EXPORT_CPP_KERNELS_RESHAPE__
#include "network/typedefs.hpp"
// Generic function for reshape
template<int M,
typename Input_T, typename Output_T>
__attribute__((always_inline)) inline
void reshape_forward (
const Input_T* __restrict, // The first input is unused here: it only dictates the resulting layout of the reshape
const Input_T* __restrict inputs2,
Output_T* __restrict outputs)
{
// If the input and output pointers are the same, the memory manager has already optimized this copy away, so it is a no-op.
if (inputs2 == outputs)
return;
// In the exported C++ code, a reshape amounts to a no-op on the flat buffer:
// we only need to copy the input buffer to the output.
for (int m = 0; m < M; ++m) {
outputs[m] = inputs2[m];
}
}
#endif // __AIDGE_EXPORT_CPP_KERNELS_RESHAPE__
\ No newline at end of file
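// --- Illustrative usage sketch (not part of this merge request) ---
// Buffer names and sizes below are hypothetical. The kernel is a flat
// element-by-element copy of its second input; when the memory planner aliases
// the output buffer onto that input, the early return above makes it a no-op.
inline void reshape_usage_example()
{
    float in0[6];                       // unused: only dictates the output layout
    float in1[6] = {1, 2, 3, 4, 5, 6};  // buffer that is actually copied
    float out[6];
    reshape_forward<6>(in0, in1, out);  // plain copy of the 6 elements
    reshape_forward<6>(in0, out, out);  // inputs2 == outputs: returns immediately
}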
@@ -94,27 +94,65 @@ class ReLUCPP(ExportNodeCpp):
str(ROOT / "kernels" / "rescaling.hpp")
]
@ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class ConvCPP(ExportNodeCpp):
@ExportLibCpp.register("Reshape", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class ReshapeCPP(ExportNodeCpp):
def __init__(self, node, mem_info):
super().__init__(node, mem_info)
self.config_template = str(
ROOT / "templates" / "configuration" / "reshape_config.jinja")
self.forward_template = str(
ROOT / "templates" / "kernel_forward" / "reshape_forward.jinja")
self.include_list = []
self.kernels_to_copy = [
str(ROOT / "kernels" / "reshape.hpp"),
]
@ExportLibCpp.register("MatMul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class MatMulCPP(ExportNodeCpp):
def __init__(self, node, mem_info):
super().__init__(node, mem_info)
# No padding with Conv
# Use PaddedConv to add padding attribute
self.attributes["padding"] = [0, 0]
self.attributes["activation"] = "Linear"
self.attributes["rescaling"] = "NoScaling"
self.config_template = str(
ROOT / "templates" / "configuration" / "convolution_config.jinja")
ROOT / "templates" / "configuration" / "matmul_config.jinja")
self.forward_template = str(
ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja")
ROOT / "templates" / "kernel_forward" / "matmul_forward.jinja")
self.include_list = []
self.kernels_to_copy = [
str(ROOT / "kernels" / "convolution.hpp"),
str(ROOT / "kernels" / "macs.hpp"),
str(ROOT / "kernels" / "activation.hpp"),
str(ROOT / "kernels" / "rescaling.hpp")
str(ROOT / "kernels" / "matmul.hpp"),
]
def _setup_conv2D(conv):
"""Common setup code for convolutions: Conv2D and PaddedConv2D."""
# If biases are not provided, replace the None input name with "nullptr" so the generated call uses the bias-less kernel overload
if (len(conv.attributes["in_name"]) > 2 and conv.attributes["in_name"][2] is None):
conv.attributes["in_name"][2] = "nullptr"
conv.attributes["activation"] = "Linear"
conv.attributes["rescaling"] = "NoScaling"
conv.config_template = str(
ROOT / "templates" / "configuration" / "convolution_config.jinja")
conv.forward_template = str(
ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja")
conv.include_list = []
conv.kernels_to_copy = [
str(ROOT / "kernels" / "convolution.hpp"),
str(ROOT / "kernels" / "macs.hpp"),
str(ROOT / "kernels" / "activation.hpp"),
str(ROOT / "kernels" / "rescaling.hpp")
]
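# --- Illustrative sketch (not part of this merge request) ---
# The helper and tensor names below are hypothetical; they only restate the
# substitution performed by _setup_conv2D above. Rewriting the missing bias
# input name to the literal string "nullptr" makes the generated C++ call pick
# the std::nullptr_t overload of convolution_forward in kernels/convolution.hpp.
def _bias_name_or_nullptr(in_name):
    if len(in_name) > 2 and in_name[2] is None:
        in_name[2] = "nullptr"
    return in_name

assert _bias_name_or_nullptr(["conv1_input", "conv1_weights", None]) \
    == ["conv1_input", "conv1_weights", "nullptr"]
# The convolution_forward.jinja template then emits (roughly):
#   convolution_forward<...>(conv1_input, conv1_weights, nullptr, CONV1_RESCALING);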
@ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class ConvCPP(ExportNodeCpp):
def __init__(self, node, mem_info):
super().__init__(node, mem_info)
# No padding with Conv2D
# Use PaddedConv2D to add a padding attribute
self.attributes["padding"] = [0, 0]
_setup_conv2D(self)
@ExportLibCpp.register_metaop("PaddedConv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class PaddedConvCPP(ExportNodeCpp):
def __init__(self, node, mem_info):
@@ -131,74 +169,60 @@ class PaddedConvCPP(ExportNodeCpp):
).attr.stride_dims
self.attributes["dilation_dims"] = n.get_operator(
).attr.dilation_dims
self.attributes["activation"] = "Linear"
self.attributes["rescaling"] = "NoScaling"
self.config_template = str(
ROOT / "templates" / "configuration" / "convolution_config.jinja")
self.forward_template = str(
ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja")
self.include_list = []
self.kernels_to_copy = [
str(ROOT / "kernels" / "convolution.hpp"),
str(ROOT / "kernels" / "macs.hpp"),
str(ROOT / "kernels" / "activation.hpp"),
str(ROOT / "kernels" / "rescaling.hpp")
]
_setup_conv2D(self)
def _setup_elemwise_op(elemwise, op):
"""Common code (template and kernel setup) shared across all the different elementWise operator (Add, Sub,...)."""
elemwise.attributes["elemwise_op"] = op
elemwise.attributes["activation"] = "Linear"
elemwise.attributes["rescaling"] = "NoScaling"
elemwise.config_template = str(
ROOT / "templates" / "configuration" / "elemwise_config.jinja")
elemwise.forward_template = str(
ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja")
elemwise.include_list = []
elemwise.kernels_to_copy = [
str(ROOT / "kernels" / "elemwise.hpp"),
str(ROOT / "kernels" / "activation.hpp"),
str(ROOT / "kernels" / "rescaling.hpp")
]
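# --- Illustrative sketch (not part of this merge request) ---
# Any further element-wise operator would follow the same pattern as Add, Sub
# and Mul below; e.g. a hypothetical Div export, provided kernels/elemwise.hpp
# also implements a "Div" operation:
#
#   @ExportLibCpp.register("Div", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
#   class DivCPP(ExportNodeCpp):
#       def __init__(self, node, mem_info):
#           super().__init__(node, mem_info)
#           _setup_elemwise_op(self, "Div")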
@ExportLibCpp.register("Add", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class AddCPP(ExportNodeCpp):
def __init__(self, node, mem_info):
super().__init__(node, mem_info)
self.attributes["elemwise_op"] = "Add"
self.attributes["activation"] = "Linear"
self.attributes["rescaling"] = "NoScaling"
self.config_template = str(
ROOT / "templates" / "configuration" / "elemwise_config.jinja")
self.forward_template = str(
ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja")
self.include_list = []
self.kernels_to_copy = [
str(ROOT / "kernels" / "elemwise.hpp"),
str(ROOT / "kernels" / "activation.hpp"),
str(ROOT / "kernels" / "rescaling.hpp")
]
_setup_elemwise_op(self, "Add")
@ExportLibCpp.register("Sub", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class SubCPP(ExportNodeCpp):
def __init__(self, node, mem_info):
super().__init__(node, mem_info)
self.attributes["elemwise_op"] = "Sub"
self.attributes["activation"] = "Linear"
self.attributes["rescaling"] = "NoScaling"
self.config_template = str(
ROOT / "templates" / "configuration" / "elemwise_config.jinja")
self.forward_template = str(
ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja")
self.include_list = []
self.kernels_to_copy = [
str(ROOT / "kernels" / "elemwise.hpp"),
str(ROOT / "kernels" / "activation.hpp"),
str(ROOT / "kernels" / "rescaling.hpp")
]
_setup_elemwise_op(self, "Sub")
@ExportLibCpp.register("Mul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class MulCPP(ExportNodeCpp):
def __init__(self, node, mem_info):
super().__init__(node, mem_info)
self.attributes["elemwise_op"] = "Mul"
self.attributes["activation"] = "Linear"
self.attributes["rescaling"] = "NoScaling"
self.config_template = str(
ROOT / "templates" / "configuration" / "elemwise_config.jinja")
self.forward_template = str(
ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja")
self.include_list = []
self.kernels_to_copy = [
str(ROOT / "kernels" / "elemwise.hpp"),
str(ROOT / "kernels" / "activation.hpp"),
str(ROOT / "kernels" / "rescaling.hpp")
]
_setup_elemwise_op(self, "Mul")
def _setup_pooling(pooling):
"""Common code (template and kernel setup) shared across all the different pooling operator."""
pooling.config_template = str(
ROOT / "templates" / "configuration" / "pooling_config.jinja")
pooling.forward_template = str(
ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja")
pooling.include_list = []
pooling.kernels_to_copy = [
str(ROOT / "kernels" / "pooling.hpp"),
str(ROOT / "kernels" / "activation.hpp"),
str(ROOT / "kernels" / "rescaling.hpp")
]
@ExportLibCpp.register("MaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class MaxPoolCPP(ExportNodeCpp):
@@ -211,17 +235,7 @@ class MaxPoolCPP(ExportNodeCpp):
self.attributes["pool_type"] = "Max"
self.attributes["activation"] = "Linear"
self.config_template = str(
ROOT / "templates" / "configuration" / "pooling_config.jinja")
self.forward_template = str(
ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja")
self.include_list = []
self.kernels_to_copy = [
str(ROOT / "kernels" / "pooling.hpp"),
str(ROOT / "kernels" / "activation.hpp"),
str(ROOT / "kernels" / "rescaling.hpp")
]
_setup_pooling(self)
@ExportLibCpp.register_metaop("PaddedMaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class PaddedMaxPoolCPP(ExportNodeCpp):
@@ -239,17 +253,7 @@ class PaddedMaxPoolCPP(ExportNodeCpp):
self.attributes["pool_type"] = "Max"
self.attributes["activation"] = "Linear"
self.config_template = str(
ROOT / "templates" / "configuration" / "pooling_config.jinja")
self.forward_template = str(
ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja")
self.include_list = []
self.kernels_to_copy = [
str(ROOT / "kernels" / "pooling.hpp"),
str(ROOT / "kernels" / "activation.hpp"),
str(ROOT / "kernels" / "rescaling.hpp")
]
_setup_pooling(self)
@ExportLibCpp.register("GlobalAveragePooling", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class GlobalAveragePoolCPP(ExportNodeCpp):
@@ -267,16 +271,7 @@ class GlobalAveragePoolCPP(ExportNodeCpp):
self.attributes["pool_type"] = "Average"
self.attributes["activation"] = "Linear"
self.config_template = str(
ROOT / "templates" / "configuration" / "pooling_config.jinja")
self.forward_template = str(
ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja")
self.include_list = []
self.kernels_to_copy = [
str(ROOT / "kernels" / "pooling.hpp"),
str(ROOT / "kernels" / "activation.hpp"),
str(ROOT / "kernels" / "rescaling.hpp")
]
_setup_pooling(self)
@ExportLibCpp.register("FC", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class FcCPP(ExportNodeCpp):
......
@@ -2,10 +2,13 @@
#ifndef {{ name|upper }}_LAYER_H
#define {{ name|upper }}_LAYER_H
{% include "./_def_io.jinja" %}
{% include "./_meminfo.jinja" %}
{# For layer configuration -#}
#define {{ name|upper }}_M {{ inputs_dims[0][0] }}
#define {{ name|upper }}_K {{ inputs_dims[0][1] }}
#define {{ name|upper }}_N {{ inputs_dims[1][1] }}
#define {{ name|upper }}_M {{ in_dims[0][0] }}
#define {{ name|upper }}_K {{ in_dims[0][1] }}
#define {{ name|upper }}_N {{ in_dims[1][1] }}
#define {{ name|upper }}_ACTIVATION {{ activation }}
static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
......
{#- For name header -#}
#ifndef {{ name|upper }}_LAYER_H
#define {{ name|upper }}_LAYER_H
{% include "./_def_io.jinja" %}
{% include "./_meminfo.jinja" %}
{# For layer configuration -#}
#define {{ name|upper }}_NB_ELTS {{ in_dims[0]|join('*') }}
{% filter indent(width=4, first=False) %}
{% include "./_mem_offset.jinja" %}
matmul_forward<{{name|upper}}_M,
{{name|upper}}_K,
{{name|upper}}_N,
{{name|upper}}_ACTIVATION>
({{inputs1_name}}, {{inputs2_name}}, {{outputs_name}}, {{name|upper}}_RESCALING);
\ No newline at end of file
({{in_name[0]}}, {{in_name[1]}}, {{out_name[0]}}, {{name|upper}}_RESCALING);
{% include "./_save_outputs.jinja" %}
{% endfilter %}
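// --- Illustrative rendered output (not generated by this merge request) ---
// For a hypothetical MatMul node named "matmul0" with in_dims [[2, 3], [3, 4]],
// activation "Linear" and rescaling "NoScaling", the configuration template
// above expands to roughly the following (omitting the _def_io/_meminfo parts),
// and the forward template emits the matching call; the input/output names are
// hypothetical:
//
//   #define MATMUL0_M 2
//   #define MATMUL0_K 3
//   #define MATMUL0_N 4
//   #define MATMUL0_ACTIVATION Linear
//   static const NoScaling MATMUL0_RESCALING = {};
//
//   matmul_forward<MATMUL0_M, MATMUL0_K, MATMUL0_N, MATMUL0_ACTIVATION>
//       (matmul0_input_0, matmul0_input_1, matmul0_output_0, MATMUL0_RESCALING);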
{% filter indent(width=4, first=False) %}
{% include "./_mem_offset.jinja" %}
reshape_forward<{{name|upper}}_NB_ELTS>
({{in_name[0]}}, {{in_name[1]}}, {{out_name[0]}});
{% include "./_save_outputs.jinja" %}
{% endfilter %}
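// --- Illustrative rendered output (not generated by this merge request) ---
// For a hypothetical Reshape node named "reshape0" whose first input has dims
// [2, 3], the configuration template earlier defines RESHAPE0_NB_ELTS as 2*3,
// and the forward template above emits roughly (with hypothetical buffer names):
//
//   reshape_forward<RESHAPE0_NB_ELTS>
//       (reshape0_input_0, reshape0_input_1, reshape0_output_0);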