Commit ff980a95 authored by Gallas Gaye

fix: Better pad op implementation

parent f4815323
2 merge requests: !39 "Update 0.2.1 -> 0.3.0", !36 "feat: Add missing operators for AIDGE model benchmarking"
@@ -6,32 +6,44 @@
 // Todo add border value and border type (Reflect, Constant, Wrap...) and add the two missing pad value (bottom and right)
-template<int NB_CHANNELS,
+template<int NB_BATCHES, int NB_CHANNELS,
          int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
          int NB_OUTPUTS,
          int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
-         int PADDING_Y, int PADDING_X,
+         int PADDING_TOP,
+         int PADDING_LEFT,
+         int PADDING_BOTTOM,
+         int PADDING_RIGHT,
          typename Input_T, typename Output_T>
 __attribute__((always_inline)) inline
-void convolution_forward(
+void pad_forward(
+    double borderValue,
     const Input_T* __restrict inputs,
     Output_T* __restrict outputs
     )
 {
-    const unsigned int oySize = CHANNELS_HEIGHT + PADDING_Y + PADDING_Y;
-    const unsigned int oxSize = CHANNELS_WIDTH + PADDING_X + PADDING_X;
+    const unsigned int oySize = CHANNELS_HEIGHT + PADDING_TOP + PADDING_BOTTOM;
+    const unsigned int oxSize = CHANNELS_WIDTH + PADDING_LEFT + PADDING_RIGHT;
 
-    for (unsigned int oy = 0; oy < oySize; ++oy) {
-        for (unsigned int ox = 0; ox < oxSize; ++ox) {
-            if (oy < PADDING_Y or oy >= CHANNELS_HEIGHT + PADDING_Y or ox < PADDING_X or ox >= CHANNELS_WIDTH + PADDING_X)
-            {
-                outputs[oy * oySize + ox] = 0.0f;
-            }
-            else
-            {
-                outputs[oy * oySize + ox] = inputs[(oy - PADDING_Y) * CHANNELS_HEIGHT + (ox - PADDING_X)];
-            }
+    for (unsigned int batch = 0; batch < NB_BATCHES; ++batch) {
+        for (unsigned int ch = 0; ch < NB_CHANNELS; ++ch) {
+            const unsigned int preIndex = batch * NB_CHANNELS * CHANNELS_HEIGHT * CHANNELS_WIDTH + ch * CHANNELS_HEIGHT * CHANNELS_WIDTH;
+
+            for (unsigned int oy = 0; oy < oySize; ++oy) {
+                for (unsigned int ox = 0; ox < oxSize; ++ox) {
+                    const unsigned int outIndex = batch * NB_CHANNELS * oySize * oxSize + ch * oySize * oxSize + oy * oxSize + ox;
+
+                    outputs[outIndex] = borderValue;
+
+                    const unsigned int inputX = ox - PADDING_LEFT;
+                    const unsigned int inputY = oy - PADDING_TOP;
+
+                    if (inputY >= 0 and inputY < CHANNELS_HEIGHT and inputX >= 0 and inputX < CHANNELS_WIDTH)
+                    {
+                        outputs[outIndex] = inputs[preIndex + inputY * CHANNELS_WIDTH + inputX];
+                    }
+                }
+            }
         }
     }
 }
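
For reference, here is a minimal standalone sketch (not part of the commit) of how the new pad_forward kernel can be instantiated; the eleven integer template arguments follow the order introduced above, and Input_T/Output_T are deduced from the pointer arguments. Note that inputX/inputY are unsigned, so the >= 0 checks are tautological; coordinates left of or above the input region wrap around to large values and are rejected by the < comparisons instead.

// Sketch only: pad a 1x1x2x2 input with a one-pixel constant border
// (border value 0.0). The include path is an assumption based on this
// repository's layout.
#include <cstdio>
#include "kernels/pad.hpp"

int main() {
    const float input[1 * 1 * 2 * 2] = {1.f, 2.f,
                                        3.f, 4.f};
    float output[1 * 1 * 4 * 4];

    // <NB_BATCHES, NB_CHANNELS, CHANNELS_HEIGHT, CHANNELS_WIDTH,
    //  NB_OUTPUTS, OUTPUTS_HEIGHT, OUTPUTS_WIDTH,
    //  PADDING_TOP, PADDING_LEFT, PADDING_BOTTOM, PADDING_RIGHT>
    pad_forward<1, 1, 2, 2, 1, 4, 4, 1, 1, 1, 1>(0.0, input, output);

    // Prints a 4x4 grid: zeros on the border, 1 2 / 3 4 in the centre.
    for (int y = 0; y < 4; ++y) {
        for (int x = 0; x < 4; ++x)
            std::printf("%4.1f ", output[y * 4 + x]);
        std::printf("\n");
    }
    return 0;
}
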
@@ -73,10 +73,16 @@ class ProducerCPP(ExportNode):
 # TODO : find a way to remove this dummy exportnode
 @ExportLibCpp.register("Pad2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
-class Pad_ARMCortexM(ExportNodeCpp):
+class PadCPP(ExportNodeCpp):
     def __init__(self, node, mem_info):
         super().__init__(node, mem_info)
         self.attributes["padding"] = node.get_operator().attr.begin_end_borders
+        self.attributes["border_type"] = node.get_operator().attr.border_type
+        self.attributes["border_value"] = node.get_operator().attr.border_value
+
+        assert self.attributes["border_type"] == aidge_core.pad_border_type.Constant, (
+            f"export Pad2d: border_type == {node.get_operator().attr.border_type} not implemented"
+        )
+
         self.config_template = str(
             ROOT / "templates" / "configuration" / "pad_config.jinja")
@@ -87,8 +93,6 @@ class Pad_ARMCortexM(ExportNodeCpp):
         str(ROOT / "kernels" / "pad.hpp")
     ]
 
-
-
 @ExportLibCpp.register("ReLU", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class ReLUCPP(ExportNodeCpp):
     def __init__(self, node, mem_info):
@@ -4,7 +4,10 @@
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
-#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
-#define {{ name|upper }}_PADDING_X {{ padding[0] }}
+#define {{ name|upper }}_PADDING_BOTTOM {{ padding[2] }}
+#define {{ name|upper }}_PADDING_RIGHT {{ padding[3] }}
+#define {{ name|upper }}_PADDING_TOP {{ padding[0] }}
+#define {{ name|upper }}_PADDING_LEFT {{ padding[1] }}
+#define {{ name|upper }}_BORDER_VALUE {{ border_value }}
 
 #endif /* {{ name|upper }}_LAYER_H */
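
As an illustration (the values are hypothetical, not taken from the commit), for a layer named pad2d with padding = [1, 3, 5, 7] and border_value = 0.0, the configuration template above renders to:

// Rendered pad_config output for a hypothetical pad2d layer:
// padding[0] -> TOP, padding[1] -> LEFT, padding[2] -> BOTTOM, padding[3] -> RIGHT.
#define PAD2D_PADDING_BOTTOM 5
#define PAD2D_PADDING_RIGHT 7
#define PAD2D_PADDING_TOP 1
#define PAD2D_PADDING_LEFT 3
#define PAD2D_BORDER_VALUE 0.0
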
 {% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-convolution_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
+pad_forward<{{ in_name[0]|upper }}_IN_BATCH,
+    {{ in_name[0]|upper }}_NB_CHANNELS,
     {{ in_name[0]|upper }}_IN_HEIGHT,
     {{ in_name[0]|upper }}_IN_WIDTH,
     {{ out_name[0]|upper }}_NB_OUTPUTS,
     {{ out_name[0]|upper }}_OUT_HEIGHT,
     {{ out_name[0]|upper }}_OUT_WIDTH,
-    {{name|upper}}_PADDING_Y,
-    {{name|upper}}_PADDING_X>
-    ({{in_name[0]}}, {{out_name[0]}});
+    {{name|upper}}_PADDING_TOP,
+    {{name|upper}}_PADDING_LEFT,
+    {{name|upper}}_PADDING_BOTTOM,
+    {{name|upper}}_PADDING_RIGHT>
+    ({{name|upper}}_BORDER_VALUE, {{in_name[0]}}, {{out_name[0]}});
 {% include "./_save_outputs.jinja" %}
 {% endfilter %}
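
Rendered for the same hypothetical pad2d layer, with input0/output0 as illustrative tensor names, the forward template above expands to a call like the following; note the border value is passed as the first runtime argument, matching the new kernel signature:

// Illustrative expansion of the forward template (names are assumptions):
pad_forward<INPUT0_IN_BATCH,
            INPUT0_NB_CHANNELS,
            INPUT0_IN_HEIGHT,
            INPUT0_IN_WIDTH,
            OUTPUT0_NB_OUTPUTS,
            OUTPUT0_OUT_HEIGHT,
            OUTPUT0_OUT_WIDTH,
            PAD2D_PADDING_TOP,
            PAD2D_PADDING_LEFT,
            PAD2D_PADDING_BOTTOM,
            PAD2D_PADDING_RIGHT>
            (PAD2D_BORDER_VALUE, input0, output0);
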
@@ -211,6 +211,55 @@ class test_operator_export(unittest.TestCase):
         self.unit_test_export(model, "Mul", [[1, 5, 5]])
 
+    def test_export_mul_larger(self):
+        print("MulLarger")
+        model = aidge_core.sequential([
+            aidge_core.Producer([1, 7, 5], name="producer"),
+            aidge_core.Mul(name="mul")
+        ])
+
+        self.unit_test_export(model, "Mul", [[1, 7, 5]])
+
+    def test_export_mul_higher(self):
+        print("MulHigher")
+        model = aidge_core.sequential([
+            aidge_core.Producer([1, 5, 7], name="producer"),
+            aidge_core.Mul(name="mul")
+        ])
+
+        self.unit_test_export(model, "Mul", [[1, 5, 7]])
+
+    # "Broadcast not supported yet in export operator"
+    @unittest.expectedFailure
+    def test_export_mul_simple_broadcast(self):
+        print("MulSimpleBroadcast")
+        model = aidge_core.sequential([
+            aidge_core.Producer([1, 1, 5], name="producer"),
+            aidge_core.Mul(name="mul")
+        ])
+
+        self.unit_test_export(model, "MulSimpleBroadcast", [[1, 7, 5]])
+
+    # "Broadcast not supported yet in export operator"
+    @unittest.expectedFailure
+    def test_export_mul_double_broadcast(self):
+        print("MulDoubleBroadcast")
+        model = aidge_core.sequential([
+            aidge_core.Producer([1, 1, 7], name="producer"),
+            aidge_core.Mul(name="mul")
+        ])
+
+        self.unit_test_export(model, "MulDoubleBroadcast", [[1, 5, 1]])
+
+    def test_export_mul_batch(self):
+        print("MulBatch")
+        model = aidge_core.sequential([
+            aidge_core.Producer([3, 5, 7], name="producer"),
+            aidge_core.Mul(name="mul")
+        ])
+
+        self.unit_test_export(model, "MulBatch", [[3, 5, 7]])
+
     def test_export_concat(self):
         print("Concat")
         model = aidge_core.sequential([
@@ -250,7 +299,56 @@ class test_operator_export(unittest.TestCase):
             aidge_core.Pad2D((1, 1, 1, 1), name="pad2d")
         ])
 
-        self.unit_test_export(model, "Pad2D", [[1, 1, 10, 10]])
+        self.unit_test_export(model, "Pad2D", [[1, 1, 11, 11]])
+
+    def test_export_pad2D_larger(self):
+        print("Pad2DLarger")
+        model = aidge_core.sequential([
+            aidge_core.Pad2D((1, 3, 1, 3), name="pad2d")
+        ])
+
+        self.unit_test_export(model, "Pad2DLarger", [[1, 1, 7, 11]])
+
+    def test_export_pad2D_higher(self):
+        print("Pad2DHigher")
+        model = aidge_core.sequential([
+            aidge_core.Pad2D((3, 1, 3, 1), name="pad2d")
+        ])
+
+        self.unit_test_export(model, "Pad2DHigher", [[1, 1, 11, 7]])
+
+    def test_export_pad2D_mismatch(self):
+        print("Pad2DMismatch")
+        model = aidge_core.sequential([
+            aidge_core.Pad2D((1, 3, 5, 7), name="pad2d")
+        ])
+
+        self.unit_test_export(model, "Pad2DMismatch", [[3, 5, 11, 7]])
+
+    def test_export_pad2D_denser(self):
+        print("Pad2DDenser")
+        model = aidge_core.sequential([
+            aidge_core.Pad2D((3, 3, 3, 3), name="pad2d")
+        ])
+
+        self.unit_test_export(model, "Pad2DDenser", [[1, 5, 7, 11]])
+
+    def test_export_pad2D_with_bigger_batch_size(self):
+        print("Pad2DBiggerBatchSize")
+        model = aidge_core.sequential([
+            aidge_core.Pad2D((1, 1, 1, 1), name="pad2d")
+        ])
+
+        self.unit_test_export(model, "Pad2DBiggerBatchSize", [[3, 5, 7, 11]])
+
+    @unittest.expectedFailure
+    def test_export_pad2D_not_constant(self):
+        print("Pad2DNotConstant")
+        model = aidge_core.sequential([
+            aidge_core.Pad2D((3, 3, 3, 3), border_type=aidge_core.pad_border_type.Wrap, name="pad2d")
+        ])
+
+        self.unit_test_export(model, "Pad2DNotConstant", [[1, 5, 7, 11]])
+
     def test_export_batchnorm2D(self):
         print("BatchNormalization2D")
@@ -260,6 +358,38 @@ class test_operator_export(unittest.TestCase):
         self.unit_test_export(model, "BatchNorm2D", [[1, 1, 5, 5]], False, False)
 
+    def test_export_batchnorm2D_Larger(self):
+        print("BatchNormalization2DLarger")
+        model = aidge_core.sequential([
+            aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
+        ])
+
+        self.unit_test_export(model, "BatchNorm2DLarger", [[1, 1, 5, 7]], False, False)
+
+    def test_export_batchnorm2D_Higher(self):
+        print("BatchNormalization2DHigher")
+        model = aidge_core.sequential([
+            aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
+        ])
+
+        self.unit_test_export(model, "BatchNorm2DHigher", [[1, 1, 7, 5]], False, False)
+
+    def test_export_batchnorm2D_Denser(self):
+        print("BatchNormalization2DDenser")
+        model = aidge_core.sequential([
+            aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
+        ])
+
+        self.unit_test_export(model, "BatchNorm2DDenser", [[1, 3, 5, 7]], False, False)
+
+    def test_export_batchnorm2D_with_bigger_batch_size(self):
+        print("BatchNormalization2DBiggerBatchSize")
+        model = aidge_core.sequential([
+            aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
+        ])
+
+        self.unit_test_export(model, "BatchNormalization2DBiggerBatchSize", [[4, 3, 5, 7]], False, False)
+
     def test_export_cpp(self):
         print("Export test to do")