Commit 9de26a0c authored by Thibault Allenet

Merge branch 'dev' into low_bit_support

parents ec1d6f8e 1a4b72ef
@@ -34,3 +34,6 @@ xml*/
 # Model parameters
 *.onnx
+uni_tests/benchmark.py
+uni_tests/superpoint.py
+uni_tests/test_cmsis_nn_conv.py
@@ -95,8 +95,7 @@ __attribute__((always_inline)) inline void convcellDWPropagate(
     for (int output = 0; output < NB_OUTPUTS; ++output) {
         const int channel = (output * NB_CHANNELS) / NB_OUTPUTS;
-        SUM_T weightedSum = biasses[output];
+        Bias_T weightedSum = biasses[output];
         for (int sy = 0; sy < KERNEL_HEIGHT; ++sy) {
             if ((PADDING_Y != 0
......
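The only change in the hunk above is the accumulator's declared type, from SUM_T to Bias_T, at the point where it is seeded with the bias. As a hedged reference of the operation this kernel performs (plain float, unit stride, no padding, no memory wrapping or scaling; the function name, the simplified CHW layout and the std::vector interface are assumptions, not the actual N2D2 ConvDW.hpp):

// Hedged sketch of a depthwise convolution with the channel == output mapping
// that the convcellDWPropagate template parameters describe. In the quantized
// kernel, the accumulator below is the variable whose type this commit
// switches from SUM_T to Bias_T.
#include <vector>

void convDWReference(const std::vector<float>& inputs,   // CHW: [C][H][W]
                     std::vector<float>& outputs,        // CHW: [C][OH][OW]
                     const std::vector<float>& weights,  // [C][KH][KW]
                     const std::vector<float>& biasses,  // [C]
                     int C, int H, int W, int KH, int KW)
{
    const int OH = H - KH + 1;
    const int OW = W - KW + 1;
    outputs.assign(static_cast<size_t>(C) * OH * OW, 0.f);

    for (int c = 0; c < C; ++c) {                  // one output channel per input channel
        for (int oy = 0; oy < OH; ++oy) {
            for (int ox = 0; ox < OW; ++ox) {
                float weightedSum = biasses[c];    // accumulator seeded with the bias
                for (int ky = 0; ky < KH; ++ky) {
                    for (int kx = 0; kx < KW; ++kx) {
                        weightedSum += inputs[(c * H + oy + ky) * W + ox + kx]
                                     * weights[(c * KH + ky) * KW + kx];
                    }
                }
                outputs[(c * OH + oy) * OW + ox] = weightedSum;
            }
        }
    }
}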
 #include <math.h>
-void aidge_softmax_chw_float32 (float* inputs,
+void aidge_softmax_chw_float32(float* inputs,
                                 float* outputs,
-                                int inputDims[],
+                                const int inputDims[],
                                 int axis,
-                                unsigned int size_inputDim,
-                                unsigned int size)
+                                const unsigned int size_inputDim,
+                                const unsigned int size)
 {
     axis += (axis >= 0 ) ? 0 : size_inputDim;
     int postAxisElems = 1;
-    for (int i = axis+1; i < size_inputDim; ++i) {
+    for (unsigned int i = axis+1; i < size_inputDim; ++i) {
         postAxisElems *= inputDims[i];
     }
     int preAxisElems = 1;
......
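The hunk above is truncated after preAxisElems, and the diff only records const-qualification and the signed-to-unsigned loop index fix. As a hedged illustration of the whole routine (the max/exp/normalize body below is an assumption about how a CHW softmax along an axis is typically completed, not the file's actual remainder), it plausibly reads:

#include <math.h>

void aidge_softmax_chw_float32(float* inputs,
                               float* outputs,
                               const int inputDims[],
                               int axis,
                               const unsigned int size_inputDim,
                               const unsigned int size)
{
    axis += (axis >= 0) ? 0 : size_inputDim;

    int postAxisElems = 1;
    for (unsigned int i = axis + 1; i < size_inputDim; ++i) {
        postAxisElems *= inputDims[i];
    }
    int preAxisElems = 1;
    for (int i = 0; i < axis; ++i) {
        preAxisElems *= inputDims[i];
    }
    const int axisDim = inputDims[axis];

    for (int pre = 0; pre < preAxisElems; ++pre) {
        for (int post = 0; post < postAxisElems; ++post) {
            const int base = pre * axisDim * postAxisElems + post;

            /* Numerically stable softmax: subtract the max before exponentiating. */
            float maxVal = inputs[base];
            for (int a = 1; a < axisDim; ++a) {
                const float v = inputs[base + a * postAxisElems];
                if (v > maxVal) maxVal = v;
            }
            float sum = 0.0f;
            for (int a = 0; a < axisDim; ++a) {
                const float e = expf(inputs[base + a * postAxisElems] - maxVal);
                outputs[base + a * postAxisElems] = e;
                sum += e;
            }
            for (int a = 0; a < axisDim; ++a) {
                outputs[base + a * postAxisElems] /= sum;
            }
        }
    }
    (void)size; /* total element count, unused in this sketch */
}

The base index plus a step of postAxisElems walks the chosen axis, so the same loops handle any axis position in the CHW layout.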
+{% include "./_def_io.jinja" %}
 {#- For name header -#}
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
@@ -5,9 +8,7 @@
 {# For layer configuration -#}
 #define {{ name|upper }}_INPUTS_SIZE {{ in_size[0] }}
 #define {{ name|upper }}_OUTPUTS_SIZE {{ out_size[0] }}
-#define {{ name|upper }}_DIMS {{ in_dims[0] }}
 #define {{ name|upper }}_AXIS {{ axis }}
+#define {{ name|upper }}_INPUT_DIMS_SIZE {{ in_dims[0]|length}}
+static const int {{ name|upper }}_DIMS[] = { {{ in_dims[0] | join(', ') }} };
 #endif /* {{ name|upper }}_LAYER_H */
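For illustration, rendering this updated configuration template for a hypothetical layer named softmax0 with input dims {1, 10, 4, 4} (the layer name and dims are invented for the example, and whatever _def_io.jinja contributes is omitted) would give a header along these lines:

#ifndef SOFTMAX0_LAYER_H
#define SOFTMAX0_LAYER_H

/* Illustrative values only; a real export substitutes the graph's actual sizes. */
#define SOFTMAX0_INPUTS_SIZE 160
#define SOFTMAX0_OUTPUTS_SIZE 160
#define SOFTMAX0_AXIS 1
#define SOFTMAX0_INPUT_DIMS_SIZE 4
static const int SOFTMAX0_DIMS[] = { 1, 10, 4, 4 };

#endif /* SOFTMAX0_LAYER_H */

The change replaces the old _DIMS macro (which expanded to a Python-style list) with a macro for the rank plus a proper C array, which is what an indexable inputDims[] kernel argument needs.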
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+N2D2_Export::convcellDWPropagate<{{ in_name[0]|upper }}_NB_CHANNELS,
+                                 {{ in_name[0]|upper }}_IN_HEIGHT,
+                                 {{ in_name[0]|upper }}_IN_WIDTH,
+                                 {{ out_name[0]|upper }}_NB_OUTPUTS,
+                                 {{ out_name[0]|upper }}_OUT_HEIGHT,
+                                 {{ out_name[0]|upper }}_OUT_WIDTH,
+                                 {{ name|upper }}_PADDING_Y,
+                                 {{ name|upper }}_PADDING_X,
+                                 {{ name|upper }}_STRIDE_Y,
+                                 {{ name|upper }}_STRIDE_X,
+                                 {{ name|upper }}_KERNEL_HEIGHT,
+                                 {{ name|upper }}_KERNEL_WIDTH,
+                                 {{ name|upper }}_ACTIVATION,
+                                 {{ in_name[0]|upper }}_CONT_OFFSET,
+                                 {{ in_name[0]|upper }}_CONT_SIZE,
+                                 {{ in_name[0]|upper }}_WRAP_OFFSET,
+                                 {{ in_name[0]|upper }}_WRAP_SIZE,
+                                 {{ in_name[0]|upper }}_STRIDE,
+                                 {{ out_name[0]|upper }}_CONT_OFFSET,
+                                 {{ out_name[0]|upper }}_CONT_SIZE,
+                                 {{ out_name[0]|upper }}_WRAP_OFFSET,
+                                 {{ out_name[0]|upper }}_WRAP_SIZE,
+                                 {{ out_name[0]|upper }}_STRIDE>
+    ({{in_name[0]}}, {{out_name[0]}}, {{in_name[2]}}, {{in_name[1]}}, {{ name|upper }}_SCALING);
+{% endfilter %}
@@ -3,7 +3,12 @@
 Use this module to generate CPP exports for ARM CortexM boards.
 This module has to be used with the Aidge suite
 """
+from pathlib import Path
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[0]
 from .export import *
+from .export_registry import ExportLibAidgeARM, ExportLibCMSISNN
 from .operators import *
 import os
 import shutil
 from pathlib import Path
-from aidge_export_arm_cortexm.utils import (ROOT, AVAILABLE_BOARDS, has_board)
+from aidge_export_arm_cortexm import ROOT
 from aidge_export_arm_cortexm.export_registry import ExportLibAidgeARM
 # from aidge_export_arm_cortexm.utils.converter import numpy_dtype2ctype
@@ -38,7 +38,12 @@ def supported_boards() -> list[str]:
 def gen_board_files(path:str, board:str)->None:
     if board not in supported_boards():
<<<<<<< HEAD
         raise ValueError(f"Board {board} is not supported, supported boards are: {supported_boards()}")
=======
         joint_board_str = "\n\t-".join(supported_boards())
         raise ValueError(f"Board {board} is not supported, supported boards are:\n\t-{joint_board_str}")
>>>>>>> dev
     if isinstance(path, str): path = Path(path)

     # Create the dnn directory if it does not exist
@@ -50,6 +55,9 @@ def gen_board_files(path:str, board:str)->None:
     # Copy all static files in the export
     shutil.copytree(BOARDS_MAP[board], str(path), dirs_exist_ok=True)
<<<<<<< HEAD
     # For N2D2 library, copy static folder to export/include
     dnn_include_folder = dnn_folder / "include"
-    os.makedirs(str(dnn_include_folder), exist_ok=True)
\ No newline at end of file
+    os.makedirs(str(dnn_include_folder), exist_ok=True)
=======
>>>>>>> dev
@@ -8,24 +8,6 @@ class ExportLibAidgeARM(ExportLib):
         self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "forward.jinja")
-        # TODO ugly fix for Tensor registration issue...
-        import aidge_core
-        aidge_core.register_Tensor(["aidge_arm", aidge_core.dtype.float32],
-                                   aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.float32]))
-        aidge_core.register_Tensor(["aidge_arm", aidge_core.dtype.int32],
-                                   aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.int32]))
-        aidge_core.register_Tensor(["aidge_arm", aidge_core.dtype.int8],
-                                   aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.int8]))
-        aidge_core.register_Tensor(["aidge_arm", aidge_core.dtype.int4],
-                                   aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.int4]))
-        aidge_core.register_Tensor(["aidge_arm", aidge_core.dtype.uint4],
-                                   aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.uint4]))
-        aidge_core.register_Tensor(["aidge_arm", aidge_core.dtype.dual_int4],
-                                   aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.dual_int4]))
-        aidge_core.register_Tensor(["aidge_arm", aidge_core.dtype.dual_uint4],
-                                   aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.dual_uint4]))

 class ExportLibCMSISNN(ExportLib):
     _name="export_cmsisnn"
@@ -8,9 +8,7 @@ import aidge_core
 import aidge_backend_cpu
 from aidge_core.export_utils import ExportNode, ExportNodeCpp
 from aidge_core.export_utils.code_generation import *
-from aidge_export_arm_cortexm.utils import ROOT
-from aidge_export_arm_cortexm.utils.converter import numpy_dtype2ctype
-from aidge_export_arm_cortexm.utils.generation import *
+from aidge_export_arm_cortexm import ROOT
 from aidge_export_arm_cortexm.export_registry import ExportLibAidgeARM
-# from data_conversion import datatype_converter_aidge2arm
+from aidge_export_arm_cortexm.data_conversion import datatype_converter_aidge2arm
@@ -18,6 +16,24 @@ from aidge_export_arm_cortexm.data_conversion import datatype_converter_aidge2ar
 ##############################################
 ############## Export functions ##############
 ##############################################

+# Note: to remove
+def numpy_dtype2ctype(dtype):
+    if dtype == np.int8:
+        return "int8_t"
+    elif dtype == np.int16:
+        return "int16_t"
+    elif dtype == np.int32:
+        return "int32_t"
+    elif dtype == np.int64:
+        return "int64_t"
+    elif dtype == np.float32:
+        return "float"
+    elif dtype == np.float64:
+        return "double"
+    # Add more dtype mappings as needed
+    else:
+        raise ValueError(f"Unsupported {dtype} dtype")

 def export_params(name:str,
                   array: np.ndarray,
@@ -486,7 +502,7 @@ class ConvDW_ARMCortexM(ExportNodeCpp):
         self.attributes["padding"] = [0, 0]
         self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_config.jinja")
-        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "conv_kernel.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "conv_dw_kernel.jinja")
         self.include_list = []
         self.kernels_to_copy = [
             str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "ConvDW.hpp")
@@ -511,7 +527,7 @@ class PaddedConvDW_ARMCortexM(ExportNodeCpp):
         ).attr.dilation_dims
         self.config_template = str(ROOT / "_Aidge_Arm" / "templates" / "configuration" / "conv_config.jinja")
-        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "conv_kernel.jinja")
+        self.forward_template = str(ROOT / "_Aidge_Arm" / "templates" / "forward_call" / "conv_dw_kernel.jinja")
         self.include_list = []
         self.kernels_to_copy = [
             str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "ConvDW.hpp")
......