Skip to content
Snippets Groups Projects
Commit 159108b5 authored by Cyril Moineau's avatar Cyril Moineau
Browse files

Update export ARM with new ImplSpec registration.

parent 798f71cd
No related branches found
No related tags found
No related merge requests found
......@@ -2,9 +2,12 @@ from aidge_core.export_utils import ExportLib
class ExportLibAidgeARM(ExportLib):
    """Export library for the pure-Aidge ARM Cortex-M backend.

    Export nodes register themselves against this library via
    ``ExportLibAidgeARM.register(...)``.
    """
    # Registry key identifying this backend in aidge_core.
    _name="aidge_arm"
    # No backend-wide static files to copy for this export (kernels are
    # listed per export node instead).
    static_files={
    }
# TODO ugly fix for Tensor registration issue...
# Alias the existing "cpu" float32 Tensor implementation under the
# "aidge_arm" backend key, so Tensors can be placed on this backend
# without a dedicated implementation.
import aidge_core
aidge_core.register_Tensor(["aidge_arm", aidge_core.dtype.float32],
                           aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.float32]))
class ExportLibCMSISNN(ExportLib):
    """Export library registered under "export_cmsisnn" (presumably the
    ARM CMSIS-NN kernel backend — name-based, confirm against usage)."""
    # Registry key identifying this backend in aidge_core.
    _name="export_cmsisnn"
......@@ -4,8 +4,8 @@ import numpy as np
from pathlib import Path
from typing import Tuple, List
# import aidge_core
from aidge_core.export_utils import ExportNode, ExportNodeCpp, operator_register
import aidge_core
from aidge_core.export_utils import ExportNode, ExportNodeCpp
from aidge_core.export_utils.code_generation import *
from aidge_export_arm_cortexm.utils import ROOT
from aidge_export_arm_cortexm.utils.converter import numpy_dtype2ctype
......@@ -45,7 +45,7 @@ def set_up_output(name, dtype):
@operator_register(ExportLibAidgeARM, "Producer")
@ExportLibAidgeARM.register("Producer", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
class Producer_ARMCortexM(ExportNode):
def __init__(self, node, mem_info, is_input, is_output):
......@@ -193,7 +193,16 @@ class Scaling():
return self.scaling
@operator_register(ExportLibAidgeARM, "ReLU")
# TODO : find a way to remove this dummy exportnode
@ExportLibAidgeARM.register("Pad2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
class Pad_ARMCortexM(ExportNodeCpp):
    """Placeholder export node for standalone Pad2D operators.

    Registered only so the export library recognizes Pad2D nodes; no
    kernel is generated for them (fused padded operators are handled by
    their own export nodes). Instantiating this node always raises.

    Raises:
        NotImplementedError: always — standalone Pad2D export is not
            supported yet.
    """
    def __init__(self, node, mem_info, is_input, is_output):
        # Fail fast with a clear message instead of producing broken code.
        # (Fixed grammar of the original message: "nodes is" -> "nodes are".)
        raise NotImplementedError("Pad2D nodes are not implemented")
@ExportLibAidgeARM.register("ReLU", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class ReLU_ARMCortexM(ExportNodeCpp):
def __init__(self, node, mem_info, is_input, is_output):
super().__init__(node, mem_info, is_input, is_output)
......@@ -205,11 +214,8 @@ class ReLU_ARMCortexM(ExportNodeCpp):
self.kernels_to_copy = [
str(ROOT / "_Aidge_Arm" / "kernels" / "Relu" / "aidge_relu_float32.h"),
]
@classmethod
def exportable(cls, node):
return True # TODO add check i/o NCHW
@operator_register(ExportLibAidgeARM, "Conv")
@ExportLibAidgeARM.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class Conv_ARMCortexM(ExportNodeCpp):
def __init__(self, node, mem_info, is_input, is_output):
super().__init__(node, mem_info, is_input, is_output)
......@@ -225,21 +231,20 @@ class Conv_ARMCortexM(ExportNodeCpp):
self.kernels_to_copy = [
str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "Conv.hpp")
]
@classmethod
def exportable(cls, node):
return True # TODO add check i/o NCHW
@operator_register(ExportLibAidgeARM, "PaddedConv")
@ExportLibAidgeARM.register_metaop("PaddedConv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class PaddedConv_ARMCortexM(ExportNodeCpp):
def __init__(self, node, mem_info, is_input, is_output):
super().__init__(node, mem_info, is_input, is_output)
self.attributes["activation"] = "Linear"
self.attributes.update(Scaling()("no_scaling"))
print(self.attributes)
for n in self.operator.get_micro_graph().get_nodes():
if n.type() == "Pad":
if n.type() == "Pad2D":
self.attributes["padding"] = n.get_operator(
).attr.begin_end_borders
if n.type() == "Conv":
if n.type() == "Conv2D":
self.attributes["kernel_dims"] = n.get_operator(
).attr.kernel_dims
self.attributes["stride_dims"] = n.get_operator(
......@@ -253,11 +258,6 @@ class PaddedConv_ARMCortexM(ExportNodeCpp):
self.kernels_to_copy = [
str(ROOT / "_Aidge_Arm" / "kernels" / "Convolution" / "Conv.hpp")
]
@classmethod
def exportable(cls, node):
return True # TODO add check i/o NCHW
class Pooling_ARMCortexM(ExportNodeCpp):
......@@ -281,7 +281,7 @@ class Pooling_ARMCortexM(ExportNodeCpp):
def exportable(cls, node):
return True # TODO add check i/o NCHW
@operator_register(ExportLibAidgeARM, "FC")
@ExportLibAidgeARM.register("FC", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class FC_ARMCortexM(ExportNodeCpp):
def __init__(self, node, mem_info, is_input, is_output):
super().__init__(node, mem_info, is_input, is_output)
......@@ -301,19 +301,19 @@ class FC_ARMCortexM(ExportNodeCpp):
def exportable(cls, node):
return True # TODO add check i/o NCHW
@operator_register(ExportLibAidgeARM, "MaxPooling")
@ExportLibAidgeARM.register("MaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class MaxPooling_ARMCortexM(Pooling_ARMCortexM):
    """Export node for MaxPooling2D: the shared pooling export with the
    pool type set to "Max"."""
    def __init__(self, node, mem_info, is_input, is_output):
        super().__init__(node, mem_info, is_input, is_output)
        # Selects the max variant; presumably consumed by the pooling
        # kernel template in Pooling_ARMCortexM — confirm there.
        self.attributes["pool_type"] = "Max"
@operator_register(ExportLibAidgeARM, "AvgPooling")
@ExportLibAidgeARM.register("AvgPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class AvgPooling_ARMCortexM(Pooling_ARMCortexM):
    """Export node for AvgPooling2D: the shared pooling export with the
    pool type set to "Avg"."""
    def __init__(self, node, mem_info, is_input, is_output):
        super().__init__(node, mem_info, is_input, is_output)
        # Selects the average variant; presumably consumed by the pooling
        # kernel template in Pooling_ARMCortexM — confirm there.
        self.attributes["pool_type"] = "Avg"
@operator_register(ExportLibAidgeARM, "FcReluScaling")
@ExportLibAidgeARM.register_metaop("FcReluScaling", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class FC_ARMCortexM(ExportNodeCpp):
def __init__(self, node, mem_info, is_input, is_output):
super().__init__(node, mem_info, is_input, is_output)
......@@ -331,7 +331,7 @@ class FC_ARMCortexM(ExportNodeCpp):
str(ROOT / "_Aidge_Arm" / "kernels" / "FullyConnected" / "Fc.hpp")
]
@operator_register(ExportLibAidgeARM, "Add")
@ExportLibAidgeARM.register("Add", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class Add_ARMCortexM(ExportNodeCpp):
def __init__(self, node, mem_info, is_input, is_output):
super().__init__(node, mem_info, is_input, is_output)
......@@ -345,7 +345,7 @@ class Add_ARMCortexM(ExportNodeCpp):
str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "aidge_supportfunctions.h")
]
@operator_register(ExportLibAidgeARM, "Mul")
@ExportLibAidgeARM.register("Mul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class Mul_ARMCortexM(ExportNodeCpp):
def __init__(self, node, mem_info, is_input, is_output):
super().__init__(node, mem_info, is_input, is_output)
......@@ -359,7 +359,7 @@ class Mul_ARMCortexM(ExportNodeCpp):
str(ROOT / "_Aidge_Arm" / "kernels" / "Utils" / "aidge_supportfunctions.h")
]
@operator_register(ExportLibAidgeARM, "Softmax")
@ExportLibAidgeARM.register("Softmax", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class Softmax_ARMCortexM(ExportNodeCpp):
def __init__(self, node, mem_info, is_input, is_output):
super().__init__(node, mem_info, is_input, is_output)
......@@ -372,7 +372,7 @@ class Softmax_ARMCortexM(ExportNodeCpp):
str(ROOT / "_Aidge_Arm" / "kernels" / "Softmax" / "aidge_softmax_chw_float32.h"),
]
@operator_register(ExportLibAidgeARM, "Sigmoid")
@ExportLibAidgeARM.register("Sigmoid", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class Sigmoid_ARMCortexM(ExportNodeCpp):
def __init__(self, node, mem_info, is_input, is_output):
super().__init__(node, mem_info, is_input, is_output)
......@@ -385,7 +385,7 @@ class Sigmoid_ARMCortexM(ExportNodeCpp):
str(ROOT / "_Aidge_Arm" / "kernels" / "Sigmoid" / "aidge_sigmoid_float32.h"),
]
@operator_register(ExportLibAidgeARM, "MatMul")
@ExportLibAidgeARM.register("MatMul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class MatMul_ARMCortexM(ExportNodeCpp):
def __init__(self, node, mem_info, is_input, is_output):
super().__init__(node, mem_info, is_input, is_output)
......@@ -399,7 +399,7 @@ class MatMul_ARMCortexM(ExportNodeCpp):
# TODO: Is this used ?
# @operator_register("ConvReluScaling")
# @register("ConvReluScaling")
# class ConvReluScaling_ARMCortexM(Conv_ARMCortexM):
# def __init__(self, node, board, library):
# super(Conv_ARMCortexM, self).__init__(node, board, library)
......@@ -422,7 +422,7 @@ class MatMul_ARMCortexM(ExportNodeCpp):
# self.scaling = Scaling(self.operator.attr.scaling_factor,
# self.operator.attr.quantized_nb_bits)("floating_point")
# @operator_register("BatchNorm")
# @register("BatchNorm")
# class BatchNorm2D_ARMCortexM(ExportNode):
# def __init__(self, node, board, library):
......@@ -480,7 +480,7 @@ class MatMul_ARMCortexM(ExportNodeCpp):
# ))
# return list_actions
# @operator_register("Reshape")
# @register("Reshape")
# class Reshape_ARMCortexM(ExportNode):
# def __init__(self, node, board, library):
# super().__init__(node)
......@@ -526,7 +526,7 @@ class MatMul_ARMCortexM(ExportNodeCpp):
# return list_actions
# @operator_register("Gather")
# @register("Gather")
# class Gather_ARMCortexM(ExportNode):
# def __init__(self, node, board, library):
# super().__init__(node)
......@@ -582,7 +582,7 @@ class MatMul_ARMCortexM(ExportNodeCpp):
# return list_actions
# @operator_register("Transpose")
# @register("Transpose")
# class Transpose_ARMCortexM(ExportNode):
# def __init__(self, node, board, library):
# super().__init__(node)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment