Commit d6cf4674 authored by Cyril Moineau

Remove is_input and is_output from node_export.

parent a234801d
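The diff below drops the `is_input` / `is_output` constructor arguments from every registered export node, so subclasses now receive only the node and its memory information. As a minimal sketch of what a registration looks like after this commit (the `MyOp` operator name, the import paths, and the attribute values are illustrative assumptions, not part of the diff):

import aidge_core
from aidge_core.export_utils import ExportNodeCpp  # assumed import path
from aidge_export_cpp import ExportLibCpp          # assumed import path

@ExportLibCpp.register("MyOp", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class MyOpCPP(ExportNodeCpp):
    def __init__(self, node, mem_info):   # is_input / is_output no longer accepted
        super().__init__(node, mem_info)  # base-class signature after this commit
        self.attributes["activation"] = "Linear"
        self.attributes["rescaling"] = "NoScaling"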
@@ -37,8 +37,8 @@ def export_params(name: str,
 @ExportLibCpp.register("Producer", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
 class ProducerCPP(ExportNode):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.values = np.array(self.operator.get_output(0))
         if len(self.values.shape) == 4:  # Note: export in HWC
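The "export in HWC" note above means the producer's constant is written out channels-last; a hedged numpy sketch of that reordering for a 4D tensor (the NCHW input layout is an assumption based on the comment):

import numpy as np

values = np.arange(2 * 3 * 4 * 5, dtype=np.float32).reshape(2, 3, 4, 5)  # N, C, H, W
hwc = np.transpose(values, (0, 2, 3, 1))  # N, H, W, C: channels moved last for export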
@@ -59,14 +59,14 @@ class ProducerCPP(ExportNode):
 # TODO: find a way to remove this dummy export node
 @ExportLibCpp.register("Pad2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
 class Pad_ARMCortexM(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
+    def __init__(self, node, mem_info):
         raise NotImplementedError("Pad2D nodes are not implemented")
 @ExportLibCpp.register("ReLU", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class ReLUCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["activation"] = "Rectifier"
         self.attributes["rescaling"] = "NoScaling"
         self.config_template = str(
@@ -81,8 +81,8 @@ class ReLUCPP(ExportNodeCpp):
 @ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class ConvCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         # No padding with Conv
         # Use PaddedConv to add padding attribute
         self.attributes["padding"] = [0, 0]
@@ -102,8 +102,8 @@ class ConvCPP(ExportNodeCpp):
 @ExportLibCpp.register_metaop("PaddedConv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class PaddedConvCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         # TODO: find a way to retrieve attrs for meta ops
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad2D":
@@ -132,8 +132,8 @@ class PaddedConvCPP(ExportNodeCpp):
 @ExportLibCpp.register("Add", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class AddCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["elemwise_op"] = "Add"
         self.attributes["activation"] = "Linear"
         self.attributes["rescaling"] = "NoScaling"
@@ -150,8 +150,8 @@ class AddCPP(ExportNodeCpp):
 @ExportLibCpp.register("Sub", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class SubCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["elemwise_op"] = "Sub"
         self.attributes["activation"] = "Linear"
         self.attributes["rescaling"] = "NoScaling"
@@ -169,8 +169,8 @@ class SubCPP(ExportNodeCpp):
 @ExportLibCpp.register("Mul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class MulCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["elemwise_op"] = "Mul"
         self.attributes["activation"] = "Linear"
         self.attributes["rescaling"] = "NoScaling"
@@ -187,8 +187,8 @@ class MulCPP(ExportNodeCpp):
 @ExportLibCpp.register("MaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class MaxPoolCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         # No padding with MaxPooling
         # Use PaddedMaxPooling to add padding attribute
@@ -210,8 +210,8 @@ class MaxPoolCPP(ExportNodeCpp):
 @ExportLibCpp.register_metaop("PaddedMaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class PaddedMaxPoolCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad2D":
                 self.attributes["padding"] = n.get_operator(
@@ -238,8 +238,8 @@ class PaddedMaxPoolCPP(ExportNodeCpp):
 @ExportLibCpp.register("GlobalAveragePooling", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class GlobalAveragePoolCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["stride_dims"] = [1, 1]
         # No padding with GlobalAveragePooling
@@ -265,8 +265,8 @@ class GlobalAveragePoolCPP(ExportNodeCpp):
 @ExportLibCpp.register("FC", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class FcCPP(ExportNodeCpp):
-    def __init__(self, node, mem_info, is_input, is_output):
-        super().__init__(node, mem_info, is_input, is_output)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         self.attributes["activation"] = "Linear"
         self.attributes["rescaling"] = "NoScaling"
         self.config_template = str(

+{% filter indent(width=4, first=False) %}
 {% for outidx in range(nb_out) -%}
 {{out_cdtype[outidx]}}* {{out_name[outidx]}} = ({{out_cdtype[outidx]}}*) mem + {{out_name[outidx]|upper}}_OFFSET;
 {% endfor %}
+{% endfilter %}

+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
 activation_forward<{{name|upper}}_NB_DATA,
                    {{name|upper}}_ACTIVATION>
     ({{in_name[0]}}, {{out_name[0]}}, {{name|upper}}_RESCALING);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}

+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
 batchnorm_forward<{{ out_name[0]|upper }}_NB_OUTPUTS,
                   {{ out_name[0]|upper }}_OUT_HEIGHT,
                   {{ out_name[0]|upper }}_OUT_WIDTH,
                   {{name|upper}}_ACTIVATION>
     ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{in_name[3]}}, {{in_name[4]}}, {{name|upper}}_EPSILON);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}

+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
 convolution_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                     {{ in_name[0]|upper }}_IN_HEIGHT,
                     {{ in_name[0]|upper }}_IN_WIDTH,
@@ -17,3 +17,4 @@ convolution_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                     {{name|upper}}_ACTIVATION>
     ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{name|upper}}_RESCALING);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}

+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
 elemwise_forward<{{name|upper}}_NB_ELTS,
                  {{name|upper}}_ELEM_OP,
                  {{name|upper}}_ACTIVATION>
     ({{out_name[0]}}, {{name|upper}}_RESCALING, {{in_name[0]}}, {{in_name[1]}});
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}

+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
 fullyconnected_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                        {{ in_name[0]|upper }}_IN_HEIGHT,
                        {{ in_name[0]|upper }}_IN_WIDTH,
@@ -9,3 +9,4 @@ fullyconnected_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                        {{name|upper}}_ACTIVATION>
     ({{in_name[0]}}, {{out_name[0]}}, {{in_name[1]}}, {{in_name[2]}}, {{name|upper}}_RESCALING);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}

+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
 leakyrelu_forward<{{name|upper}}_NB_DATA>
     ({{input_name}}, {{output_name}}, {{name|upper}}_ALPHA);
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}

+{% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
 pooling_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                 {{ in_name[0]|upper }}_IN_HEIGHT,
                 {{ in_name[0]|upper }}_IN_WIDTH,
@@ -16,3 +16,4 @@ pooling_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
                 {{name|upper}}_ACTIVATION>
     ({{in_name[0]}}, {{out_name[0]}});
 {% include "./_save_outputs.jinja" %}
+{% endfilter %}
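
These forward snippets are ordinary Jinja templates, so they can be exercised stand-alone; a minimal sketch rendering the elemwise one with jinja2 (the DictLoader setup, the "elemwise.jinja" key, the stubbed includes, and the context values are all assumptions for illustration):

from jinja2 import Environment, DictLoader

templates = {
    "./_mem_offset.jinja": "",    # stubbed out for this sketch
    "./_save_outputs.jinja": "",  # stubbed out for this sketch
    "elemwise.jinja": (
        '{% filter indent(width=4, first=False) %}\n'
        '{% include "./_mem_offset.jinja" %}\n'
        'elemwise_forward<{{name|upper}}_NB_ELTS,\n'
        '                 {{name|upper}}_ELEM_OP,\n'
        '                 {{name|upper}}_ACTIVATION>\n'
        '    ({{out_name[0]}}, {{name|upper}}_RESCALING, {{in_name[0]}}, {{in_name[1]}});\n'
        '{% include "./_save_outputs.jinja" %}\n'
        '{% endfilter %}\n'
    ),
}

env = Environment(loader=DictLoader(templates))
# Renders the C++ call for a node named "add_0" with two inputs and one output.
print(env.get_template("elemwise.jinja").render(
    name="add_0", in_name=["inputs_a", "inputs_b"], out_name=["outputs_0"]))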