diff --git a/.clang-tidy b/.clang-tidy
new file mode 100644
index 0000000000000000000000000000000000000000..77e8c9dbd6c7c95d38505acdfc45740403031597
--- /dev/null
+++ b/.clang-tidy
@@ -0,0 +1,167 @@
+---
+Checks: "google-*, readability-identifier-naming"
+WarningsAsErrors: ''
+HeaderFilterRegex: ''
+FormatStyle: none
+CheckOptions:
+  # Case style
+  # Following https://webkit.org/code-style-guidelines/ :
+  # CamelCase for class, struct, namespace
+  # camelBack for variables, functions
+  # CamelCase for class, struct, namespace
+  - key: 'readability-identifier-naming.AbstractClassCase'
+    value: 'CamelCase'
+  # CamelCase for class, struct, namespace
+  - key: 'readability-identifier-naming.ClassCase'
+    value: 'CamelCase'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ClassConstantCase'
+    value: 'camelBack'
+  - key: 'readability-identifier-naming.ClassConstantPrefix'
+    value: 's_'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ClassMemberCase'
+    value: 'camelBack'
+  - key: 'readability-identifier-naming.ClassMemberPrefix'
+    value: 's_'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ClassMethodCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstantCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstantMemberCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstantParameterCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstantPointerParameterCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstexprFunctionCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstexprMethodCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstexprVariableCase'
+    value: 'camelBack'
+  # CamelCase for class, struct, namespace
+  - key: 'readability-identifier-naming.EnumCase'
+    value: 'CamelCase'
+  # CamelCase for class, struct, namespace
+  - key: 'readability-identifier-naming.EnumConstantCase'
+    value: 'CamelCase'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.FunctionCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.GlobalConstantCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.GlobalConstantPointerCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.GlobalFunctionCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.GlobalPointerCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.GlobalVariableCase'
+    value: 'camelBack'
+  # CamelCase for class, struct, namespace
+  - key: 'readability-identifier-naming.InlineNamespaceCase'
+    value: 'CamelCase'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.LocalConstantCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.LocalConstantPointerCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.LocalPointerCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.LocalVariableCase'
+    value: 'camelBack'
+  - key: 'readability-identifier-naming.MacroDefinitionCase'
+    value: 'UPPER_CASE'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.MemberCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.MethodCase'
+    value: 'camelBack'
+  # CamelCase for class, struct, namespace
+  - key: 'readability-identifier-naming.NamespaceCase'
+    value: 'CamelCase'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ParameterCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ParameterPackCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.PointerParameterCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.PrivateMemberCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.PrivateMemberPrefix'
+    value: 'm_'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.PrivateMethodCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ProtectedMemberCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ProtectedMemberPrefix'
+    value: 'm_'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ProtectedMethodCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.PublicMemberCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.PublicMethodCase'
+    value: 'camelBack'
+  - key: 'readability-identifier-naming.ScopedEnumConstantCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.StaticConstantCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.StaticVariableCase'
+    value: 'camelBack'
+  - key: 'readability-identifier-naming.StructCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.TemplateParameterCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.TemplateTemplateParameterCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.TypeTemplateParameterIgnoredRegexp'
+    value: 'expr-type'
+  # CamelCase for Type aliases
+  - key: 'readability-identifier-naming.TypeAliasCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.TypedefCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.TypeTemplateParameterCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.UnionCase'
+    value: 'CamelCase'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ValueTemplateParameterCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.VariableCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.VirtualMethodCase'
+    value: 'camelBack'
+...
diff --git a/aidge_core/aidge_export_aidge/export.py b/aidge_core/aidge_export_aidge/export.py
index aa993c4bee266b308506f46260445c7f58de1a60..51468ed846dc7a731152a1ddb3f4374847631402 100644
--- a/aidge_core/aidge_export_aidge/export.py
+++ b/aidge_core/aidge_export_aidge/export.py
@@ -93,7 +93,7 @@ def serialize_to_cpp(export_folder: str,
             node,
             None)
 
-        set_operator.add(node.type())
+        # set_operator.add(node.type())
 
         # TODO: list_configs and list_actions don't need to be passed by argument
         # Export the configuration
diff --git a/aidge_core/aidge_export_aidge/operator_export/add.py b/aidge_core/aidge_export_aidge/operator_export/add.py
new file mode 100644
index 0000000000000000000000000000000000000000..4eb7c3e37f0d63388a5bfe8600f184b9da2ffc49
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/add.py
@@ -0,0 +1,14 @@
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
+
+@ExportSerialize.register("Add", ImplSpec(IOSpec(dtype.any)))
+class Add(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = ""
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/add.jinja")
+        self.include_list = ["aidge/operator/Add.hpp"]
+        self.kernels_to_copy = []
diff --git a/aidge_core/aidge_export_aidge/operator_export/conv.py b/aidge_core/aidge_export_aidge/operator_export/conv.py
index 8805629b7f1622f3687409c9beb8ccc4322d2a25..ea23c1551787c8579549d54a1fe7396995eb1bff 100644
--- a/aidge_core/aidge_export_aidge/operator_export/conv.py
+++ b/aidge_core/aidge_export_aidge/operator_export/conv.py
@@ -11,7 +11,7 @@ class Conv(ExportNodeCpp):
             ROOT_EXPORT / "templates/attributes/conv.jinja")
         self.forward_template = str(
             ROOT_EXPORT / "templates/graph_ctor/conv.jinja")
-        self.include_list = []
+        self.include_list = ["aidge/operator/Conv.hpp"]
         self.kernels_to_copy = []
         self.config_path = "include/attributes"
         self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/conv_depth_wise.py b/aidge_core/aidge_export_aidge/operator_export/conv_depth_wise.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e04f8aac17da5662a3ed08bc627969dbb3a9c13
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/conv_depth_wise.py
@@ -0,0 +1,17 @@
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
+
+@ExportSerialize.register(["ConvDepthWise1D", "ConvDepthWise2D"], ImplSpec(IOSpec(dtype.any)))
+class ConvDepthWise(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(
+            ROOT_EXPORT / "templates/attributes/conv_depth_wise.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/conv_depth_wise.jinja")
+        self.include_list = ["aidge/operator/ConvDepthWise.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/fc.py b/aidge_core/aidge_export_aidge/operator_export/fc.py
index 6fae97d6668813727bcb81e6c175e18dc369bdd9..4f964a9942600d46740b570975a218b4c2e7aabd 100644
--- a/aidge_core/aidge_export_aidge/operator_export/fc.py
+++ b/aidge_core/aidge_export_aidge/operator_export/fc.py
@@ -12,7 +12,7 @@ class FC(ExportNodeCpp):
             ROOT_EXPORT / "templates/attributes/fc.jinja")
         self.forward_template = str(
             ROOT_EXPORT / "templates/graph_ctor/fc.jinja")
-        self.include_list = []
+        self.include_list = ["aidge/operator/FC.hpp"]
         self.kernels_to_copy = []
         self.config_path = "include/attributes"
         self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/maxpooling.py b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
index df53de9eb6e9705064696e202c14e343beae17d5..6d9c7998fb90153bfdfd2898c1dfcfb1ad730f20 100644
--- a/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
+++ b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
@@ -11,7 +11,7 @@ class MaxPooling(ExportNodeCpp):
             ROOT_EXPORT / "templates/attributes/maxpooling.jinja")
         self.forward_template = str(
             ROOT_EXPORT / "templates/graph_ctor/maxpooling.jinja")
-        self.include_list = []
+        self.include_list = ["aidge/operator/MaxPooling.hpp"]
         self.kernels_to_copy = []
         self.config_path = "include/attributes"
         self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/pad.py b/aidge_core/aidge_export_aidge/operator_export/pad.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d6869de07b985169399697e95b6b719f658c911
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/pad.py
@@ -0,0 +1,17 @@
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
+
+@ExportSerialize.register(["Pad1D", "Pad2D"], ImplSpec(IOSpec(dtype.any)))
+class Pad(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(
+            ROOT_EXPORT / "templates/attributes/pad.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/pad.jinja")
+        self.include_list = ["aidge/operator/Pad.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/producer.py b/aidge_core/aidge_export_aidge/operator_export/producer.py
index 475d3625511c1b1d9e44ebcd517620c46771718b..02f2f1f39c6797d7f92a5938d6dbe8853079a624 100644
--- a/aidge_core/aidge_export_aidge/operator_export/producer.py
+++ b/aidge_core/aidge_export_aidge/operator_export/producer.py
@@ -23,7 +23,7 @@ class Producer(ExportNodeCpp):
             ROOT_EXPORT / "templates/graph_ctor/producer.jinja")
         self.attributes["tensor_name"] = f"{child.name()}_{in_idx}"
         self.attributes["values"] = str(self.operator.get_output(0))
-        self.include_list = []
+        self.include_list = ["aidge/operator/Producer.hpp"]
         self.kernels_to_copy = []
         self.config_path = "include/attributes"
         self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/relu.py b/aidge_core/aidge_export_aidge/operator_export/relu.py
index 3001357343fd24ddbfb2dd799efc3f72be648499..b8398e30504b534fba755e6c613d361d873e09cd 100644
--- a/aidge_core/aidge_export_aidge/operator_export/relu.py
+++ b/aidge_core/aidge_export_aidge/operator_export/relu.py
@@ -10,5 +10,5 @@ class ReLU(ExportNodeCpp):
         self.config_template = ""
         self.forward_template = str(
             ROOT_EXPORT / "templates/graph_ctor/relu.jinja")
-        self.include_list = []
+        self.include_list = ["aidge/operator/ReLU.hpp"]
         self.kernels_to_copy = []
diff --git a/aidge_core/aidge_export_aidge/operator_export/sub.py b/aidge_core/aidge_export_aidge/operator_export/sub.py
index b728e088d2516af9f1d090ae27bb526c03aecff4..01b68b70f4cfcf3b3899202269106c58cb7e54a1 100644
--- a/aidge_core/aidge_export_aidge/operator_export/sub.py
+++ b/aidge_core/aidge_export_aidge/operator_export/sub.py
@@ -10,5 +10,5 @@ class Sub(ExportNodeCpp):
         self.config_template = ""
         self.forward_template = str(
             ROOT_EXPORT / "templates/graph_ctor/sub.jinja")
-        self.include_list = []
+        self.include_list = ["aidge/operator/Sub.hpp"]
         self.kernels_to_copy = []
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/conv_depth_wise.jinja b/aidge_core/aidge_export_aidge/templates/attributes/conv_depth_wise.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..7c2ffff448bb3d028f378ba6fc124abdad6e9ad7
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/conv_depth_wise.jinja
@@ -0,0 +1,16 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+#define _{{name|upper}}_CHANNELS {{out_chan[0]}}
+
+{% for i in range(kernel_dims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{kernel_dims[i]}}
+{%- endfor %}
+{% for i in range(stride_dims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{stride_dims[i]}}
+{%- endfor %}
+{% for i in range(dilation_dims|length) %}
+#define _{{name|upper}}_DILATION_{{i}} {{dilation_dims[i]}}
+{%- endfor %}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/pad.jinja b/aidge_core/aidge_export_aidge/templates/attributes/pad.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8fb76a6b220c643da9a3f02d38f1757fed0c1b86
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/pad.jinja
@@ -0,0 +1,12 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+{%- set half_length = (begin_end_borders|length / 2)|int -%}
+{% for i in range(half_length) %}
+#define _{{name|upper}}_BEGIN_BORDERS_{{i}} {{begin_end_borders[2*i]}}
+#define _{{name|upper}}_END_BORDERS_{{i}} {{begin_end_borders[2*i+1]}}
+{%- endfor %}
+#define _{{name|upper}}_BORDER_TYPE {{border_type|int}}
+#define _{{name|upper}}_BORDER_VALUE {{border_value}}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/add.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/add.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..2bfaf93646fc24f6a44ac170a8c2c932f5daf0fc
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/add.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Add(
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv_depth_wise.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv_depth_wise.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..f7e1a85bbc084631ea4d26bfadd106bc2a5a69fe
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv_depth_wise.jinja
@@ -0,0 +1,25 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::ConvDepthWise(
+            _{{name|upper}}_CHANNELS,
+            {
+                {%- for i in range(kernel_dims|length) -%}
+                _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
+                {%- endfor -%}
+            },
+            "{{name}}",
+            {
+                {%- for i in range(stride_dims|length) -%}
+                _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
+                {%- endfor -%}
+            },
+            {
+                {%- for i in range(dilation_dims|length) -%}
+                _{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%}
+                {%- endfor -%}
+            }
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/pad.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/pad.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..a7bd866207be9f048ae431922710b975268a6155
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/pad.jinja
@@ -0,0 +1,17 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+{%- set half_length = (begin_end_borders|length / 2)|int -%}
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Pad<{{half_length}}>(
+            {
+                {%- for i in range(half_length) -%}
+                _{{name|upper}}_BEGIN_BORDERS_{{i}}, _{{name|upper}}_END_BORDERS_{{i}} {%- if not loop.last %}, {% endif -%}
+                {%- endfor -%}
+            },
+            "{{name}}",
+            static_cast<Aidge::PadBorderType>(_{{name|upper}}_BORDER_TYPE),
+            _{{name|upper}}_BORDER_VALUE
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/show_graphview.py b/aidge_core/show_graphview.py
index ddf0fc4b4659a727c7879738ef5e3eb40186cac1..633298f10dbfdafe40022f88f741f82d2d35c681 100644
--- a/aidge_core/show_graphview.py
+++ b/aidge_core/show_graphview.py
@@ -4,24 +4,24 @@
 import builtins
 import aidge_core
 import numpy as np
 from pathlib import Path
- 
+
 def _retrieve_operator_attrs(node : aidge_core.Node) -> dict[str, int, float, bool, None]:
     """
     Returns the dictionary containing the attributes of a given Node.
 
-    :param graph: A Node in the list of ordered nodes. 
+    :param graph: A Node in the list of ordered nodes.
     :type graph: aidge_core.Node
 
     :return: A dictionary with the Node's attributes.
     :rtype: dict[str, int, float, bool, None]
-    """ 
+    """
 
     if node.get_operator().attr is not None:
         node_attr_dict = node.get_operator().attr.dict()
         for key,value in node_attr_dict.items():
             if not type(value).__name__ in dir(builtins):
                 node_attr_dict[key] = value.name
- 
+
     else:
         node_attr_dict = {}
@@ -29,49 +29,49 @@ def _retrieve_operator_attrs(node : aidge_core.Node) -> dict[str, int, float, bo
 
 def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_embed : bool, write_trainable_params_ext : bool, path_trainable_params : Path, params_file_format : str) -> dict[str, int, float, bool, None]:
     """
-    Creates a dictionary to store the information of a given ordered GraphView. 
+    Creates a dictionary to store the information of a given ordered GraphView.
 
     :param ordered_nodes: A list with the GraphView's ordered nodes.
     :type graph: list
-    :param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed). 
+    :param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed).
     :type write_trainable_params_embed: bool
-    :param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file. 
+    :param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file.
     :type write_trainable_params_ext: bool
     :param path_trainable_params: Path of the external file used to store the Nodes' trainable parameters.
     :type path_trainable_params: Path
     :param params_file_format: Format of the external file used to store the Nodes' trainable parameters. Options: ``npz`` or ``json``. Default : ``json``. Requires ``write_trainable_params_ext``.
     :type params_file_format: str
- 
+
     :return: A dictionary with the GraphView description.
     :rtype: dict[str, int, float, bool, None]
-    """ 
+    """
 
     graphview_dict = {'graph': []}
 
     for node in ordered_nodes:
- 
+
         if node is not None:
-            node_dict = {'name' : node.name(), 
+            node_dict = {'name' : node.name(),
                          'optype' : node.get_operator().type(),
                          'nb_inputs' : node.get_operator().nb_inputs(),
                          'nb_outputs' : node.get_operator().nb_outputs()}
- 
+
             inputs = []
             for input_idx in range(node.get_operator().nb_inputs()):
                 input_dict = {'dims' : node.get_operator().get_input(input_idx).dims(),
                               'data_type' : str(node.get_operator().get_input(input_idx).dtype()),
-                              'data_format' : str(node.get_operator().get_input(input_idx).dformat())} 
-                inputs.append(input_dict) 
- 
+                              'data_format' : str(node.get_operator().get_input(input_idx).dformat())}
+                inputs.append(input_dict)
+
             node_dict['inputs'] = inputs
 
             outputs = []
             for output_idx in range(node.get_operator().nb_outputs()):
                 output_dict = {'dims' : node.get_operator().get_output(output_idx).dims(),
                                'data_type' : str(node.get_operator().get_output(output_idx).dtype()),
-                               'data_format' : str(node.get_operator().get_output(output_idx).dformat())} 
-                outputs.append(output_dict) 
- 
+                               'data_format' : str(node.get_operator().get_output(output_idx).dformat())}
+                outputs.append(output_dict)
+
             node_dict['outputs'] = outputs
 
             parents = node.get_parents()
@@ -79,8 +79,8 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
             if parents[0] is None: parents.append(parents.pop(0))
             else: pass
- 
-            parents_inputs = [] 
+
+            parents_inputs = []
             for parent in parents:
                 if parent is not None:
                     for output_idx in range(parent.get_operator().nb_outputs()):
@@ -91,7 +91,7 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
                 elif parent is None:
                     for input_idx in list(range(node.get_operator().nb_inputs())):
                         if input_idx not in [item[1] for item in parents_inputs]:
-                            parents_inputs.append((None, input_idx)) 
+                            parents_inputs.append((None, input_idx))
 
             parents_inputs.sort(key=lambda x: x[1])
             node_dict['parents'] = parents_inputs
@@ -103,15 +103,15 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
                     if child.get_operator().get_input(input_idx).dims() == node.get_operator().get_output(output_idx).dims():
                         children_outputs.append((child.name(), output_idx))
             node_dict['children'] = children_outputs
- 
+
             # Check if my node is a metaop
             attributes_dict = {}
-            if isinstance(node.get_operator(), aidge_core.MetaOperator_Op):
+            if isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
                 attributes_dict['micro_graph'] = []
                 for micro_node in node.get_operator().get_micro_graph().get_nodes():
-                    micro_node_dict = {'name' : micro_node.name(), 
+                    micro_node_dict = {'name' : micro_node.name(),
                                        'optype' : micro_node.type()}
- 
+
                     micro_node_attr_dict = _retrieve_operator_attrs(micro_node)
                     micro_node_dict['attributes'] = micro_node_attr_dict
                     attributes_dict['micro_graph'].append(micro_node_dict)
@@ -124,7 +124,7 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
             if node.type() == 'Producer':
                 if write_trainable_params_ext:
- 
+
                     params_file_format.casefold()
 
                     if params_file_format=='npz':
@@ -134,14 +134,14 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
                     elif params_file_format=='json':
                         tensor = np.array(node.get_operator().get_output(0))
                         tensor_dict = {
-                            node.name() : 
+                            node.name() :
                             {
                                 'dims' : tensor.shape,
                                 'data_type' : str(tensor.dtype),
                                 'tensor_data' : tensor.tolist()
-                            } 
+                            }
                         }
- 
+
                         with open(Path(path_trainable_params, node.name() + '.json'), 'w') as fp:
                             json.dump(tensor_dict, fp, indent=4)
@@ -150,10 +150,10 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
 
                     else:
                         raise Exception("File format to write trainable parameters not recognized.")
- 
+
                 elif write_trainable_params_embed:
                     node_dict['tensor_data'] = np.array(node.get_operator().get_output(0)).tolist()
- 
+
                 else:
                     pass
@@ -161,13 +161,13 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
 
         else: # node is None
             pass
- 
+
     return graphview_dict
 
 def _write_dict_json(graphview_dict : dict[str, int, float, bool, None], json_path : str) -> None:
     """
     Writes dictionary containing GraphView description to a JSON file.
- 
+
     :param graphview_dict: A dictionary with the GraphView description.
     :type graphview_dict: dict[str, int, float, bool, None]
     :param json_path: Path to write JSON file.
@@ -178,18 +178,18 @@ def _write_dict_json(graphview_dict : dict[str, int, float, bool, None], json_pa
         json.dump(graphview_dict, fp, indent=4)
 
     return None
- 
-def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainable_params_embed : bool = False, write_trainable_params_ext : bool = False, params_file_format : str = 'json') -> None: 
+
+def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainable_params_embed : bool = False, write_trainable_params_ext : bool = False, params_file_format : str = 'json') -> None:
     """
     Generates the description for a GraphView in the JSON format.
- 
+
     :param graph: A GraphView of Aidge.
     :type graph: aidge_core.GraphView
     :param json_path: Path to write JSON file.
     :type json_path: Path
-    :param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed). 
+    :param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed).
     :type write_trainable_params_embed: bool, optional
-    :param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file. 
+    :param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file.
     :type write_trainable_params_ext: bool, optional
     :param params_file_format: Format of the external file used to store the Nodes' trainable parameters. Options: ``npz`` or ``json``. Default : ``json``. Requires ``write_trainable_params_ext``.
     :type params_file_format: str, optional
@@ -201,7 +201,7 @@ def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainabl
     elif not json_path.is_dir():
         if json_path.suffix == '.json':
             pass
-        else: 
+        else:
             raise Exception('If ``json_path`` contains a filename it must be of JSON format.')
 
     if write_trainable_params_ext:
@@ -212,14 +212,14 @@ def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainabl
     if isinstance(gview, aidge_core.GraphView):
         # Sort GraphView in topological order
         ordered_nodes = gview.get_ordered_nodes()
- 
-        # Create dict from GraphView 
+
+        # Create dict from GraphView
         graphview_dict = _create_dict(ordered_nodes, write_trainable_params_embed, write_trainable_params_ext, path_trainable_params, params_file_format)
- 
+
         # Write dict to JSON
         _write_dict_json(graphview_dict, json_path)
 
     else:
         raise Exception("Graph must be an instance of aidge_core.GraphView.")
- 
+
     return None
\ No newline at end of file
diff --git a/aidge_core/testing/__init__.py b/aidge_core/testing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6966bbb6355c798727870ace37f99193fdda66a2
--- /dev/null
+++ b/aidge_core/testing/__init__.py
@@ -0,0 +1,12 @@
+#
+# Do not auto-import submodules here.
+#
+# The testing module contains utils and other tools
+# related to tests, possibly reusable by other aidge
+# components unit_tests.
+#
+# Import a specific module explicitly with for instance:
+#   import aidge_core.testing.utils
+# or
+#   from aidge_core.testing.utils import (....,)
+#
diff --git a/aidge_core/testing/utils/__init__.py b/aidge_core/testing/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d69be0c5b80c7df229d8a24e6891970b8108fb14
--- /dev/null
+++ b/aidge_core/testing/utils/__init__.py
@@ -0,0 +1,10 @@
+#
+# Should provide some general utility functions for testing.
+# For instance:
+# - filesystem
+# - os dependencies
+# - unit tests setup
+#
+
+from .tree_cache import tree_update_from_cache
+from .tree_utils import tree_move, tree_remove
diff --git a/aidge_core/testing/utils/tree_cache.py b/aidge_core/testing/utils/tree_cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b363c7c73ea36636a40c007b24cc244b10303c2
--- /dev/null
+++ b/aidge_core/testing/utils/tree_cache.py
@@ -0,0 +1,153 @@
+"""
+
+Provides the tree_update_from_cache(path) method, which
+minimizes changes in a generated tree when files are
+re-generated but identical.
+
+It takes as argument a generated tree, and optionally a cache path.
+It then updates both the generated tree and the cache tree,
+taking the cached version of a file when identical, or the newly
+generated one otherwise.
+
+This is particularly useful for speeding up iterative compilation
+when generating a source/build system tree.
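+
+A minimal usage sketch (``generate_tree`` stands for any user-provided
+generation step; the name is hypothetical):
+
+    from aidge_core.testing.utils import tree_update_from_cache
+
+    generate_tree("path")           # writes path/{t1,t2,t3}
+    tree_update_from_cache("path")  # reuses identical files from __cache_path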
+
+For instance:
+- first time, one generates a tree of files:
+    - generated: path/{t1,t2,t3}
+- then call tree_update_from_cache("path")
+    - will generate: __cache_path/{t1,t2,t3}
+    - and leave untouched: path/{t1,t2,t3}
+- second time, one re-generates a tree of files:
+    - say the generated files are identical: path/{t1,t2,t3}
+- then call tree_update_from_cache("path")
+    - will leave the cache untouched: __cache_path/{t1,t2,t3}
+    - and reset files to their previous timestamps: path/{t1,t2,t3}
+- third time, one re-generates again with some changes:
+    - say t1 is identical, t2 content has changed and there is no t3: path/{t1,t2'}
+- then call tree_update_from_cache("path")
+    - will update t2' and remove t3 in the cache: __cache_path/{t1,t2'}
+    - and reset t1 to its previous timestamp: path/{t1,t2'}
+
+Note that by default the `dir`/__cache_`name` cache path is used
+for a given path `dir`/`name`.
+Though it is also possible to have the cache path inside the generated tree,
+in this case use for instance:
+
+    tree_update_from_cache(path, Path(path) / "__cache_src")
+
+For more involved scenarios, specialize the provided FileTreeCache class.
+
+"""
+
+
+from pathlib import Path
+import shutil
+import filecmp
+from typing import Optional, Union
+
+from .tree_utils import tree_move, tree_remove
+
+
+__all__ = [
+    "FileTreeCache",
+    "tree_update_from_cache",
+]
+
+
+class FileTreeCache():
+    """
+    Class for implementation of the file tree cache.
+    Can be subclassed, for instance to change the default cache/tmp name
+    prefixes or to specialize for other contexts.
+    """
+    default_cache_prefix = "__cache_"
+    default_tmp_cache_prefix = "__tmp_cache_"
+    default_tmp_prefix = "__tmp_"
+
+    def __init__(self,
+                 src_path: Union[str, Path],
+                 cache_path: Optional[Union[str, Path]] = None
+                 ) -> None:
+        self.src_path = Path(src_path).absolute()
+        self.cache_path = (
+            Path(cache_path).absolute()
+            if cache_path is not None else
+            (self.src_path.parent /
+             f"{self.default_cache_prefix}{self.src_path.name}")
+        )
+        ctx_msg = f"tree_cache: {src_path = }, {cache_path = }"
+        assert self.src_path != self.cache_path, f"src_path and cache_path must differ on {ctx_msg}"
+        assert not self.src_path.is_relative_to(self.cache_path), f"src_path must not be relative to cache_path on {ctx_msg}"
+        self._tmp_path = (
+            self.src_path.parent /
+            f"{self.default_tmp_prefix}{self.src_path.name}")
+        self._tmp_cache_path = (
+            self.src_path.parent /
+            f"{self.default_tmp_cache_prefix}{self.src_path.name}")
+
+    @classmethod
+    def _copytree_or_cache(cls, src_dir: Path, dst_dir: Path, cache_dir: Path, dst_cache_dir: Path) -> None:
+        assert not dst_dir.exists()
+        assert not dst_cache_dir.exists()
+        assert src_dir.is_dir()
+        assert not cache_dir.exists() or cache_dir.is_dir()
+        assert not cache_dir.is_relative_to(src_dir)
+
+        def copy_or_cache(src, dst):
+            base_src = Path(src).relative_to(src_dir)
+            cache_src = cache_dir / base_src
+            base_dst = Path(dst).relative_to(dst_dir)
+            cache_dst = dst_cache_dir / base_dst
+            cache_dst.parent.mkdir(parents=True, exist_ok=True)
+            if cache_src.exists() and filecmp.cmp(str(src), str(cache_src), shallow=False):
+                shutil.copy2(str(cache_src), str(cache_dst))
+                shutil.copy2(str(cache_src), dst)
+            else:
+                shutil.copy2(src, str(cache_dst))
+                shutil.copy2(src, dst)
+        shutil.copytree(str(src_dir), str(dst_dir), copy_function=copy_or_cache)
+
+    def update_from_cache(self) -> None:
+        assert self.src_path.exists(), "src path must exist before swapping with cache"
+
+        # Move cache path apart first as it may be relative to source path
+        tree_move(self.cache_path, self._tmp_cache_path, ignore_missing=True, exist_ok=True)
+        # Move source path apart before recreating merged source tree
+        tree_move(self.src_path, self._tmp_path, exist_ok=True)
+
+        # Manage the source/cache merge to the dst/dst_cache with a variant of
+        # copytree.
+        self._copytree_or_cache(
+            src_dir=self._tmp_path,
+            dst_dir=self.src_path,
+            cache_dir=self._tmp_cache_path,
+            dst_cache_dir=self.cache_path,
+        )
+
+        # Remove tmp source path
+        tree_remove(self._tmp_path)
+        # Note that the tmp cache path may not exist
+        tree_remove(self._tmp_cache_path, ignore_missing=True)
+
+
+def tree_update_from_cache(
+        src_path: Union[str, Path],
+        cache_path: Optional[Union[str, Path]] = None) -> None:
+    """
+    Update from cache the current generation of a tree from the
+    older generations, preserving file stamps when file contents are identical.
+
+    :param src_path: str or Path object to the generated tree
+    :param cache_path: optional str or Path object to the cache path,
+        or defaults to: `cache_path = src_path.parent / f"__cache_{src_path.name}"`
+    """
+    FileTreeCache(src_path, cache_path).update_from_cache()
diff --git a/aidge_core/testing/utils/tree_utils.py b/aidge_core/testing/utils/tree_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a6b2aad88e16075ed64bee03ba8e8fa550376e2
--- /dev/null
+++ b/aidge_core/testing/utils/tree_utils.py
@@ -0,0 +1,69 @@
+"""
+
+Provide utility functions for file tree manipulations.
+
+"""
+
+import shutil
+from pathlib import Path
+from typing import Union
+
+
+__all__ = [
+    "tree_move",
+    "tree_remove",
+]
+
+
+def tree_remove(
+    path: Union[str, Path],
+    ignore_missing: bool = False,
+) -> None:
+    """
+    Remove the full tree at path.
+    Optionally ignore if the path does not exist when ignore_missing is True.
+
+    :param path: str or Path object to the directory path
+    :param ignore_missing: if True, returns early if the path does not exist
+    """
+    path = Path(path)
+    ctx_msg = f"tree_remove: {path = }"
+    assert ignore_missing or path.exists(), f"path must exist when ignore_missing is False on {ctx_msg}"
+    if ignore_missing and not path.exists():
+        return
+    shutil.rmtree(path)
+
+
+def tree_move(
+    src_path: Union[str, Path],
+    dst_path: Union[str, Path],
+    ignore_missing: bool = False,
+    exist_ok: bool = False,
+) -> None:
+    """
+    Move the whole src_path file tree to dst_path.
+    Optionally does nothing if the src path does not exist and ignore_missing is True.
+    Optionally the full dst_path will be removed first when exist_ok is True.
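+
+    For example (paths are illustrative):
+
+        tree_move("gen/build", "gen/__tmp_build", exist_ok=True)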
+
+    :param src_path: str or Path object to the source directory path
+    :param dst_path: str or Path object to the new path name for the source directory
+    :param ignore_missing: if True, returns early if src_path does not exist
+    :param exist_ok: if True, first erases the new path name if it exists
+    """
+    src_path = Path(src_path)
+    dst_path = Path(dst_path)
+    ctx_msg = f"tree_move: {src_path = }, {dst_path = }"
+    assert ignore_missing or src_path.exists(), f"src_path must exist when ignore_missing is False on {ctx_msg}"
+    assert exist_ok or not dst_path.exists(), f"dst_path must not exist when exist_ok is False on {ctx_msg}"
+    assert src_path != dst_path, f"paths must not be identical on {ctx_msg}"
+    assert not dst_path.is_relative_to(src_path), f"dst_path must not be relative to src_path on {ctx_msg}"
+    assert not src_path.is_relative_to(dst_path), f"src_path must not be relative to dst_path on {ctx_msg}"
+    if ignore_missing and not src_path.exists():
+        return
+    if exist_ok and dst_path.exists():
+        shutil.rmtree(dst_path)
+    shutil.move(src_path, dst_path)
diff --git a/aidge_core/unit_tests/test_export.py b/aidge_core/unit_tests/test_export.py
index 32d902b5feedebb3c4e27b6fae0da07be77175fa..b8e1f0ba9d5f72c80f25f68884b797f138dd69d0 100644
--- a/aidge_core/unit_tests/test_export.py
+++ b/aidge_core/unit_tests/test_export.py
@@ -26,6 +26,8 @@ def initFiller(model):
             value = prod_op.get_output(0)
             value.set_backend("cpu")
             tuple_out = node.output(0)[0]
+            # Force seed before filler for reproducibility
+            aidge_core.random.Generator.set_seed(0)
             # No conv in current network
             if tuple_out[0].type() == "Conv" and tuple_out[1] == 1:
                 # Conv weight
@@ -76,14 +78,14 @@ class test_export(unittest.TestCase):
         model = aidge_core.sequential(
             [
                 aidge_core.FC(
-                    in_channels=32 * 32 * 3, out_channels=512, name="InputNode"
+                    in_channels=32 * 32 * 3, out_channels=64, name="InputNode"
                 ),
                 aidge_core.ReLU(name="Relu0"),
-                aidge_core.FC(in_channels=512, out_channels=256, name="FC1"),
+                aidge_core.FC(in_channels=64, out_channels=32, name="FC1"),
                 aidge_core.ReLU(name="Relu1"),
-                aidge_core.FC(in_channels=256, out_channels=128, name="FC2"),
+                aidge_core.FC(in_channels=32, out_channels=16, name="FC2"),
                 aidge_core.ReLU(name="Relu2"),
-                aidge_core.FC(in_channels=128, out_channels=10, name="OutputNode"),
+                aidge_core.FC(in_channels=16, out_channels=10, name="OutputNode"),
             ]
         )
diff --git a/aidge_core/unit_tests/test_recipes.py b/aidge_core/unit_tests/test_recipes.py
index c8dd4c727fbaf8224e8d04111a5054caeb5e5c99..f4dd0220ecdc5950e1b1dcef0d8bf2d4782216bf 100644
--- a/aidge_core/unit_tests/test_recipes.py
+++ b/aidge_core/unit_tests/test_recipes.py
@@ -46,9 +46,9 @@ class test_recipes(unittest.TestCase):
     def test_fuse_matmul_add(self):
         matmul0 = aidge_core.MatMul(name="MatMul0")
-        add0 = aidge_core.Add(2, name="Add0")
+        add0 = aidge_core.Add(name="Add0")
         matmul1 = aidge_core.MatMul(name="MatMul1")
-        add1 = aidge_core.Add(2, name="Add1")
+        add1 = aidge_core.Add(name="Add1")
         w0 = aidge_core.Producer([1, 1], name="W0")
         w0.add_child(matmul0, 0, 0)
         b0 = aidge_core.Producer([1], name="B0")
diff --git a/aidge_core/unit_tests/test_topological_order.py b/aidge_core/unit_tests/test_topological_order.py
index 8e7f2e2d9b9770c2fae1e5c2812ba33113589134..01a69409e86c486ec2fb8c8bdb2a18ab0e3d9c1c 100644
--- a/aidge_core/unit_tests/test_topological_order.py
+++ b/aidge_core/unit_tests/test_topological_order.py
@@ -29,7 +29,7 @@ class test_topological_order(unittest.TestCase):
         loop0.get_operator().set_back_edges({1})
         assert not loop0.get_operator().is_back_edge(0)
         assert loop0.get_operator().is_back_edge(1)
-        add0 = aidge_core.Add(2, "add0")
+        add0 = aidge_core.Add("add0")
         loop0.add_child(add0, 0, 1)
         add0.add_child(loop0, 0, 1)
@@ -50,7 +50,7 @@ class test_topological_order(unittest.TestCase):
         loop0.get_operator().set_back_edges({0})
         assert not loop0.get_operator().is_back_edge(1)
         assert loop0.get_operator().is_back_edge(0)
-        add0 = aidge_core.Add(2, "add0")
+        add0 = aidge_core.Add("add0")
         loop0.add_child(add0, 0, 1)
         add0.add_child(loop0, 0, 0)
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index e014b041fdad94f5f17d636a2da92180de59e152..51cc9c444edf03febf4416149e9160df0bbfca9c 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -76,7 +76,7 @@ public:
      * @param attrs Attributes for the Node.
      */
     Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs);
-    Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs);
+//    Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs);
 
     /**
      * @brief Construct a new Node object associated with the input Operator.
@@ -124,11 +124,14 @@ public:
     ///////////////////////////////////////////////////////
     //        INNER
     ///////////////////////////////////////////////////////
+    inline std::shared_ptr<DynamicAttributes> attributes() const {
+        return mAttrs;
+    }
     /**
      * @brief Name of the Node.
      * @return std::string
      */
-    inline std::string name() const noexcept { return (mAttrs->hasAttr("name")) ? mAttrs->getAttr<std::string>("name") : ""; }
+    inline std::string name() const noexcept { return mAttrs->getAttr<std::string>("name"); }
 
     /**
      * @brief Set the Node name.
@@ -173,6 +176,7 @@ public:
      * @return std::shared_ptr<Operator>
      */
     inline std::shared_ptr<Operator> getOperator() const { return (*mOperator)(mAttrs); }
+//    inline std::shared_ptr<Operator> getOperator() const { return mOperator; }
 
     ///////////////////////////////////////////////////////
     //        TENSOR MANAGEMENT
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index f96996079b9e89f80c78b8e409830369480705a8..827fc0c2732695364aa2393692d7040b8b1a0e9f 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -29,7 +29,7 @@ class Add_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Add_Op(const IOIndex_t nbIn);
+    Add_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -66,7 +66,7 @@ public:
     }
 };
 
-std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "");
+std::shared_ptr<Node> Add(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_ADD_H_ */
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 2c670bf23d4703a5a9e8502c8b356fdde32e2561..bc1852ec0759ffaafa015143f22b0a1c8f6c893e 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -47,7 +48,7 @@ public:
     Pad_Op() = delete;
 
     constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
-                     const PadBorderType &borderType = PadBorderType::Constant,
+                     PadBorderType borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
@@ -92,7 +93,7 @@ public:
 template <std::array<DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                           const std::string& name = "",
-                          const PadBorderType &borderType = PadBorderType::Constant,
+                          PadBorderType borderType = PadBorderType::Constant,
                           double borderValue = 0.0);
 
 // helper with C-style array instead of std::array for beginEndTuples to allow automatic template DIM deduction
@@ -100,7 +101,7 @@ template <DimSize_t DIM>
 inline std::shared_ptr<Node> Pad(
     DimSize_t const (&beginEndTuples)[2*DIM],
     const std::string& name = "",
-    const PadBorderType &borderType = PadBorderType::Constant,
+    PadBorderType borderType = PadBorderType::Constant,
     double borderValue = 0.0)
 {
     return Pad<DIM>(to_array(beginEndTuples), name, borderType, borderValue);
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 9b264c1d3d7955f71538dd90f105cfd7ee469d0a..a9a84a3ee80eea5c0032fa08bce4ab96c44dba04 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -25,16 +25,34 @@
 
 namespace Aidge {
 
-class ReLU_Op : public OperatorTensor,
-    public Registrable<ReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>> {
+/**
+ * @brief Description of an element-wise Rectified Linear Unit (ReLU) operation
+ * on an input Tensor.
+ *
+ * For each element x in the input, the function is defined as:
+ * `f(x) = max(0, x)`
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
+class ReLU_Op :
+    public OperatorTensor,
+    public Registrable<ReLU_Op,  // <Op, backend, implementation creation function>
+        std::string,
+        std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>>
+{
 public:
     static const std::string Type;
 
     ReLU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op ReLU_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     ReLU_Op(const ReLU_Op& op);
diff --git a/include/aidge/scheduler/ProdConso.hpp b/include/aidge/scheduler/ProdConso.hpp
index a7c0ed5ae73d1f891744e835f0da5ad14a37f850..fce8d7f6548aaeb04300291d33cc2a5e44fb6fe7 100644
--- a/include/aidge/scheduler/ProdConso.hpp
+++ b/include/aidge/scheduler/ProdConso.hpp
@@ -42,10 +42,14 @@ public:
      */
     virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
-    // Amount of input data that cannot be overwritten during the execution.
+    /**
+     * @brief Amount of input data that cannot be overwritten during the execution.
+     */
     virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
 
-    // Memory required at an output for a given input size.
+    /**
+     * @brief Memory required at an output for a given input size.
+     */
     virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
 
     /**
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 792d73693be0780f2e938d828b0f29889216631b..2d03f4e8b8d5ce9c74f1d140a2e13317decc8dac 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -28,8 +28,29 @@
 namespace Aidge {
 class Node;
 class GraphView;
+
+/**
+ * @class Scheduler
+ * @brief Generate and manage the execution schedule order of nodes in a graph.
+ * It provides functionality for static scheduling, memory
+ * management, and visualization of the scheduling process.
+ *
+ * Key features:
+ * - Static scheduling generation with early and late execution times
+ * - Memory layout generation for scheduled nodes
+ * - Input tensor connection to graph nodes
+ * - Scheduling visualization through diagram generation
+ *
+ * @see GraphView
+ * @see Node
+ * @see MemoryManager
+ */
 class Scheduler {
 protected:
+    /**
+     * @struct StaticSchedulingElement
+     * @brief Represents a node in the static schedule.
+     */
     struct StaticSchedulingElement {
         StaticSchedulingElement(
             std::shared_ptr<Node> node_,
@@ -37,15 +58,17 @@ protected:
             std::size_t early_ = 0,
             std::size_t late_ = static_cast<std::size_t>(-1))
             : node(node_), early(early_), late(late_) {}
 
-        std::shared_ptr<Node> node;
-        std::size_t early;
-        std::size_t late;
-        std::vector<std::shared_ptr<StaticSchedulingElement>> earlierThan;
-        std::vector<std::shared_ptr<StaticSchedulingElement>> laterThan;
+        std::shared_ptr<Node> node; /** Scheduled `Node` */
+        std::size_t early; /** Earliest possible execution time */
+        std::size_t late; /** Latest possible execution time */
+        std::vector<StaticSchedulingElement*> earlierThan; /** Nodes that must be executed earlier */
+        std::vector<StaticSchedulingElement*> laterThan; /** Nodes that must be executed later */
     };
 
     /**
-     * @brief Node with its start/end execution time stored for later display.
+     * @struct SchedulingElement
+     * @brief Represents a `Node` with its actual execution times.
+     * @details Start and end times are stored for later display.
     */
     struct SchedulingElement {
         SchedulingElement(
             std::shared_ptr<Node> node_,
@@ -54,21 +77,32 @@ protected:
             std::chrono::time_point<std::chrono::high_resolution_clock> start_,
             std::chrono::time_point<std::chrono::high_resolution_clock> end_)
             : node(node_), start(start_), end(end_) {}
         ~SchedulingElement() noexcept = default;
-        std::shared_ptr<Node> node;
-        std::chrono::time_point<std::chrono::high_resolution_clock> start;
-        std::chrono::time_point<std::chrono::high_resolution_clock> end;
+        std::shared_ptr<Node> node; /** Executed `Node` */
+        std::chrono::time_point<std::chrono::high_resolution_clock> start; /** Actual start time of execution */
+        std::chrono::time_point<std::chrono::high_resolution_clock> end; /** Actual end time of execution */
     };
 public:
+    /**
+     * @struct PriorProducersConsumers
+     * @brief Manages producer-consumer relationships for nodes.
+     */
     struct PriorProducersConsumers {
         PriorProducersConsumers();
         PriorProducersConsumers(const PriorProducersConsumers&);
         ~PriorProducersConsumers() noexcept;
-        bool isPrior = false;
-        std::set<std::shared_ptr<Aidge::Node>> requiredProducers;
-        std::set<std::shared_ptr<Aidge::Node>> priorConsumers;
+        bool isPrior = false; /** Indicates if this Node is a prior to another Node */
+        std::set<std::shared_ptr<Aidge::Node>> requiredProducers; /** Set of required producer nodes */
+        std::set<std::shared_ptr<Aidge::Node>> priorConsumers; /** Set of required prior consumer nodes */
     };
 public:
+    Scheduler() = delete;
+
+    /**
+     * @brief Constructor for the Scheduler class.
+     * @param graphView Shared pointer to the GraphView to be scheduled.
+     * @param upperNode Shared pointer to the upper node of the GraphView (optional).
+     */
     Scheduler(std::shared_ptr<GraphView> graphView, std::shared_ptr<Node> upperNode = nullptr)
         : mGraphView(graphView),
           mUpperNode(upperNode)
@@ -76,15 +110,20 @@ public:
         // ctor
     };
 
-    virtual ~Scheduler() noexcept;
+    virtual ~Scheduler();
 
 public:
     /**
-     * @brief Return a vector of Node ordered by the order they are called by the scheduler.
-     * @return std::vector<std::shared_ptr<Node>>
+     * @brief Get the static scheduling order of nodes.
+     * @param step The step of the static schedule to retrieve (default is 0).
+     * @return Vector of shared pointers to Nodes in their scheduled order.
      */
     std::vector<std::shared_ptr<Node>> getStaticScheduling(std::size_t step = 0) const;
 
+    /**
+     * @brief Get the GraphView associated with this Scheduler.
+     * @return Shared pointer to the GraphView.
+     */
     inline std::shared_ptr<GraphView> graphView() const noexcept {
         return mGraphView;
     }
@@ -110,20 +149,23 @@ public:
     MemoryManager generateMemory(bool incProducers = false, bool wrapAroundBuffer = false) const;
 
     /**
-     * @brief Place the data tensors inside in the data input tensor of the graphView. In case of multiple data input tensors, they are mapped to producers in the order given by the graph.
+     * @brief Connect input tensors to the data input of the GraphView.
+     * In case of multiple data input tensors, they are mapped to producers in
+     * the order given by the graph.
      *
     * @param data data input tensors
      */
     void connectInputs(const std::vector<std::shared_ptr<Aidge::Tensor>>& data);
 
     /**
-     * @brief Save in a Markdown file the static scheduling with early and late relative order for the nodes.
-     * @param fileName Name of the generated file.
+     * @brief Save the static scheduling diagram, with early and late relative
+     * order of execution for the nodes, to a file in Mermaid format.
+     * @param fileName Name of the file to save the diagram (without extension).
     */
     void saveStaticSchedulingDiagram(const std::string& fileName) const;
 
     /**
-     * @brief Save in a Markdown file the order of layers execution.
+     * @brief Save in a Mermaid file the order of layers execution.
      * @param fileName Name of the generated file.
      */
     void saveSchedulingDiagram(const std::string& fileName) const;
@@ -139,34 +181,53 @@ protected:
     Elts_t getNbAvailableData(const std::shared_ptr<Node>& node, const IOIndex_t inputIdx) const;
 
+    /**
+     * @brief Get the prior producers and consumers for a node.
+     * @param node Shared pointer to the Node.
+     * @return PriorProducersConsumers object containing prior information.
+     */
     PriorProducersConsumers getPriorProducersConsumers(const std::shared_ptr<Node>& node) const;
 
     /**
     * @brief Generate an initial base scheduling for the GraphView.
     * The scheduling is entirely sequential and guaranteed to be valid w.r.t.
     * each node producer-consumer model.
+     * @return Vector of pointers to `StaticSchedulingElement` representing the base schedule.
      */
-    std::vector<std::shared_ptr<StaticSchedulingElement>> generateBaseScheduling() const;
+    std::vector<StaticSchedulingElement*> generateBaseScheduling() const;
 
     /**
-     * Fill-in early and late scheduling step from initial base scheduling.
-     * For each node, specifies the earliest and latest possible execution
-     * logical step.
-     */
-    void generateEarlyLateScheduling(std::vector<std::shared_ptr<StaticSchedulingElement>>& schedule) const;
+     * @brief Calculates early and late execution times for each node in an initial base scheduling.
+     *
+     * This method performs two passes over the schedule:
+     * 1. Forward pass: Calculates the earliest possible execution time for each node
+     * 2. Backward pass: Calculates the latest possible execution time for each node
+     *
+     * It also establishes 'earlierThan' and 'laterThan' relationships between nodes.
+     *
+     * @param schedule Vector of pointers to StaticSchedulingElements to be processed
+     */
+    void generateEarlyLateScheduling(std::vector<StaticSchedulingElement*>& schedule) const;
 
 private:
+    /**
+     * @brief Summarize the consumer state of a node for debugging purposes.
+     * @param consumer Shared pointer to the consumer Node.
+     * @param nodeName Name of the node.
+     * @details Provide the amount of data consumed and required for each input
+     * and the amount of data produced for each output.
+     */
     void summarizeConsumerState(const std::shared_ptr<Node>& consumer, const std::string& nodeName) const;
 
 protected:
-    /** @brief Shared ptr to the scheduled graph view */
+    /** @brief Shared pointer to the scheduled GraphView */
     std::shared_ptr<GraphView> mGraphView;
-    /** @brief Shared ptr to the upper node containing the graph view */
+    /** @brief Weak pointer to the upper node containing the graph view */
     std::weak_ptr<Node> mUpperNode;
     /** @brief List of SchedulingElement (i.e: Nodes with their computation time) */
     std::vector<SchedulingElement> mScheduling;
     /** @brief List of nodes ordered by their */
-    std::vector<std::vector<std::shared_ptr<StaticSchedulingElement>>> mStaticSchedule;
+    std::vector<std::vector<StaticSchedulingElement*>> mStaticSchedule;
     std::size_t mStaticScheduleStep = 0;
     mutable std::map<std::shared_ptr<Node>, PriorProducersConsumers> mPriorCache;
 };
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 8a00a1cb4a419f1125411b5b1c823bf91570d62e..f8adfd5f4becb7677b3a59791f8549bb114fbbc4 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -22,14 +22,14 @@ namespace Aidge {
 
 void declare_Add(py::module &m) {
   py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
-    .def(py::init<const IOIndex_t>(), py::arg("nb_inputs"))
+    .def(py::init<>())
     .def_static("get_inputs_name", &Add_Op::getInputsName)
     .def_static("get_outputs_name", &Add_Op::getOutputsName)
     .def_readonly_static("Type", &Add_Op::Type);
 
   declare_registrable<Add_Op>(m, "AddOp");
 
-  m.def("Add", &Add, py::arg("nb_inputs"), py::arg("name") = "");
+  m.def("Add", &Add, py::arg("name") = "");
 }
 
 void init_Add(py::module &m) {
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index 5f6cc12fc1e84ee245fdb7babf025b5af4316315..5f173068af0f1140830d458979ec924c38ade078 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -194,7 +194,7 @@ void init_MetaOperatorDefs(py::module &m) {
 //   declare_PaddedMaxPoolingOp<3>(m);
   declare_LSTMOp(m);
 
-  py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperator_Op", py::multiple_inheritance())
+  py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperatorOp", py::multiple_inheritance())
   .def(py::init<const char *, const std::shared_ptr<GraphView>&, const std::vector<InputCategory>&>(),
        py::arg("type"),
        py::arg("graph"),
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 2668f181385272bb3af24e83fcf63f1d50881eba..7dc4a4bee1a009b3ca033ea29861768c1a6fc19d 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -30,7 +30,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
     m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
-                const PadBorderType &,
+                PadBorderType,
                 double>(),
        py::arg("beginEndTuples"),
        py::arg("borderType") = PadBorderType::Constant,
@@ -42,7 +42,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
   declare_registrable<Pad_Op<DIM>>(m, pyClassName);
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
                                                         const std::string& name,
-                                                        const PadBorderType &borderType = PadBorderType::Constant,
+                                                        PadBorderType borderType = PadBorderType::Constant,
                                                         double borderValue = 0.0)
     {
         AIDGE_ASSERT(beginEndTuples.size() == 2*DIM, "begin_end_tuples size [{}] does not match DIM [{}]", beginEndTuples.size(), 2*DIM);
         return Pad<DIM>(to_array<2*DIM>(beginEndTuples.begin()), name, borderType, borderValue);
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index abfc91c6cdf9fd4f6eb46100074b22083514d82e..6f60d2f15ce0e561c32d7bc5a7561c2f8d507588 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -35,7 +35,7 @@ Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
     AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
-    auto add_ = Add_Op(2);
+    auto add_ = Add_Op();
     add_.associateInput(0, std::make_shared<Tensor>(*this));
     add_.associateInput(1, std::make_shared<Tensor>(other));
     add_.setDataType(dataType());
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index c19eab12ae34418386b1481702f64e4a82e9f771..da6d833f3aa933cd5e707814c279142de5bc4a23 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -18,6 +18,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/future_std/any.hpp"
 
 Aidge::Node::Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs)
     : mAttrs(attrs),
@@ -31,23 +32,18 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttribute
       mIdOutParents(
           std::vector<IOIndex_t>(static_cast<std::size_t>(op->nbInputs()), gk_IODefaultIndex))
 {
-    // ctor
-    if (op) {
-        mForward.push_back([this](){ this->mOperator->forward(); return true; });
-        mBackward.push_back([this](){ this->mOperator->backward(); return true; });
-    }
+    mForward.push_back([this](){ this->mOperator->forward(); return true; });
+    // mForward.push_back(std::bind(&Operator::forward, mOperator.get()));
+    mBackward.push_back([this](){ this->mOperator->backward(); return true; });
 }
 
-Aidge::Node::Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs)
-    : Node(op, std::make_shared<DynamicAttributes>(attrs)) {}
+// Aidge::Node::Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs)
+//     : Node(op, std::make_shared<DynamicAttributes>(attrs)) {}
 
 Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
-    : Node(op, DynamicAttributes())
+    : Node(op, std::make_shared<DynamicAttributes>(std::map<std::string, future_std::any>({std::make_pair("name", future_std::any(name))})))
 {
-    // ctor
-    if (!name.empty()) {
-        mAttrs->setAttr<std::string>("name", name);
-    }
+    //ctor
 }
 
 ///////////////////////////////////////////////////////
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 033c476c8a9e865fdf9d5670e295c3e4fb6101b3..f6fd0cd9fc647e29402d36f1f6838642e099ae6c 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -22,12 +22,10 @@
 
 const std::string Aidge::Add_Op::Type = "Add";
 
-Aidge::Add_Op::Add_Op(const IOIndex_t nbIn)
-    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
+Aidge::Add_Op::Add_Op()
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1)
 {
-    if (nbIn == 0) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
-    }
+    // ctor
 }
 
 Aidge::Add_Op::Add_Op(const Add_Op& op)
@@ -89,6 +87,8 @@ std::set<std::string> Aidge::Add_Op::getAvailableBackends() const {
     return Registrar<Add_Op>::getKeys();
 }
 
-std::shared_ptr<Aidge::Node> Aidge::Add(const IOIndex_t nbIn, const std::string& name) {
-    return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Add(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Add_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp
index 9620f040472aed984afb99018cde5476ec5f60d3..2ed548805010a6cc87950c4d1f7b89edbea4f75c 100644
--- a/src/operator/MetaOperatorDefs/LSTM.cpp
+++ b/src/operator/MetaOperatorDefs/LSTM.cpp
@@ -35,14 +35,14 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     auto input = Identity((!name.empty()) ? name + "_input" : "");
     auto hiddenState = Memorize(seqLength, (!name.empty()) ? name + "_hidden_state" : "");
     auto cellState = Memorize(seqLength, (!name.empty()) ? name + "_cell_state" : "");
-    auto add = Add(2, (!name.empty()) ? name + "_add" : "");
+    auto add = Add((!name.empty()) ? name + "_add" : "");
 
     // Forget gate
     auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateX" : "");
     input->addChild(forgetGateX, 0, 0);
     auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateH" : "");
     hiddenState->addChild(forgetGateH, 1, 0);
-    auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : "");
+    auto forgetGate = Add((!name.empty()) ? name + "_forgetGate" : "");
     forgetGateX->addChild(forgetGate, 0, 0);
     forgetGateH->addChild(forgetGate, 0, 1);
     auto forgetGateAct = Sigmoid((!name.empty()) ? name + "_forgetGateAct" : "");
@@ -57,7 +57,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     input->addChild(inputGateX, 0, 0);
     auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_inputGateH" : "");
     hiddenState->addChild(inputGateH, 1, 0);
-    auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : "");
+    auto inputGate = Add((!name.empty()) ? name + "_inputGate" : "");
     inputGateX->addChild(inputGate, 0, 0);
     inputGateH->addChild(inputGate, 0, 1);
     auto inputGateAct = Sigmoid((!name.empty()) ? name + "_inputGateAct" : "");
@@ -71,7 +71,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     input->addChild(cellCandidateX, 0, 0);
     auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateH" : "");
     hiddenState->addChild(cellCandidateH, 1, 0);
-    auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : "");
+    auto cellCandidate = Add((!name.empty()) ? name + "_cellCandidate" : "");
     cellCandidateX->addChild(cellCandidate, 0, 0);
     cellCandidateH->addChild(cellCandidate, 0, 1);
     auto cellCandidateAct = Tanh((!name.empty()) ? name + "_cellCandidateAct" : "");
@@ -83,7 +83,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     input->addChild(outputGateX, 0, 0);
     auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateH" : "");
     hiddenState->addChild(outputGateH, 1, 0);
-    auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : "");
+    auto outputGate = Add((!name.empty()) ? name + "_outputGate" : "");
     outputGateX->addChild(outputGate, 0, 0);
     outputGateH->addChild(outputGate, 0, 1);
     auto outputGateAct = Sigmoid((!name.empty()) ?
name + "_outputGateAct" : ""); @@ -143,14 +143,14 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength) auto input = Identity(""); auto hiddenState = Memorize(seqLength, ""); auto cellState = Memorize(seqLength, ""); - auto add = Add(2, ""); + auto add = Add(""); // Forget gate auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), ""); input->addChild(forgetGateX, 0, 0); auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), ""); hiddenState->addChild(forgetGateH, 1, 0); - auto forgetGate = Add(2, ""); + auto forgetGate = Add(""); forgetGateX->addChild(forgetGate, 0, 0); forgetGateH->addChild(forgetGate, 0, 1); auto forgetGateAct = Sigmoid(""); @@ -165,7 +165,7 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength) input->addChild(inputGateX, 0, 0); auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), ""); hiddenState->addChild(inputGateH, 1, 0); - auto inputGate = Add(2, ""); + auto inputGate = Add(""); inputGateX->addChild(inputGate, 0, 0); inputGateH->addChild(inputGate, 0, 1); auto inputGateAct = Sigmoid(""); @@ -179,7 +179,7 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength) input->addChild(cellCandidateX, 0, 0); auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), ""); hiddenState->addChild(cellCandidateH, 1, 0); - auto cellCandidate = Add(2, ""); + auto cellCandidate = Add(""); cellCandidateX->addChild(cellCandidate, 0, 0); cellCandidateH->addChild(cellCandidate, 0, 1); auto cellCandidateAct = Tanh(""); @@ -191,7 +191,7 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength) input->addChild(outputGateX, 0, 0); auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), ""); hiddenState->addChild(outputGateH, 1, 0); - auto outputGate = Add(2,""); + auto outputGate = Add(""); outputGateX->addChild(outputGate, 0, 0); outputGateH->addChild(outputGate, 0, 1); auto outputGateAct = Sigmoid(""); diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp index e82f1ba3013dfef726475c255e8b9b804adc7daa..ba762da5737e986941e0c72196503415f7af29b7 100644 --- a/src/operator/Pad.cpp +++ b/src/operator/Pad.cpp @@ -61,16 +61,18 @@ std::set<std::string> Aidge::Pad_Op<DIM>::getAvailableBackends() const { template class Aidge::Pad_Op<1>; template class Aidge::Pad_Op<2>; +//////////////////////////////////////////////////////////////////////////////// + template <std::array<Aidge::DimSize_t, 1>::size_type DIM> std::shared_ptr<Aidge::Node> Aidge::Pad(const std::array<Aidge::DimSize_t, 2*DIM> &beginEndTuples, const std::string& name, - const PadBorderType &borderType, + PadBorderType borderType, double borderValue) { AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", Pad_Op<DIM>::Type); return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name); } -template std::shared_ptr<Aidge::Node> Aidge::Pad<1>(const std::array<Aidge::DimSize_t, 2> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue); -template std::shared_ptr<Aidge::Node> Aidge::Pad<2>(const std::array<Aidge::DimSize_t, 4> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue); -template std::shared_ptr<Aidge::Node> Aidge::Pad<3>(const std::array<Aidge::DimSize_t, 6> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue); +template std::shared_ptr<Aidge::Node> Aidge::Pad<1>(const std::array<Aidge::DimSize_t, 2> &beginEndTuples, const 
std::string&, PadBorderType, double borderValue); +template std::shared_ptr<Aidge::Node> Aidge::Pad<2>(const std::array<Aidge::DimSize_t, 4> &beginEndTuples, const std::string&, PadBorderType, double borderValue); +template std::shared_ptr<Aidge::Node> Aidge::Pad<3>(const std::array<Aidge::DimSize_t, 6> &beginEndTuples, const std::string&, PadBorderType, double borderValue); diff --git a/src/recipes/ConvToMatMul.cpp b/src/recipes/ConvToMatMul.cpp index 31462861e1bbe29cb467ad719576ec86c2d46f7f..70be33932295aab49653bdc2853f4411ded919b4 100644 --- a/src/recipes/ConvToMatMul.cpp +++ b/src/recipes/ConvToMatMul.cpp @@ -75,7 +75,7 @@ size_t Aidge::convToMatMul(std::shared_ptr<GraphView> graphView) { // Handle bias if (convOp->getInput(2) && !convOp->getInput(2)->empty()) { - auto add = Add(2, (!convNode->name().empty()) ? convNode->name() + "_add" : ""); + auto add = Add((!convNode->name().empty()) ? convNode->name() + "_add" : ""); auto bReshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>{{1, static_cast<int64_t>(convOp->getInput(2)->size()), 1, 1}}), (!convNode->name().empty()) ? convNode->name() + "_b_reshape_shape_prod" : "", true); diff --git a/src/scheduler/ParallelScheduler.cpp b/src/scheduler/ParallelScheduler.cpp index 1d70646b70091e2e3ff6f03b8ee82ae62aeb1e43..2b9a1f5b62741d5f08dfc3e5aa45b1102d54b850 100644 --- a/src/scheduler/ParallelScheduler.cpp +++ b/src/scheduler/ParallelScheduler.cpp @@ -48,7 +48,7 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std:: // Sort static scheduling, the order will be the preferred thread scheduling // order for non-critical nodes - std::deque<std::shared_ptr<StaticSchedulingElement>> staticSchedule(mStaticSchedule.at(mStaticScheduleStep).begin(), mStaticSchedule.at(mStaticScheduleStep).end()); + std::deque<StaticSchedulingElement*> staticSchedule(mStaticSchedule.at(mStaticScheduleStep).begin(), mStaticSchedule.at(mStaticScheduleStep).end()); std::stable_sort(staticSchedule.begin(), staticSchedule.end(), [](const auto& lhs, const auto& rhs) { return ((lhs->early < rhs->early) || (lhs->early == rhs->early && lhs->late < rhs->late)); }); @@ -59,12 +59,12 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std:: size_t latest = 0; std::mutex schedulingMutex; - std::map<std::shared_ptr<StaticSchedulingElement>, std::atomic<bool>> finished; + std::map<StaticSchedulingElement*, std::atomic<bool>> finished; while (!staticSchedule.empty()) { Log::debug("Step {}", latest); - std::vector<std::shared_ptr<StaticSchedulingElement>> mustFinish; + std::vector<StaticSchedulingElement*> mustFinish; // Run all nodes that must be run at this step: latest (critical nodes) for (size_t i = 0; i < staticSchedule.size(); ) { @@ -188,7 +188,7 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std:: } // Wait for all nodes that must finish at latest to be finished - // By scheduling construction, no other node can be started before all + // By scheduling construction, no other node can be started before all // nodes at latest step are finished while (true) { bool ready = true; diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp index 958b2543208dfdce3eee4e1ba7a22cc8bd0be74b..34aea5ffd909f57e4834deeed6d0bdc2664d4644 100644 --- a/src/scheduler/Scheduler.cpp +++ b/src/scheduler/Scheduler.cpp @@ -37,7 +37,14 @@ #include "aidge/utils/Types.h" -Aidge::Scheduler::~Scheduler() noexcept = default; +Aidge::Scheduler::~Scheduler() { + for (auto& staticScheduleVec : 
mStaticSchedule) { + for (auto& staticScheduleElt : staticScheduleVec) { + delete staticScheduleElt; + } + staticScheduleVec.clear(); + } +} Aidge::Scheduler::PriorProducersConsumers::PriorProducersConsumers() = default; Aidge::Scheduler::PriorProducersConsumers::PriorProducersConsumers(const PriorProducersConsumers&) = default; Aidge::Scheduler::PriorProducersConsumers::~PriorProducersConsumers() noexcept = default; @@ -48,7 +55,7 @@ void Aidge::Scheduler::generateScheduling() { mStaticSchedule.push_back(schedule); } -std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::Scheduler::generateBaseScheduling() const { +std::vector<Aidge::Scheduler::StaticSchedulingElement*> Aidge::Scheduler::generateBaseScheduling() const { // 0) setup useful variables // map associating each node with string "name (type#rank)" @@ -60,7 +67,7 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S // producers-consumers model! std::set<std::shared_ptr<Node>> stillConsumers; - std::vector<std::shared_ptr<StaticSchedulingElement>> schedule; + std::vector<StaticSchedulingElement*> schedule; // 1) Initialize consumers list: start from the output nodes and @@ -124,7 +131,7 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S // Producers are special nodes that generate data on demand. for (const auto& requiredProducer : requiredProducers) { requiredProducer->getOperator()->updateConsummerProducer(); - schedule.push_back(std::make_shared<StaticSchedulingElement>(requiredProducer)); + schedule.push_back(new StaticSchedulingElement(requiredProducer)); } // 5) Find runnable consumers. @@ -178,7 +185,7 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S for (const auto& runnable : runnableConsumers) { Log::debug("Runnable: {}", namePtrTable.at(runnable)); runnable->getOperator()->updateConsummerProducer(); - schedule.push_back(std::make_shared<StaticSchedulingElement>(runnable)); + schedule.push_back(new StaticSchedulingElement(runnable)); } // 7) Update consumers list @@ -310,7 +317,7 @@ void Aidge::Scheduler::summarizeConsumerState(const std::shared_ptr<Aidge::Node> } -void Aidge::Scheduler::generateEarlyLateScheduling(std::vector<std::shared_ptr<StaticSchedulingElement>>& schedule) const { +void Aidge::Scheduler::generateEarlyLateScheduling(std::vector<StaticSchedulingElement*>& schedule) const { std::size_t latest = 0; // Calculate early (logical) start for (std::size_t elt = 0; elt < schedule.size(); ++elt) { @@ -390,15 +397,20 @@ void Aidge::Scheduler::resetScheduling() { for (auto node : mGraphView->getNodes()) { node->getOperator()->resetConsummerProducer(); } - + for (auto& staticScheduleVec : mStaticSchedule) { + for (auto& staticScheduleElt : staticScheduleVec) { + delete staticScheduleElt; + } + staticScheduleVec.clear(); + } mStaticSchedule.clear(); mStaticScheduleStep = 0; mScheduling.clear(); } /** - * This version is a simplified version without special handling of concatenation. -*/ + * @warning This is a simplified version without special handling of concatenation. 
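+ *
+ * A minimal usage sketch (illustrative, not part of the patch; assumes `scheduler` is a Scheduler built on a connected GraphView):
+ * @code
+ * scheduler.generateScheduling();
+ * MemoryManager memManager = scheduler.generateMemory(false, true);
+ * @endcode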
+ */ Aidge::MemoryManager Aidge::Scheduler::generateMemory(bool incProducers, bool wrapAroundBuffer) const { MemoryManager memManager; @@ -669,8 +681,8 @@ Aidge::Elts_t Aidge::Scheduler::getNbAvailableData(const std::shared_ptr<Node>& return Elts_t::NoneElts(); } -Aidge::Scheduler::PriorProducersConsumers Aidge::Scheduler::getPriorProducersConsumers( - const std::shared_ptr<Node>& node) const +Aidge::Scheduler::PriorProducersConsumers +Aidge::Scheduler::getPriorProducersConsumers(const std::shared_ptr<Node>& node) const { const auto priorCache = mPriorCache.find(node); if (priorCache != mPriorCache.end()) { @@ -707,6 +719,7 @@ Aidge::Scheduler::PriorProducersConsumers Aidge::Scheduler::getPriorProducersCon const auto& parentPrior = getPriorProducersConsumers(parent.first); if (!parentPrior.isPrior) { + // only happens in case of cyclic graphs return PriorProducersConsumers(); // not scheduled } else { diff --git a/src/scheduler/SequentialScheduler.cpp b/src/scheduler/SequentialScheduler.cpp index 88b5e98bc62456bd59dc235c3112396daaeddd24..4e6e91f51878ffce7d910a361c9a6e8fff9cb835 100644 --- a/src/scheduler/SequentialScheduler.cpp +++ b/src/scheduler/SequentialScheduler.cpp @@ -45,7 +45,7 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, const std::vector<std } // Sort static scheduling according to the policy - std::vector<std::shared_ptr<StaticSchedulingElement>> staticSchedule(mStaticSchedule.at(mStaticScheduleStep).begin(), mStaticSchedule.at(mStaticScheduleStep).end()); + std::vector<StaticSchedulingElement*> staticSchedule(mStaticSchedule.at(mStaticScheduleStep).begin(), mStaticSchedule.at(mStaticScheduleStep).end()); if (mSchedulingPolicy == SchedulingPolicy::AsSoonAsPossible) { std::stable_sort(staticSchedule.begin(), staticSchedule.end(), diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp index a08808ee5e6c2657a76213dcff80cec53b23e7ee..2fa06cf23b3b681211208a3e5bbea9226f0930b8 100644 --- a/unit_tests/graph/Test_GraphView.cpp +++ b/unit_tests/graph/Test_GraphView.cpp @@ -447,10 +447,10 @@ TEST_CASE("[core/graph] GraphView(getOrderedNodes)", "[GraphView][getOrderedNode auto data1 = Producer({2}, "data1"); auto data2 = Producer({2}, "data2"); auto data3 = Producer({2}, "data3"); - auto add1 = Add(2, "add1"); - auto add2 = Add(2, "add2"); + auto add1 = Add("add1"); + auto add2 = Add("add2"); auto split1 = Split(2, 0, {1, 1}, "split1"); - auto add3 = Add(3, "add3"); + auto add3 = Add("add3"); auto g = std::make_shared<GraphView>("TestGraph"); data1->addChild(add1); data2->addChild(add1); @@ -508,9 +508,9 @@ TEST_CASE("[core/graph] GraphView(getOrderedNodes)", "[GraphView][getOrderedNode TEST_CASE("[core/graph] GraphView(getOrderedNodes) cyclic", "[GraphView][getOrderedNodes]") { auto data1 = Producer({2}, "data1"); auto data2 = Producer({2}, "data2"); - auto add1 = Add(2, "add1"); + auto add1 = Add("add1"); auto mem1 = Memorize(1, "mem1"); - auto add2 = Add(2, "add2"); + auto add2 = Add("add2"); auto g = std::make_shared<GraphView>("TestGraph"); data1->addChild(add1); data2->addChild(add1); diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp index d63b1e754a254e7ba69089ba465eb0226922f352..d6d98d4701cba900548d127879c9b3940cf1d739 100644 --- a/unit_tests/graph/Test_Matching.cpp +++ b/unit_tests/graph/Test_Matching.cpp @@ -51,10 +51,10 @@ TEST_CASE("[core/graph] Matching") { PaddedConv(8, 16, {3, 3}, "conv3", {1, 1}, {2, 2, 2, 2}), ReLU("relu3"), PaddedConv(8, 16, {5, 5}, "conv4", {1, 1}, {2, 2, 2, 2}), - Add(2, 
"add"), + Add("add"), PaddedConv(8, 16, {5, 5}, "conv5", {1, 1}, {2, 2, 2, 2}), ReLU("relu5"), - Add(2, "add2") + Add("add2") }); g1->getNode("relu3")->addChild(g1->getNode("add"), 0, 1); diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp index 68ac509e79e347106a9a132249f125ebe6e39f6a..79e471d44a49dfb52fd5eb4aa1ed2dc4ab8dc0bb 100644 --- a/unit_tests/graphRegex/Test_GraphRegex.cpp +++ b/unit_tests/graphRegex/Test_GraphRegex.cpp @@ -153,9 +153,9 @@ TEST_CASE("GraphRegexUser") { // generate the original GraphView auto matmul0 = MatMul("matmul0"); - auto add0 = Add(2, "add0"); + auto add0 = Add("add0"); auto matmul1 = MatMul("matmul1"); - auto add1 = Add(2, "add1"); + auto add1 = Add("add1"); auto b0 = Producer({5}, "B0"); auto w0 = Producer({5, 5}, "W0"); diff --git a/unit_tests/operator/Test_ConstantOfShape.cpp b/unit_tests/operator/Test_ConstantOfShape.cpp index c10d97ce5fb774e051e75f051772e1cbcd41dbea..00013f86d9a7b357e409a483c80ca80cd332a17e 100644 --- a/unit_tests/operator/Test_ConstantOfShape.cpp +++ b/unit_tests/operator/Test_ConstantOfShape.cpp @@ -78,6 +78,7 @@ TEST_CASE("[core/operator] ConstantOfShape_Op(forwardDims)", for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) { CHECK(array_in[i] == op->getOutput(0)->dims().at(i)); } + delete[] array_in; } } } diff --git a/unit_tests/operator/Test_Operator.cpp b/unit_tests/operator/Test_Operator.cpp index a050bbc4021b0c70a0d8faf6478eb2bd13ebdb58..6bd12c51ef367ad1cf1859afc56af8a21a706237 100644 --- a/unit_tests/operator/Test_Operator.cpp +++ b/unit_tests/operator/Test_Operator.cpp @@ -26,7 +26,7 @@ namespace Aidge { // TEST_CASE("[core/operator] Operator(computeReceptiveField)", "[Operator][computeReceptiveFiled]") { // auto dataProvider1 = Producer({16, 3, 224, 224}, "dataProvider1"); // auto dataProvider2 = Producer({16, 3, 224, 224}, "dataProvider2"); -// auto gen1 = Add(2); +// auto gen1 = Add(); // auto gen2 = ReLU(); // auto g = std::make_shared<GraphView>("TestGraph"); diff --git a/unit_tests/recipes/Test_MatMulToFC.cpp b/unit_tests/recipes/Test_MatMulToFC.cpp index 2adf882ca69e0d5ca5f050d1b89cfb09d81b536b..28eae0be17297467a29eab4e868e074c336d4a12 100644 --- a/unit_tests/recipes/Test_MatMulToFC.cpp +++ b/unit_tests/recipes/Test_MatMulToFC.cpp @@ -27,9 +27,9 @@ TEST_CASE("[cpu/recipes] MatMulToFC", "[MatMulToFC][recipes]") { SECTION("with Add") { // generate the original GraphView auto matmul0 = MatMul("matmul0"); - auto add0 = Add(2, "add0"); + auto add0 = Add("add0"); auto matmul1 = MatMul("matmul1"); - auto add1 = Add(2, "add1"); + auto add1 = Add("add1"); auto b0 = Producer({5}, "B0"); auto w0 = Producer({5, 5}, "W0"); @@ -76,7 +76,7 @@ TEST_CASE("[cpu/recipes] MatMulToFC", "[MatMulToFC][recipes]") { // generate the original GraphView auto matmul0 = MatMul("matmul0"); auto matmul1 = MatMul("matmul1"); - auto add1 = Add(2, "add1"); + auto add1 = Add("add1"); auto w0 = Producer({5, 5}, "W0"); auto b1 = Producer({5}, "B1");