diff --git a/.clang-tidy b/.clang-tidy new file mode 100644 index 0000000000000000000000000000000000000000..77e8c9dbd6c7c95d38505acdfc45740403031597 --- /dev/null +++ b/.clang-tidy @@ -0,0 +1,188 @@ +--- +Checks: "google-*, readability-identifier-naming" +WarningsAsErrors: '' +HeaderFilterRegex: '' +FormatStyle: none +CheckOptions: + # Case style + # Following https://webkit.org/code-style-guidelines/ : + # CamelCase for class,struct, namespace + # camelBack for variables, functions + # CamelCase for class,struct, namespace + - key: 'readability-identifier-naming.AbstractClassCase' + value: 'CamelCase' + # CamelCase for class,struct, namespace + - key: 'readability-identifier-naming.ClassCase' + value: 'CamelCase' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ClassConstantCase' + value: 'camelBack' + - key: 'readability-identifier-naming.ClassConstantPrefix' + value: 's_' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ClassMemberCase' + value: 'camelBack' + - key: 'readability-identifier-naming.ClassMemberPrefix' + value: 's_' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ClassMethodCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ConstantCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ConstantMemberCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ConstantParameterCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ConstantPointerParameterCase' + value: 'camelBack' + # camelBack for variables, functions + # camelBack for variables, functions + - key: 'readability-identifier-naming.ClassMemberCase' + value: 'camelBack' + - key: 'readability-identifier-naming.ClassMemberPrefix' + value: 's_' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ClassMethodCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ConstantCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ConstantMemberCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ConstantParameterCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ConstantPointerParameterCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ConstexprFunctionCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ConstexprMethodCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ConstexprVariableCase' + value: 'camelBack' + # CamelCase for class,struct, namespace + - key: 'readability-identifier-naming.EnumCase' + value: 'CamelCase' + # CamelCase for class,struct, namespace + - key: 'readability-identifier-naming.EnumConstantCase' + value: 'CamelCase' + # camelBack for variables, functions + - key: 'readability-identifier-naming.FunctionCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.GlobalConstantCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.GlobalConstantPointerCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 
'readability-identifier-naming.GlobalFunctionCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.GlobalPointerCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.GlobalVariableCase' + value: 'camelBack' + # CamelCase for class,struct, namespace + - key: 'readability-identifier-naming.InlineNamespaceCase' + value: 'CamelCase' + # camelBack for variables, functions + - key: 'readability-identifier-naming.LocalConstantCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.LocalConstantPointerCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.LocalPointerCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.LocalVariableCase' + value: 'camelBack' + - key: 'readability-identifier-naming.MacroDefinitionCase' + value: 'UPPER_CASE' + # camelBack for variables, functions + - key: 'readability-identifier-naming.MemberCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.MethodCase' + value: 'CamelCase' + # CamelCase for class,struct, namespace + - key: 'readability-identifier-naming.NamespaceCase' + value: 'CamelCase' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ParameterCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ParameterPackCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.PointerParameterCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.PrivateMemberCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.PrivateMemberPrefix' + value: 'm_' + # camelBack for variables, functions + - key: 'readability-identifier-naming.PrivateMethodCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ProtectedMemberCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ProtectedMemberPrefix' + value: 'm_' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ProtectedMethodCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.PublicMemberCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.PublicMethodCase' + value: 'camelBack' + - key: 'readability-identifier-naming.ScopedEnumConstantCase' + value: 'CamelCase' + - key: 'readability-identifier-naming.StaticConstantCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.StaticVariableCase' + value: 'camelBack' + - key: 'readability-identifier-naming.StructCase' + value: 'CamelCase' + - key: 'readability-identifier-naming.TemplateParameterCase' + value: 'CamelCase' + - key: 'readability-identifier-naming.TemplateTemplateParameterCase' + value: 'CamelCase' + - key: 'readability-identifier-naming.TypeTemplateParameterIgnoredRegexp' + value: 'expr-type' + # CamelCase for Type aliases + - key: 'readability-identifier-naming.TypeAliasCase' + value: 'CamelCase' + - key: 'readability-identifier-naming.TypedefCase' + value: 'CamelCase' + - key: 'readability-identifier-naming.TypeTemplateParameterCase' + value: 'CamelCase' + - key: 
'readability-identifier-naming.UnionCase' + value: 'CamelCase' + # camelBack for variables, functions + - key: 'readability-identifier-naming.ValueTemplateParameterCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.VariableCase' + value: 'camelBack' + # camelBack for variables, functions + - key: 'readability-identifier-naming.VirtualMethodCase' + value: 'camelCase' +... diff --git a/aidge_core/__init__.py b/aidge_core/__init__.py index 652f485a9d3de6869b55613549172d49913e8509..32042125ed6ecb1d935e240837afe6516706dbcb 100644 --- a/aidge_core/__init__.py +++ b/aidge_core/__init__.py @@ -7,8 +7,10 @@ http://www.eclipse.org/legal/epl-2.0. SPDX-License-Identifier: EPL-2.0 """ -from .aidge_core import * # import so generated by PyBind -from .export_utils import ExportNode, generate_file, generate_str -from .aidge_export_aidge import * -from . import utils +from aidge_core.aidge_core import * # import so generated by PyBind +import aidge_core.export_utils +import aidge_core.utils +from aidge_core.aidge_export_aidge import serialize_to_cpp +from aidge_core.show_graphview import gview_to_json +from aidge_core.mem_info import * from ._version import * diff --git a/aidge_core/aidge_export_aidge/__init__.py b/aidge_core/aidge_export_aidge/__init__.py index c5d6f96b3300c0e86147baac30d7cae8a3a0b798..9f042cdbcfff071dabc9a31817bf8e2e95c36ad9 100644 --- a/aidge_core/aidge_export_aidge/__init__.py +++ b/aidge_core/aidge_export_aidge/__init__.py @@ -5,4 +5,4 @@ FILE = Path(__file__).resolve() ROOT_EXPORT = FILE.parents[0] from .operator_export import * -from .export import export +from .export import serialize_to_cpp diff --git a/aidge_core/aidge_export_aidge/export.py b/aidge_core/aidge_export_aidge/export.py index b0e859b71e77bb03b95acb6ca75dcf09a8af0722..f5d8c8c7ca6f3fa7c7ef19b1aef3987c20acd1f7 100644 --- a/aidge_core/aidge_export_aidge/export.py +++ b/aidge_core/aidge_export_aidge/export.py @@ -2,15 +2,14 @@ import aidge_core import shutil import os from pathlib import Path -from .utils import supported_operators, OPERATORS_REGISTRY -from . import ROOT_EXPORT - - -from aidge_core import ExportNode, generate_file +import aidge_core.export_utils +from . 
import ROOT_EXPORT +from aidge_core.aidge_export_aidge.registry import ExportSerialize +from aidge_core.export_utils import generate_file -def export(export_folder: str, +def serialize_to_cpp(export_folder: str, graph_view: aidge_core.GraphView, enable_python_binding: bool = True, ): @@ -54,12 +53,11 @@ def export(export_folder: str, ### Generating an export for each nodes and dnn file ### list_configs = [] # List of headers to include in dnn.cpp to access attribute and parameters list_actions = [] # List of string to construct graph - set_operator = set() + list_operators = [] # List of operator types used (to be made unique latter) # Queue of Aidge nodes to explore, guarantee a topological exploration of the graph open_nodes = list(graph_view.get_input_nodes()) # List of Aidge nodes already explored closed_nodes = [] - while open_nodes: node = open_nodes.pop(0) if node in closed_nodes: @@ -80,25 +78,37 @@ def export(export_folder: str, continue # Next nodes to treat are children of current node open_nodes += list(node.get_children()) + node.get_operator().set_backend(ExportSerialize._name) + op_impl = node.get_operator().get_impl() + if op_impl is None: + raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an implementation.") + if not isinstance(op_impl, ExportSerialize): + raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an exportable backend ({op_impl}).") + required_specs = op_impl.get_required_spec() + specs = op_impl.get_best_match(required_specs) + export_node = op_impl.get_export_node(specs) + if export_node is None: + raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].") + op = export_node( + node, None) + - if node.type() in supported_operators(): - set_operator.add(node.type()) - op = OPERATORS_REGISTRY[node.type()](node) + # set_operator.add(node.type()) - # TODO: list_configs and list_actions don't need to be passed by argument - # Export the configuration - list_configs = op.export(export_folder_path, list_configs) + # TODO: list_configs and list_actions don't need to be passed by argument + # Export the configuration + list_configs += op.export(export_folder_path) - # Add forward kernel - list_actions = op.forward(list_actions) - else: - raise RuntimeError(f"Operator: {node.type()} is not supported") + # Add forward kernel + list_actions += op.forward() closed_nodes.append(node) + list_operators = list(dict.fromkeys(list_operators)) # make unique + # Generate full dnn.cpp - aidge_core.generate_file( + aidge_core.export_utils.generate_file( export_folder_path / "src/dnn.cpp", ROOT_EXPORT / "templates/dnn.jinja", headers=list_configs, - operators=set_operator, + operators=list_operators, actions=list_actions, ) diff --git a/aidge_core/aidge_export_aidge/operator_export/add.py b/aidge_core/aidge_export_aidge/operator_export/add.py new file mode 100644 index 0000000000000000000000000000000000000000..4eb7c3e37f0d63388a5bfe8600f184b9da2ffc49 --- /dev/null +++ b/aidge_core/aidge_export_aidge/operator_export/add.py @@ -0,0 +1,14 @@ +from aidge_core.aidge_export_aidge.registry import ExportSerialize +from aidge_core.aidge_export_aidge import ROOT_EXPORT +from aidge_core.export_utils import ExportNodeCpp +from aidge_core import ImplSpec, IOSpec, dtype + +@ExportSerialize.register("Add", ImplSpec(IOSpec(dtype.any))) +class Add(ExportNodeCpp): + def __init__(self, node, mem_info): + super().__init__(node, mem_info) + self.config_template = "" + self.forward_template = str( + ROOT_EXPORT / 
"templates/graph_ctor/add.jinja") + self.include_list = ["aidge/operator/Add.hpp"] + self.kernels_to_copy = [] diff --git a/aidge_core/aidge_export_aidge/operator_export/conv.py b/aidge_core/aidge_export_aidge/operator_export/conv.py index fb7092fb18982a3cc3f11a1ca47394ce2f77d0b6..ea23c1551787c8579549d54a1fe7396995eb1bff 100644 --- a/aidge_core/aidge_export_aidge/operator_export/conv.py +++ b/aidge_core/aidge_export_aidge/operator_export/conv.py @@ -1,31 +1,17 @@ -from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input +from aidge_core.aidge_export_aidge.registry import ExportSerialize from aidge_core.aidge_export_aidge import ROOT_EXPORT -from aidge_core import ExportNode, generate_file, generate_str -from pathlib import Path +from aidge_core.export_utils import ExportNodeCpp +from aidge_core import ImplSpec, IOSpec, dtype -@operator_register("Conv") -class Conv(ExportNode): - def __init__(self, node): - super().__init__(node) - - def export(self, export_folder:Path, list_configs:list): - include_path = f"attributes/{self.name}.hpp" - filepath = export_folder / f"include/{include_path}" - - generate_file( - filepath, - ROOT_EXPORT / "templates/attributes/conv.jinja", - name=self.name, - **self.attributes - ) - list_configs.append(include_path) - return list_configs - - def forward(self, list_actions:list): - list_actions.append(generate_str( - ROOT_EXPORT /"templates/graph_ctor/conv.jinja", - name=self.name, - inputs=parse_node_input(self.node.inputs()), - **self.attributes - )) - return list_actions +@ExportSerialize.register(["Conv1D", "Conv2D"], ImplSpec(IOSpec(dtype.any))) +class Conv(ExportNodeCpp): + def __init__(self, node, mem_info): + super().__init__(node, mem_info) + self.config_template = str( + ROOT_EXPORT / "templates/attributes/conv.jinja") + self.forward_template = str( + ROOT_EXPORT /"templates/graph_ctor/conv.jinja") + self.include_list = ["aidge/operator/Conv.hpp"] + self.kernels_to_copy = [] + self.config_path = "include/attributes" + self.config_extension = "hpp" diff --git a/aidge_core/aidge_export_aidge/operator_export/conv_depth_wise.py b/aidge_core/aidge_export_aidge/operator_export/conv_depth_wise.py new file mode 100644 index 0000000000000000000000000000000000000000..3e04f8aac17da5662a3ed08bc627969dbb3a9c13 --- /dev/null +++ b/aidge_core/aidge_export_aidge/operator_export/conv_depth_wise.py @@ -0,0 +1,17 @@ +from aidge_core.aidge_export_aidge.registry import ExportSerialize +from aidge_core.aidge_export_aidge import ROOT_EXPORT +from aidge_core.export_utils import ExportNodeCpp +from aidge_core import ImplSpec, IOSpec, dtype + +@ExportSerialize.register(["ConvDepthWise1D", "ConvDepthWise2D"], ImplSpec(IOSpec(dtype.any))) +class ConvDepthWise(ExportNodeCpp): + def __init__(self, node, mem_info): + super().__init__(node, mem_info) + self.config_template = str( + ROOT_EXPORT / "templates/attributes/conv_depth_wise.jinja") + self.forward_template = str( + ROOT_EXPORT /"templates/graph_ctor/conv_depth_wise.jinja") + self.include_list = ["aidge/operator/ConvDepthWise.hpp"] + self.kernels_to_copy = [] + self.config_path = "include/attributes" + self.config_extension = "hpp" diff --git a/aidge_core/aidge_export_aidge/operator_export/fc.py b/aidge_core/aidge_export_aidge/operator_export/fc.py index fcd528528707dc6eec917790b46e509c2984fa66..4f964a9942600d46740b570975a218b4c2e7aabd 100644 --- a/aidge_core/aidge_export_aidge/operator_export/fc.py +++ b/aidge_core/aidge_export_aidge/operator_export/fc.py @@ -1,37 +1,18 @@ -from 
aidge_core.aidge_export_aidge.utils import operator_register,parse_node_input +from aidge_core.aidge_export_aidge.registry import ExportSerialize from aidge_core.aidge_export_aidge import ROOT_EXPORT -from aidge_core import ExportNode, generate_file, generate_str -from pathlib import Path - -@operator_register("FC") -class FC(ExportNode): - def __init__(self, node): - super().__init__(node) - - - def export(self, export_folder:Path, list_configs:list): - - - include_path = f"attributes/{self.name}.hpp" - filepath = export_folder / f"include/{include_path}" - - - generate_file( - filepath, - ROOT_EXPORT / "templates/attributes/fc.jinja", - name=self.name, - InChannels=self.inputs_dims[1][1], - OutChannels=self.operator.out_channels(), - **self.attributes - ) - list_configs.append(include_path) - return list_configs - - def forward(self, list_actions:list): - list_actions.append(generate_str( - ROOT_EXPORT / "templates/graph_ctor/fc.jinja", - name=self.name, - inputs=parse_node_input(self.node.inputs()), - **self.attributes - )) - return list_actions +from aidge_core.export_utils import ExportNodeCpp +from aidge_core import ImplSpec, IOSpec, dtype + + +@ExportSerialize.register("FC", ImplSpec(IOSpec(dtype.any))) +class FC(ExportNodeCpp): + def __init__(self, node, mem_info): + super().__init__(node, mem_info) + self.config_template = str( + ROOT_EXPORT / "templates/attributes/fc.jinja") + self.forward_template = str( + ROOT_EXPORT / "templates/graph_ctor/fc.jinja") + self.include_list = ["aidge/operator/FC.hpp"] + self.kernels_to_copy = [] + self.config_path = "include/attributes" + self.config_extension = "hpp" diff --git a/aidge_core/aidge_export_aidge/operator_export/maxpooling.py b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py index 0c63e71b423b90f62536cafd25c61101e76e0562..6d9c7998fb90153bfdfd2898c1dfcfb1ad730f20 100644 --- a/aidge_core/aidge_export_aidge/operator_export/maxpooling.py +++ b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py @@ -1,32 +1,17 @@ -from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input +from aidge_core.aidge_export_aidge.registry import ExportSerialize from aidge_core.aidge_export_aidge import ROOT_EXPORT -from aidge_core import ExportNode, generate_file, generate_str -from pathlib import Path +from aidge_core.export_utils import ExportNodeCpp +from aidge_core import ImplSpec, IOSpec, dtype -@operator_register("MaxPooling") -class MaxPooling(ExportNode): - def __init__(self, node): - super().__init__(node) - - - def export(self, export_folder:Path, list_configs:list): - include_path = f"attributes/{self.name}.hpp" - filepath = export_folder / f"include/{include_path}" - - generate_file( - filepath, - ROOT_EXPORT / "templates/attributes/maxpooling.jinja", - name=self.name, - **self.attributes - ) - list_configs.append(include_path) - return list_configs - - def forward(self, list_actions:list): - list_actions.append(generate_str( - ROOT_EXPORT / "templates/graph_ctor/maxpooling.jinja", - name=self.name, - inputs=parse_node_input(self.node.inputs()), - **self.attributes - )) - return list_actions +@ExportSerialize.register(["MaxPooling1D", "MaxPooling2D", "MaxPooling3D"], ImplSpec(IOSpec(dtype.any))) +class MaxPooling(ExportNodeCpp): + def __init__(self, node, mem_info): + super().__init__(node, mem_info) + self.config_template = str( + ROOT_EXPORT / "templates/attributes/maxpooling.jinja") + self.forward_template = str( + ROOT_EXPORT / "templates/graph_ctor/maxpooling.jinja") + self.include_list = 
["aidge/operator/MaxPooling.hpp"] + self.kernels_to_copy = [] + self.config_path = "include/attributes" + self.config_extension = "hpp" diff --git a/aidge_core/aidge_export_aidge/operator_export/pad.py b/aidge_core/aidge_export_aidge/operator_export/pad.py new file mode 100644 index 0000000000000000000000000000000000000000..5d6869de07b985169399697e95b6b719f658c911 --- /dev/null +++ b/aidge_core/aidge_export_aidge/operator_export/pad.py @@ -0,0 +1,17 @@ +from aidge_core.aidge_export_aidge.registry import ExportSerialize +from aidge_core.aidge_export_aidge import ROOT_EXPORT +from aidge_core.export_utils import ExportNodeCpp +from aidge_core import ImplSpec, IOSpec, dtype + +@ExportSerialize.register(["Pad1D", "Pad2D"], ImplSpec(IOSpec(dtype.any))) +class Pad(ExportNodeCpp): + def __init__(self, node, mem_info): + super().__init__(node, mem_info) + self.config_template = str( + ROOT_EXPORT / "templates/attributes/pad.jinja") + self.forward_template = str( + ROOT_EXPORT /"templates/graph_ctor/pad.jinja") + self.include_list = ["aidge/operator/Pad.hpp"] + self.kernels_to_copy = [] + self.config_path = "include/attributes" + self.config_extension = "hpp" diff --git a/aidge_core/aidge_export_aidge/operator_export/producer.py b/aidge_core/aidge_export_aidge/operator_export/producer.py index d082e9726b7ca33fbe6f4692bf7b55930b69cb9d..02f2f1f39c6797d7f92a5938d6dbe8853079a624 100644 --- a/aidge_core/aidge_export_aidge/operator_export/producer.py +++ b/aidge_core/aidge_export_aidge/operator_export/producer.py @@ -1,65 +1,30 @@ -from aidge_core.aidge_export_aidge.utils import operator_register -from aidge_core.aidge_export_aidge import ROOT_EXPORT -from aidge_core import dtype, ExportNode, generate_file, generate_str import numpy as np -from pathlib import Path - -# Convert aidge datatype to C++ type -datatype_converter = { - dtype.float64 : "double", - dtype.float32 : "float", - dtype.float16 : "half_float::half", - dtype.int8 : "int8_t", - dtype.int16 : "int16_t", - dtype.int32 : "int32_t", - dtype.int64 : "int64_t", - dtype.uint8 : "uint8_t", - dtype.uint16 : "uint16_t", - dtype.uint32 : "uint32_t", - dtype.uint64 : "uint64_t" -} +from aidge_core.aidge_export_aidge.registry import ExportSerialize +from aidge_core.aidge_export_aidge import ROOT_EXPORT +from aidge_core.export_utils import ExportNodeCpp +from aidge_core import ImplSpec, IOSpec, dtype -@operator_register("Producer") -class Producer(ExportNode): +@ExportSerialize.register("Producer", ImplSpec(IOSpec(dtype.any))) +class Producer(ExportNodeCpp): """ If there is a standardization of the export operators then this class should be just a inheritance of ProducerCPP """ - def __init__(self, node): - super().__init__(node) + def __init__(self, node, mem_info): + super().__init__(node, mem_info) child, in_idx = self.node.output(0)[0] - self.tensor_name = f"{child.name()}_{in_idx}" - self.values = np.array(self.operator.get_output(0)) - - def export(self, export_folder:Path, list_configs:list): - assert(len(self.node.output(0)) == 1) - include_path = f"parameters/{self.tensor_name}.hpp" - filepath = export_folder / f"include/{include_path}" + self.values = np.array(self.operator.get_output(0)) - aidge_tensor = self.operator.get_output(0) - aidge_type = aidge_tensor.dtype() - if aidge_type in datatype_converter: - datatype = datatype_converter[aidge_type] - else: - raise RuntimeError(f"No conversion found for data type {aidge_type}.") - generate_file( - filepath, - ROOT_EXPORT / "templates/parameter.jinja", - dims = aidge_tensor.dims(), - data_t = 
datatype, # TODO : get data from producer - name = self.tensor_name, - values = str(aidge_tensor) - ) - list_configs.append(include_path) - return list_configs + self.config_template = str( + ROOT_EXPORT / "templates/parameter.jinja") + self.forward_template = str( + ROOT_EXPORT / "templates/graph_ctor/producer.jinja") + self.attributes["tensor_name"] = f"{child.name()}_{in_idx}" + self.attributes["values"] = str(self.operator.get_output(0)) + self.include_list = ["aidge/operator/Producer.hpp"] + self.kernels_to_copy = [] + self.config_path = "include/attributes" + self.config_extension = "hpp" - def forward(self, list_actions:list): - list_actions.append(generate_str( - ROOT_EXPORT / "templates/graph_ctor/producer.jinja", - name=self.name, - tensor_name=self.tensor_name, - **self.attributes - )) - return list_actions diff --git a/aidge_core/aidge_export_aidge/operator_export/relu.py b/aidge_core/aidge_export_aidge/operator_export/relu.py index c0f4f6afdc35737a8967f51c1859bda0c9773f88..b8398e30504b534fba755e6c613d361d873e09cd 100644 --- a/aidge_core/aidge_export_aidge/operator_export/relu.py +++ b/aidge_core/aidge_export_aidge/operator_export/relu.py @@ -1,21 +1,14 @@ -from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input -from aidge_core import ExportNode, generate_str +from aidge_core.aidge_export_aidge.registry import ExportSerialize from aidge_core.aidge_export_aidge import ROOT_EXPORT -from pathlib import Path +from aidge_core.export_utils import ExportNodeCpp +from aidge_core import ImplSpec, IOSpec, dtype -@operator_register("ReLU") -class ReLU(ExportNode): - def __init__(self, node): - super().__init__(node) - - def export(self, export_folder:Path, list_configs:list): - return list_configs - - def forward(self, list_actions:list): - list_actions.append(generate_str( - ROOT_EXPORT / "templates/graph_ctor/relu.jinja", - name=self.name, - inputs=parse_node_input(self.node.inputs()), - **self.attributes - )) - return list_actions +@ExportSerialize.register("ReLU", ImplSpec(IOSpec(dtype.any))) +class ReLU(ExportNodeCpp): + def __init__(self, node, mem_info): + super().__init__(node, mem_info) + self.config_template = "" + self.forward_template = str( + ROOT_EXPORT / "templates/graph_ctor/relu.jinja") + self.include_list = ["aidge/operator/ReLU.hpp"] + self.kernels_to_copy = [] diff --git a/aidge_core/aidge_export_aidge/operator_export/sub.py b/aidge_core/aidge_export_aidge/operator_export/sub.py index efcdd0924fbcf6944b0fb95a967e1a3e16ccc3c5..01b68b70f4cfcf3b3899202269106c58cb7e54a1 100644 --- a/aidge_core/aidge_export_aidge/operator_export/sub.py +++ b/aidge_core/aidge_export_aidge/operator_export/sub.py @@ -1,21 +1,14 @@ -from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input -from aidge_core import ExportNode, generate_str +from aidge_core.aidge_export_aidge.registry import ExportSerialize from aidge_core.aidge_export_aidge import ROOT_EXPORT -from pathlib import Path +from aidge_core.export_utils import ExportNodeCpp +from aidge_core import ImplSpec, IOSpec, dtype -@operator_register("Sub") -class Sub(ExportNode): - def __init__(self, node): - super().__init__(node) - - def export(self, export_folder:Path, list_configs:list): - return list_configs - - def forward(self, list_actions:list): - list_actions.append(generate_str( - ROOT_EXPORT / "templates/graph_ctor/sub.jinja", - name=self.name, - inputs=parse_node_input(self.node.inputs()), - **self.attributes - )) - return list_actions +@ExportSerialize.register("Sub", 
ImplSpec(IOSpec(dtype.any))) +class Sub(ExportNodeCpp): + def __init__(self, node, mem_info): + super().__init__(node, mem_info) + self.config_template = "" + self.forward_template = str( + ROOT_EXPORT / "templates/graph_ctor/sub.jinja") + self.include_list = ["aidge/operator/Sub.hpp"] + self.kernels_to_copy = [] diff --git a/aidge_core/aidge_export_aidge/registry.py b/aidge_core/aidge_export_aidge/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..fe94a22399438b9a07673d21220ff1d0ba4a1dda --- /dev/null +++ b/aidge_core/aidge_export_aidge/registry.py @@ -0,0 +1,10 @@ +from aidge_core.export_utils import ExportLib +from . import ROOT_EXPORT +import aidge_core + + +class ExportSerialize(ExportLib): + _name="export_serialize" + +aidge_core.register_Tensor(["export_serialize", aidge_core.dtype.float32], + aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.float32])) diff --git a/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja index 48d07e8db8d5fb116148e9d41100fffa01fcf622..58c52abec545f8e62e21dfb35c9f4a5d652f681e 100644 --- a/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja +++ b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja @@ -1,17 +1,17 @@ #ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H #define EXPORT_ATTRIBUTES_{{name|upper}}_H -#define _{{name|upper}}_IN_CHANNELS {{InChannels}} -#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}} +#define _{{name|upper}}_IN_CHANNELS {{in_chan[0]}} +#define _{{name|upper}}_OUT_CHANNELS {{out_chan[0]}} -{% for i in range(KernelDims|length) %} -#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}} +{% for i in range(kernel_dims|length) %} +#define _{{name|upper}}_KERNEL_{{i}} {{kernel_dims[i]}} {%- endfor %} -{% for i in range(StrideDims|length) %} -#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}} +{% for i in range(stride_dims|length) %} +#define _{{name|upper}}_STRIDE_{{i}} {{stride_dims[i]}} {%- endfor %} -{% for i in range(DilationDims|length) %} -#define _{{name|upper}}_DILATION_{{i}} {{DilationDims[i]}} +{% for i in range(dilation_dims|length) %} +#define _{{name|upper}}_DILATION_{{i}} {{dilation_dims[i]}} {%- endfor %} #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */ diff --git a/aidge_core/aidge_export_aidge/templates/attributes/conv_depth_wise.jinja b/aidge_core/aidge_export_aidge/templates/attributes/conv_depth_wise.jinja new file mode 100644 index 0000000000000000000000000000000000000000..7c2ffff448bb3d028f378ba6fc124abdad6e9ad7 --- /dev/null +++ b/aidge_core/aidge_export_aidge/templates/attributes/conv_depth_wise.jinja @@ -0,0 +1,16 @@ +#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H +#define EXPORT_ATTRIBUTES_{{name|upper}}_H + +#define _{{name|upper}}_CHANNELS {{out_chan[0]}} + +{% for i in range(kernel_dims|length) %} +#define _{{name|upper}}_KERNEL_{{i}} {{kernel_dims[i]}} +{%- endfor %} +{% for i in range(stride_dims|length) %} +#define _{{name|upper}}_STRIDE_{{i}} {{stride_dims[i]}} +{%- endfor %} +{% for i in range(dilation_dims|length) %} +#define _{{name|upper}}_DILATION_{{i}} {{dilation_dims[i]}} +{%- endfor %} + +#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */ diff --git a/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja index e292f9b611978877c47b15e91f926f30d27a1cc5..32f4d00515b1ced28b5e49889c09759f0f0dd0db 100644 --- a/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja +++ 
b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja @@ -1,7 +1,7 @@ #ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H #define EXPORT_ATTRIBUTES_{{name|upper}}_H -#define _{{name|upper}}_IN_CHANNELS {{InChannels}} -#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}} +#define _{{name|upper}}_IN_CHANNELS {{in_chan[0]}} +#define _{{name|upper}}_OUT_CHANNELS {{out_chan[0]}} #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */ diff --git a/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja index d258f580e6ff9c523a87b834fdccf2f3b14fb133..96de14b01167a2f4267343594ded8df6f4b5576d 100644 --- a/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja +++ b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja @@ -1,13 +1,13 @@ #ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H #define EXPORT_ATTRIBUTES_{{name|upper}}_H -{% for i in range(KernelDims|length) %} -#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}} +{% for i in range(kernel_dims|length) %} +#define _{{name|upper}}_KERNEL_{{i}} {{kernel_dims[i]}} {%- endfor %} -{% for i in range(StrideDims|length) %} -#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}} +{% for i in range(stride_dims|length) %} +#define _{{name|upper}}_STRIDE_{{i}} {{stride_dims[i]}} {%- endfor %} -#define _{{name|upper}}_CEIL_MODE {{CeilMode|int}} +#define _{{name|upper}}_CEIL_MODE {{ceil_mode|int}} #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */ diff --git a/aidge_core/aidge_export_aidge/templates/attributes/pad.jinja b/aidge_core/aidge_export_aidge/templates/attributes/pad.jinja new file mode 100644 index 0000000000000000000000000000000000000000..8fb76a6b220c643da9a3f02d38f1757fed0c1b86 --- /dev/null +++ b/aidge_core/aidge_export_aidge/templates/attributes/pad.jinja @@ -0,0 +1,12 @@ +#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H +#define EXPORT_ATTRIBUTES_{{name|upper}}_H + +{%- set half_length = (begin_end_borders|length / 2)|int -%} +{% for i in range(half_length) %} +#define _{{name|upper}}_BEGIN_BORDERS_{{i}} {{begin_end_borders[2*i]}} +#define _{{name|upper}}_END_BORDERS_{{i}} {{begin_end_borders[2*i+1]}} +{%- endfor %} +#define _{{name|upper}}_BORDER_TYPE {{border_type|int}} +#define _{{name|upper}}_BORDER_VALUE {{border_value}} + +#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */ diff --git a/aidge_core/aidge_export_aidge/templates/dnn.jinja b/aidge_core/aidge_export_aidge/templates/dnn.jinja index 5da46b2d8a439a359dfb1c7ec8ebc18e8d516767..bb8faff4a800d9676317f5c0301827e21d19df6d 100644 --- a/aidge_core/aidge_export_aidge/templates/dnn.jinja +++ b/aidge_core/aidge_export_aidge/templates/dnn.jinja @@ -17,7 +17,7 @@ /*** OPERATOR ATTRIBUTES & PARAMETERS ***/ {%- for header in headers %} -#include "{{ header }}" +#include "{{ header | replace('include/', '') }}" {%- endfor %} /*** HEADER ***/ diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja index 8e841ea2a10c71b884736dcbd7cfd03b52c5ad4f..d9f59a94663692b00594f0ecf5f452cc8e2132ca 100644 --- a/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja +++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja @@ -1,7 +1,7 @@ {# NOTE: Trying a shorter notation like {%- for input in inputs if input[0] %} will mess up loop.index as the input set up at None will not increment ! 
#} -{%- for input in inputs %} -{%- if input[0] %} -{{input[0]}}->addChild({{name}}, {{input[1]}}, {{loop.index - 1}}); {# NOTE: loop.index begin at 1 #} +{%- for input_node, out_id in node.inputs() %} +{%- if input_node %} +{{input_node.name()}}->addChild({{name}}, {{out_id}}, {{loop.index - 1}}); {# NOTE: loop.index begin at 1 #} {%- endif %} {%- endfor %} diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/add.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/add.jinja new file mode 100644 index 0000000000000000000000000000000000000000..2bfaf93646fc24f6a44ac170a8c2c932f5daf0fc --- /dev/null +++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/add.jinja @@ -0,0 +1,9 @@ +{% filter indent(width=4, first=False) %} +/*** {{name|upper}} ***/ +std::shared_ptr<Aidge::Node> {{name}} = + Aidge::Add( + "{{name}}" + ); +{% include "./_set_input.jinja" %} +graph->add({{name}}); +{% endfilter %} diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja index a805f8065e87244bf0546ca42d294b86f144a26d..bd4eed2d39f0872759f568b6cc54b8abe3792db7 100644 --- a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja +++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja @@ -5,18 +5,18 @@ std::shared_ptr<Aidge::Node> {{name}} = _{{name|upper}}_IN_CHANNELS, _{{name|upper}}_OUT_CHANNELS, { - {%- for i in range(KernelDims|length) -%} + {%- for i in range(kernel_dims|length) -%} _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%} {%- endfor -%} }, "{{name}}", { - {%- for i in range(StrideDims|length) -%} + {%- for i in range(stride_dims|length) -%} _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%} {%- endfor -%} }, { - {%- for i in range(DilationDims|length) -%} + {%- for i in range(dilation_dims|length) -%} _{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%} {%- endfor -%} } diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv_depth_wise.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv_depth_wise.jinja new file mode 100644 index 0000000000000000000000000000000000000000..f7e1a85bbc084631ea4d26bfadd106bc2a5a69fe --- /dev/null +++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv_depth_wise.jinja @@ -0,0 +1,25 @@ +{% filter indent(width=4, first=False) %} +/*** {{name|upper}} ***/ +std::shared_ptr<Aidge::Node> {{name}} = + Aidge::ConvDepthWise( + _{{name|upper}}_CHANNELS, + { + {%- for i in range(kernel_dims|length) -%} + _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%} + {%- endfor -%} + }, + "{{name}}", + { + {%- for i in range(stride_dims|length) -%} + _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%} + {%- endfor -%} + }, + { + {%- for i in range(dilation_dims|length) -%} + _{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%} + {%- endfor -%} + } + ); +{% include "./_set_input.jinja" %} +graph->add({{name}}); +{% endfilter %} diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja index c6587c128509712e1a8e903e7484476548e9347d..ceb4784a0942e91b44fe6956833dafda26a2e314 100644 --- a/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja +++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja @@ -3,13 +3,13 @@ std::shared_ptr<Aidge::Node> {{name}} = Aidge::MaxPooling( { - {%- for i in range(KernelDims|length) -%} + {%- for i in 
range(kernel_dims|length) -%} _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%} {%- endfor -%} }, "{{name}}", { - {%- for i in range(StrideDims|length) -%} + {%- for i in range(stride_dims|length) -%} _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%} {%- endfor -%} }, diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/pad.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/pad.jinja new file mode 100644 index 0000000000000000000000000000000000000000..a7bd866207be9f048ae431922710b975268a6155 --- /dev/null +++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/pad.jinja @@ -0,0 +1,17 @@ +{% filter indent(width=4, first=False) %} +/*** {{name|upper}} ***/ +{%- set half_length = (begin_end_borders|length / 2)|int -%} +std::shared_ptr<Aidge::Node> {{name}} = + Aidge::Pad<{{half_length}}>( + { + {%- for i in range(half_length) -%} + _{{name|upper}}_BEGIN_BORDERS_{{i}}, _{{name|upper}}_END_BORDERS_{{i}} {%- if not loop.last %}, {% endif -%} + {%- endfor -%} + }, + "{{name}}", + static_cast<Aidge::PadBorderType>(_{{name|upper}}_BORDER_TYPE), + _{{name|upper}}_BORDER_VALUE + ); +{% include "./_set_input.jinja" %} +graph->add({{name}}); +{% endfilter %} diff --git a/aidge_core/aidge_export_aidge/templates/parameter.jinja b/aidge_core/aidge_export_aidge/templates/parameter.jinja index 11a407cc89f72f24167871a594decc6d90ab489d..0ff9634d98c57ad84dabc30625e660ca404612f6 100644 --- a/aidge_core/aidge_export_aidge/templates/parameter.jinja +++ b/aidge_core/aidge_export_aidge/templates/parameter.jinja @@ -4,7 +4,7 @@ #include <aidge/data/Tensor.hpp> #include <memory> -std::shared_ptr<Aidge::Tensor> {{name}} = std::make_shared<Aidge::Tensor>(Aidge::Array{{dims|length}}D<{{data_t}}, {{ dims|join(", ") }}> { +std::shared_ptr<Aidge::Tensor> {{tensor_name}} = std::make_shared<Aidge::Tensor>(Aidge::Array{{out_dims[0]|length}}D<{{out_cdtype[0]}}, {{ out_dims[0]|join(", ") }}> { {{ values }} }); diff --git a/aidge_core/aidge_export_aidge/utils/__init__.py b/aidge_core/aidge_export_aidge/utils.py similarity index 92% rename from aidge_core/aidge_export_aidge/utils/__init__.py rename to aidge_core/aidge_export_aidge/utils.py index ecdf2aec2692a48e108d5f4ad05ed05803319525..31a0d6ee77eafb0883f2882bfaae999307a0fa4b 100644 --- a/aidge_core/aidge_export_aidge/utils/__init__.py +++ b/aidge_core/aidge_export_aidge/utils.py @@ -1,5 +1,3 @@ -from .operator_registry import * - def parse_node_input(node_inputs: list) -> list: """Parse node intputs in order to adapt the list for Jinja. 
diff --git a/aidge_core/aidge_export_aidge/utils/operator_registry.py b/aidge_core/aidge_export_aidge/utils/operator_registry.py deleted file mode 100644 index dd6fbaaceeba9c2125b38354eca9cc116acd29b1..0000000000000000000000000000000000000000 --- a/aidge_core/aidge_export_aidge/utils/operator_registry.py +++ /dev/null @@ -1,18 +0,0 @@ -OPERATORS_REGISTRY = {} - -def operator_register(*args): - - key_list = [arg for arg in args] - - def decorator(operator): - def wrapper(*args, **kwargs): - return operator(*args, **kwargs) - - for key in key_list: - OPERATORS_REGISTRY[key] = operator - - return wrapper - return decorator - -def supported_operators(): - return list(OPERATORS_REGISTRY.keys()) diff --git a/aidge_core/export_utils/__init__.py b/aidge_core/export_utils/__init__.py index 6fc846d93301f45b0635cd9b2fabae65fa7be8ab..a97e978749d1f5480ef8ef1e7e9c5f00d9c3d7df 100644 --- a/aidge_core/export_utils/__init__.py +++ b/aidge_core/export_utils/__init__.py @@ -1,2 +1,6 @@ -from .node_export import * -from .code_generation import * +from .node_export import ExportNode, ExportNodeCpp +from .code_generation import generate_file, generate_str, copy_file +from .export_registry import ExportLib +from .scheduler_export import scheduler_export +from .tensor_export import tensor_to_c, generate_input_file + diff --git a/aidge_core/export_utils/code_generation.py b/aidge_core/export_utils/code_generation.py index a02fc0966702cec7a2cbe33f8411bb71e3035e90..4f0f4634dd8ac09c8c0a86506dc52d420889b22a 100644 --- a/aidge_core/export_utils/code_generation.py +++ b/aidge_core/export_utils/code_generation.py @@ -1,7 +1,8 @@ from pathlib import Path -from jinja2 import Environment, FileSystemLoader +from jinja2 import Environment, FileSystemLoader, StrictUndefined from typing import Union - +import os +import shutil def generate_file(file_path: Union[Path, str], template_path: Union[Path, str], **kwargs) -> None: """Generate a file at `file_path` using the jinja template located at `file_path`. 
@@ -18,16 +19,14 @@ def generate_file(file_path: Union[Path, str], template_path: Union[Path, str], file_path = Path(file_path) if isinstance(template_path, str): template_path = Path(template_path) + if not template_path.exists(): + raise ValueError(f"Path to template {template_path} is not valid !") # Make dir file_path.parent.mkdir(parents=True, exist_ok=True) - # Select template - template = Environment(loader=FileSystemLoader( - template_path.parent)).get_template(template_path.name) - # Generate file with open(file_path, mode="w", encoding="utf-8") as file: - file.write(template.render(kwargs)) + file.write(generate_str(template_path, **kwargs)) def generate_str(template_path: Union[Path, str], **kwargs) -> str: @@ -43,4 +42,12 @@ def generate_str(template_path: Union[Path, str], **kwargs) -> str: if isinstance(template_path, str): template_path = Path(template_path) return Environment(loader=FileSystemLoader( - template_path.parent)).get_template(template_path.name).render(kwargs) + template_path.parent), undefined=StrictUndefined, keep_trailing_newline=True).get_template(template_path.name).render(kwargs) + +def copy_file(filename, dst_folder): + + # If directory doesn't exist, create it + if not os.path.exists(dst_folder): + os.makedirs(dst_folder) + + shutil.copy(filename, dst_folder) diff --git a/aidge_core/export_utils/data_conversion.py b/aidge_core/export_utils/data_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..401fc39f2a70245a67719699b5f0cdc61108e0cf --- /dev/null +++ b/aidge_core/export_utils/data_conversion.py @@ -0,0 +1,30 @@ +import numpy as np +import aidge_core + + +datatype_converter_aide2c = { + aidge_core.dtype.float64 : "double", + aidge_core.dtype.float32 : "float", + aidge_core.dtype.float16 : "half_float::half", + aidge_core.dtype.int8 : "int8_t", + aidge_core.dtype.int16 : "int16_t", + aidge_core.dtype.int32 : "int32_t", + aidge_core.dtype.int64 : "int64_t", + aidge_core.dtype.uint8 : "uint8_t", + aidge_core.dtype.uint16 : "uint16_t", + aidge_core.dtype.uint32 : "uint32_t", + aidge_core.dtype.uint64 : "uint64_t" +} + +def aidge2c(datatype): + """Convert a aidge datatype to C type + + :param datatype: Aidge datatype to convert + :type datatype: :py:object:`aidge_core.DataType` + :return: A string representing the C type + :rtype: string + """ + if datatype in datatype_converter_aide2c: + return datatype_converter_aide2c[datatype] + else: + raise ValueError(f"Unsupported {datatype} aidge datatype") diff --git a/aidge_core/export_utils/export_registry.py b/aidge_core/export_utils/export_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..fd24008a6de6c58c1e78f088e086817e2a769373 --- /dev/null +++ b/aidge_core/export_utils/export_registry.py @@ -0,0 +1,118 @@ +from typing import Dict, List +import aidge_core +from aidge_core.export_utils import ExportNode + +class classproperty: + """Helper class to define class properties, + Is equivalent to applying the decorator ``@property`` and ``@classmethod``. + But these two decorator are exclusive with python > 12. + See discussion https://discuss.python.org/t/add-a-supported-read-only-classproperty-decorator-in-the-stdlib/18090/12 + """ + + def __init__(self, fget): + self.fget = fget + + def __get__(self, instance, owner): + return self.fget(owner) + + +# TODO: very naive implementation ! +# error handling should be added ! +class ExportLib(aidge_core.OperatorImpl): + """Aidge export lib, define a registry + """ + # PUBLIC + # Lib name useful ? 
+ # Help define namespace + _name: str = None + # key: Path where static file is + # Value: Path where to copy the file relative to the export root + static_files: Dict[str, str] = {} + # Custom main generation jinja file + main_jinja_path = None + # Main memory section + memory_section = None + # PRIVATE + # Registry of exportNode, class level dictionary, shared across all ExportLib + _cls_export_node_registry = {} + + def __init__(self, operator): + super(ExportLib, self).__init__(operator, self._name) + + @classproperty + def _export_node_registry(cls) -> Dict[str, List['ExportNode']]: + """Define as a class property to access the registry at class level while keeping it at instance level. + + :return: The export node registry specific to the class + :rtype: Dict[str, List[ExportNode]] + """ + return cls._cls_export_node_registry.setdefault(cls, {}) + + # Override the virtual OperatorImpl method, in order to provide available + # implementation specifications + def get_available_impl_specs(self): + if self.get_operator().type() in self._export_node_registry: + spec_vec = [i for i, _ in self._export_node_registry[self.get_operator().type()]] + return spec_vec + else: + return [] + + def get_export_node(self, spec: aidge_core.aidge_core.ImplSpec): + for registered_spec, export_node in self._export_node_registry[self.get_operator().type()]: + if registered_spec == spec: + + return export_node + return None + + # Decorator to register kernels for this export + @classmethod + def register(cls, op_type, spec): + def decorator(operator): + class Wrapper(operator): + def __init__(self, *args, **kwargs): + return operator(*args, **kwargs) + type_list = [] + if isinstance(op_type, list): + type_list = op_type + elif isinstance(op_type, str): + type_list = [op_type] + else: + raise TypeError(f"Argument 'op_type' of register method should be of type 'List[str]' or 'str', got {type(op_type)}") + + for type_name in type_list: + if (type_name not in cls._export_node_registry): + cls._export_node_registry[type_name] = [] + cls._export_node_registry[type_name].append((spec, operator)) + + register_func: str = f"register_{type_name}Op" + # If operator is not defined, then it means we try to register a MetaOperator + if register_func not in dir(aidge_core): + raise ValueError(f"Operator of type: {type_name} is not declared as registrable!\nHint: If you try to register a MetaOperator use register_metaop instead.") + else: + # Equivalent to aidge_core.register_ConvOp("ExportLibX", ExportLibX) + aidge_core.__getattribute__(register_func)(cls._name, cls) + return Wrapper + return decorator + + # Decorator to register kernels for this export + @classmethod + def register_metaop(cls, op_type, spec): + def decorator(operator): + class Wrapper(operator): + def __init__(self, *args, **kwargs): + return operator(*args, **kwargs) + type_list = [] + if isinstance(op_type, list): + type_list = op_type + elif isinstance(op_type, str): + type_list = [op_type] + else: + raise TypeError(f"Argument 'op_type' of register_metaop method should be of type 'List[str]' or 'str', got {type(op_type)}") + for type_name in type_list: + if (type_name not in cls._export_node_registry): + cls._export_node_registry[type_name] = [] + cls._export_node_registry[type_name].append((spec, operator)) + aidge_core.register_MetaOperatorOp([cls._name, type_name], cls) + spec.attrs.add_attr("type", type_name) # MetaOperator specs need to verify the type + return Wrapper + return decorator diff --git a/aidge_core/export_utils/node_export.py
b/aidge_core/export_utils/node_export.py index 7aceaa0ccc1f07674241d6f35bbeff90330f2596..479eaf01ff8c8e85a3bf83adac88f5ee7fe86857 100644 --- a/aidge_core/export_utils/node_export.py +++ b/aidge_core/export_utils/node_export.py @@ -1,61 +1,324 @@ -from aidge_core import Node, Attributes +import aidge_core +from pathlib import Path +from aidge_core.export_utils import data_conversion, code_generation from abc import ABC, abstractmethod +from typing import List + + +def get_chan(tensor: aidge_core.Tensor) -> int: + """Given a tensor return the number of channel + """ + dformat = tensor.dformat() + dims = tensor.dims() + if dformat == aidge_core.dformat.default: + if len(dims) == 4: # Suppose NCHW + return dims[1] + elif len(dims) == 2: # Suppose NC + return dims[1] + else: + return None + elif dformat == aidge_core.dformat.nchw: + return dims[1] + elif dformat == aidge_core.dformat.nhwc: + return dims[3] + elif dformat == aidge_core.dformat.chwn: + return dims[0] + elif dformat == aidge_core.dformat.ncdhw: + return dims[1] + elif dformat == aidge_core.dformat.ndhwc: + return dims[4] + elif dformat == aidge_core.dformat.cdhwn: + return dims[0] + else: + raise RuntimeError(f"Unknown dataformat: {dformat}") + + +def get_height(tensor: aidge_core.Tensor) -> int: + dformat = tensor.dformat() + dims = tensor.dims() + if dformat == aidge_core.dformat.default: + if len(dims) == 4: # Suppose NCHW + return dims[2] + elif len(dims) == 2: # Suppose NC + return 1 + else: + return None + elif dformat == aidge_core.dformat.nchw: + return dims[2] + elif dformat == aidge_core.dformat.nhwc: + return dims[1] + elif dformat == aidge_core.dformat.chwn: + return dims[1] + elif dformat == aidge_core.dformat.ncdhw: + return dims[3] + elif dformat == aidge_core.dformat.ndhwc: + return dims[2] + elif dformat == aidge_core.dformat.cdhwn: + return dims[2] + else: + raise RuntimeError(f"Unknown dataformat: {dformat}") + + +def get_width(tensor: aidge_core.Tensor) -> int: + dformat = tensor.dformat() + dims = tensor.dims() + if dformat == aidge_core.dformat.default: + if len(dims) == 4: # Suppose NCHW + return dims[3] + elif len(dims) == 2: # Suppose NC + return 1 + else: + return None + elif dformat == aidge_core.dformat.nchw: + return dims[3] + elif dformat == aidge_core.dformat.nhwc: + return dims[2] + elif dformat == aidge_core.dformat.chwn: + return dims[2] + elif dformat == aidge_core.dformat.ncdhw: + return dims[4] + elif dformat == aidge_core.dformat.ndhwc: + return dims[3] + elif dformat == aidge_core.dformat.cdhwn: + return dims[3] + else: + raise RuntimeError(f"Unknown dataformat: {dformat}") class ExportNode(ABC): """Abstract class to interface node with export generation. 
+ + This class exposes a dictionary ``attributes`` which contains all the information required to generate an export: + - All the attributes of the Aidge node are automatically fetched; the key to get an attribute is the attribute name in Python format, for example ``no_bias`` + - **name**: Name of the Node, ``str`` + - **node**: Aidge Node, :py:class:`aidge_core.Node` + - **nb_in**: Number of inputs, ``int`` + - **in_name**: Unique name for each input; if there is no input node the name is ``{node_name}_input_{in_id}``, if there is a parent, the name is ``{parent_name}_output_{out_id}``, ``list[str]`` + - **in_dims**: A list of the dimensions for each input, ``list[list[int]]`` + - **in_node**: A list of the Node associated with each input, ``list[aidge_core.Node]`` + - **in_size**: A list of the size of each input, ``list[int]`` + - **in_chan**: A list of channels for each input, deduced from the dataformat, ``list[int]`` + - **in_height**: A list of heights for each input, deduced from the dataformat, ``list[int]`` + - **in_width**: A list of widths for each input, deduced from the dataformat, ``list[int]`` + - **in_dtype**: A list of types (Aidge format) for each input, ``List[:py:class:`aidge_core.dtype`]`` + - **in_cdtype**: A list of types (C/C++ format) for each input, ``List[str]`` + - **out_name**: Unique name for each output; the name is ``{name}_output_{out_id}``, ``list[str]`` + - **out_node**: A list of lists of Nodes associated with each output, ``list[list[aidge_core.Node]]`` + - **nb_out**: Number of outputs, ``int`` + - **out_dims**: A list of the dimensions for each output, ``list[list[int]]`` + - **out_size**: A list of the size of each output, ``list[int]`` + - **out_chan**: A list of channels for each output, deduced from the dataformat, ``list[int]`` + - **out_height**: A list of heights for each output, deduced from the dataformat, ``list[int]`` + - **out_width**: A list of widths for each output, deduced from the dataformat, ``list[int]`` + - **out_dtype**: A list of types (Aidge format) for each output, ``List[:py:class:`aidge_core.dtype`]`` + - **out_cdtype**: A list of types (C/C++ format) for each output, ``List[str]`` + - **mem_info**: True if mem_info is available for this node, ``bool`` + - **mem_info_size**: A list of memory sizes for each output, ``List[int]`` + - **mem_info_offset**: A list of offsets to access each output, ``List[int]`` + - **mem_info_stride**: old N2D2 + - **mem_info_length**: old N2D2 + - **mem_info_cont_size**: old N2D2 + - **mem_info_cont_offset**: old N2D2 + - **mem_info_wrap_offset**: old N2D2 + - **mem_info_wrap_size**: old N2D2 """ @abstractmethod - def __init__(self, aidge_node: Node) -> None: - """Create ExportNode and retieve attirubtes from ``aidge_node``: - - - name: aidge Node name - - attributes: dictionnary of attributes of the aidge Operator linked to the node, attributes name follow aidge naming convention - - parameters: List of parameters node, order in the list is the same as the one defined by the aidge operator - + def __init__(self, aidge_node: aidge_core.Node, mem_info: List[dict]=None) -> None: + """Create ExportNode and retrieve attributes from ``aidge_node``: """ + super().__init__() self.node = aidge_node self.operator = aidge_node.get_operator() - self.name = self.node.name() - self.attributes = self.operator.attr.dict() if self.operator.attr is not None else {} # Attributes are auto fetched from aidge operators - - # rename is_leaf ?
- self.is_last = len(self.node.get_children()) == 0 - + # Attributes are auto fetched from aidge operators + self.attributes = {} if isinstance(self.operator, aidge_core.MetaOperatorOp) or self.operator.attr is None else self.operator.attr.dict() + self.attributes["node"] = self.node + self.attributes["name"] = self.node.name() + self.attributes["nb_in"] = self.node.get_nb_inputs() + self.attributes["nb_out"] = self.node.get_nb_outputs() + # List of input nodes self.inputs = [] + # List of output nodes self.outputs = [] - self.inputs_dims = [] - self.outputs_dims = [] - for idx, parent_node in enumerate(self.node.get_parents()): - self.inputs.append(parent_node) - if parent_node is not None: - self.inputs_dims.append(self.operator.get_input(idx).dims()) - else: - if self.operator.get_input(idx) is not None: - self.inputs_dims.append(self.operator.get_input(idx).dims()) - else: - self.inputs_dims.append(None) + self.attributes["in_name"] = [None] * self.attributes["nb_in"] + self.attributes["in_node"] = [None] * self.attributes["nb_in"] + self.attributes["in_dims"] = [None] * self.attributes["nb_in"] + self.attributes["in_size"] = [None] * self.attributes["nb_in"] + self.attributes["in_dformat"] = [None] * self.attributes["nb_in"] + self.attributes["in_format"] = [None] * self.attributes["nb_in"] + self.attributes["in_dtype"] = [None] * self.attributes["nb_in"] + self.attributes["in_cdtype"] = [None] * self.attributes["nb_in"] + self.attributes["in_chan"] = [None] * self.attributes["nb_in"] + self.attributes["in_height"] = [None] * self.attributes["nb_in"] + self.attributes["in_width"] = [None] * self.attributes["nb_in"] - for idx, child_node in enumerate(self.node.get_children()): - self.outputs.append(child_node) + self.attributes["out_name"] = [None] * self.attributes["nb_out"] + self.attributes["out_nodes"] = [None] * self.attributes["nb_out"] + self.attributes["out_dims"] = [None] * self.attributes["nb_out"] + self.attributes["out_size"] = [None] * self.attributes["nb_out"] + self.attributes["out_dformat"] = [None] * self.attributes["nb_out"] + self.attributes["out_format"] = [None] * self.attributes["nb_out"] + self.attributes["out_dtype"] = [None] * self.attributes["nb_out"] + self.attributes["out_cdtype"] = [None] * self.attributes["nb_out"] + self.attributes["out_chan"] = [None] * self.attributes["nb_out"] + self.attributes["out_height"] = [None] * self.attributes["nb_out"] + self.attributes["out_width"] = [None] * self.attributes["nb_out"] - # Dirty hot fix, change it quickly - self.outputs_dims.append(self.operator.get_output(0).dims()) + # Producer don't have mem_info + # TODO: document this attribute + # true if node have mem_info else false + self.attributes["mem_info"] = mem_info is not None and self.node.type() != "Producer" + if self.attributes["mem_info"]: + self.attributes["mem_info_size"] = [None] * self.attributes["nb_out"] + self.attributes["mem_info_offset"] = [None] * self.attributes["nb_out"] + self.attributes["mem_info_stride"] = [None] * self.attributes["nb_out"] + self.attributes["mem_info_length"] = [None] * self.attributes["nb_out"] + self.attributes["mem_info_cont_size"] = [None] * self.attributes["nb_out"] + self.attributes["mem_info_cont_offset"] = [None] * self.attributes["nb_out"] + self.attributes["mem_info_wrap_offset"] = [None] * self.attributes["nb_out"] + self.attributes["mem_info_wrap_size"] = [None] * self.attributes["nb_out"] - @abstractmethod - def export(self, export_folder:str, list_configs:list): + for idx, parent_node_in_id in 
enumerate(self.node.inputs()): + parent_node, out_id = parent_node_in_id + self.inputs.append(parent_node) + if self.operator.get_input(idx) is not None: + tensor = self.operator.get_input(idx) + self.attributes["in_name"][idx] = f"{self.attributes['name']}_input_{idx}" if parent_node is None else f"{parent_node.name()}_output_{out_id}" + self.attributes["in_node"][idx] = parent_node + self.attributes["in_dims"][idx] = tensor.dims() + self.attributes["in_size"][idx] = tensor.size() + self.attributes["in_dformat"][idx] = tensor.dformat() + self.attributes["in_format"][idx] = aidge_core.format_as(tensor.dformat()) + self.attributes["in_dtype"][idx] = tensor.dtype() + self.attributes["in_cdtype"][idx] = data_conversion.aidge2c( + tensor.dtype()) + self.attributes["in_chan"][idx] = get_chan(tensor) + self.attributes["in_height"][idx] = get_height(tensor) + self.attributes["in_width"][idx] = get_width(tensor) + elif self.operator.input_category(idx) == aidge_core.InputCategory.OptionalParam or \ + self.operator.input_category(idx) == aidge_core.InputCategory.OptionalData: + pass + else: + raise RuntimeError(f"No input for {self.node.name()} at input {idx}, did you forget to forward dims?") + for idx, list_child_node_in_id in enumerate(self.node.outputs()): + out_nodes = [node_in_id[0] + for node_in_id in list_child_node_in_id] + self.outputs += out_nodes + if self.operator.get_output(idx) is not None: + tensor = self.operator.get_output(idx) + self.attributes["out_name"][idx] = f"{self.attributes['name']}_output_{idx}" + self.attributes["out_nodes"][idx] = out_nodes + self.attributes["out_dims"][idx] = tensor.dims() + self.attributes["out_size"][idx] = tensor.size() + self.attributes["out_dformat"][idx] = tensor.dformat() + self.attributes["out_format"][idx] = aidge_core.format_as(tensor.dformat()) + self.attributes["out_dtype"][idx] = tensor.dtype() + self.attributes["out_cdtype"][idx] = data_conversion.aidge2c( + tensor.dtype()) + self.attributes["out_chan"][idx] = get_chan(tensor) + self.attributes["out_height"][idx] = get_height(tensor) + self.attributes["out_width"][idx] = get_width(tensor) + # Output mem_info + # TODO: add to docstring + if self.attributes["mem_info"]: + if "size" in mem_info[idx]: + self.attributes["mem_info_size"][idx] = mem_info[idx]["size"] + else: + raise RuntimeError("Size is mandatory") + if "offset" in mem_info[idx]: + self.attributes["mem_info_offset"][idx] = mem_info[idx]["offset"] + else: + raise RuntimeError("Offset is mandatory") + if "stride" in mem_info[idx]: + self.attributes["mem_info_stride"][idx] = mem_info[idx]["stride"] + else: + self.attributes["mem_info_stride"][idx] = mem_info[idx]["size"] + if "length" in mem_info[idx]: + self.attributes["mem_info_length"][idx] = mem_info[idx]["length"] + else: + self.attributes["mem_info_length"][idx] = tensor.size() + if "cont_size" in mem_info[idx]: + self.attributes["mem_info_cont_size"][idx] = mem_info[idx]["cont_size"] + else: + self.attributes["mem_info_cont_size"][idx] = mem_info[idx]["size"] + if "cont_offset" in mem_info[idx]: + self.attributes["mem_info_cont_offset"][idx] = mem_info[idx]["cont_offset"] + else: + self.attributes["mem_info_cont_offset"][idx] = mem_info[idx]["offset"] + if "cont_offset" in mem_info[idx]: + self.attributes["mem_info_wrap_offset"][idx] = mem_info[idx]["wrap_offset"] + else: + self.attributes["mem_info_wrap_offset"][idx] = 0 + if "wrap_size" in mem_info[idx]: + self.attributes["mem_info_wrap_size"][idx] = mem_info[idx]["wrap_size"] + else: + 
self.attributes["mem_info_wrap_size"][idx] = 0 + else: + raise RuntimeError(f"No output for {self.node.name()}") + +class ExportNodeCpp(ExportNode): + # Path to the template defining how to export the node definition + config_template: str = None + # Path to the template defining how to export the node definition + forward_template: str = None + # List of includes to add example "include/toto.hpp" + include_list: list = None + # A list of path of kernels to copy in the export + # kernels are copied in str(export_folder / "include" / "kernels") + # They are automatically added to the include list. + kernels_to_copy: list = None + # Path where all the kernels are stored in the export (prefixed by export_root) + kernels_path: str = "include/kernels" + # Path of config folders + config_path: str = "include/layers" + # Config_folder_extension + config_extension: str = "h" + def export(self, export_folder: str): """Define how to export the node definition. """ - pass + if self.config_template is None: + raise ValueError("config_template have not been defined") + if self.include_list is None: + raise ValueError("include_list have not been defined") + if self.kernels_to_copy is None: + raise ValueError("kernels_to_copy have not been defined") - @abstractmethod - def forward(self, list_actions:list): + kernel_include_list = [] + for kernel in self.kernels_to_copy: + kernel_path = Path(kernel) + code_generation.copy_file( + kernel_path, + str(export_folder / self.kernels_path) + ) + kernel_include_list.append( + self.kernels_path + "/" + kernel_path.stem + kernel_path.suffix) + + if self.config_template != "": + path_to_definition = f"{self.config_path}/{self.attributes['name']}.{self.config_extension}" + + try: + code_generation.generate_file( + str(export_folder / path_to_definition), + self.config_template, + **self.attributes + ) + except Exception as e: + raise RuntimeError(f"Error when creating config file for {self.node.name()}[{self.node.type()}].") from e + kernel_include_list.append(path_to_definition) + + return self.include_list + kernel_include_list + + def forward(self): """Define how to generate code to perform a forward pass. 
""" - pass - + if self.forward_template is None: + raise ValueError("forward_template have not been defined") + forward_call: str = code_generation.generate_str( + self.forward_template, + **self.attributes + ) + return [forward_call] diff --git a/aidge_core/export_utils/scheduler_export.py b/aidge_core/export_utils/scheduler_export.py new file mode 100644 index 0000000000000000000000000000000000000000..df0b4a385327e4bdccd6fe4de46043d151658dbd --- /dev/null +++ b/aidge_core/export_utils/scheduler_export.py @@ -0,0 +1,159 @@ +import aidge_core +import os +import shutil +from pathlib import Path +from aidge_core.export_utils import ExportLib, generate_file, copy_file +from typing import List, Tuple + + +def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib = None, memory_manager=None, memory_manager_args=None, labels=False) -> None: + graphview = scheduler.graph_view() + export_folder = Path().absolute() / export_folder_path + + os.makedirs(str(export_folder), exist_ok=True) + + dnn_folder = export_folder / "dnn" + os.makedirs(str(dnn_folder), exist_ok=True) + + if memory_manager_args is None: + memory_manager_args = {} + + if memory_manager is None: + raise ValueError("A memory manager is required (no default value yet).") + peak_mem, mem_info = memory_manager( + scheduler, **memory_manager_args) + + # List of function call for forward.cpp + list_actions: List[str] = [] + # List of headers for forward.cpp + list_configs: List[str] = [] + + inputs_name: List[str] = [] + inputs_dtype: List[str] = [] + outputs_name: List[str] = [] + outputs_dtype: List[str] = [] + outputs_size: List[int] = [] + + # List of aidge_core.Node ordered by scheduler + list_forward_nodes: List[aidge_core.Node] = scheduler.get_static_scheduling() + + # If exportLib define use it + # else parse component in platform + # if export_lib is None: + # raise ValueError("Export need an ExportLib.") + for node in list_forward_nodes: + if export_lib is not None: + aidge_core.Log.debug(f"Setting backend {export_lib._name} to {node.name()}[{node.type()}].") + node.get_operator().set_backend(export_lib._name) + + op_impl = node.get_operator().get_impl() + if op_impl is None: + raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an implementation.") + if not isinstance(op_impl, ExportLib): + raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an exportable backend ({op_impl}).") + + is_input:bool = node in graphview.get_input_nodes() + is_output:bool = node in graphview.get_output_nodes() + + if is_input: + # GraphView.get_inputs_nodes() returns the nodes that have an Input set to None or not in the graph + # However, some inputs are Optional and thus the node may not be an input of the graph! + # So we need ot check that all the inputs of the nodes or in the graph or not optional + # This is what the following code block is checking. 
+ for idx, node_in in enumerate(node.inputs()): + optional:bool = node.get_operator().input_category(idx) == aidge_core.InputCategory.OptionalData + # Note: node_in is a Tuple(Node, out_idx) + in_graph:bool = node_in[0] in graphview.get_nodes() + is_input &= (in_graph or not optional) + + # Get operator current specs + required_specs = op_impl.get_required_spec() + # Get specs of the implementation that match current specs + specs = op_impl.get_best_match(required_specs) + # Retrieve said implementation + export_node = op_impl.get_export_node(specs) + + if export_node is None: + raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].") + # Instanciate ExportNode + op = export_node(node, mem_info[node]) + + # For configuration files + list_configs += op.export(dnn_folder) + # For forward file + list_actions += op.forward() + if is_input: + for idx, node_in in enumerate(node.inputs()): + if node_in[0] not in graphview.get_nodes(): + inputs_name.append(op.attributes["in_name"][idx]) + inputs_dtype.append( + op.attributes["in_cdtype"][idx] + ) + if is_output: + for idx in range(len(node.outputs())): + outputs_name.append(op.attributes["out_name"][idx]) + outputs_dtype.append( + op.attributes["out_cdtype"][idx] + ) + outputs_size.append(op.attributes["out_size"][idx]) + + func_name = "model_forward" + + + args = ", ".join([f"const {dtype}* {name}" for name, + dtype in zip(inputs_name, inputs_dtype)]) + args += ", " +", ".join([f"{dtype}** {name}" for name, + dtype in zip(outputs_name, outputs_dtype)]) + forward_func = f"void {func_name}({args})" + + ROOT = Path(__file__).resolve().parents[0] + generate_file( + str(dnn_folder / "src" / "forward.cpp"), + str(ROOT / "templates" / "forward.jinja"), + func_name=func_name, + headers=set(list_configs), + actions=list_actions, + mem_ctype=inputs_dtype[0], # Legacy behavior ... 
+ mem_section=export_lib.mem_section, + peak_mem=peak_mem, + inputs_name=inputs_name, + inputs_dtype=inputs_dtype, + outputs_name=outputs_name, + outputs_dtype=outputs_dtype + ) + + # Generate dnn API + generate_file( + str(dnn_folder / "include" / "forward.hpp"), + str(ROOT / "templates" / "forward_header.jinja"), + libraries=[], + func_name=func_name, + inputs_name=inputs_name, + inputs_dtype=inputs_dtype, + outputs_name=outputs_name, + outputs_dtype=outputs_dtype + ) + + if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size): + raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.") + + if export_lib is not None and export_lib.main_jinja_path is not None: + main_jinja_path = export_lib.main_jinja_path + else : + main_jinja_path = str(ROOT / "templates" / "main.jinja") + + generate_file( + str(export_folder / "main.cpp"), + main_jinja_path, + func_name=func_name, + inputs_name=inputs_name, + outputs_name=outputs_name, + outputs_dtype=outputs_dtype, + outputs_size=outputs_size, + labels=labels + ) + + if export_lib is not None: + # Copy all static files in the export + for source, destination in export_lib.static_files.items(): + copy_file(source, str(export_folder / destination)) diff --git a/aidge_core/export_utils/templates/c_data.jinja b/aidge_core/export_utils/templates/c_data.jinja new file mode 100644 index 0000000000000000000000000000000000000000..3709379e5c5417233341837d853cc6f68872a194 --- /dev/null +++ b/aidge_core/export_utils/templates/c_data.jinja @@ -0,0 +1,6 @@ +{#- For libraries #} +#include <stdint.h> + {# Design header of the array -#} +static const {{ data_t }} {{ name }}[{{ dims |join("*") }}] __attribute__((section("nn_data"))) = { +{{ values |join(", ") }} +}; diff --git a/aidge_core/export_utils/templates/forward.jinja b/aidge_core/export_utils/templates/forward.jinja new file mode 100644 index 0000000000000000000000000000000000000000..fde4b2a1392c4ada353af06246951e26c6236df6 --- /dev/null +++ b/aidge_core/export_utils/templates/forward.jinja @@ -0,0 +1,41 @@ + +#include <stdint.h> + +#ifdef SAVE_OUTPUTS +#include <sys/types.h> +#include <sys/stat.h> +#endif + +#include "include/forward.hpp" + +// Layer & memory configurations +{%- for header in headers %} +#include "{{ header }}" +{%- endfor %} + +// Memory block +{%- if mem_section == None %} +static {{mem_ctype}} mem[{{peak_mem}}]; +{%- else %} +static {{mem_ctype}} mem[{{peak_mem}}] __attribute__((section("{{ mem_section }}"))); +{%- endif %} + +{# Forward function #} +{#- Support multiple inputs with different datatypes and multiple outputs with different datatypes -#} +void {{ func_name }} ( + {%- for i in range(inputs_name | length) -%} + const {{ inputs_dtype[i] }}* {{ inputs_name[i] }}, + {%- endfor -%} + {%- for o in range(outputs_name | length) -%} + {{ outputs_dtype[o] }}** {{ outputs_name[o] }}_ptr{% if not loop.last %}, {% endif %} + {%- endfor -%}) +{ + + {%- for action in actions %} + {{ action }} + {%- endfor %} + + {%- for output_name in outputs_name %} + *{{ output_name }}_ptr = {{ output_name }}; + {%- endfor %} +} diff --git a/aidge_core/export_utils/templates/forward_header.jinja b/aidge_core/export_utils/templates/forward_header.jinja new file mode 100644 index 0000000000000000000000000000000000000000..574f5323866786f3b6d6f98af53b8ccf3d9975b3 --- /dev/null +++ b/aidge_core/export_utils/templates/forward_header.jinja @@ -0,0 +1,25 @@ +#ifndef DNN_HPP +#define DNN_HPP + +#ifdef __cplusplus +extern "C" { +#endif + +{#- 
For libraries #} +{% for lib in libraries %} +#include <{{ lib }}> +{%- endfor %} + +void {{ func_name }} ( + {%- for i in range(inputs_name | length) -%} + const {{ inputs_dtype[i] }}* {{ inputs_name[i] }}, + {%- endfor -%} + {%- for o in range(outputs_name | length) %} + {{ outputs_dtype[o] }}** {{ outputs_name[o] }}{% if not loop.last %}, {% endif %} + {%- endfor -%}); + +#ifdef __cplusplus +} +#endif + +#endif /* DNN_HPP */ diff --git a/aidge_core/export_utils/templates/main.jinja b/aidge_core/export_utils/templates/main.jinja new file mode 100644 index 0000000000000000000000000000000000000000..d0c22719a5d9b9eaa15d3ef9ef86307a060b54be --- /dev/null +++ b/aidge_core/export_utils/templates/main.jinja @@ -0,0 +1,40 @@ + +#include <iostream> +#include "forward.hpp" +{% for name in inputs_name %} +#include "{{ name }}.h" +{% endfor %} + +{% set printf_formats = { + "double": "%lf", + "float": "%f", + "int8_t": "%hhd", + "int16_t": "%hd", + "int32_t": "%d", + "int64_t": "%lld", + "uint8_t": "%hhu", + "uint16_t": "%hu", + "uint32_t": "%u", + "uint64_t": "%llu" +} %} + +int main() +{ + // Initialize the output arrays + {%- for o in range(outputs_name | length) %} + {{ outputs_dtype[o] }}* {{ outputs_name[o] }} = nullptr; + {% endfor %} + + // Call the forward function + {{ func_name }}({{ inputs_name|join(", ") }}, &{{ outputs_name|join(", &") }}); + + // Print the results of each output + {%- for o in range(outputs_name | length) %} + printf("{{ outputs_name[o] }}:\n"); + for (int o = 0; o < {{ outputs_size[o] }}; ++o) { + printf("{{ printf_formats[outputs_dtype[o]] }} ", {{ outputs_name[o] }}[o]); + } + printf("\n"); + {% endfor %} + return 0; +} diff --git a/aidge_core/export_utils/tensor_export.py b/aidge_core/export_utils/tensor_export.py new file mode 100644 index 0000000000000000000000000000000000000000..43f013dd02e730ae72c89a5bbe329c4fbb8a0324 --- /dev/null +++ b/aidge_core/export_utils/tensor_export.py @@ -0,0 +1,40 @@ +import os + +from aidge_core.export_utils.code_generation import generate_file +from aidge_core.export_utils.data_conversion import aidge2c +from aidge_core import Tensor +from pathlib import Path + +def tensor_to_c(tensor:Tensor)->str: + """Given a :py:class:``aigd_core.Tensor``, return a C description of the tensor. 
+    For example:
+    {
+        {1, 2},
+        {3, 4}
+    }
+
+    :param tensor: Tensor to transform to a string
+    :type tensor: Tensor
+    :return: String representation of a C array
+    :rtype: str
+    """
+    return str(tensor)
+
+def generate_input_file(export_folder:str,
+                        array_name:str,
+                        tensor:Tensor):
+
+    # If the directory doesn't exist, create it
+    if not os.path.exists(export_folder):
+        os.makedirs(export_folder)
+    print(f"gen : {export_folder}/{array_name}.h")
+    ROOT = Path(__file__).resolve().parents[0]
+    generate_file(
+        file_path=f"{export_folder}/{array_name}.h",
+        template_path=str(ROOT / "templates" / "c_data.jinja"),
+        dims = tensor.dims(),
+        data_t = aidge2c(tensor.dtype()),
+        name = array_name,
+        values = list(tensor)
+    )
+
diff --git a/aidge_core/mem_info.py b/aidge_core/mem_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f2f48dd64aba276e70940d5bf461da6f50a4b38
--- /dev/null
+++ b/aidge_core/mem_info.py
@@ -0,0 +1,119 @@
+import os
+import shutil
+from pathlib import Path
+import aidge_core
+from typing import Tuple, List
+
+
+# Default memory management, which can be used for development
+def compute_default_mem_info(scheduler: aidge_core.Scheduler) -> Tuple[int, dict]:
+    """Basic memory management that simply concatenates memory blocks, with no memory reuse!
+
+    :param scheduler: Aidge scheduler
+    :type scheduler: :py:class:`aidge_core.Scheduler`
+    :return: The total memory size (in number of elements) and a dictionary mapping each node to a list (one entry per output) of dictionaries (size, offset)
+    :rtype: Tuple[int, dict]
+    """
+    mem_info = {}
+    mem_size = 0
+
+    # Exclude Producers and the last layers (because the results are stored outside the export)
+    for i, node in enumerate(scheduler.get_static_scheduling()):
+        if node.type() != "Producer":
+            node_mem_info = []
+            for out_id in range(node.get_nb_outputs()):
+                dims = node.get_operator().get_output(out_id).dims()
+                mem = 1
+                for dim in dims:
+                    mem *= dim
+
+                # Add memory info
+                node_mem_info.append({
+                    "size": mem,
+                    "offset": mem_size
+                })
+
+                # Increment offset for the next layer
+                mem_size += mem
+            mem_info[node] = node_mem_info
+        else:
+            mem_info[node] = [] # No mem_info for Producers
+    return mem_size, mem_info
+
+
+def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder: Path, wrapping: bool = False) -> Tuple[int, dict]:
+    # The forward dims have to be computed outside this function
+    # The scheduling is also assumed to have been generated beforehand
+    # Otherwise, uncomment the following line
+    # scheduler.generate_scheduling()
+    # Generate the memory manager
+    # So far, Producers are not taken into account by the memory manager => inc_producers=False
+    mem_manager = scheduler.generate_memory(
+        inc_producers=False, wrap_around_buffer=wrapping)
+
+    # List of nodes which are connected at the input of the graph (None if the input is not connected)
+    nodes_at_input = [n[0] for n in scheduler.graph_view().inputs()]
+    # Use gnuplot to generate the log
+    if isinstance(stats_folder, str):
+        stats_folder = Path(stats_folder)
+    os.makedirs(str(Path(stats_folder) / "graph"), exist_ok=True)
+    mem_manager.log("memory_info")
+    os.chmod("memory_info_plot.gnu", 0o777)
+    os.system("./memory_info_plot.gnu")
+    shutil.move("memory_info", str(Path(stats_folder) / "graph" / "memory_info"))
+    shutil.move("memory_info_plot.png", str(
+        Path(stats_folder) / "graph" / "memory_info_plot.png"))
+    os.remove("memory_info_plot.gnu")
+    # In the export, we currently use a unified memory buffer whose size
+    # is determined by the
memory peak usage + mem_size = mem_manager.get_peak_usage() + mem_info = {} + + mem_planes = mem_manager.get_planes() + + for node in scheduler.get_static_scheduling(): + node_mem_info = [] + if node.type() == "Producer": + pass + elif node in nodes_at_input: + # Input memory management (suppose tensor ends with [:, channel, height, width])) + tensor = node.get_operator().get_output(0) + if tensor is None: + raise RuntimeError("Warning input producer not provided") + if len(tensor.dims()) < 3: + raise RuntimeError( + f"Input producer dimensions must be with [:, channel, height, width] but got {tensor.dims()} instead") + # TODO : use get_chan get_height and get_width function ! + node_mem_info.append({ + "size": tensor.dims()[-3], # Should be nb_channels + "offset": 0, # Suppose input data is stored outside the export function + # so the memory offset is not important to consider + "stride": tensor.dims()[-3], # Should be nb_channels + "length": tensor.dims()[-1], # Should be width + "count": tensor.dims()[-2], # Should be height + "cont_offset": 0, # Suppose input data is stored outside the export function + # so the memory offset is not important to consider + "cont_size": tensor.dims()[-1] * \ + tensor.dims()[-2] * \ + tensor.dims()[-3], # Size of input + "wrap_offset": 0, # No wrapping + "wrap_size": 0 # No wrapping + }) + else: + for out_id in range(node.get_nb_outputs()): + plane = mem_planes[node][out_id] + node_mem_info.append({ + "size": plane.size, + "offset": plane.get_contiguous_offset(), + "stride": plane.stride, + "length": plane.length, + "count": plane.count, + "cont_offset": plane.get_contiguous_offset(), + "cont_size": plane.get_contiguous_size(), + "wrap_offset": plane.get_wrapped_offset(), + "wrap_size": plane.get_wrapped_size() + }) + mem_info[node] = node_mem_info + return mem_size, mem_info + + diff --git a/aidge_core/show_graphview.py b/aidge_core/show_graphview.py new file mode 100644 index 0000000000000000000000000000000000000000..633298f10dbfdafe40022f88f741f82d2d35c681 --- /dev/null +++ b/aidge_core/show_graphview.py @@ -0,0 +1,225 @@ +import os +import json +import builtins +import aidge_core +import numpy as np +from pathlib import Path + +def _retrieve_operator_attrs(node : aidge_core.Node) -> dict[str, int, float, bool, None]: + """ + Returns the dictionary containing the attributes of a given Node. + + :param graph: A Node in the list of ordered nodes. + :type graph: aidge_core.Node + + :return: A dictionary with the Node's attributes. + :rtype: dict[str, int, float, bool, None] + """ + + if node.get_operator().attr is not None: + node_attr_dict = node.get_operator().attr.dict() + for key,value in node_attr_dict.items(): + if not type(value).__name__ in dir(builtins): + node_attr_dict[key] = value.name + + else: + node_attr_dict = {} + + return node_attr_dict + +def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_embed : bool, write_trainable_params_ext : bool, path_trainable_params : Path, params_file_format : str) -> dict[str, int, float, bool, None]: + """ + Creates a dictionary to store the information of a given ordered GraphView. + + :param ordered_nodes: A list with the GraphView's ordered nodes. + :type graph: list + :param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed). 
+ :type write_trainable_params_embed: bool + :param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file. + :type write_trainable_params_ext: bool + :param path_trainable_params: Path of the external file used to store the Nodes' trainable parameters. + :type path_trainable_params: Path + :param params_file_format: Format of the external file used to store the Nodes' trainable parameters. Options: ``npz`` or ``json``. Default : ``json``. Requires ``write_trainable_params_ext``. + :type params_file_format: str + + :return: A dictionary with the GraphView description. + :rtype: dict[str, int, float, bool, None] + """ + + graphview_dict = {'graph': []} + + for node in ordered_nodes: + + if node is not None: + node_dict = {'name' : node.name(), + 'optype' : node.get_operator().type(), + 'nb_inputs' : node.get_operator().nb_inputs(), + 'nb_outputs' : node.get_operator().nb_outputs()} + + inputs = [] + for input_idx in range(node.get_operator().nb_inputs()): + input_dict = {'dims' : node.get_operator().get_input(input_idx).dims(), + 'data_type' : str(node.get_operator().get_input(input_idx).dtype()), + 'data_format' : str(node.get_operator().get_input(input_idx).dformat())} + inputs.append(input_dict) + + node_dict['inputs'] = inputs + + outputs = [] + for output_idx in range(node.get_operator().nb_outputs()): + output_dict = {'dims' : node.get_operator().get_output(output_idx).dims(), + 'data_type' : str(node.get_operator().get_output(output_idx).dtype()), + 'data_format' : str(node.get_operator().get_output(output_idx).dformat())} + outputs.append(output_dict) + + node_dict['outputs'] = outputs + + parents = node.get_parents() + if None in parents: + if parents[0] is None: parents.append(parents.pop(0)) + else: + pass + + parents_inputs = [] + for parent in parents: + if parent is not None: + for output_idx in range(parent.get_operator().nb_outputs()): + for input_idx in range(node.get_operator().nb_inputs()): + if parent.get_operator().get_output(output_idx).dims() == node.get_operator().get_input(input_idx).dims(): + parents_inputs.append((parent.name(), input_idx)) + + elif parent is None: + for input_idx in list(range(node.get_operator().nb_inputs())): + if input_idx not in [item[1] for item in parents_inputs]: + parents_inputs.append((None, input_idx)) + + parents_inputs.sort(key=lambda x: x[1]) + node_dict['parents'] = parents_inputs + + children_outputs = [] + for child in node.get_children(): + for input_idx in range(child.get_operator().nb_inputs()): + for output_idx in range(node.get_operator().nb_outputs()): + if child.get_operator().get_input(input_idx).dims() == node.get_operator().get_output(output_idx).dims(): + children_outputs.append((child.name(), output_idx)) + node_dict['children'] = children_outputs + + # Check if my node is a metaop + attributes_dict = {} + if isinstance(node.get_operator(), aidge_core.MetaOperatorOp): + attributes_dict['micro_graph'] = [] + for micro_node in node.get_operator().get_micro_graph().get_nodes(): + micro_node_dict = {'name' : micro_node.name(), + 'optype' : micro_node.type()} + + micro_node_attr_dict = _retrieve_operator_attrs(micro_node) + micro_node_dict['attributes'] = micro_node_attr_dict + attributes_dict['micro_graph'].append(micro_node_dict) + + else: + node_attr_dict = _retrieve_operator_attrs(node) + attributes_dict.update(node_attr_dict) + + node_dict['attributes'] = attributes_dict + + if node.type() == 'Producer': + if write_trainable_params_ext: + + 
params_file_format.casefold() + + if params_file_format=='npz': + np.savez_compressed(Path(path_trainable_params, node.name()), **{node.name() : node.get_operator().get_output(0)}) + node_dict['tensor_data'] = Path(path_trainable_params, node.name() + '.npz') + + elif params_file_format=='json': + tensor = np.array(node.get_operator().get_output(0)) + tensor_dict = { + node.name() : + { + 'dims' : tensor.shape, + 'data_type' : str(tensor.dtype), + 'tensor_data' : tensor.tolist() + } + } + + with open(Path(path_trainable_params, node.name() + '.json'), 'w') as fp: + json.dump(tensor_dict, fp, indent=4) + + node_dict['tensor_data'] = Path(path_trainable_params, node.name() + '.json') + + else: + raise Exception("File format to write trainable parameters not recognized.") + + + elif write_trainable_params_embed: + node_dict['tensor_data'] = np.array(node.get_operator().get_output(0)).tolist() + + else: + pass + + graphview_dict['graph'].append(node_dict) + + else: # node is None + pass + + return graphview_dict + +def _write_dict_json(graphview_dict : dict[str, int, float, bool, None], json_path : str) -> None: + """ + Writes dictionary containing GraphView description to a JSON file. + + :param graphview_dict: A dictionary with the GraphView description. + :type graphview_dict: dict[str, int, float, bool, None] + :param json_path: Path to write JSON file. + :type json_path: str + """ + + with open(json_path, 'w') as fp: + json.dump(graphview_dict, fp, indent=4) + + return None + +def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainable_params_embed : bool = False, write_trainable_params_ext : bool = False, params_file_format : str = 'json') -> None: + """ + Generates the description for a GraphView in the JSON format. + + :param graph: A GraphView of Aidge. + :type graph: aidge_core.GraphView + :param json_path: Path to write JSON file. + :type json_path: Path + :param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed). + :type write_trainable_params_embed: bool, optional + :param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file. + :type write_trainable_params_ext: bool, optional + :param params_file_format: Format of the external file used to store the Nodes' trainable parameters. Options: ``npz`` or ``json``. Default : ``json``. Requires ``write_trainable_params_ext``. 
+ :type params_file_format: str, optional + """ + + if json_path.is_dir(): + json_path = (json_path.parent).joinpath('model.json') + + elif not json_path.is_dir(): + if json_path.suffix == '.json': + pass + else: + raise Exception('If ``json_path`` contains a filename it must be of JSON format.') + + if write_trainable_params_ext: + path_trainable_params = (json_path.parent).joinpath(json_path.stem + '_trainable_params/') + else: + path_trainable_params = Path() + + if isinstance(gview, aidge_core.GraphView): + # Sort GraphView in topological order + ordered_nodes = gview.get_ordered_nodes() + + # Create dict from GraphView + graphview_dict = _create_dict(ordered_nodes, write_trainable_params_embed, write_trainable_params_ext, path_trainable_params, params_file_format) + + # Write dict to JSON + _write_dict_json(graphview_dict, json_path) + + else: + raise Exception("Graph must be an instance of aidge_core.GraphView.") + + return None \ No newline at end of file diff --git a/aidge_core/testing/__init__.py b/aidge_core/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6966bbb6355c798727870ace37f99193fdda66a2 --- /dev/null +++ b/aidge_core/testing/__init__.py @@ -0,0 +1,12 @@ +# +# Do not add there auto import of submodules. +# +# The testing module contains utils and other tools +# related to tests, possibly reusable by other aidge +# components unit_tests. +# +# Import a specific module explicitly with for instance: +# import aidge_core.testing.utils +# or +# from aidge_core.testing.utils import (....,) +# diff --git a/aidge_core/testing/utils/__init__.py b/aidge_core/testing/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d69be0c5b80c7df229d8a24e6891970b8108fb14 --- /dev/null +++ b/aidge_core/testing/utils/__init__.py @@ -0,0 +1,10 @@ +# +# Should provide some general utility functions for testing. +# For instance: +# - filesystem +# - os dependencies +# - unit tests setup +# + +from .tree_cache import tree_update_from_cache +from .tree_utils import tree_move, tree_remove diff --git a/aidge_core/testing/utils/tree_cache.py b/aidge_core/testing/utils/tree_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..5b363c7c73ea36636a40c007b24cc244b10303c2 --- /dev/null +++ b/aidge_core/testing/utils/tree_cache.py @@ -0,0 +1,145 @@ +""" + +Provide tree_update_from_cache(path) method which +minimize changes in a generated tree when files are +re-generated but identical. + +It takes as argument a generated tree, and optionally a cache path. +Then it will update both the generated tree and the cache tree +to take the cache version of the files when identical, or the newly +generated one otherwise. + +This is in particular useful for speeding up iterative compilation +when generating a source/build system tree. 
+ +For instance: +- first time, one generates a tree of files: + - generated: path/{t1,t2,t3} +- then call tree_update_from_cache("path") + - will generate: __cache_path/{t1,t2,t3} + - and untouch: path/{t1,t2,t3} +- second time, re-generate a tree of file: + - say generated files are identical: path/{t1,t2,t3} +- then call tree_update_from_cache("path") + - will untouch in cache: __cache_path/{t1,t2,t3} + - and reset to previous timestamps files: path/{t1,t2,t3} +- third time, re-generate again with some changes: + - say t1 is identical, t2 content has changed and no t3: path/{t1,t2'} +- then call tree_update_from_cache("path") + - will update t2' and remove t3 in cache: __cache_path/{t1,t2'} + - and reset to previous timestamp t1: path/{t1,t2'} + +Note that by default the `dir`/__cache_`name` cache path is used +for a given path `dir`/`name`. +Though it is also possible to have the cache path inside the generated tree, +in this case use for instance: + + tree_update_from_cache(path, Path(path) / "__cache_src") + +For more evolved scenarii, specialize the provided FileTreeCache class. + +""" + + +from pathlib import Path +import shutil +import filecmp +from typing import Optional, Union, List + +from .tree_utils import tree_move, tree_remove + + +__all__ = [ + "FileTreeCache", + "tree_update_from_cache", +] + + +class FileTreeCache(): + """ + Class for implementation of the file tree cache. + Can be derived to changes for instance default cache name/tmp name prefixes + or to specialize for other contexts. + """ + default_cache_prefix = "__cache_" + default_tmp_cache_prefix = "__tmp_cache_" + default_tmp_prefix = "__tmp_" + + def __init__(self, + src_path: Union[str|Path], + cache_path: Optional[Union[str|Path]] = None + ) -> None: + self.src_path = Path(src_path).absolute() + self.cache_path = ( + Path(cache_path).absolute() + if cache_path is not None else + (self.src_path.parent / + f"{self.default_cache_prefix}{self.src_path.name}") + ) + ctx_msg = f"tree_cache: {src_path = }, {cache_path = }" + assert self.src_path != self.cache_path, f"src_path and cache_path must differ on {ctx_msg}" + assert not self.src_path.is_relative_to(self.cache_path), f"src_path must not be relative to cache_path on {ctx_msg}" + self._tmp_path = ( + self.src_path.parent / + f"{self.default_tmp_prefix}{self.src_path.name}") + self._tmp_cache_path = ( + self.src_path.parent / + f"{self.default_tmp_cache_prefix}{self.src_path.name}") + + @classmethod + def _copytree_or_cache(cls, src_dir: Path, dst_dir: Path, cache_dir: Path, dst_cache_dir: Path) -> None: + assert not dst_dir.exists() + assert not dst_cache_dir.exists() + assert src_dir.is_dir() + assert not cache_dir.exists() or cache_dir.is_dir() + assert not cache_dir.is_relative_to(src_dir) + + def copy_or_cache(src, dst): + base_src = Path(src).relative_to(src_dir) + cache_src = cache_dir / base_src + base_dst = Path(dst).relative_to(dst_dir) + cache_dst = dst_cache_dir / base_dst + cache_dst.parent.mkdir(parents=True, exist_ok=True) + if cache_src.exists() and filecmp.cmp(str(src), str(cache_src), shallow=False): + shutil.copy2(str(cache_src), str(cache_dst)) + shutil.copy2(str(cache_src), dst) + else: + shutil.copy2(src, str(cache_dst)) + shutil.copy2(src, dst) + shutil.copytree(str(src_dir), str(dst_dir), copy_function=copy_or_cache) + + def update_from_cache(self) -> None: + assert self.src_path.exists(), f"src path must exist before swapping with cache" + + # Move cache path apart first as it may be relative to source path + tree_move(self.cache_path, 
self._tmp_cache_path, ignore_missing=True, exist_ok=True) + # Move source path apart before recreating merged source tree + tree_move(self.src_path, self._tmp_path, exist_ok=True) + + # Manage the source/cache merge to the dst/dst_cahe with a variant of + # copytree. + self._copytree_or_cache( + src_dir=self._tmp_path, + dst_dir=self.src_path, + cache_dir=self._tmp_cache_path, + dst_cache_dir=self.cache_path, + ) + + # Remove tmp source path + tree_remove(self._tmp_path) + # Note that the tmp cache path may not exist + tree_remove(self._tmp_cache_path, ignore_missing=True) + + +def tree_update_from_cache( + src_path: Union[str|Path], + cache_path: Optional[Union[str|Path]] = None) -> None: + """ + Update from cache the current generation of a tree from the + older generations, preserving file stamps when files contents are identical. + + :param src_path: str or Path object to the generated tree + :param cache_path: optional str or Path object to the cache path, + or defaults to: `cache_path = src_path.parent / f"__cache_{src_path.name}"` + """ + FileTreeCache(src_path, cache_path).update_from_cache() diff --git a/aidge_core/testing/utils/tree_utils.py b/aidge_core/testing/utils/tree_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3a6b2aad88e16075ed64bee03ba8e8fa550376e2 --- /dev/null +++ b/aidge_core/testing/utils/tree_utils.py @@ -0,0 +1,65 @@ +""" + +Provide utility function for file trees manipulations. + +""" + +import shutil +from pathlib import Path +from typing import Union, Optional + + +__all__ = [ + "tree_move", + "tree_remove", +] + + +def tree_remove( + path: Union[str|Path], + ignore_missing: bool = False, +) -> None: + """ + Remove the full tree at path. + Optionally ignore if the path does not exist when ignore_missing is True. + + :param path: str or Path object to the directory path + :param ignore_missing: if True will return early is path does not exists + """ + path = Path(path) + ctx_msg = f"tree_remove: : {path = }" + assert ignore_missing or path.exists(), f"path must exists when ignore_missing is False on {ctx_msg}" + if ignore_missing and not path.exists(): + return + shutil.rmtree(path) + + +def tree_move( + src_path: Union[str|Path], + dst_path: Union[str|Path], + ignore_missing: bool = False, + exist_ok: bool = False, +) -> None: + """ + Move the whole src_path file tree to dst_path. + Optionally does nothing if the src path does not exists and ignore_missing is True. + Optionally the full dst_path will be removed first when exists_ok is True. 
+ + :param src_path: str or Path object to the source directory path + :param dst_path: str or Path object to the new path name for the source directory + :param ignore_missing: if True will return early is src_path does not exists + :param exist_ok: if True will first erase the new path name if it exists + """ + src_path = Path(src_path) + dst_path = Path(dst_path) + ctx_msg = f"tree_move: : {src_path = }, {dst_path = }" + assert ignore_missing or src_path.exists(), f"src_path must exists when ignore_missing is False on {ctx_msg}" + assert exist_ok or not dst_path.exists(), f"dst_path must not exists when exist_ok is False on {ctx_msg}" + assert src_path != dst_path, f"paths must not be identical on {ctx_msg}" + assert not dst_path.is_relative_to(src_path), f"dst_path must not be relative to src_path on {ctx_msg}" + assert not src_path.is_relative_to(dst_path), f"src_path must not be relative to dst_path on {ctx_msg}" + if ignore_missing and not src_path.exists(): + return + if exist_ok and dst_path.exists(): + shutil.rmtree(dst_path) + shutil.move(src_path, dst_path) diff --git a/aidge_core/unit_tests/test_export.py b/aidge_core/unit_tests/test_export.py index 5d2e700a86925d1455cdee83e7d40cd891e72ba6..d98a6fdbc20dd7e99169422205a4e680350aed27 100644 --- a/aidge_core/unit_tests/test_export.py +++ b/aidge_core/unit_tests/test_export.py @@ -8,8 +8,6 @@ http://www.eclipse.org/legal/epl-2.0. SPDX-License-Identifier: EPL-2.0 """ -import aidge_core -from aidge_core.utils import run_command import unittest import os import pathlib @@ -18,6 +16,10 @@ import subprocess import sys +import aidge_core +from aidge_core.utils import run_command +from aidge_core.testing.utils import tree_update_from_cache, tree_move, tree_remove + def initFiller(model): # Initialize parameters (weights and biases) for node in model.get_nodes(): @@ -26,6 +28,8 @@ def initFiller(model): value = prod_op.get_output(0) value.set_backend("cpu") tuple_out = node.output(0)[0] + # Force seed before filler for reproducibility + aidge_core.random.Generator.set_seed(0) # No conv in current network if tuple_out[0].type() == "Conv" and tuple_out[1] == 1: # Conv weight @@ -43,22 +47,6 @@ def initFiller(model): pass -def clean_dir(dir: pathlib.Path) -> None: - if not dir.is_dir(): - print(f"Error : directory {dir} doesn't exist. Exiting clean_dir().") - return - for filename in os.listdir(dir): - file_path = os.path.join(dir, filename) - try: - if os.path.isfile(file_path) or os.path.islink(file_path): - os.unlink(file_path) - elif os.path.isdir(file_path): - shutil.rmtree(file_path) - except Exception as e: - print(f"Failed to delete {file_path}. 
Reason: {e}") - return - - class test_export(unittest.TestCase): """Test aidge export""" @@ -66,6 +54,10 @@ class test_export(unittest.TestCase): self.EXPORT_PATH: pathlib.Path = pathlib.Path("dummy_export") self.BUILD_DIR: pathlib.Path = self.EXPORT_PATH / "build" self.INSTALL_DIR: pathlib.Path = (self.EXPORT_PATH / "install").absolute() + self.TMP_BUILD_DIR: pathlib.Path = ( + self.EXPORT_PATH.parent / + f"__tmp_{self.EXPORT_PATH.name}_build" + ) def tearDown(self): pass @@ -76,28 +68,41 @@ class test_export(unittest.TestCase): model = aidge_core.sequential( [ aidge_core.FC( - in_channels=32 * 32 * 3, out_channels=512, name="InputNode" + in_channels=32 * 32 * 3, out_channels=64, name="InputNode" ), aidge_core.ReLU(name="Relu0"), - aidge_core.FC(in_channels=512, out_channels=256, name="FC1"), + aidge_core.FC(in_channels=64, out_channels=32, name="FC1"), aidge_core.ReLU(name="Relu1"), - aidge_core.FC(in_channels=256, out_channels=128, name="FC2"), + aidge_core.FC(in_channels=32, out_channels=16, name="FC2"), aidge_core.ReLU(name="Relu2"), - aidge_core.FC(in_channels=128, out_channels=10, name="OutputNode"), + aidge_core.FC(in_channels=16, out_channels=10, name="OutputNode"), ] ) initFiller(model) + model.forward_dims([[1, 32*32*3]]) - # Export model - aidge_core.export(self.EXPORT_PATH, model) + # Preserve previously generated build if present + tree_move(self.BUILD_DIR, self.TMP_BUILD_DIR, ignore_missing=True, exist_ok=True) + # Clean install dir + tree_remove(self.INSTALL_DIR, ignore_missing=True) - self.assertTrue( - self.EXPORT_PATH.is_dir(), "Export folder has not been generated" + # Export model + aidge_core.serialize_to_cpp(self.EXPORT_PATH, model) + self.assertTrue(self.EXPORT_PATH.is_dir(), "Export folder has not been generated") + # Add other source files + shutil.copyfile(pathlib.Path(__file__).parent / "static/main.cpp", self.EXPORT_PATH / "main.cpp") + + # Use cache if any, put cache inside export dir + # such that cleaning export dir also cleans the cache + tree_update_from_cache( + self.EXPORT_PATH, + cache_path=self.EXPORT_PATH / "__cache_export" ) - os.makedirs(self.BUILD_DIR, exist_ok=True) - clean_dir(self.BUILD_DIR) # if build dir existed already ensure its emptyness - clean_dir(self.INSTALL_DIR) + + # Move back preserved build dir if any and ensure build dir exists + tree_move(self.TMP_BUILD_DIR, self.BUILD_DIR, ignore_missing=True) + self.BUILD_DIR.mkdir(exist_ok=True) # Test compilation of export search_path = ( @@ -106,11 +111,6 @@ class test_export(unittest.TestCase): else os.environ["AIDGE_INSTALL"] ) - shutil.copyfile( - pathlib.Path(__file__).parent / "static/main.cpp", - self.EXPORT_PATH / "main.cpp", - ) - ########################## # CMAKE EXPORT try: diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py index 26d60f2fbaf0f3903baf191cf0a2ad5550fb3275..4d3b0a34485e7e0d7e0323f2fc121a83b1882de9 100644 --- a/aidge_core/unit_tests/test_impl.py +++ b/aidge_core/unit_tests/test_impl.py @@ -49,8 +49,8 @@ class test_OperatorImpl(unittest.TestCase): """Test registering an implementation """ global GLOBAL_CPT - aidge_core.register_ConvOp2D("cpu", testImpl) - self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D()) + aidge_core.register_Conv2DOp("cpu", testImpl) + self.assertTrue("cpu" in aidge_core.get_keys_Conv2DOp()) conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0") conv.get_operator().set_backend("cpu") conv.get_operator().set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3))) @@ -61,9 +61,9 @@ class 
test_OperatorImpl(unittest.TestCase): """Test registering an implementation """ global GLOBAL_CPT - aidge_core.register_ConvOp2D("cpu", testImpl) + aidge_core.register_Conv2DOp("cpu", testImpl) aidge_core.register_ProducerOp("cpu", testImpl) - self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D()) + self.assertTrue("cpu" in aidge_core.get_keys_Conv2DOp()) conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0") model = aidge_core.sequential([conv]) model.set_backend("cpu") diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py index 8d6f2686d9010ac4ebed80cd04f74effe763e977..21b62e92b501215fc51651b7ca7a7bcb5237b027 100644 --- a/aidge_core/unit_tests/test_operator_binding.py +++ b/aidge_core/unit_tests/test_operator_binding.py @@ -101,7 +101,7 @@ class test_operator_binding(unittest.TestCase): self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89) op = aidge_core.GenericOperatorOp("any_type", 1,0,1) - with self.assertRaises(RuntimeError): + with self.assertRaises(IndexError): op.attr.something op.attr.something = aidge_core.DynamicAttributes() diff --git a/aidge_core/unit_tests/test_recipes.py b/aidge_core/unit_tests/test_recipes.py index c8dd4c727fbaf8224e8d04111a5054caeb5e5c99..f4dd0220ecdc5950e1b1dcef0d8bf2d4782216bf 100644 --- a/aidge_core/unit_tests/test_recipes.py +++ b/aidge_core/unit_tests/test_recipes.py @@ -46,9 +46,9 @@ class test_recipes(unittest.TestCase): def test_fuse_matmul_add(self): matmul0 = aidge_core.MatMul(name="MatMul0") - add0 = aidge_core.Add(2, name="Add0") + add0 = aidge_core.Add(name="Add0") matmul1 = aidge_core.MatMul(name="MatMul1") - add1 = aidge_core.Add(2, name="Add1") + add1 = aidge_core.Add(name="Add1") w0 = aidge_core.Producer([1, 1], name="W0") w0.add_child(matmul0, 0, 0) b0 = aidge_core.Producer([1], name="B0") diff --git a/aidge_core/unit_tests/test_show_graphview.py b/aidge_core/unit_tests/test_show_graphview.py new file mode 100644 index 0000000000000000000000000000000000000000..4c68e93e39a543e96c2b664dbe554660bf37cc91 --- /dev/null +++ b/aidge_core/unit_tests/test_show_graphview.py @@ -0,0 +1,129 @@ +import json +import tempfile +import unittest +import builtins +import aidge_core +from pathlib import Path +from aidge_core.show_graphview import gview_to_json + +def create_gview(): + # Create a LeNet-like model + gview = aidge_core.sequential([aidge_core.PaddedConv2D(in_channels=1, out_channels=6, kernel_dims=[5,5], name='feature_feature_0_Conv', stride_dims=[1,1], padding_dims = [2,2,2,2]), + aidge_core.ReLU(name='feature_feature_1_Relu'), + aidge_core.MaxPooling2D(kernel_dims=[2,2], stride_dims=[2,2], ceil_mode=0, name='feature_feature_2_MaxPool'), + aidge_core.Conv2D(in_channels=6, out_channels=16, kernel_dims=[5,5], name='feature_feature_3_Conv', stride_dims=[1,1], dilation_dims = [1,1]), + aidge_core.ReLU(name='feature_feature_4_Relu'), + aidge_core.MaxPooling2D(kernel_dims=[2,2], stride_dims=[2,2], ceil_mode=0, name='feature_feature_5_MaxPool'), + aidge_core.FC(in_channels=400, out_channels=120, name='classifier_classifier_1_Gemm'), + aidge_core.ReLU(name='classifier_classifier_2_Relu'), + aidge_core.FC(in_channels=120, out_channels=84, name='classifier_classifier_3_Gemm'), + aidge_core.ReLU(name='classifier_classifier_4_Relu'), + aidge_core.FC(in_channels=84, out_channels=10, name='classifier_classifier_5_Gemm'), + ]) + + # Fill Producers + for node in gview.get_nodes(): + if node.type() == "Producer": + prod_op = node.get_operator() + value = prod_op.get_output(0) + 
value.set_backend("cpu") + tuple_out = node.output(0)[0] + + if (tuple_out[0].type() == "Conv" or tuple_out[0].type() == "PaddedConv") and tuple_out[1]==1: + # Conv weight + aidge_core.xavier_uniform_filler(value) + elif tuple_out[0].type() == "Conv" and tuple_out[1]==2: + # Conv bias + aidge_core.constant_filler(value, 0.01) + elif tuple_out[0].type() == "FC" and tuple_out[1]==1: + # FC weight + aidge_core.normal_filler(value) + elif tuple_out[0].type() == "FC" and tuple_out[1]==2: + # FC bias + aidge_core.constant_filler(value, 0.01) + else: + pass + + # Compile model + gview.forward_dims([[1, 1, 28, 28]]) + gview.set_datatype(aidge_core.dtype.float32) + + return gview + +class test_show_gview(unittest.TestCase): + """Test aidge functionality to show GraphView. + """ + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_gview_to_json(self): + + gview = create_gview() + + # Create temporary file to store JSON model description + model_description_file = tempfile.NamedTemporaryFile(mode="w+", suffix='.json') + + gview_to_json(gview, Path(model_description_file.name)) + + # Load JSON + with open(model_description_file.name, 'r') as fp: + model_json = json.load(fp) + + # Get list of nodes of Aidge graphview + gview_ordered_nodes = gview.get_ordered_nodes() + + # Iterate over the list of ordered nodes and the corresponding JSON + self.assertEqual(len(gview_ordered_nodes), len(model_json['graph'])) + + for node_gview, node_json in zip(gview_ordered_nodes, model_json['graph']): + + self.assertEqual(node_gview.get_operator().type(), node_json['optype']) + self.assertEqual(node_gview.get_operator().nb_inputs(), node_json['nb_inputs']) + self.assertEqual(node_gview.get_operator().nb_outputs(), node_json['nb_outputs']) + + self.assertEqual(node_gview.get_operator().nb_inputs(), len(node_json['inputs'])) + for input_idx in range(node_gview.get_operator().nb_inputs()): + self.assertEqual(node_gview.get_operator().get_input(input_idx).dims(), node_json['inputs'][input_idx]['dims']) + self.assertEqual(str(node_gview.get_operator().get_input(input_idx).dtype()), node_json['inputs'][input_idx]['data_type']) + self.assertEqual(str(node_gview.get_operator().get_input(input_idx).dformat()), node_json['inputs'][input_idx]['data_format']) + + self.assertEqual(node_gview.get_operator().nb_outputs(), len(node_json['outputs'])) + for output_idx in range(node_gview.get_operator().nb_outputs()): + self.assertEqual(node_gview.get_operator().get_output(output_idx).dims(), node_json['outputs'][output_idx]['dims']) + self.assertEqual(str(node_gview.get_operator().get_output(output_idx).dtype()), node_json['outputs'][output_idx]['data_type']) + self.assertEqual(str(node_gview.get_operator().get_output(output_idx).dformat()), node_json['outputs'][output_idx]['data_format']) + + self.assertEqual(len(node_gview.get_parents()), len(node_json['parents'])) + self.assertEqual(len(node_gview.get_children()), len(node_json['children'])) + + if not hasattr(node_gview.get_operator(), 'get_micro_graph'): + try: + self.assertEqual(len(node_gview.get_operator().attr.dict()), len(node_json['attributes'])) + self.assertDictEqual(node_gview.get_operator().attr.dict(), node_json['attributes']) + + except AttributeError: + self.assertIsNone(node_gview.get_operator().attr) and self.assertFalse(node_json['attributes']) + + elif hasattr(node_gview.get_operator(), 'get_micro_graph'): + + self.assertEqual(len(node_gview.get_operator().get_micro_graph().get_nodes()), len(node_json['attributes']['micro_graph'])) + + for 
micro_node_gview in node_gview.get_operator().get_micro_graph().get_nodes(): + for micro_node_json in node_json['attributes']['micro_graph']: + if micro_node_gview.get_operator().type() == micro_node_json['optype']: + + for key, value in micro_node_gview.get_operator().attr.dict().items(): + if not type(value).__name__ in dir(builtins): + # Replace original value by its name (str) because value is of a type that could not be written to the JSON + # Cannot update this dict inplace : micro_node_gview.get_operator().attr.dict().update({key : value.name}) + temp_mnode_dict = micro_node_gview.get_operator().attr.dict() + temp_mnode_dict.update({key : value.name}) + self.assertDictEqual(temp_mnode_dict, micro_node_json['attributes']) + +if __name__ == '__main__': + unittest.main() + diff --git a/aidge_core/unit_tests/test_topological_order.py b/aidge_core/unit_tests/test_topological_order.py index 8e7f2e2d9b9770c2fae1e5c2812ba33113589134..01a69409e86c486ec2fb8c8bdb2a18ab0e3d9c1c 100644 --- a/aidge_core/unit_tests/test_topological_order.py +++ b/aidge_core/unit_tests/test_topological_order.py @@ -29,7 +29,7 @@ class test_topological_order(unittest.TestCase): loop0.get_operator().set_back_edges({1}) assert not loop0.get_operator().is_back_edge(0) assert loop0.get_operator().is_back_edge(1) - add0 = aidge_core.Add(2, "add0") + add0 = aidge_core.Add("add0") loop0.add_child(add0, 0, 1) add0.add_child(loop0, 0, 1) @@ -50,7 +50,7 @@ class test_topological_order(unittest.TestCase): loop0.get_operator().set_back_edges({0}) assert not loop0.get_operator().is_back_edge(1) assert loop0.get_operator().is_back_edge(0) - add0 = aidge_core.Add(2, "add0") + add0 = aidge_core.Add("add0") loop0.add_child(add0, 0, 1) add0.add_child(loop0, 0, 0) diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp index cadd8c85ca541862cc6f298fa055713a6f65e3ed..6088cf31c91cdd1b939bb4508850a9d0f798e5d2 100644 --- a/include/aidge/aidge.hpp +++ b/include/aidge/aidge.hpp @@ -41,6 +41,7 @@ #include "aidge/operator/AvgPooling.hpp" #include "aidge/operator/BatchNorm.hpp" #include "aidge/operator/BitShift.hpp" +#include "aidge/operator/Clip.hpp" #include "aidge/operator/Concat.hpp" #include "aidge/operator/Conv.hpp" #include "aidge/operator/ConvDepthWise.hpp" @@ -51,6 +52,7 @@ #include "aidge/operator/GenericOperator.hpp" #include "aidge/operator/GlobalAveragePooling.hpp" #include "aidge/operator/GridSample.hpp" +#include "aidge/operator/Heaviside.hpp" #include "aidge/operator/MatMul.hpp" #include "aidge/operator/MaxPooling.hpp" #include "aidge/operator/MetaOperator.hpp" @@ -65,6 +67,7 @@ #include "aidge/operator/ReLU.hpp" #include "aidge/operator/Reshape.hpp" #include "aidge/operator/Resize.hpp" +#include "aidge/operator/Round.hpp" #include "aidge/operator/Shape.hpp" #include "aidge/operator/Scaling.hpp" #include "aidge/operator/Slice.hpp" diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp index 4af7da64ebca3c02eb9aabca1f2dad88fd8b9829..649898dd130d5811f65f65af87bc117d3502647c 100644 --- a/include/aidge/backend/OperatorImpl.hpp +++ b/include/aidge/backend/OperatorImpl.hpp @@ -28,7 +28,7 @@ class Operator; /** * @brief ImplSpec stores the requirements or the specifications of an implementation. 
- * + * */ struct ImplSpec { struct IOSpec { @@ -73,10 +73,15 @@ inline bool operator<(const ImplSpec& lhs, const ImplSpec& rhs) { || (lhs.inputs == rhs.inputs && lhs.outputs == rhs.outputs && lhs.attrs < rhs.attrs); } + +inline bool operator==(const ImplSpec& lhs, const ImplSpec& rhs) { + return !(lhs < rhs) && !(rhs < lhs); +} + /** * @brief Impl stores the details of a specific implementation. * It is associated to a ImplSpec in a registry. - * + * */ template <class FwdFunc, class BwdFunc> struct Impl { @@ -108,7 +113,7 @@ public: /** * @brief Get the operator required implementation specification, according * to the current operator configuration. - * + * */ ImplSpec getRequiredSpec() const; @@ -116,15 +121,15 @@ public: * @brief Get the best implementation that matches \p requiredSpecs. * If no implementation matches \p requiredSpecs, \p requiredSpecs is * returned. - * + * */ ImplSpec getBestMatch(const ImplSpec& requiredSpecs) const; /** - * @brief Get an adapted meta operator corresponding to the required + * @brief Get an adapted meta operator corresponding to the required * specifications \p requiredSpecs from the implementation specifications * \p spec. - * + * * @param spec Implementation specification * @param requiredSpecs Required specifications * @return std::shared_ptr<Node> Adapted meta op or nullptr @@ -132,12 +137,12 @@ public: std::shared_ptr<Node> getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const; /** - * @brief Get the best adapted meta operator corresponding to the required + * @brief Get the best adapted meta operator corresponding to the required * specifications \p requiredSpecs. * The best adaptation is the one with the lowest overhead cost. - * Currently, it is the one requiring the least number of additionnal + * Currently, it is the one requiring the least number of additionnal * operators to match the available implementations. 
- * + * * @param requiredSpecs Required specifications * @return std::shared_ptr<Node> Adapted meta op or nullptr */ @@ -147,7 +152,7 @@ public: protected: virtual std::shared_ptr<ProdConso> getProdConso() const; - virtual std::set<ImplSpec> getAvailableImplSpecs() const; + virtual std::vector<ImplSpec> getAvailableImplSpecs() const; bool checkIOSpec(const ImplSpec::IOSpec& required, const ImplSpec::IOSpec& spec) const; const Operator &mOp; diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp index efdb06c4ac6d0e6898d899cc639a88d1da301000..c025ad770809864ac4e2d2c38e616e3d95e3d96a 100644 --- a/include/aidge/graph/GraphView.hpp +++ b/include/aidge/graph/GraphView.hpp @@ -424,16 +424,19 @@ public: addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor); } - inline void updateNodeName(NodePtr nodeToRename, const std::string& newName){ - const std::string& oldName = nodeToRename->name(); - AIDGE_ASSERT(mNodeRegistry.find(newName) != mNodeRegistry.end(), "Name {} is already used in graph {}.", newName, name()); - - if (nodeToRename->name() != ""){ // Case node already had a name - AIDGE_ASSERT(mNodeRegistry.find(oldName) != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted !", oldName, name()); - mNodeRegistry[newName] = mNodeRegistry[oldName]; - mNodeRegistry.erase(oldName); - }else{ // Case node did not had a name - mNodeRegistry[newName] = nodeToRename; + inline void updateNodeName(const std::shared_ptr<Node>& node, const std::string& newName){ + if (!newName.empty()) { + auto itNew = mNodeRegistry.insert(std::make_pair(newName, node)); + if (!itNew.second) { + Log::notice("Replacing existing node name in graph node name registry: {}", newName); + (itNew.first)->second = node; + } + } + + if (!node->name().empty()) { + const auto it = mNodeRegistry.find(node->name()); + AIDGE_ASSERT(it != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted !", node->name(), name()); + mNodeRegistry.erase(it); } } diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp index 32932fa6f598737644f74d4e2ce5da89557b5d3d..51cc9c444edf03febf4416149e9160df0bbfca9c 100644 --- a/include/aidge/graph/Node.hpp +++ b/include/aidge/graph/Node.hpp @@ -54,7 +54,7 @@ private: return sharedA < sharedB; // shared_ptr has a valid comparison operator } }; - std::string mName; /** Name of the Node. Should be unique. */ + std::shared_ptr<DynamicAttributes> mAttrs; std::set<std::weak_ptr<GraphView>, weakCompare> mViews; /** Set of pointers to GraphView instances including this Node instance. */ const std::shared_ptr<Operator> mOperator; // Pointer to the associated Operator @@ -70,6 +70,14 @@ private: public: Node() = delete; + /** + * @brief Construct a new Node object associated with the input Operator. + * @param op Operator giving the Node its number of connections. + * @param attrs Attributes for the Node. + */ + Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs); +// Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs); + /** * @brief Construct a new Node object associated with the input Operator. * @param op Operator giving the Node its number of connections. @@ -116,11 +124,14 @@ public: // INNER /////////////////////////////////////////////////////// + inline std::shared_ptr<DynamicAttributes> attributes() const { + return mAttrs; + } /** * @brief Name of the Node. 
* @return std::string */ - inline std::string name() const noexcept { return mName; } + inline std::string name() const noexcept { return mAttrs->getAttr<std::string>("name"); } /** * @brief Set the Node name. @@ -164,7 +175,8 @@ public: * @brief Get the Operator object of the Node. * @return std::shared_ptr<Operator> */ - inline std::shared_ptr<Operator> getOperator() const { return mOperator; } + inline std::shared_ptr<Operator> getOperator() const { return (*mOperator)(mAttrs); } +// inline std::shared_ptr<Operator> getOperator() const { return mOperator; } /////////////////////////////////////////////////////// // TENSOR MANAGEMENT diff --git a/include/aidge/hook/ExecTime.hpp b/include/aidge/hook/ExecTime.hpp deleted file mode 100644 index 0964d9575b7ad345d5e07c9f19c7e56a3b69c813..0000000000000000000000000000000000000000 --- a/include/aidge/hook/ExecTime.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/** - * \file execTime.hpp - * \brief execTime structure - * \version file 1.0.0 - * \date Creation 27 June 2023 - * \date 27 June 2023 - * \par ChangeLog - * \par - * v1.0.0, 27 June 2023<br> - * - Initial version. - * \author mn271187, ik243221 - * \copyright - * Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All - * rights reserved. - */ - -#ifndef execTime_H_ -#define execTime_H_ - -#include "aidge/operator/Operator.hpp" -#include "aidge/hook/Hook.hpp" -#include <memory> -#include <chrono> -#include <vector> - -namespace Aidge { - -class ExecTime : public Hook { -private: - std::vector<std::chrono::high_resolution_clock::time_point> registeredTimes = std::vector<std::chrono::high_resolution_clock::time_point>(); -public: - ExecTime(const std::shared_ptr<Operator> op) : Hook(op) {} - ~ExecTime() = default; - - void call() override final { - registeredTimes.push_back(std::chrono::high_resolution_clock::now()); - } - - static std::shared_ptr<ExecTime> create(const std::shared_ptr<Operator> op) - { - return std::make_shared<ExecTime>(op); - } - - std::vector<std::chrono::high_resolution_clock::time_point> getTimes() { - return registeredTimes; - } - - std::chrono::high_resolution_clock::time_point getTime(size_t idx) { - return registeredTimes[idx]; - } - -}; - -namespace { - static Registrar<Hook> registrarHook_ExecTime({"execution_time"}, Aidge::ExecTime::create); -} -} - -#endif /* execTime_H_ */ \ No newline at end of file diff --git a/include/aidge/hook/Hook.hpp b/include/aidge/hook/Hook.hpp deleted file mode 100644 index 5edf231d51f913f58351b4817e145b5f48953ddd..0000000000000000000000000000000000000000 --- a/include/aidge/hook/Hook.hpp +++ /dev/null @@ -1,41 +0,0 @@ -/** - * \file Hook.hpp - * \brief Hook structure - * \version file 1.0.0 - * \date Creation 27 June 2023 - * \date 27 June 2023 - * \par ChangeLog - * \par - * v1.0.0, 27 June 2023<br> - * - Initial version. - * \author mn271187, ik243221 - * \copyright - * Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All - * rights reserved. 
- */ - -#ifndef Hook_H_ -#define Hook_H_ - -#include "aidge/utils/Attributes.hpp" -#include "aidge/utils/Registrar.hpp" -#include <memory> - -namespace Aidge { - -class Operator; -class Hook : public Registrable<Hook, std::tuple<std::string>, std::function<std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>> { -//class Hook : public Registrable<Hook, std::tuple<std::string>, std::function<std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>>{ -protected: - const std::shared_ptr<Operator> mOperator; - -public: - Hook(std::shared_ptr<Operator> op) : mOperator(op) {} - virtual ~Hook() = default; - - virtual void call() = 0; - -}; -} - -#endif /* Hook_H_ */ diff --git a/include/aidge/hook/OutputRange.hpp b/include/aidge/hook/OutputRange.hpp deleted file mode 100644 index 355f4aaa15a6bcd77d99ec2dad344a45f8f9edc0..0000000000000000000000000000000000000000 --- a/include/aidge/hook/OutputRange.hpp +++ /dev/null @@ -1,74 +0,0 @@ -/** - * \file execTime.hpp - * \brief execTime structure - * \version file 1.0.0 - * \date Creation 27 June 2023 - * \date 27 June 2023 - * \par ChangeLog - * \par - * v1.0.0, 27 June 2023<br> - * - Initial version. - * \author ik243221 - * \copyright - * Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All - * rights reserved. - */ - -#ifndef AIDGE_CORE_HOOK_OUTPUTRANGE_H_ -#define AIDGE_CORE_HOOK_OUTPUTRANGE_H_ - -#include "aidge/operator/Operator.hpp" -#include "aidge/hook/Hook.hpp" -#include <memory> -#include <chrono> -#include <vector> -#include <cmath> -namespace Aidge { - -class OutputRange : public Hook { -private: - std::vector<float> registeredOutputs = std::vector<float>(); -public: - OutputRange(const std::shared_ptr<Operator> op) : Hook(op) {} - ~OutputRange() = default; - - void call() override final { - //std::cout << "call() outputRange hook " << std::endl; - //this assumes there is only 1 output possible - std::shared_ptr<Tensor> tensor = mOperator->getOutput(0); - //tensor->print(); - //std::cout << "call() outputRange hook : tensor printed" << std::endl; - float max_value = 0.; - float * casted_tensor = static_cast<float *>(tensor->getImpl()->rawPtr()); - //find the absolute max value in the tensor, save it to registered outputs - for(std::size_t i = 0; i < tensor->size(); ++i) { - //std::cout << "call() outputRange hook : casted_tensor[i] = " << casted_tensor[i] << std::endl; - if(std::abs(casted_tensor[i]) > max_value){ - max_value = std::abs(casted_tensor[i]); - } - } - //std::cout << "call() outputRange hook : max_value = " << max_value << std::endl; - registeredOutputs.push_back(max_value); - } - - static std::shared_ptr<OutputRange> create(const std::shared_ptr<Operator> op) - { - return std::make_shared<OutputRange>(op); - } - - std::vector<float> getOutputs() { - return registeredOutputs; - } - - float getOutput(size_t idx) { - return registeredOutputs[idx]; - } - -}; - -namespace { - static Registrar<Hook> registrarHook_OutputRange({"output_range"}, Aidge::OutputRange::create); -} -} - -#endif /* outputRange_H_ */ \ No newline at end of file diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp index daf50771703d6608dbbe90364aac8667aefbdd1d..827fc0c2732695364aa2393692d7040b8b1a0e9f 100644 --- a/include/aidge/operator/Add.hpp +++ b/include/aidge/operator/Add.hpp @@ -18,8 +18,9 @@ #include "aidge/operator/OperatorTensor.hpp" #include "aidge/graph/Node.hpp" -#include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Types.h" +#include 
"aidge/utils/Registrar.hpp" namespace Aidge { @@ -28,7 +29,7 @@ class Add_Op : public OperatorTensor, public: static const std::string Type; - Add_Op(const IOIndex_t nbIn); + Add_Op(); /** * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). @@ -65,7 +66,7 @@ public: } }; -std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = ""); +std::shared_ptr<Node> Add(const std::string& name = ""); } #endif /* AIDGE_CORE_OPERATOR_ADD_H_ */ diff --git a/include/aidge/operator/Atan.hpp b/include/aidge/operator/Atan.hpp new file mode 100644 index 0000000000000000000000000000000000000000..f9c7d09e7a49cd8687166006eea75510aeda57ee --- /dev/null +++ b/include/aidge/operator/Atan.hpp @@ -0,0 +1,52 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CORE_OPERATOR_ATAN_H_ +#define AIDGE_CORE_OPERATOR_ATAN_H_ + +#include <cassert> +#include <memory> +#include <vector> + +#include "aidge/utils/Registrar.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/graph/Node.hpp" +#include "aidge/utils/Types.h" + +namespace Aidge { + +class Atan_Op : public OperatorTensor, + public Registrable<Atan_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Atan_Op&)>> { +public: + static const std::string Type; + + Atan_Op(); + + Atan_Op(const Atan_Op& op); + + std::shared_ptr<Operator> clone() const override; + + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; + std::set<std::string> getAvailableBackends() const override; + + static const std::vector<std::string> getInputsName(){ + return {"data_input"}; + } + static const std::vector<std::string> getOutputsName(){ + return {"data_output"}; + } +}; + +std::shared_ptr<Node> Atan(const std::string& name = ""); +} + +#endif /* AIDGE_CORE_OPERATOR_ATAN_H_ */ diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp new file mode 100644 index 0000000000000000000000000000000000000000..aa37b50320e9deaa94e83ff7cc3578745b560228 --- /dev/null +++ b/include/aidge/operator/Clip.hpp @@ -0,0 +1,122 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CORE_OPERATOR_CLIP_H_ +#define AIDGE_CORE_OPERATOR_CLIP_H_ + +#include <memory> +#include <vector> +#include <limits> + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + +namespace Aidge { + +enum class ClipAttr { Min, Max }; + + +class Clip_Op : public OperatorTensor, + public Registrable<Clip_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Clip_Op&)>> { + +public: + static const std::string Type; +private: + using Attributes_ = StaticAttributes<ClipAttr, float, float>; + template <ClipAttr e> using attr = typename Attributes_::template attr<e>; + const std::shared_ptr<Attributes_> mAttributes; + +public: + Clip_Op() = delete; + Clip_Op(float min,float max) : + OperatorTensor(Type, {InputCategory::Data,InputCategory::OptionalData,InputCategory::OptionalData}, 1), + mAttributes(std::make_shared<Attributes_>(attr<ClipAttr::Min>(min), attr<ClipAttr::Max>(max))) + {} + /** + * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). + * @param op Operator to copy. + */ + Clip_Op(const Clip_Op& op) + : OperatorTensor(op), + mAttributes(op.mAttributes) + + { + if (op.mImpl){ + SET_IMPL_MACRO(Clip_Op, *this, op.backend()); + }else{ + mImpl = nullptr; + } + } + + /** + * @brief Clone the operator using its copy-constructor. + * @see Operator::Clip_Op + */ + std::shared_ptr<Operator> clone() const override + { + return std::make_shared<Clip_Op>(*this); + } + bool dimsForwarded() const override final; + bool forwardDims(bool allowDataDependency = false) override final; + + /** + * @brief Setter to specify the backend to use + */ + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; + inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } + /** + * @brief Getter and Setter for min attribute value + * @return float& + */ + inline float& min() const noexcept { return mAttributes->getAttr<ClipAttr::Min>(); } + /** + * @brief Getter and Setter for max attribute value + * @return float& + */ + inline float& max() const noexcept { return mAttributes->getAttr<ClipAttr::Max>(); } + + std::set<std::string> getAvailableBackends() const override; + + static const std::vector<std::string> getInputsName(){ + return {"data_input","min_empty_tensor","max_empty_tensor"}; + } + static const std::vector<std::string> getOutputsName(){ + return {"data_output"}; + } +}; + /** + * @brief The Clip operator is a tensor operator that performs a clipping operation on tensor elements. + This class allows limiting tensor values to a specified range, defined by the min + and max parameters (Tensors of empty shapes). 
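+ * A minimal usage sketch (illustrative only, not part of the original header),
+ * assuming the Clip() factory declared below:
+ * @code
+ * auto clip = Clip("clip0", 0.0f, 6.0f); // keeps every element of the input in the range [0, 6]
+ * @endcode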
Values outside this range are replaced by the corresponding limit values + When ‘min’ is greater than ‘max’, the clip operator sets all the ‘input’ values to the value of ‘max’ + * @param[in] name Name of the node + * @param[in] min Min clip value as attribute + * @param[in] max Max clip value as attribute + * @return std::shared_ptr<Node> + */ + std::shared_ptr<Aidge::Node> Clip( + const std::string &name = "", + float min = std::numeric_limits<float>::lowest(), + float max = std::numeric_limits<float>::max() + ); + +} +namespace { +template <> +const char* const EnumStrings<Aidge::ClipAttr>::data[] + = {"min", "max"}; +} + +#endif /* AIDGE_CORE_OPERATOR_CLIP_H_ */ diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp index 2812da066887d63133ede2d69b5804f0b8a8101e..89b2c06a52f180ffb35363cb6ab07d4242e12033 100644 --- a/include/aidge/operator/GenericOperator.hpp +++ b/include/aidge/operator/GenericOperator.hpp @@ -64,7 +64,7 @@ public: inline T& getAttr(const std::string& name) { return mAttributes -> template getAttr<T>(name); } template <class T> - inline const T& getAttr(const std::string& name) const + inline T getAttr(const std::string& name) const { return mAttributes -> template getAttr<T>(name); } ///\brief Add a new Attribute, identified by its name. If it already exists, asserts. diff --git a/include/aidge/operator/Heaviside.hpp b/include/aidge/operator/Heaviside.hpp new file mode 100644 index 0000000000000000000000000000000000000000..65c7f341a1c5aa12eeada2165e459cc1c933e327 --- /dev/null +++ b/include/aidge/operator/Heaviside.hpp @@ -0,0 +1,107 @@ +/******************************************************************************** + * Copyright (c) 2024 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CORE_OPERATOR_HEAVISIDE_H_ +#define AIDGE_CORE_OPERATOR_HEAVISIDE_H_ + +#include <memory> +#include <string> +#include <vector> + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/graph/Node.hpp" +#include "aidge/operator/Operator.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + +namespace Aidge { +enum class HeavisideAttr { + /** + * @brief The value used in the output tensor when the input is 0. + */ + Value +}; + +class Heaviside_Op + : public OperatorTensor, + public Registrable< + Heaviside_Op, + std::string, + std::function<std::shared_ptr<OperatorImpl>(const Heaviside_Op &)>> { + private: + using Attributes_ = StaticAttributes<HeavisideAttr, float>; + template <HeavisideAttr e> + using attr = typename Attributes_::template attr<e>; + const std::shared_ptr<Attributes_> mAttributes; + + public: + static const std::string Type; + + /* + * Compute the Heaviside step function for each element of the first input. 
+ * Heaviside step function is defined as : + * + * \f[ + * heaviside(input, values) = \begin{cases} + * 0 & \text{if input } < 0 \\ + * values & \text{if input } = 0 \\ + * 1 & \text{if input } > 0 + * \end{cases} + * \f] + * */ + Heaviside_Op(float value); + + Heaviside_Op(const Heaviside_Op &op); + + std::shared_ptr<Operator> clone() const override; + + void setBackend(const std::string &name, DeviceIdx_t device = 0) override; + + std::set<std::string> getAvailableBackends() const override; + + static const std::vector<std::string> getInputsName() { + return {"data_input", "data_values"}; + } + + static const std::vector<std::string> getOutputsName() { + return {"output"}; + } + + inline std::shared_ptr<Attributes> attributes() const override { + return mAttributes; + } + inline float &value() const { + return mAttributes->template getAttr<HeavisideAttr::Value>(); + } +}; + +/** + * @brief Create a Heaviside node. + * + * Initializes a Heaviside node that computes the Heaviside step function for each element + * of the input tensor, using the specified value for inputs equal to zero. + * + * @param value The value used in the output tensor when the input is 0. + * @param name Optional. The name of the node. + * + * @return A shared pointer to a Node representing the Heaviside operation. + */ +std::shared_ptr<Node> Heaviside(float value, const std::string &name = ""); +} // namespace Aidge + +namespace { +template <> +const char *const EnumStrings<Aidge::HeavisideAttr>::data[] = {"value"}; +} + +#endif /* AIDGE_CORE_OPERATOR_HEAVISIDE_H_ */ diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp index ccff976cbb7cf8efc59223dfd658ca2a4d03a80b..b915cb8f16546e6626e99e41f5f9ebb1c038863e 100644 --- a/include/aidge/operator/MetaOperator.hpp +++ b/include/aidge/operator/MetaOperator.hpp @@ -28,7 +28,7 @@ namespace Aidge { class MetaOperator_Op : public OperatorTensor, - public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::function<std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)>> { + public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::function<std::shared_ptr<OperatorImpl>(const MetaOperator_Op &)>> { public: // outputs shared with micro-graph output Tensors // Micro-graph handling: @@ -37,7 +37,7 @@ public: std::weak_ptr<Node> mUpperNode; public: - MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph); + MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph, const std::vector<InputCategory>& forcedInputsCategory = {}); /** * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). @@ -113,6 +113,7 @@ public: std::shared_ptr<Node> MetaOperator(const char *type, const std::shared_ptr<GraphView>& graph, + const std::vector<InputCategory>& forcedInputsCategory = {}, const std::string& name = ""); } // namespace Aidge diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp index bc3348377525cdd2e5b2c030c8fc6b7cb8177e7b..481a7795e24acd006acfc66a0fccde1b8da747e7 100644 --- a/include/aidge/operator/MetaOperatorDefs.hpp +++ b/include/aidge/operator/MetaOperatorDefs.hpp @@ -126,7 +126,7 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> & MaxPooling(kernel_dims, (!name.empty()) ? 
name + "_maxpooling" : "", stride_dims, ceil_mode) }); - return MetaOperator("PaddedMaxPooling", graph, name); + return MetaOperator(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph, {}, name); } template <std::array<DimSize_t, 1>::size_type DIM> @@ -140,7 +140,7 @@ inline std::shared_ptr<MetaOperator_Op> PaddedMaxPooling_Op(const std::array<Dim MaxPooling(kernel_dims, "", stride_dims, ceil_mode) }); - return std::make_shared<MetaOperator_Op>("PaddedMaxPooling", graph); + return std::make_shared<MetaOperator_Op>(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph); } // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp index 87aa4080e57d14d0d8a738afed2e976521b42048..a799153e1db5eb83964ed06dd3bc0fb06da64de8 100644 --- a/include/aidge/operator/Operator.hpp +++ b/include/aidge/operator/Operator.hpp @@ -28,7 +28,7 @@ #include "aidge/data/Data.hpp" #include "aidge/utils/Attributes.hpp" #include "aidge/utils/Types.h" -#include "aidge/hook/Hook.hpp" + #ifdef PYBIND namespace py = pybind11; @@ -50,7 +50,7 @@ enum class InputCategory { class Operator : public std::enable_shared_from_this<Operator> { protected: std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator - std::map<std::string, std::shared_ptr<Hook>> mHooks; + std::shared_ptr<DynamicAttributes> mInheritedAttrs; private: std::string mType; @@ -81,7 +81,10 @@ public: mImpl = nullptr; // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation. // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion. - // Hooks are not copied. + } + std::shared_ptr<Operator> operator()(std::shared_ptr<DynamicAttributes> attrs) { + mInheritedAttrs = attrs; + return shared_from_this(); } virtual ~Operator() noexcept; @@ -90,6 +93,7 @@ public: virtual std::shared_ptr<Operator> clone() const = 0; virtual std::shared_ptr<Attributes> attributes() const { return nullptr; }; + virtual std::shared_ptr<DynamicAttributes> inheritedAttributes() const { return mInheritedAttrs; }; /** * @brief Set the specified input with a shallow copy. * @param inputIdx Index of the input to set. 
@@ -114,14 +118,6 @@ public: virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const = 0; virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0; - std::shared_ptr<Hook> getHook(const std::string& hookName) { - return mHooks[hookName]; - } - void addHook(const std::string& hookName) { - mHooks.insert(std::pair<std::string, std::shared_ptr<Hook>>(hookName,Registrar<Hook>::create({hookName})(shared_from_this()))); - } - - void runHooks() const; /////////////////////////////////////////////////////// // IMPLEMENTATION diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp index 2c670bf23d4703a5a9e8502c8b356fdde32e2561..bc1852ec0759ffaafa015143f22b0a1c8f6c893e 100644 --- a/include/aidge/operator/Pad.hpp +++ b/include/aidge/operator/Pad.hpp @@ -21,6 +21,7 @@ #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/ArrayHelpers.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -47,7 +48,7 @@ public: Pad_Op() = delete; constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples, - const PadBorderType &borderType = PadBorderType::Constant, + PadBorderType borderType = PadBorderType::Constant, double borderValue = 0.0) : OperatorTensor(Type, {InputCategory::Data}, 1), mAttributes(std::make_shared<Attributes_>( @@ -92,7 +93,7 @@ public: template <std::array<DimSize_t, 1>::size_type DIM> std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples, const std::string& name = "", - const PadBorderType &borderType = PadBorderType::Constant, + PadBorderType borderType = PadBorderType::Constant, double borderValue = 0.0); // helper with C-style array instead of std::array for beginEndTuples to allow automatic template DIM deduction @@ -100,7 +101,7 @@ template <DimSize_t DIM> inline std::shared_ptr<Node> Pad( DimSize_t const (&beginEndTuples)[2*DIM], const std::string& name = "", - const PadBorderType &borderType = PadBorderType::Constant, + PadBorderType borderType = PadBorderType::Constant, double borderValue = 0.0) { return Pad<DIM>(to_array(beginEndTuples), name, borderType, borderValue); diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp index 9b264c1d3d7955f71538dd90f105cfd7ee469d0a..a9a84a3ee80eea5c0032fa08bce4ab96c44dba04 100644 --- a/include/aidge/operator/ReLU.hpp +++ b/include/aidge/operator/ReLU.hpp @@ -25,16 +25,34 @@ namespace Aidge { -class ReLU_Op : public OperatorTensor, - public Registrable<ReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>> { +/** + * @brief Description of an element-wise Rectified Linear Unit (ReLU) operation + * on an input Tensor. + * + * For each element x in the input, the function is defined as: + * `f(x) = max(0, x)` + * + * The input and output Tensors have the same dimensions. + * + * @see OperatorTensor + * @see Registrable + */ +class ReLU_Op : + public OperatorTensor, + public Registrable<ReLU_Op, // <Op, backend, implementation creation function> + std::string, + std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>> +{ public: static const std::string Type; ReLU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {} /** - * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). - * @param op Operator to copy. + * @brief Copy-constructor. + * @param op ReLU_Op to copy. 
+ * @details Copies the operator attributes and its output tensor(s), but not + * its input tensors. The new operator has no associated input. */ ReLU_Op(const ReLU_Op& op); diff --git a/include/aidge/operator/Round.hpp b/include/aidge/operator/Round.hpp new file mode 100644 index 0000000000000000000000000000000000000000..00352421d193eff543f1351b57f8db54ac742393 --- /dev/null +++ b/include/aidge/operator/Round.hpp @@ -0,0 +1,64 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CORE_OPERATOR_ROUND_H_ +#define AIDGE_CORE_OPERATOR_ROUND_H_ + +#include <memory> +#include <vector> +#include <string> + +#include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +namespace Aidge { + +class Round_Op : public OperatorTensor, + public Registrable<Round_Op, + std::string, + std::function<std::shared_ptr<OperatorImpl>(const Round_Op&)>> { + + +public: + static const std::string Type; + + Round_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {} + + /** + * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). + * @param op Operator to copy. + */ + Round_Op(const Round_Op& op); + + /** + * @brief Clone the operator using its copy-constructor. + * @see Operator::Round_Op + */ + std::shared_ptr<Operator> clone() const override; + + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; + std::set<std::string> getAvailableBackends() const override; + static const std::vector<std::string> getInputsName(){ + return {"data_input"}; + } + static const std::vector<std::string> getOutputsName(){ + return {"data_output"}; + } +}; + +std::shared_ptr<Node> Round(const std::string& name = ""); +} + + +#endif /* AIDGE_CORE_OPERATOR_ROUND_H_ */ diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp index 24bc3321673f4dcffd3e3663f7e0a0e584389492..ceef45bf0a12315354c8b7bf378c2da834b867b1 100644 --- a/include/aidge/operator/Sigmoid.hpp +++ b/include/aidge/operator/Sigmoid.hpp @@ -26,7 +26,7 @@ namespace Aidge { class Sigmoid_Op : public OperatorTensor, - public Registrable<Sigmoid_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Sigmoid_Op&)>> { + public Registrable<Sigmoid_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Sigmoid_Op&)>> { public: static const std::string Type; @@ -50,4 +50,4 @@ public: std::shared_ptr<Node> Sigmoid(const std::string& name = ""); } -#endif /* AIDGE_CORE_OPERATOR_SIGMOID_H_ */ \ No newline at end of file +#endif /* AIDGE_CORE_OPERATOR_SIGMOID_H_ */ diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp index 155627f2cfd3173ccfbbe2a1ce8c23784cd06d71..beeca8d72a2067ed2dfcd98cf3d9ff0cb7b6ff3a 100644 --- a/include/aidge/operator/Transpose.hpp +++ b/include/aidge/operator/Transpose.hpp @@ -25,6 +25,11 @@ #include "aidge/utils/Types.h" namespace Aidge { +/** + * @brief implementation of the operator Transpose. 
+ * @note Since this operator implementation is agnostic to the backend, it is + * located here instead of in aidge_backend. + */ class TransposeImpl : public OperatorImpl { public: TransposeImpl(const Operator& op, const std::string& backend = "") @@ -33,8 +38,22 @@ public: void forward() override; }; -enum class TransposeAttr { OutputDimsOrder }; +enum class TransposeAttr { + /** + * @brief Order of the output dims from the input dims. If left empty, + * the dimensions of the input will be reversed. + */ + OutputDimsOrder +}; +/** + * @brief This operator transposes the axes of a given tensor. + * input#0 : Tensor to transpose + * @example Calling transpose() on a tensor of dimensions [1, 2, 3] with OutputDimsOrder=(1,0,2) results + * in a tensor of dim [2, 1, 3]. + * @example Calling transpose() on a tensor of dimensions [1,2,3,4] with an empty OutputDimsOrder vector + * will result in a tensor of dim [4,3,2,1]. + */ class Transpose_Op : public OperatorTensor, public Registrable<Transpose_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Transpose_Op&)>> { @@ -50,6 +69,10 @@ private: public: Transpose_Op() = delete; + /** + * @brief Constructor for the Transpose operator. + * @param[in] outputDimsOrder axes permutation order. By default axes are reversed. + */ Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder); /** @@ -70,6 +93,9 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } + /** + * @brief New order of the axes; if left empty, axes will be reversed. + */ inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes -> getAttr<TransposeAttr::OutputDimsOrder>(); } static const std::vector<std::string> getInputsName(){ @@ -80,8 +106,8 @@ public: } }; -std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder, - const std::string& name = ""); +std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder = {}, + const std::string& name = ""); } // namespace Aidge namespace { diff --git a/include/aidge/scheduler/ProdConso.hpp b/include/aidge/scheduler/ProdConso.hpp index a7c0ed5ae73d1f891744e835f0da5ad14a37f850..fce8d7f6548aaeb04300291d33cc2a5e44fb6fe7 100644 --- a/include/aidge/scheduler/ProdConso.hpp +++ b/include/aidge/scheduler/ProdConso.hpp @@ -42,10 +42,14 @@ public: */ virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const; - // Amount of input data that cannot be overwritten during the execution. + /** + * @brief Amount of input data that cannot be overwritten during the execution. + */ virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const; - // Memory required at an output for a given input size. + /** + * @brief Memory required at an output for a given input size. + */ virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const; /** diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp index 792d73693be0780f2e938d828b0f29889216631b..2d03f4e8b8d5ce9c74f1d140a2e13317decc8dac 100644 --- a/include/aidge/scheduler/Scheduler.hpp +++ b/include/aidge/scheduler/Scheduler.hpp @@ -28,8 +28,29 @@ namespace Aidge { class Node; class GraphView; + +/** + * @class Scheduler + * @brief Generate and manage the execution schedule order of nodes in a graph. + * It provides functionality for static scheduling, memory + * management, and visualization of the scheduling process.
+ * + * Key features: + * - Static scheduling generation with early and late execution times + * - Memory layout generation for scheduled nodes + * - Input tensor connection to graph nodes + * - Scheduling visualization through diagram generation + * + * @see GraphView + * @see Node + * @see MemoryManager + */ class Scheduler { protected: + /** + * @struct StaticSchedulingElement + * @brief Represents a node in the static schedule. + */ struct StaticSchedulingElement { StaticSchedulingElement( std::shared_ptr<Node> node_, @@ -37,15 +58,17 @@ protected: std::size_t late_ = static_cast<std::size_t>(-1)) : node(node_), early(early_), late(late_) {} - std::shared_ptr<Node> node; - std::size_t early; - std::size_t late; - std::vector<std::shared_ptr<StaticSchedulingElement>> earlierThan; - std::vector<std::shared_ptr<StaticSchedulingElement>> laterThan; + std::shared_ptr<Node> node; /** Scheduled `Node` */ + std::size_t early; /** Earliest possible execution time */ + std::size_t late; /** Latest possible execution time */ + std::vector<StaticSchedulingElement*> earlierThan; /** Nodes that must be executed earlier */ + std::vector<StaticSchedulingElement*> laterThan; /** Nodes that must be executed later */ }; /** - * @brief Node with its start/end execution time stored for later display. + * @struct SchedulingElement + * @brief Represent a `Node` with its actual execution times. + * @details Start and end times are stored for later display. */ struct SchedulingElement { SchedulingElement( @@ -54,21 +77,32 @@ protected: std::chrono::time_point<std::chrono::high_resolution_clock> end_) : node(node_), start(start_), end(end_) {} ~SchedulingElement() noexcept = default; - std::shared_ptr<Node> node; - std::chrono::time_point<std::chrono::high_resolution_clock> start; - std::chrono::time_point<std::chrono::high_resolution_clock> end; + std::shared_ptr<Node> node; /** Executed `Node` */ + std::chrono::time_point<std::chrono::high_resolution_clock> start; /** Actual start time of execution */ + std::chrono::time_point<std::chrono::high_resolution_clock> end; /** Actual end time of execution */ }; public: + /** + * @struct PriorProducersConsumers + * @brief Manages producer-consumer relationships for nodes. + */ struct PriorProducersConsumers { PriorProducersConsumers(); PriorProducersConsumers(const PriorProducersConsumers&); ~PriorProducersConsumers() noexcept; - bool isPrior = false; - std::set<std::shared_ptr<Aidge::Node>> requiredProducers; - std::set<std::shared_ptr<Aidge::Node>> priorConsumers; + bool isPrior = false; /** Indicates if this Node is a prior to another Node */ + std::set<std::shared_ptr<Aidge::Node>> requiredProducers; /** Set of required producer nodes */ + std::set<std::shared_ptr<Aidge::Node>> priorConsumers; /** Set of required prior consumer nodes */ }; public: + Scheduler() = delete; + + /** + * @brief Constructor for the Scheduler class. + * @param graphView Shared pointer to the GraphView to be scheduled. + * @param upperNode Shared pointer to the upper node of the GraphView (optional). + */ Scheduler(std::shared_ptr<GraphView> graphView, std::shared_ptr<Node> upperNode = nullptr) : mGraphView(graphView), mUpperNode(upperNode) @@ -76,15 +110,20 @@ public: // ctor }; - virtual ~Scheduler() noexcept; + virtual ~Scheduler(); public: /** - * @brief Return a vector of Node ordered by the order they are called by the scheduler. - * @return std::vector<std::shared_ptr<Node>> + * @brief Get the static scheduling order of nodes. 
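+ * A short usage sketch (illustrative, not part of the original changeset),
+ * assuming `scheduler` is a Scheduler whose static schedule has already been
+ * generated:
+ * @code
+ * const auto order = scheduler.getStaticScheduling();  // nodes of step 0, in execution order
+ * const auto next  = scheduler.getStaticScheduling(1); // nodes of the following step, if any
+ * @endcode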
+ * @param step The step of the static schedule to retrieve (default is 0). + * @return Vector of shared pointers to Nodes in their scheduled order. */ std::vector<std::shared_ptr<Node>> getStaticScheduling(std::size_t step = 0) const; + /** + * @brief Get the GraphView associated with this Scheduler. + * @return Shared pointer to the GraphView. + */ inline std::shared_ptr<GraphView> graphView() const noexcept { return mGraphView; } @@ -110,20 +149,23 @@ public: MemoryManager generateMemory(bool incProducers = false, bool wrapAroundBuffer = false) const; /** - * @brief Place the data tensors inside in the data input tensor of the graphView. In case of multiple data input tensors, they are mapped to producers in the order given by the graph. + * @brief Connect input tensors to the data input of the GraphView. + * In case of multiple data input tensors, they are mapped to producers in + * the order given by the graph. * * @param data data input tensors */ void connectInputs(const std::vector<std::shared_ptr<Aidge::Tensor>>& data); /** - * @brief Save in a Markdown file the static scheduling with early and late relative order for the nodes. - * @param fileName Name of the generated file. + * @brief Save the static scheduling diagram, with early and late relative + * order of execution for the nodes, to a file in Mermaid format. + * @param fileName Name of the file to save the diagram (without extension). */ void saveStaticSchedulingDiagram(const std::string& fileName) const; /** - * @brief Save in a Markdown file the order of layers execution. + * @brief Save in a Mermaid file the order of layers execution. * @param fileName Name of the generated file. */ void saveSchedulingDiagram(const std::string& fileName) const; @@ -139,34 +181,53 @@ protected: Elts_t getNbAvailableData(const std::shared_ptr<Node>& node, const IOIndex_t inputIdx) const; + /** + * @brief Get the prior producers and consumers for a node. + * @param node Shared pointer to the Node. + * @return PriorProducersConsumers object containing prior information. + */ PriorProducersConsumers getPriorProducersConsumers(const std::shared_ptr<Node>& node) const; /** * @brief Generate an initial base scheduling for the GraphView. * The scheduling is entirely sequential and garanteed to be valid w.r.t. * each node producer-consumer model. + * @return Vector of pointers to `StaticSchedulingElement` representing the base schedule. */ - std::vector<std::shared_ptr<StaticSchedulingElement>> generateBaseScheduling() const; + std::vector<StaticSchedulingElement*> generateBaseScheduling() const; /** - * Fill-in early and late scheduling step from initial base scheduling. - * For each node, specifies the earliest and latest possible execution - * logical step. - */ - void generateEarlyLateScheduling(std::vector<std::shared_ptr<StaticSchedulingElement>>& schedule) const; + * @brief Calculates early and late execution times for each node in an initial base scheduling. + * + * This method performs two passes over the schedule: + * 1. Forward pass: Calculates the earliest possible execution time for each node + * 2. Backward pass: Calculates the latest possible execution time for each node + * + * It also establishes 'earlierThan' and 'laterThan' relationships between nodes. + * + * @param schedule Vector of shared pointers to StaticSchedulingElements to be processed + */ + void generateEarlyLateScheduling(std::vector<StaticSchedulingElement*>& schedule) const; private: + /** + * @brief Summarize the consumer state of a node for debugging purposes. 
+ * @param consumer Shared pointer to the consumer Node. + * @param nodeName Name of the node. + * @details Provide the amount of data consumed and required for each input + * and the amount of data produced for each output. + */ void summarizeConsumerState(const std::shared_ptr<Node>& consumer, const std::string& nodeName) const; protected: - /** @brief Shared ptr to the scheduled graph view */ + /** @brief Shared pointer to the scheduled GraphView */ std::shared_ptr<GraphView> mGraphView; - /** @brief Shared ptr to the upper node containing the graph view */ + /** @brief Weak pointer to the upper node containing the graph view */ std::weak_ptr<Node> mUpperNode; /** @brief List of SchedulingElement (i.e: Nodes with their computation time) */ std::vector<SchedulingElement> mScheduling; /** @brief List of nodes ordered by their */ - std::vector<std::vector<std::shared_ptr<StaticSchedulingElement>>> mStaticSchedule; + std::vector<std::vector<StaticSchedulingElement*>> mStaticSchedule; std::size_t mStaticScheduleStep = 0; mutable std::map<std::shared_ptr<Node>, PriorProducersConsumers> mPriorCache; }; diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp index cf71ed0b5953fa1759e04c66311d3d829a603a01..fd29bf4ce57ac94e0860172d2d1c15dc40f15ae0 100644 --- a/include/aidge/utils/Attributes.hpp +++ b/include/aidge/utils/Attributes.hpp @@ -69,8 +69,6 @@ public: virtual std::map<std::string, future_std::any> getAttrs() const = 0; #ifdef PYBIND - virtual bool hasAttrPy(const std::string& name) const = 0; - /* Bindable get function, does not recquire any templating. * This is thanks to py::object which allow the function to * be agnostic from its return type. diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp index 04ed58f7e636d6a0d528f1946ead110857312576..52056852bc454f65f7d12cfc0608e5b6b0b1d933 100644 --- a/include/aidge/utils/DynamicAttributes.hpp +++ b/include/aidge/utils/DynamicAttributes.hpp @@ -50,39 +50,23 @@ public: * exist * \note at() throws if the Attribute does not exist, using find to test for Attribute existance */ - template<class T> const T& getAttr(const std::string& name) const + template<class T> T getAttr(const std::string& name) const { - mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T), - [](const future_std::any& lhs, const future_std::any& rhs) { -#ifdef PYBIND - if (lhs.type() == typeid(py::object)) { - return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs)); - } - else if (rhs.type() == typeid(py::object)) { - return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>()); - } - else -#endif - { - return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs)); - } - })); - const auto dot = name.find('.'); if (dot == name.npos) { + mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>())); + + const auto& attr = mAttrs.at(name); #ifdef PYBIND - // If attribute does not exist in C++, it might have been created or modified in Python - auto it = mAttrs.find(name); - if (it == mAttrs.end()) { - auto itPy = mAttrsPy.find(name); - if (itPy != mAttrsPy.end()) { - // Insert the attribute back in C++ - mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>()))); - } + if (attr.type() == typeid(py::object)) { + // Note: because of cast<T>(), this function cannot return a const reference! 
+ return future_std::any_cast<const py::object&>(attr).cast<T>(); } + else #endif - - return future_std::any_cast<const T&>(mAttrs.at(name)); + { + return future_std::any_cast<const T&>(attr); + } } else { const auto ns = name.substr(0, dot); @@ -92,9 +76,21 @@ public: } template<class T> T& getAttr(const std::string& name) { - // Scott Meyers' solution to avoid code duplication - return const_cast<T&>( - static_cast<const DynamicAttributes&>(*this).getAttr<T>(name)); + const auto dot = name.find('.'); + if (dot == name.npos) { + mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>())); + + auto& attr = mAttrs.at(name); +#ifdef PYBIND + AIDGE_ASSERT(attr.type() != typeid(py::object), "getAttr(): cannot return a reference to a Python-defined attribute."); +#endif + return future_std::any_cast<T&>(attr); + } + else { + const auto ns = name.substr(0, dot); + const auto nsName = name.substr(dot + 1); + return future_std::any_cast<DynamicAttributes&>(mAttrs.at(ns)).getAttr<T>(nsName); + } } ///\brief Add a new Attribute, identified by its name. If it already exists, asserts. @@ -103,35 +99,12 @@ public: ///\param value Attribute value template<class T> void addAttr(const std::string& name, const T& value) { - mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T), - [](const future_std::any& lhs, const future_std::any& rhs) { -#ifdef PYBIND - if (lhs.type() == typeid(py::object)) { - return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs)); - } - else if (rhs.type() == typeid(py::object)) { - return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>()); - } - else -#endif - { - return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs)); - } - })); - const auto dot = name.find('.'); if (dot == name.npos) { + mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>())); + const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value))); AIDGE_ASSERT(res.second, "addAttr(): attribute \"{}\" already exists. Use setAttr() if this is expected.", name); - -#ifdef PYBIND - // We cannot handle Python object if the Python interpreter is not running - if (Py_IsInitialized()) { - // Keep a copy of the attribute in py::object that is updated everytime - const auto& resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value))); - AIDGE_ASSERT(resPy.second, "addAttr(): attribute \"{}\" already exists (added in Python). 
Use setAttr() if this is expected.", name); - } -#endif } else { const auto ns = name.substr(0, dot); @@ -147,37 +120,13 @@ public: ///\param value Attribute value template<class T> void setAttr(const std::string& name, const T& value) { - mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T), - [](const future_std::any& lhs, const future_std::any& rhs) { -#ifdef PYBIND - if (lhs.type() == typeid(py::object)) { - return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs)); - } - else if (rhs.type() == typeid(py::object)) { - return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>()); - } - else -#endif - { - return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs)); - } - })); - const auto dot = name.find('.'); if (dot == name.npos) { + mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>())); + auto res = mAttrs.emplace(std::make_pair(name, future_std::any(value))); if (!res.second) res.first->second = future_std::any(value); - -#ifdef PYBIND - // We cannot handle Python object if the Python interpreter is not running - if (Py_IsInitialized()) { - // Keep a copy of the attribute in py::object that is updated everytime - auto resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value))); - if (!resPy.second) - resPy.first->second = std::move(py::cast(value)); - } -#endif } else { const auto ns = name.substr(0, dot); @@ -191,9 +140,6 @@ public: const auto dot = name.find('.'); if (dot == name.npos) { mAttrs.erase(name); -#ifdef PYBIND - mAttrsPy.erase(name); -#endif } else { const auto ns = name.substr(0, dot); @@ -205,41 +151,12 @@ public: #ifdef PYBIND void addAttrPy(const std::string& name, py::object&& value) { - const auto dot = name.find('.'); - if (dot == name.npos) { - auto it = mAttrs.find(name); - AIDGE_ASSERT(it == mAttrs.end(), "add_attr(): attribute \"{}\" already exists (added in C++). Use set_attr() if this is expected.", name); - - const auto& res = mAttrsPy.emplace(std::make_pair(name, value)); - AIDGE_ASSERT(res.second, "add_attr(): attribute \"{}\" already exists. 
Use set_attr() if this is expected.", name); - } - else { - const auto ns = name.substr(0, dot); - const auto nsName = name.substr(dot + 1); - const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes())); - - future_std::any_cast<DynamicAttributes&>(res.first->second).addAttrPy(nsName, std::move(value)); - } + addAttr(name, std::move(value)); } void setAttrPy(const std::string& name, py::object&& value) override final { - const auto dot = name.find('.'); - if (dot == name.npos) { - auto resPy = mAttrsPy.emplace(std::make_pair(name, value)); - if (!resPy.second) - resPy.first->second = std::move(value); - - // Force getAttr() to take attribute value from mAttrsPy and update mAttrs - mAttrs.erase(name); - } - else { - const auto ns = name.substr(0, dot); - const auto nsName = name.substr(dot + 1); - const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes())); - - future_std::any_cast<DynamicAttributes&>(res.first->second).setAttrPy(nsName, std::move(value)); - } + setAttr(name, std::move(value)); } py::dict dict() const override { @@ -248,9 +165,9 @@ public: if (elt.second.type() == typeid(DynamicAttributes)) { attributes[elt.first.c_str()] = future_std::any_cast<const DynamicAttributes&>(elt.second).dict(); } - } - for (const auto& elt : mAttrsPy) { - attributes[elt.first.c_str()] = elt.second; + else { + attributes[elt.first.c_str()] = mAnyUtils.at(elt.second.type())->cast(elt.second); + } } return attributes; } @@ -273,12 +190,7 @@ public: bool hasAttr(const std::string& name) const override final { const auto dot = name.find('.'); if (dot == name.npos) { -#ifdef PYBIND - return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend()); - -#else return (mAttrs.find(name) != mAttrs.cend()); -#endif } else { const auto ns = name.substr(0, dot); @@ -293,45 +205,22 @@ public: } } -#ifdef PYBIND - bool hasAttrPy(const std::string& name) const override final { - const auto dot = name.find('.'); - if (dot == name.npos) { - // Attributes might have been created in Python, the second condition is necessary. 
- return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend()); - } - else { - const auto ns = name.substr(0, dot); - const auto it = mAttrs.find(ns); - if (it != mAttrs.cend()) { - const auto nsName = name.substr(dot + 1); - return future_std::any_cast<const DynamicAttributes&>(it->second).hasAttrPy(nsName); - } - else { - return false; - } - } - } -#endif - std::string getAttrType(const std::string& name) const override final { // In order to remain consistent between C++ and Python, with or without PyBind, the name of the type is: // - C-style for C++ created attributes // - Python-style for Python created attributes const auto dot = name.find('.'); if (dot == name.npos) { + const auto& attr = mAttrs.at(name); #ifdef PYBIND - // If attribute does not exist in C++, it might have been created in Python - auto it = mAttrs.find(name); - if (it == mAttrs.end()) { - auto itPy = mAttrsPy.find(name); - if (itPy != mAttrsPy.end()) { - return std::string(Py_TYPE(itPy->second.ptr())->tp_name); - } + if (attr.type() == typeid(py::object)) { + return std::string(Py_TYPE(future_std::any_cast<const py::object&>(attr).ptr())->tp_name); } + else #endif - - return mAttrs.at(name).type().name(); + { + return attr.type().name(); + } } else { const auto ns = name.substr(0, dot); @@ -344,11 +233,6 @@ public: std::set<std::string> attrsName; for(auto const& it: mAttrs) attrsName.insert(it.first); -#ifdef PYBIND - // Attributes might have been created in Python - for(auto const& it: mAttrsPy) - attrsName.insert(it.first); -#endif return attrsName; } @@ -356,21 +240,13 @@ public: /** * @detail See https://github.com/pybind/pybind11/issues/1590 as to why a * generic type caster for std::any is not feasable. - * The strategy here is to keep a copy of each attribute in py::object that is updated everytime. + * The strategy here is to store a cast() function for each attribute type ever used. 
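+ * For instance (illustrative, based on the code in this changeset): after a call
+ * to setAttr<int>("n", 1), an AnyUtils<int> entry is registered in mAnyUtils, and
+ * getAttrPy("n") then uses its cast() method to build the returned py::object.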
*/ inline py::object getAttrPy(const std::string& name) const override final { const auto dot = name.find('.'); if (dot == name.npos) { - auto itPy = mAttrsPy.find(name); - if (itPy == mAttrsPy.end()) { - // Attribute may be a namespace - auto it = mAttrs.find(name); - AIDGE_ASSERT(it != mAttrs.end() && it->second.type() == typeid(DynamicAttributes), "get_attr(): attribute \"{}\" not found", name); - return py::cast(future_std::any_cast<const DynamicAttributes&>(it->second)); - } - else { - return itPy->second; - } + const auto& attr = mAttrs.at(name); + return mAnyUtils.at(attr.type())->cast(attr); } else { const auto ns = name.substr(0, dot); @@ -384,24 +260,6 @@ public: { const auto dot = name.find('.'); if (dot == name.npos) { -#ifdef PYBIND - // If attribute does not exist in C++, it might have been created or modified in Python - auto it = mAttrs.find(name); - if (it == mAttrs.end()) { - auto itPy = mAttrsPy.find(name); - if (itPy != mAttrsPy.end()) { - // Attribute exists in Python, but its type is not known - // Return a std::any of py::object, which will be comparable - mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(py::object), - [](const future_std::any& lhs, const future_std::any& rhs) { - return (future_std::any_cast<py::object>(lhs) < future_std::any_cast<py::object>(rhs)); - })); - - return future_std::any(itPy->second); - } - } -#endif - return mAttrs.at(name); } else { @@ -415,34 +273,129 @@ public: return mAttrs; } - virtual ~DynamicAttributes() {} + virtual ~DynamicAttributes() { +#ifdef PYBIND + if (!Py_IsInitialized()) { + // Resets the internal pointer of py::object to nullptr without decreasing the object's reference count. + // At this point, the Python interpreter may have exited (which is the case if the current DynamicAttributes being destroyed is static), + // in which case the py::object has already been destroyed despite its reference count being > 0.
+ // See https://github.com/pybind/pybind11/issues/1598 + for (auto& attr : mAttrs) { + if (attr.second.type() == typeid(py::object)) { + future_std::any_cast<py::object&>(attr.second).release(); + } + } + } +#endif + } friend bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs); + friend struct std::hash<DynamicAttributes>; private: -#ifdef PYBIND - // Stores C++ attributes (copy) and Python-only attributes - // Code should be compiled with -fvisibility=hidden - // See https://pybind11.readthedocs.io/en/stable/faq.html: - // “‘SomeClass’ declared with greater visibility than the type of its - // field ‘SomeClass::member’ [-Wattributes]†- // This map will only be populated if Python interpreter is running - std::map<std::string, py::object> mAttrsPy; - // Stores C++ attributes only - // mutable because it may be updated in getAttr() from Python - mutable std::map<std::string, future_std::any> mAttrs; -#else std::map<std::string, future_std::any> mAttrs; -#endif public: - // Stores the comparison function for each attribute type ever used - static std::map<std::type_index, bool(*)(const future_std::any&, const future_std::any&)> mAnyCompare; + struct AnyUtils_ { +#ifdef PYBIND + virtual py::object cast(const future_std::any& attr) const = 0; +#endif + virtual bool compare(const future_std::any&, const future_std::any&) const = 0; + virtual size_t hash(const future_std::any&) const = 0; + virtual ~AnyUtils_() = default; + }; + + template <class T> + struct AnyUtils : public AnyUtils_ { +#ifdef PYBIND + py::object cast(const future_std::any& attr) const override final { + return py::cast(future_std::any_cast<const T&>(attr)); + } +#endif + + bool compare(const future_std::any& lhs, const future_std::any& rhs) const override final { +#ifdef PYBIND + if (lhs.type() == typeid(py::object) && rhs.type() != typeid(py::object)) { + return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs)); + } + else if (lhs.type() != typeid(py::object) && rhs.type() == typeid(py::object)) { + return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>()); + } + else +#endif + { + return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs)); + } + } + + size_t hash(const future_std::any& attr) const override final { + return std::hash<T>()(future_std::any_cast<T>(attr)); + } + }; + + // Stores typed utils functions for each attribute type ever used + static std::map<std::type_index, std::unique_ptr<AnyUtils_>> mAnyUtils; +}; + +template<> void DynamicAttributes::setAttr<future_std::any>(const std::string& name, const future_std::any& value); + +#ifdef PYBIND +template <> +struct DynamicAttributes::AnyUtils<py::object> : public DynamicAttributes::AnyUtils_ { + py::object cast(const future_std::any& attr) const override { + return future_std::any_cast<const py::object&>(attr); + } + + bool compare(const future_std::any& lhs, const future_std::any& rhs) const override { + return (future_std::any_cast<py::object>(lhs) < future_std::any_cast<py::object>(rhs)); + } + + size_t hash(const future_std::any& attr) const override final { + // Here we are mixing Python and C++ hashes... if both are + // well implemented, this should not increase the collision + // probability for the same number of stored hashes. 
+ return py::hash(future_std::any_cast<py::object>(attr)); + } }; +#endif inline bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs) { return (lhs.mAttrs < rhs.mAttrs); } + +// Combine the hashes (boost-like hash combining, see boost::hash_combine()) +inline void hash_combine(std::size_t& seed, const std::size_t& value) { + seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2); +} +} + +namespace std { + // Make DynamicAttributes hashable so that it can be stored in hash-based containers. + // This is particularly useful in Python since set() and dict() are hash-based. + template <> + struct hash<Aidge::DynamicAttributes> { + size_t operator()(const Aidge::DynamicAttributes& attrs) const { + std::size_t seed = 0; + for (const auto& pair : attrs.mAttrs) { + Aidge::hash_combine(seed, std::hash<std::string>()(pair.first)); + Aidge::hash_combine(seed, Aidge::DynamicAttributes::mAnyUtils.at(pair.second.type())->hash(pair.second)); + } + return seed; + } + }; + + // General specialization of std::hash for any container that has iterators (e.g., std::vector, std::list, std::set) + template <template <typename...> class Container, typename T, typename... Args> + struct hash<Container<T, Args...>> { + std::size_t operator()(const Container<T, Args...>& iterable) const { + std::size_t seed = 0; + for (const auto& v : iterable) { + // Recursively hash the value pointed to by the iterator + Aidge::hash_combine(seed, std::hash<T>()(v)); + } + return seed; + } + }; } namespace future_std { diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp index 414381891ce52046ee7c2df5b82a17e1314773cd..636863e292eeb677055dea379441ce422a6c90d8 100644 --- a/include/aidge/utils/StaticAttributes.hpp +++ b/include/aidge/utils/StaticAttributes.hpp @@ -199,18 +199,6 @@ public: return false; } -#ifdef PYBIND - bool hasAttrPy(const std::string& name) const override final { - for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) { - if (name == EnumStrings<ATTRS_ENUM>::data[i]) { - return true; - } - } - - return false; - } -#endif - // Runtime type access with name std::string getAttrType(const std::string& name) const override final { for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) { diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp index 04172c3ff68641a9fe0d14f9a326cd17e7002912..49e45ed7e447c00cf7300e8228ee7d1b04800083 100644 --- a/python_binding/backend/pybind_OperatorImpl.cpp +++ b/python_binding/backend/pybind_OperatorImpl.cpp @@ -55,9 +55,9 @@ public: ); } - std::set<ImplSpec> getAvailableImplSpecs() const noexcept override { + std::vector<ImplSpec> getAvailableImplSpecs() const noexcept override { PYBIND11_OVERRIDE_NAME( - std::set<ImplSpec>, + std::vector<ImplSpec>, OperatorImpl, "get_available_impl_specs", getAvailableImplSpecs @@ -81,6 +81,13 @@ void init_OperatorImpl(py::module& m){ .def(py::init<const DynamicAttributes&>(), py::arg("attr") = DynamicAttributes()) .def(py::init<const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("io"), py::arg("attr") = DynamicAttributes()) .def(py::init<const ImplSpec::IOSpec&, const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("i"), py::arg("o"), py::arg("attr") = DynamicAttributes()) + .def("__eq__", static_cast<bool(*)(const ImplSpec&, const ImplSpec&)>(&operator==)) + .def("__repr__", [](ImplSpec self){ + return fmt::format("{}\n", self); + }) + .def_readwrite("inputs", &ImplSpec::inputs) + 
.def_readwrite("outputs", &ImplSpec::outputs) + .def_readwrite("attrs", &ImplSpec::attrs) ; py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr()) @@ -98,4 +105,4 @@ void init_OperatorImpl(py::module& m){ .def("get_available_impl_specs", &OperatorImpl_Publicist::getAvailableImplSpecs) ; } -} +} // namespace Aidge diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp index e91f345d7974cb06aa7aec9e27300b9cf9230985..b1e879d8387a71a3819ee7e0f8bbcd1e9936c146 100644 --- a/python_binding/data/pybind_Data.cpp +++ b/python_binding/data/pybind_Data.cpp @@ -62,5 +62,9 @@ void init_Data(py::module& m){ py::class_<Data, std::shared_ptr<Data>>(m,"Data"); + + m.def("format_as", (const char* (*)(DataType)) &format_as, py::arg("dt")); + m.def("format_as", (const char* (*)(DataFormat)) &format_as, py::arg("df")); + } } diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp index 91f13c17dd923d9f4b9ed6e468ce7897059d8749..fe606cfb557042d581e09da7419d80841d1dc2d4 100644 --- a/python_binding/data/pybind_Tensor.cpp +++ b/python_binding/data/pybind_Tensor.cpp @@ -315,6 +315,7 @@ void init_Tensor(py::module& m){ .def(py::self - py::self) .def(py::self * py::self) .def(py::self / py::self) + .def("clone", &Tensor::clone) .def("sqrt", &Tensor::sqrt) .def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true) .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true) @@ -322,6 +323,7 @@ void init_Tensor(py::module& m){ .def("grad", &Tensor::grad) .def("set_grad", &Tensor::setGrad) .def("dtype", &Tensor::dataType) + .def("dformat", &Tensor::dataFormat) .def("size", &Tensor::size) .def("capacity", &Tensor::capacity) .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize, py::arg("dims"), py::arg("strides") = std::vector<DimSize_t>()) @@ -330,9 +332,11 @@ void init_Tensor(py::module& m){ .def("get_idx", &Tensor::getIdx) .def_static("get_available_backends", &Tensor::getAvailableBackends) .def("undefined", &Tensor::undefined) + .def("cpy_transpose", (void (Tensor::*)(const Tensor& src, const std::vector<DimSize_t>& transpose)) &Tensor::copyTranspose, py::arg("src"), py::arg("transpose")) + .def("__str__", [](Tensor& b) { - if (b.empty()) { - return std::string("{}"); + if (b.empty() && b.undefined()) { + return std::string("{}"); } else { return b.toString(); } diff --git a/python_binding/filler/pybind_Filler.cpp b/python_binding/filler/pybind_Filler.cpp index a85c0d6cd6fa0367dfc26328d214c99a4288a3be..cb0a823639fd7a2f5b9f47be2bd78b9a7e41f2d4 100644 --- a/python_binding/filler/pybind_Filler.cpp +++ b/python_binding/filler/pybind_Filler.cpp @@ -30,11 +30,17 @@ void init_Filler(py::module &m) { [](std::shared_ptr<Tensor> tensor, py::object value) -> void { switch (tensor->dataType()) { case DataType::Float64: - constantFiller<double>(tensor, value.cast<double>()); + constantFiller<cpptype_t<DataType::Float64>>(tensor, value.cast<cpptype_t<DataType::Float64>>()); break; case DataType::Float32: - constantFiller<float>(tensor, value.cast<float>()); + constantFiller<cpptype_t<DataType::Float32>>(tensor, value.cast<cpptype_t<DataType::Float32>>()); break; + case DataType::Int64: + constantFiller<cpptype_t<DataType::Int64>>(tensor, value.cast<cpptype_t<DataType::Int64>>()); + break; + case DataType::Int32: + constantFiller<cpptype_t<DataType::Int32>>(tensor, 
value.cast<cpptype_t<DataType::Int32>>()); + break; default: AIDGE_THROW_OR_ABORT( py::value_error, @@ -44,14 +50,14 @@ void init_Filler(py::module &m) { py::arg("tensor"), py::arg("value")) .def( "normal_filler", - [](std::shared_ptr<Tensor> tensor, double mean, - double stdDev) -> void { + [](std::shared_ptr<Tensor> tensor, py::object mean, + py::object stdDev) -> void { switch (tensor->dataType()) { case DataType::Float64: - normalFiller<double>(tensor, mean, stdDev); + normalFiller<cpptype_t<DataType::Float64>>(tensor, mean.cast<cpptype_t<DataType::Float64>>(), stdDev.cast<cpptype_t<DataType::Float64>>()); break; case DataType::Float32: - normalFiller<float>(tensor, mean, stdDev); + normalFiller<cpptype_t<DataType::Float32>>(tensor, mean.cast<cpptype_t<DataType::Float32>>(), stdDev.cast<cpptype_t<DataType::Float32>>()); break; default: AIDGE_THROW_OR_ABORT( @@ -60,23 +66,39 @@ void init_Filler(py::module &m) { } }, py::arg("tensor"), py::arg("mean") = 0.0, py::arg("stdDev") = 1.0) - .def( - "uniform_filler", - [](std::shared_ptr<Tensor> tensor, double min, double max) -> void { + .def("uniform_filler", [] (std::shared_ptr<Tensor> tensor, py::object min, py::object max) -> void { + if (py::isinstance<py::int_>(min) && py::isinstance<py::int_>(max)) { switch (tensor->dataType()) { - case DataType::Float64: - uniformFiller<double>(tensor, min, max); + case DataType::Int32: + uniformFiller<std::int32_t>(tensor, min.cast<std::int32_t>(), max.cast<std::int32_t>()); + break; + case DataType::Int64: + uniformFiller<std::int64_t>(tensor, min.cast<std::int64_t>(), max.cast<std::int64_t>()); + break; + default: + AIDGE_THROW_OR_ABORT( + py::value_error, + "Data type is not supported for Uniform filler."); break; + } + } else if (py::isinstance<py::float_>(min) && py::isinstance<py::float_>(max)) { + switch (tensor->dataType()) { case DataType::Float32: - uniformFiller<float>(tensor, min, max); + uniformFiller<float>(tensor, min.cast<float>(), max.cast<float>()); + break; + case DataType::Float64: + uniformFiller<double>(tensor, min.cast<double>(), max.cast<double>()); break; default: AIDGE_THROW_OR_ABORT( py::value_error, "Data type is not supported for Uniform filler."); + break; } - }, - py::arg("tensor"), py::arg("min"), py::arg("max")) + } else { + AIDGE_THROW_OR_ABORT(py::value_error,"Input must be either an int or a float."); + } + }, py::arg("tensor"), py::arg("min"), py::arg("max")) .def( "xavier_uniform_filler", [](std::shared_ptr<Tensor> tensor, py::object scaling, diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp index c0ee183b072398e2e393bdbd7446de0155519169..febb6f2ed594174a7aeef60f26b8f9a5ee0e23e3 100644 --- a/python_binding/graph/pybind_GraphView.cpp +++ b/python_binding/graph/pybind_GraphView.cpp @@ -30,6 +30,8 @@ void init_GraphView(py::module& m) { :param path: save location :type path: str )mydelimiter") + .def("inputs", (std::vector<std::pair<NodePtr, IOIndex_t>> (GraphView::*)() const) &GraphView::inputs) + .def("outputs", (std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> (GraphView::*)() const) &GraphView::outputs) .def("in_view", (bool (GraphView::*)(const NodePtr&) const) &GraphView::inView) .def("in_view", (bool (GraphView::*)(const std::string&) const) &GraphView::inView) .def("root_node", &GraphView::rootNode) @@ -146,6 +148,9 @@ void init_GraphView(py::module& m) { // return py::none(); // } // }) + .def("get_ranked_nodes", &GraphView::getRankedNodes) + .def("set_dataformat", &GraphView::setDataFormat, 
py::arg("dataformat")) + ; m.def("get_connected_graph_view", &getConnectedGraphView); diff --git a/python_binding/graph/pybind_Matching.cpp b/python_binding/graph/pybind_Matching.cpp new file mode 100644 index 0000000000000000000000000000000000000000..94f2471c3f234c1e401484c099a3815dd26d3c30 --- /dev/null +++ b/python_binding/graph/pybind_Matching.cpp @@ -0,0 +1,51 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <pybind11/pybind11.h> +#include <pybind11/stl.h> +#include <memory> +#include <string> +#include "aidge/graph/Matching.hpp" +#include "aidge/graph/GraphView.hpp" +#include "aidge/graph/Node.hpp" +#include "aidge/utils/Types.h" +//#include "aidge/data/Data.hpp" + +namespace py = pybind11; +namespace Aidge { +void init_SinglePassGraphMatching(py::module& m) { + py::class_<Aidge::SinglePassGraphMatching::MatchingResult>(m,"MatchingResult") + .def(py::init<>()) + .def_readwrite("graph", &Aidge::SinglePassGraphMatching::MatchingResult::graph) + .def_readwrite("anchors", &Aidge::SinglePassGraphMatching::MatchingResult::anchors) + .def_readwrite("startNode", &Aidge::SinglePassGraphMatching::MatchingResult::startNode); + + py::class_<Aidge::SinglePassGraphMatching>(m, "SinglePassGraphMatching") + .def(py::init<std::shared_ptr<GraphView>>(), py::arg("graph")) + .def("match", + [](Aidge::SinglePassGraphMatching& self, const std::string& query, bool disjoint){ + // Note: Need to convert set to vector has MatchingResult is not hashable and + // set<MatchingResult> cannot be binded + std::set<Aidge::SinglePassGraphMatching::MatchingResult> set_res = self.match(query, disjoint); + std::vector<Aidge::SinglePassGraphMatching::MatchingResult> vec_res(set_res.begin(), set_res.end()); + return vec_res; + }, + py::arg("query"), py::arg("disjoint") = false, + R"mydelimiter( Matches a query by direct, single-pass parse and match. + :param query: The query string to search. + :param disjoint: If true, only keep the longest disjoint matches. + :return: A set of MatchingResult instances. + )mydelimiter"); + + + +} +} // namespace Aidge diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp index d8e77bb259cbcbae7940a09dc405bb8f50b5b79b..35f6327444a874d8f5c2e94da6520244e095263a 100644 --- a/python_binding/graph/pybind_Node.cpp +++ b/python_binding/graph/pybind_Node.cpp @@ -34,6 +34,11 @@ void init_Node(py::module& m) { Type of the node. )mydelimiter") + .def("attributes", &Node::attributes, + R"mydelimiter( + Get attributes. + )mydelimiter") + .def("get_operator", &Node::getOperator, R"mydelimiter( Get the Operator object of the Node. @@ -48,7 +53,7 @@ void init_Node(py::module& m) { :rtype: str )mydelimiter") - .def("create_unique_name", &Node::createUniqueName, py::arg("base_name"), + .def("create_unique_name", &Node::createUniqueName, py::arg("base_name"), R"mydelimiter( Given a base name, generate a new name which is unique in all the GraphViews containing this node. 
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp index 8a00a1cb4a419f1125411b5b1c823bf91570d62e..f8adfd5f4becb7677b3a59791f8549bb114fbbc4 100644 --- a/python_binding/operator/pybind_Add.cpp +++ b/python_binding/operator/pybind_Add.cpp @@ -22,14 +22,14 @@ namespace Aidge { void declare_Add(py::module &m) { py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance()) - .def(py::init<const IOIndex_t>(), py::arg("nb_inputs")) + .def(py::init<>()) .def_static("get_inputs_name", &Add_Op::getInputsName) .def_static("get_outputs_name", &Add_Op::getOutputsName) .def_readonly_static("Type", &Add_Op::Type); declare_registrable<Add_Op>(m, "AddOp"); - m.def("Add", &Add, py::arg("nb_inputs"), py::arg("name") = ""); + m.def("Add", &Add, py::arg("name") = ""); } void init_Add(py::module &m) { diff --git a/python_binding/operator/pybind_Atan.cpp b/python_binding/operator/pybind_Atan.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e9e277fcc336b8d4436aaf8dc1a834e666e400ae --- /dev/null +++ b/python_binding/operator/pybind_Atan.cpp @@ -0,0 +1,31 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <pybind11/pybind11.h> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Atan.hpp" +#include "aidge/operator/OperatorTensor.hpp" + +namespace py = pybind11; +namespace Aidge { + +void init_Atan(py::module& m) { + py::class_<Atan_Op, std::shared_ptr<Atan_Op>, OperatorTensor>(m, "AtanOp", py::multiple_inheritance()) + .def(py::init<>()) + .def_static("get_inputs_name", &Atan_Op::getInputsName) + .def_static("get_outputs_name", &Atan_Op::getOutputsName); + + declare_registrable<Atan_Op>(m, "AtanOp"); + + m.def("Atan", &Atan, py::arg("name") = ""); +} +} // namespace Aidge diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp index b98a642111402050fd3cba6dd8a12b11a3bbde8a..2d137c4de0c62ff1f8391b03cb07bc9230b7c9eb 100644 --- a/python_binding/operator/pybind_AvgPooling.cpp +++ b/python_binding/operator/pybind_AvgPooling.cpp @@ -28,7 +28,7 @@ namespace Aidge { template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) { - const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D"); + const std::string pyClassName("AvgPooling" + std::to_string(DIM) + "DOp"); const std::string pyStaticAttrClassName("StaticAttributes" + pyClassName); // py::class_<StaticAttributes<AvgPoolingAttr, // std::array<DimSize_t, DIM>, diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp index 9a1bdacd169beebc843448d23bdaf8502de437b4..43b44eb7300072e501d33829b88537850beef37a 100644 --- a/python_binding/operator/pybind_BatchNorm.cpp +++ b/python_binding/operator/pybind_BatchNorm.cpp @@ -23,7 +23,7 @@ namespace Aidge { template <DimSize_t DIM> void declare_BatchNormOp(py::module& m) { - const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D"); + const std::string pyClassName("BatchNorm" + std::to_string(DIM) + "DOp"); py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor>( m, 
pyClassName.c_str(), py::multiple_inheritance()) .def(py::init<float, float>(), diff --git a/python_binding/operator/pybind_Clip.cpp b/python_binding/operator/pybind_Clip.cpp new file mode 100644 index 0000000000000000000000000000000000000000..27c47811a3c192f1f657f339fe4caf047a944224 --- /dev/null +++ b/python_binding/operator/pybind_Clip.cpp @@ -0,0 +1,48 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <pybind11/pybind11.h> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Clip.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/utils/Types.h" + +namespace py = pybind11; +namespace Aidge { + +void init_Clip(py::module& m) { + py::class_<Clip_Op, std::shared_ptr<Clip_Op>, OperatorTensor>(m, "ClipOp", py::multiple_inheritance()) + .def(py::init<float, float>(), py::arg("min") = std::numeric_limits<float>::lowest(), py::arg("max") = std::numeric_limits<float>::max()) + .def_static("get_inputs_name", &Clip_Op::getInputsName) + .def_static("get_outputs_name", &Clip_Op::getOutputsName) + .def("min",&Clip_Op::min,py::return_value_policy::reference_internal) + .def("max",&Clip_Op::max,py::return_value_policy::reference_internal); + + + declare_registrable<Clip_Op>(m, "ClipOp"); + + m.def("Clip", &Clip,py::arg("name") = "", + py::arg("min")= std::numeric_limits<float>::lowest(), + py::arg("max")= std::numeric_limits<float>::max(), + R"mydelimiter(ClipOp is a tensor operator that performs a clipping operation on tensor elements. + This class allows limiting tensor values to a specified range, defined by the min + and max parameters. Values outside this range are replaced by the corresponding + limit values. When 'min' is greater than 'max', the clip operator sets all the 'input' values to the value of 'max' + :param min: minimum clipping value. + :type min: float + :param max: maximum clipping value. + :type max: float + :param name: name of the node. 
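+    :type name: str
+
+    For example, with min = 0.0 and max = 6.0 the input values [-1.0, 2.5, 7.0]
+    are clipped to [0.0, 2.5, 6.0].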
+ )mydelimiter"); +} +} // namespace Aidge diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp index bc72825b2161d8733334817e095c251c788e7eba..3bd54da74f90a38b0255e021234786cd21e6c8a3 100644 --- a/python_binding/operator/pybind_Conv.cpp +++ b/python_binding/operator/pybind_Conv.cpp @@ -26,7 +26,7 @@ namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_ConvOp(py::module &m) { - const std::string pyClassName("ConvOp" + std::to_string(DIM) + "D"); + const std::string pyClassName("Conv" + std::to_string(DIM) + "DOp"); py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor>( m, pyClassName.c_str(), py::multiple_inheritance()) diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp index 377d0fca5d78dff20b8df0cc0d5521eb9a3685a2..b69fee02a50b95e4abfda895580f3192e7876766 100644 --- a/python_binding/operator/pybind_ConvDepthWise.cpp +++ b/python_binding/operator/pybind_ConvDepthWise.cpp @@ -27,7 +27,7 @@ namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) { - const std::string pyClassName("ConvDepthWiseOp" + std::to_string(DIM) + "D"); + const std::string pyClassName("ConvDepthWise" + std::to_string(DIM) + "DOp"); py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor>( m, pyClassName.c_str(), py::multiple_inheritance()) diff --git a/python_binding/operator/pybind_DepthToSpace.cpp b/python_binding/operator/pybind_DepthToSpace.cpp new file mode 100644 index 0000000000000000000000000000000000000000..efb8a7406774a5b071e8ebc3bda69d6ec773b50a --- /dev/null +++ b/python_binding/operator/pybind_DepthToSpace.cpp @@ -0,0 +1,59 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <pybind11/pybind11.h> + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/DepthToSpace.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Attributes.hpp" +#include "aidge/utils/Types.h" + +static typename Aidge::DepthToSpace_Op::Mode stringToMode(const std::string& mode) { +static std::unordered_map<std::string, typename Aidge::DepthToSpace_Op::Mode> map = { + {"DCR", Aidge::DepthToSpace_Op::Mode::DCR}, + {"CRD", Aidge::DepthToSpace_Op::Mode::CRD} +}; +return map[mode]; +} + +namespace py = pybind11; +namespace Aidge { + +void declare_DepthToSpace(py::module &m) { + + py::class_<DepthToSpace_Op, std::shared_ptr<DepthToSpace_Op>, OperatorTensor> (m, "DepthToSpaceOp", py::multiple_inheritance()) + .def(py::init([](const std::uint32_t blockSize, const std::string& mode) { + return new DepthToSpace_Op(blockSize, stringToMode(mode)); + }), py::arg("block_size"), py::arg("mode") = "CRD") + .def_static("get_inputs_name", &DepthToSpace_Op::getInputsName) + .def_static("get_outputs_name", &DepthToSpace_Op::getOutputsName) + .def_readonly_static("Type", &DepthToSpace_Op::Type) + .def("__repr__", [](DepthToSpace_Op& b) { + return fmt::format("Operator(type='{}')", b.Type); + }); + + declare_registrable<DepthToSpace_Op>(m, "DepthToSpaceOp"); + + m.def("DepthToSpace", []( + const std::uint32_t blockSize, + const std::string& mode, + const std::string& name) { + return DepthToSpace(blockSize, stringToMode(mode), name); + }, py::arg("block_size"), py::arg("mode") = "CRD", py::arg("name") = ""); +} + +void init_DepthToSpace(py::module &m) { + declare_DepthToSpace(m); +} + +} // namespace Aidge diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp index 6d6c03b82ad4f905c41bb0cf849fc4e05fda4cb2..69454c2abfa56a16dbc3f3638d68e98ce93d2538 100644 --- a/python_binding/operator/pybind_GridSample.cpp +++ b/python_binding/operator/pybind_GridSample.cpp @@ -55,7 +55,7 @@ void declare_GridSampleOp(py::module &m) { return new GridSample_Op(stringToInterpolationMode(mode), stringToPaddingMode(padding_mode), align_corners); }), py::arg("mode") = "linear", py::arg("padding_mode") = "zeros", - py::arg("alogn_corners") = false) + py::arg("align_corners") = false) .def_static("get_inputs_name", &GridSample_Op::getInputsName) .def_static("get_outputs_name", &GridSample_Op::getOutputsName) .def_readonly_static("Type", &GridSample_Op::Type) diff --git a/python_binding/operator/pybind_Heaviside.cpp b/python_binding/operator/pybind_Heaviside.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cbc2502aac018927c544a57f343a6305ee2bd86f --- /dev/null +++ b/python_binding/operator/pybind_Heaviside.cpp @@ -0,0 +1,56 @@ +/******************************************************************************** + * Copyright (c) 2024 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <pybind11/pybind11.h> + +#include "aidge/operator/Heaviside.hpp" +#include "aidge/operator/OperatorTensor.hpp" + +namespace py = pybind11; + +namespace Aidge { + +void init_Heaviside(py::module &m) { + py::class_<Heaviside_Op, std::shared_ptr<Heaviside_Op>, OperatorTensor>( + m, + "HeavisideOp", + py::multiple_inheritance(), + R"mydelimiter( + Initialize an Heaviside node. This node will compute a heaviside step function + on each element of the input tensor. + heaviside(input, values) = { 0 if input < 0 + { values if input == 0 + { 1 if input > 0 + + :param value : The value use for the output tensor when input is 0. + :type value : float + :param name : Name of the node. + )mydelimiter") + .def(py::init<float>(), py::arg("value")) + .def_static("get_inputs_name", &Heaviside_Op::getInputsName) + .def_static("get_outputs_name", &Heaviside_Op::getOutputsName) + .def_readonly_static("Type", &Heaviside_Op::Type); + + declare_registrable<Heaviside_Op>(m, "HeavisideOp"); + m.def("Heaviside", &Heaviside, py::arg("value"), py::arg("name") = "", + R"mydelimiter( + Initialize an Heaviside node. This node will compute a heaviside step function + on each element of the input tensor. + heaviside(input, values) = { 0 if input < 0 + { values if input == 0 + { 1 if input > 0 + + :param value : The value use for the output tensor when input is 0. + :type value : float + :param name : Name of the node. + )mydelimiter"); +} +} // namespace Aidge diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp index b59a4c5574ce5e56af13f9aea13e7514c9402c22..00f6d26bd3ae49b26d72236b71372c1f557ddbdd 100644 --- a/python_binding/operator/pybind_MaxPooling.cpp +++ b/python_binding/operator/pybind_MaxPooling.cpp @@ -26,9 +26,9 @@ namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) { - const std::string pyClassName("MaxPoolingOp" + std::to_string(DIM) + "D"); + const std::string pyClassName("MaxPooling" + std::to_string(DIM) + "DOp"); py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor>( - m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(), + m, pyClassName.c_str(), py::multiple_inheritance()) .def(py::init<const std::array<DimSize_t, DIM> &, const std::array<DimSize_t, DIM> &, diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp index d021a79c5ff4e337bebf424465458ddabf056a56..5f173068af0f1140830d458979ec924c38ade078 100644 --- a/python_binding/operator/pybind_MetaOperatorDefs.cpp +++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp @@ -47,7 +47,7 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) { py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0), py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1), py::arg("no_bias")= false); - m.def(("PaddedConvOp" + std::to_string(DIM) + "D").c_str(), []( + m.def(("PaddedConv" + std::to_string(DIM) + "DOp").c_str(), []( const std::vector<DimSize_t>& kernel_dims, const std::vector<DimSize_t> &stride_dims, const std::vector<DimSize_t> &padding_dims, @@ -87,7 +87,7 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) { py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0), py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1), py::arg("no_bias") = false); - 
m.def(("PaddedConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(), []( + m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "DOp").c_str(), []( const std::vector<DimSize_t>& kernel_dims, const std::vector<DimSize_t> &stride_dims, const std::vector<DimSize_t> &padding_dims, @@ -121,7 +121,7 @@ template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) { py::arg("name") = "", py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1), py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0)); - m.def(("PaddedAvgPoolingOp" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, + m.def(("PaddedAvgPooling" + std::to_string(DIM) + "DOp").c_str(), [](const std::vector<DimSize_t>& kernel_dims, const std::vector<DimSize_t> &stride_dims, const std::vector<DimSize_t> &padding_dims) { @@ -152,7 +152,7 @@ template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) { py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1), py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0), py::arg("ceil_mode") = false); - m.def(("PaddedMaxPoolingOp" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, + m.def(("PaddedMaxPooling" + std::to_string(DIM) + "DOp").c_str(), [](const std::vector<DimSize_t>& kernel_dims, const std::vector<DimSize_t> &stride_dims, const std::vector<DimSize_t> &padding_dims, bool ceil_mode) @@ -194,15 +194,20 @@ void init_MetaOperatorDefs(py::module &m) { // declare_PaddedMaxPoolingOp<3>(m); declare_LSTMOp(m); - py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperator_Op", py::multiple_inheritance()) - .def(py::init<const char *, const std::shared_ptr<GraphView>&>(), + py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperatorOp", py::multiple_inheritance()) + .def(py::init<const char *, const std::shared_ptr<GraphView>&, const std::vector<InputCategory>&>(), py::arg("type"), - py::arg("graph")) - .def("get_micro_graph", &MetaOperator_Op::getMicroGraph); + py::arg("graph"), + py::arg("forced_inputs_category") = std::vector<InputCategory>()) + .def("get_micro_graph", &MetaOperator_Op::getMicroGraph) + .def("set_upper_node", &MetaOperator_Op::setUpperNode); + + declare_registrable<MetaOperator_Op>(m, "MetaOperatorOp"); m.def("meta_operator", &MetaOperator, py::arg("type"), py::arg("graph"), + py::arg("forced_inputs_category") = std::vector<InputCategory>(), py::arg("name") = "" ); diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp index 6ffbdd007b9f929ccac18de12f2319dcd68b1eda..e22f88687eff6856ce57fab6621781ffc86873b4 100644 --- a/python_binding/operator/pybind_Operator.cpp +++ b/python_binding/operator/pybind_Operator.cpp @@ -60,8 +60,6 @@ void init_Operator(py::module& m){ .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>()) .def("type", &Operator::type) .def("get_impl", &Operator::getImpl) - .def("get_hook", &Operator::getHook) - .def("add_hook", &Operator::addHook) .def_property_readonly("attr", &Operator::attributes) .def("set_back_edges", &Operator::setBackEdges, py::arg("input_indexes")) .def("is_back_edge", &Operator::isBackEdge, py::arg("input_index")) diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp index 04882b7f5b86c7c09ed8b8e5a15c4bfabd03bb55..7dc4a4bee1a009b3ca033ea29861768c1a6fc19d 100644 --- a/python_binding/operator/pybind_Pad.cpp +++ b/python_binding/operator/pybind_Pad.cpp @@ -25,12 +25,12 
@@ namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_PadOp(py::module &m) { - const std::string pyClassName("PadOp" + std::to_string(DIM) + "D"); + const std::string pyClassName("Pad" + std::to_string(DIM) + "DOp"); py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, OperatorTensor>( m, pyClassName.c_str(), py::multiple_inheritance()) .def(py::init<const std::array<DimSize_t, 2*DIM> &, - const PadBorderType &, + PadBorderType, double>(), py::arg("beginEndTuples"), py::arg("borderType") = PadBorderType::Constant, @@ -42,7 +42,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) { declare_registrable<Pad_Op<DIM>>(m, pyClassName); m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples, const std::string& name, - const PadBorderType &borderType = PadBorderType::Constant, + PadBorderType borderType = PadBorderType::Constant, double borderValue = 0.0) { AIDGE_ASSERT(beginEndTuples.size() == 2*DIM, "begin_end_tuples size [{}] does not match DIM [{}]", beginEndTuples.size(), 2*DIM); return Pad<DIM>(to_array<2*DIM>(beginEndTuples.begin()), name, borderType, borderValue); diff --git a/python_binding/operator/pybind_Round.cpp b/python_binding/operator/pybind_Round.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e9ed0e473eaa820537590633a89ca47382d36672 --- /dev/null +++ b/python_binding/operator/pybind_Round.cpp @@ -0,0 +1,36 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <pybind11/pybind11.h> + +#include "aidge/operator/Round.hpp" +#include "aidge/operator/OperatorTensor.hpp" + +namespace py = pybind11; +namespace Aidge { + +void init_Round(py::module& m) { + py::class_<Round_Op, std::shared_ptr<Round_Op>, OperatorTensor>(m, "RoundOp", py::multiple_inheritance()) + .def(py::init<>()) + .def_static("get_inputs_name", &Round_Op::getInputsName) + .def_static("get_outputs_name", &Round_Op::getOutputsName) + .def_readonly_static("Type", &Round_Op::Type); + declare_registrable<Round_Op>(m, "RoundOp"); + m.def("Round", &Round, py::arg("name") = "", R"mydelimiter( + RoundOp is a tensor operator that rounds the values of a tensor element-wise. + This class rounds each value to the nearest integer. In the case of halves, + the rule is to round them to the nearest even integer. + :param X: input tensor. + :type X: tensor of type float, double, float16, or bfloat16. + :param Y: output tensor with the same shape and type as the input tensor. 
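+    For example, the input values [0.5, 1.5, 2.4, -2.5] are rounded to
+    [0.0, 2.0, 2.0, -2.0]: exact halves go to the nearest even integer.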
+ )mydelimiter"); +} +} // namespace Aidge diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp index db7fc7bfb60ff8360933e5f84ab54d4cec8df724..09e0e2fa2c1c46bafcd253267d5c33187de6bd69 100644 --- a/python_binding/operator/pybind_Sigmoid.cpp +++ b/python_binding/operator/pybind_Sigmoid.cpp @@ -25,6 +25,8 @@ void init_Sigmoid(py::module& m) { .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName) .def_readonly_static("Type", &Sigmoid_Op::Type); + declare_registrable<Sigmoid_Op>(m, "SigmoidOp"); + m.def("Sigmoid", &Sigmoid, py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp index 930dd95f3c3e4b10d2b4f8b496dfbbbcc6822050..20794a15585529e10a83d2bf6fa7c18edfbde3fa 100644 --- a/python_binding/operator/pybind_Transpose.cpp +++ b/python_binding/operator/pybind_Transpose.cpp @@ -28,13 +28,26 @@ namespace Aidge { void declare_Transpose(py::module &m) { const std::string pyClassName("TransposeOp"); py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, OperatorTensor>( - m, "TransposeOp", py::multiple_inheritance()) - .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order")) + m, "TransposeOp", py::multiple_inheritance(), + R"mydelimiter( + Initialize transpose operator + :param output_dims_order : axes permutation order, must be of rank = r and values between [0;r-1] + with r = input_tensor.nbDims() + :type output_dims_order : :py:class: List[Int] + )mydelimiter") + .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order")=std::vector<std::size_t>()) .def_static("get_inputs_name", &Transpose_Op::getInputsName) .def_static("get_outputs_name", &Transpose_Op::getOutputsName) .def_readonly_static("Type", &Transpose_Op::Type); declare_registrable<Transpose_Op>(m, pyClassName); - m.def("Transpose", &Transpose, py::arg("output_dims_order"), py::arg("name") = ""); + m.def("Transpose", &Transpose, py::arg("output_dims_order")=std::vector<std::size_t>(), py::arg("name") = "", + R"mydelimiter( + Initialize a node containing a transpose operator. + :param output_dims_order : axes permutation order, must be of rank = r and values between [0;r-1] + with r = input_tensor.nbDims() + :type output_dims_order : :py:class: List[Int] + :param name : name of the node. 
+)mydelimiter"); } void init_Transpose(py::module &m) { diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp index 52c8cc8a0199ac64b0f7bae97442178614ea5622..02f4b732c39ef5cbc1755eb314d32c25c96d01fd 100644 --- a/python_binding/pybind_core.cpp +++ b/python_binding/pybind_core.cpp @@ -31,13 +31,16 @@ void init_OperatorTensor(py::module&); void init_Add(py::module&); void init_And(py::module&); void init_ArgMax(py::module&); +void init_Atan(py::module&); void init_AvgPooling(py::module&); void init_BatchNorm(py::module&); void init_BitShift(py::module&); +void init_Clip(py::module&); void init_Concat(py::module&); void init_ConstantOfShape(py::module&); void init_Conv(py::module&); void init_ConvDepthWise(py::module&); +void init_DepthToSpace(py::module&); void init_Div(py::module&); void init_Erf(py::module&); void init_FC(py::module&); @@ -45,6 +48,7 @@ void init_Gather(py::module&); void init_GenericOperator(py::module&); void init_GlobalAveragePooling(py::module&); void init_GridSample(py::module&); +void init_Heaviside(py::module&); void init_Identity(py::module&); void init_LeakyReLU(py::module&); void init_MatMul(py::module&); @@ -61,6 +65,7 @@ void init_ReduceMean(py::module&); void init_ReduceSum(py::module&); void init_Reshape(py::module&); void init_Resize(py::module&); +void init_Round(py::module&); void init_Scaling(py::module&); void init_Shape(py::module&); void init_Sigmoid(py::module&); @@ -78,6 +83,7 @@ void init_Node(py::module&); void init_GraphView(py::module&); void init_OpArgs(py::module&); void init_Connector(py::module&); +void init_SinglePassGraphMatching(py::module&); void init_GraphRegex(py::module&); void init_MatchSolution(py::module&); @@ -105,6 +111,7 @@ void init_Aidge(py::module& m) { init_GraphView(m); init_OpArgs(m); init_Connector(m); + init_SinglePassGraphMatching(m); init_OperatorImpl(m); init_Log(m); @@ -114,13 +121,16 @@ void init_Aidge(py::module& m) { init_Add(m); init_And(m); init_ArgMax(m); + init_Atan(m); init_AvgPooling(m); init_BatchNorm(m); init_BitShift(m); + init_Clip(m); init_Concat(m); init_Conv(m); init_ConvDepthWise(m); init_ConstantOfShape(m); + init_DepthToSpace(m); init_Div(m); init_Erf(m); init_FC(m); @@ -128,6 +138,7 @@ void init_Aidge(py::module& m) { init_GenericOperator(m); init_GlobalAveragePooling(m); init_GridSample(m); + init_Heaviside(m); init_Identity(m); init_LeakyReLU(m); init_MatMul(m); @@ -143,6 +154,7 @@ void init_Aidge(py::module& m) { init_ReduceSum(m); init_Reshape(m); init_Resize(m); + init_Round(m); init_Scaling(m); init_Shape(m); init_Sigmoid(m); diff --git a/python_binding/utils/pybind_Attributes.cpp b/python_binding/utils/pybind_Attributes.cpp index bc0ccb3f4053e37c186acd919fcadae9d5d19a40..691691a86861b7a5ac731a842d8902d3520d4a23 100644 --- a/python_binding/utils/pybind_Attributes.cpp +++ b/python_binding/utils/pybind_Attributes.cpp @@ -30,13 +30,13 @@ DynamicAttributes test_DynamicAttributes_binding() { return attrs; } -double test_DynamicAttributes_binding_check(DynamicAttributes& attrs) { +double test_DynamicAttributes_binding_check(const DynamicAttributes& attrs) { return attrs.getAttr<double>("d"); } void init_Attributes(py::module& m){ py::class_<Attributes, std::shared_ptr<Attributes>>(m, "Attributes") - .def("has_attr", &Attributes::hasAttrPy, py::arg("name")) + .def("has_attr", &Attributes::hasAttr, py::arg("name")) .def("get_attr", &Attributes::getAttrPy, py::arg("name")) .def("__getattr__", &Attributes::getAttrPy, py::arg("name")) .def("set_attr", &Attributes::setAttrPy, 
py::arg("name"), py::arg("value")) diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp index 0fa2cfdadb3af350a5668444c0a330e023818a41..dd5c5c110154427a8af7afbf70b2c76b61e507a8 100644 --- a/src/backend/OperatorImpl.cpp +++ b/src/backend/OperatorImpl.cpp @@ -81,6 +81,13 @@ Aidge::ImplSpec Aidge::OperatorImpl::getRequiredSpec() const { else { requiredSpec.attrs.setAttr("type", mOp.type()); } + + const auto& inhAttrs = mOp.inheritedAttributes(); + if (inhAttrs) { + if (inhAttrs->hasAttr("impl")) { + requiredSpec.attrs.setAttr("impl", inhAttrs->getAny("impl")); + } + } return requiredSpec; } @@ -120,15 +127,15 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs) std::string qualifier; const auto qualifierPos = std::find_if(attrName.begin(), attrName.end(), [](char c) { return c == ':'; }); - if (qualifierPos != attrName.begin()) { + if (qualifierPos != attrName.end()) { name = attrName.substr(0, qualifierPos - attrName.begin()); - qualifier = attrName.substr(qualifierPos - attrName.begin()); + qualifier = attrName.substr(qualifierPos - attrName.begin() + 1); } - const bool mandatory = (qualifier == "!"); if (mandatory) { // Required attribute: if (!spec.attrs.hasAttr(name)) { + Log::debug("Could not find mandatory attribute {} value {}.", name); // Missing attribute match = false; break; @@ -136,6 +143,7 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs) else if (requiredSpecs.attrs.getAny(attrName) < spec.attrs.getAny(name) || spec.attrs.getAny(name) < requiredSpecs.attrs.getAny(attrName)) { + Log::debug("Attribute ({}) value mismatch {} != {}.", name, requiredSpecs.attrs.getAttr<std::string>(attrName), spec.attrs.getAttr<std::string>(name)); // Attribute value mismatch match = false; break; @@ -161,6 +169,10 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs) Log::debug(" {}:{} - {}", (match) ? 
"MATCH" : "MISMATCH", priority, spec); } + if(matchingSpecs.empty()){ + Log::debug(" No spec to match registered, returning requiredSpecs."); + return requiredSpecs; + } // Return best match const auto bestMatch = std::max_element(matchingSpecs.begin(), matchingSpecs.end()); if (*bestMatch >= 0) { @@ -367,6 +379,6 @@ std::shared_ptr<Aidge::ProdConso> Aidge::OperatorImpl::getProdConso() const { return std::make_shared<ProdConso>(mOp); } -std::set<Aidge::ImplSpec> Aidge::OperatorImpl::getAvailableImplSpecs() const { - return std::set<ImplSpec>(); +std::vector<Aidge::ImplSpec> Aidge::OperatorImpl::getAvailableImplSpecs() const { + return std::vector<ImplSpec>(); } diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp index abfc91c6cdf9fd4f6eb46100074b22083514d82e..6f60d2f15ce0e561c32d7bc5a7561c2f8d507588 100644 --- a/src/data/Tensor.cpp +++ b/src/data/Tensor.cpp @@ -35,7 +35,7 @@ Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const { AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type"); AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format"); - auto add_ = Add_Op(2); + auto add_ = Add_Op(); add_.associateInput(0, std::make_shared<Tensor>(*this)); add_.associateInput(1, std::make_shared<Tensor>(other)); add_.setDataType(dataType()); diff --git a/src/filler/ConstantFiller.cpp b/src/filler/ConstantFiller.cpp index 1e992f4a192c2fd629b2c813c902b127f29a2b02..b2118866f92290103d50290085c7675215a4d997 100644 --- a/src/filler/ConstantFiller.cpp +++ b/src/filler/ConstantFiller.cpp @@ -39,6 +39,7 @@ void Aidge::constantFiller(std::shared_ptr<Aidge::Tensor> tensor, T constantValu tensor->copyCastFrom(tensorWithValues); } - +template void Aidge::constantFiller<std::int32_t>(std::shared_ptr<Aidge::Tensor>, std::int32_t); +template void Aidge::constantFiller<std::int64_t>(std::shared_ptr<Aidge::Tensor>, std::int64_t); template void Aidge::constantFiller<float>(std::shared_ptr<Aidge::Tensor>, float); template void Aidge::constantFiller<double>(std::shared_ptr<Aidge::Tensor>, double); diff --git a/src/filler/UniformFiller.cpp b/src/filler/UniformFiller.cpp index a942f59d717fd8d7b541ee28868a7fb9f2e7cd95..1951fcc623612bd688048fcc5fb71526032b2a4a 100644 --- a/src/filler/UniformFiller.cpp +++ b/src/filler/UniformFiller.cpp @@ -8,8 +8,9 @@ * SPDX-License-Identifier: EPL-2.0 * ********************************************************************************/ +#include <cstdint> // std::int32_t #include <memory> -#include <random> // normal_distribution, uniform_real_distribution +#include <random> // normal_distribution, uniform_real_distribution #include "aidge/data/Tensor.hpp" #include "aidge/filler/Filler.hpp" @@ -19,10 +20,16 @@ template <typename T> void Aidge::uniformFiller(std::shared_ptr<Aidge::Tensor> tensor, T min, T max) { AIDGE_ASSERT(tensor->getImpl(), "Tensor got no implementation, cannot fill it."); - AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type"); + AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type {} and {}",NativeType<T>::type, tensor->dataType()); - std::uniform_real_distribution<T> uniformDist(min, max); + using DistType = typename std::conditional< + std::is_integral<T>::value, + std::uniform_int_distribution<T>, + std::uniform_real_distribution<T> + >::type; + + DistType uniformDist(min, max); std::shared_ptr<Aidge::Tensor> cpyTensor; // Create cpy only if tensor 
not on CPU @@ -42,3 +49,7 @@ template void Aidge::uniformFiller<float>(std::shared_ptr<Aidge::Tensor>, float, float); template void Aidge::uniformFiller<double>(std::shared_ptr<Aidge::Tensor>, double, double); +template void Aidge::uniformFiller<std::int32_t>(std::shared_ptr<Aidge::Tensor>, + std::int32_t, std::int32_t); +template void Aidge::uniformFiller<std::int64_t>(std::shared_ptr<Aidge::Tensor>, + std::int64_t, std::int64_t); \ No newline at end of file diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp index ef322fe5b795b9cb9c62c3593abdd330fd471575..b2c03e794888a0909ada5db208fc07ad266d4ae2 100644 --- a/src/graph/GraphView.cpp +++ b/src/graph/GraphView.cpp @@ -103,7 +103,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd std::string givenName = (node_ptr->name().empty()) ? "<em>" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + "</em>" - : "\"" + node_ptr->name() + "\\n<sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\""; + : "\"" + node_ptr->name() + "<br/><sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\""; std::string nodeCls = ""; if (node_ptr->type() == "Producer") { @@ -144,7 +144,9 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd } IOIndex_t outputIdx = 0; for (const auto& childs : node_ptr->getOrderedChildren()) { - for (const auto& child : childs) { + // Keep only unique childs in order to avoid duplicating connections + const auto uniqueChilds = std::set<NodePtr>(childs.begin(), childs.end()); + for (const auto& child : uniqueChilds) { if (child != nullptr) { IOIndex_t inputIdx = 0; for (auto parent : child->inputs()) { @@ -164,7 +166,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd fmt::print(fp.get(), "{}_{}-->|\"{}{}→{}\"|{}:::externalCls\n", node_ptr->type(), namePtrTable.at(node_ptr), outputIdx, dims, inputIdx, static_cast<void*>(child.get())); } - break; + // Do no break here because the same child can be connected to several inputs } ++inputIdx; } @@ -270,7 +272,10 @@ void Aidge::GraphView::setRootNode(NodePtr node) { std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::inputNodes() const { std::set<std::shared_ptr<Aidge::Node>> nodes; for (const auto& node : mInputNodes) { - nodes.insert(node.first); + // Do not include dummy inputs + if (node.first) { + nodes.insert(node.first); + } } return nodes; } @@ -278,7 +283,10 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::inputNodes() const { std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::outputNodes() const { std::set<std::shared_ptr<Aidge::Node>> nodes; for (const auto& node : mOutputNodes) { - nodes.insert(node.first); + // Do not include dummy outputs + if (node.first) { + nodes.insert(node.first); + } } return nodes; } @@ -522,6 +530,9 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_ } if (parentsForwarded && op->forwardDims(allowDataDependency)) { + Log::debug("Dimensions forwarded for node {} (of type {})", + nodePtr->name(), nodePtr->type()); + // Recompute everytime, even if it was already computed in a // previous call of forwardDims(), as the graph may have changed! 
dimsForwarded.insert(nodePtr); @@ -532,7 +543,9 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_ } } else { - Log::debug("Unable to forward dimensions for node {} (of type {}) yet", nodePtr->name(), nodePtr->type()); + if (parentsForwarded) { + Log::debug("Unable to forward dimensions for node {} (of type {})", nodePtr->name(), nodePtr->type()); + } nextList.insert(nodePtr); } } diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp index b2ceb903d51dbb880979cd2191825a6310f9e5ff..da6d833f3aa933cd5e707814c279142de5bc4a23 100644 --- a/src/graph/Node.cpp +++ b/src/graph/Node.cpp @@ -18,9 +18,10 @@ #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" #include "aidge/utils/Types.h" +#include "aidge/utils/future_std/any.hpp" -Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name) - : mName(name), +Aidge::Node::Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs) + : mAttrs(attrs), mOperator(op), mParents(std::vector<std::shared_ptr<Node>>(static_cast<std::size_t>(op->nbInputs()), nullptr)), @@ -31,11 +32,18 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name) mIdOutParents( std::vector<IOIndex_t>(static_cast<std::size_t>(op->nbInputs()), gk_IODefaultIndex)) { - // ctor - if (op) { - mForward.push_back([this](){ this->mOperator->forward(); return true; }); - mBackward.push_back([this](){ this->mOperator->backward(); return true; }); - } + mForward.push_back([this](){ this->mOperator->forward(); return true; }); + // mForward.push_back(std::bind(&Operator::forward, mOperator.get())); + mBackward.push_back([this](){ this->mOperator->backward(); return true; }); +} + +// Aidge::Node::Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs) +// : Node(op, std::make_shared<DynamicAttributes>(attrs)) {} + +Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name) + : Node(op, std::make_shared<DynamicAttributes>(std::map<std::string, future_std::any>({std::make_pair("name", future_std::any(name))}))) +{ + //ctor } /////////////////////////////////////////////////////// @@ -70,7 +78,7 @@ Aidge::Connector Aidge::Node::operator()(const std::vector<Connector>& ctors) { void Aidge::Node::setName(const std::string& name) { for (auto graphView : views()) graphView->updateNodeName(shared_from_this(), name); - mName = name; + mAttrs->setAttr<std::string>("name", name); } std::string Aidge::Node::createUniqueName(std::string baseName) @@ -399,18 +407,18 @@ void Aidge::Node::resetConnections(bool includeLearnableParam) { /////////////////////////////////////////////////////// Aidge::NodePtr Aidge::Node::cloneSharedOperators() const { - return std::make_shared<Node>(mOperator, mName); + return std::make_shared<Node>(mOperator, mAttrs); } Aidge::NodePtr Aidge::Node::cloneSharedProducers() const { std::shared_ptr<Operator> op = (mOperator->type() == Producer_Op::Type) ? 
mOperator : mOperator->clone(); - return std::make_shared<Node>(op, mName); + return std::make_shared<Node>(op, mAttrs); } Aidge::NodePtr Aidge::Node::clone() const { - return std::make_shared<Node>(mOperator->clone(), mName); + return std::make_shared<Node>(mOperator->clone(), mAttrs); } std::set<Aidge::NodePtr> Aidge::Node::getNodeDelta(int delta, std::set<Aidge::NodePtr> nodeSee) { diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp index 033c476c8a9e865fdf9d5670e295c3e4fb6101b3..f6fd0cd9fc647e29402d36f1f6838642e099ae6c 100644 --- a/src/operator/Add.cpp +++ b/src/operator/Add.cpp @@ -22,12 +22,10 @@ const std::string Aidge::Add_Op::Type = "Add"; -Aidge::Add_Op::Add_Op(const IOIndex_t nbIn) - : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1) +Aidge::Add_Op::Add_Op() + : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) { - if (nbIn == 0) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input."); - } + // ctor } Aidge::Add_Op::Add_Op(const Add_Op& op) @@ -89,6 +87,8 @@ std::set<std::string> Aidge::Add_Op::getAvailableBackends() const { return Registrar<Add_Op>::getKeys(); } -std::shared_ptr<Aidge::Node> Aidge::Add(const IOIndex_t nbIn, const std::string& name) { - return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name); +//////////////////////////////////////////////////////////////////////////////// + +std::shared_ptr<Aidge::Node> Aidge::Add(const std::string& name) { + return std::make_shared<Node>(std::make_shared<Add_Op>(), name); } \ No newline at end of file diff --git a/src/operator/Atan.cpp b/src/operator/Atan.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c0a494ee66fb11bcccac21141da30df5546f0b3c --- /dev/null +++ b/src/operator/Atan.cpp @@ -0,0 +1,53 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include "aidge/operator/Atan.hpp" + +#include <memory> +#include <string> + +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Atan_Op::Type = "Atan"; + +Aidge::Atan_Op::Atan_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {} + +Aidge::Atan_Op::Atan_Op(const Aidge::Atan_Op& op) + : OperatorTensor(op) +{ + if (op.mImpl){ + SET_IMPL_MACRO(Atan_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } +} + +std::shared_ptr<Aidge::Operator> Aidge::Atan_Op::clone() const { + return std::make_shared<Atan_Op>(*this); +} + + +void Aidge::Atan_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Atan_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} + +std::set<std::string> Aidge::Atan_Op::getAvailableBackends() const { + return Registrar<Atan_Op>::getKeys(); +} + +/////////////////////////////////////////////////// + +std::shared_ptr<Aidge::Node> Aidge::Atan(const std::string& name) { + return std::make_shared<Node>(std::make_shared<Atan_Op>(), name); +} diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp index f8c8e5e3f32fff8306184dfdf3baa87392479ebf..78266e3fb391d6f33da9e65b2125dd57885ac89e 100644 --- a/src/operator/AvgPooling.cpp +++ b/src/operator/AvgPooling.cpp @@ -24,7 +24,7 @@ #include "aidge/utils/Types.h" template <Aidge::DimIdx_t DIM> -const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling"; +const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling" + std::to_string(DIM) + "D"; template <Aidge::DimIdx_t DIM> @@ -134,4 +134,4 @@ std::shared_ptr<Aidge::Node> Aidge::AvgPooling(const std::array<Aidge::DimSize_t } template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&); template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&); -template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&); \ No newline at end of file +template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&); diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp index bcf3b29c45abe2c40788fd1ec0bad87db8ee227b..b18be528795ccf470d7503ef1a915b6b66dc255c 100644 --- a/src/operator/BatchNorm.cpp +++ b/src/operator/BatchNorm.cpp @@ -24,7 +24,7 @@ #include "aidge/utils/Types.h" template <Aidge::DimIdx_t DIM> -const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm"; +const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm" + std::to_string(DIM) + "D"; template <Aidge::DimIdx_t DIM> Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op) @@ -120,4 +120,4 @@ inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const Aidge::DimSize_t nbFe template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&); template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&); -template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const 
std::string&); \ No newline at end of file +template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&); diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp new file mode 100644 index 0000000000000000000000000000000000000000..10b864b54594c86ed1486611fdd91fd916f2291b --- /dev/null +++ b/src/operator/Clip.cpp @@ -0,0 +1,93 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include "aidge/operator/Clip.hpp" + +#include <memory> +#include <string> +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include "aidge/operator/Clip.hpp" + +const std::string Aidge::Clip_Op::Type = "Clip"; + +bool Aidge::Clip_Op::dimsForwarded() const { + if ((getInput(1) && !getInput(1)->undefined()) + || (getInput(2) && !getInput(2)->undefined())) + { + // output dims are data dependent + return false; + } + + return OperatorTensor::dimsForwarded(); +} + + +bool Aidge::Clip_Op::forwardDims(bool allowDataDependency) +{ + if (getInput(1) ) + { + if( this->min() != std::numeric_limits<float>::lowest()) + { + Log::notice("{} : ignoring non-empty min attribute because input#1 " + "take precedence", + type()); + } + if (!allowDataDependency) { + Log::warn("{} : unable to forwardDims() because output dims are data " + "dependent on input#1", + type()); + return false; + } + std::shared_ptr<Tensor> fallback; + const auto& minV = mInputs[1]->refCastFrom(fallback, NativeType<float>::type, "cpu"); + this->min() = *(static_cast<float*>(minV.getImpl()->hostPtr())); + } + if (getInput(2)) + { + if( this->max() != std::numeric_limits<float>::max()) + { + Log::notice("{} : ignoring non-empty max attribute because input#2 " + "take precedence", + type()); + } + if (!allowDataDependency) { + Log::warn("{} : unable to forwardDims() because output dims are data " + "dependent on input#2", + type()); + return false; + } + std::shared_ptr<Tensor> fallback; + const auto& maxV = mInputs[2]->refCastFrom(fallback, NativeType<float>::type, "cpu"); + this->max() = *(static_cast<float*>(maxV.getImpl()->hostPtr())); + } + if (!inputsAssociated(false)) { + return false; + } + else if ((getInput(1) && !getInput(1)->empty()) || (getInput(2) && !getInput(2)->empty())) + { + AIDGE_THROW_OR_ABORT(std::runtime_error,"Expected Input#1 and Input#2 to be scalar (Tensors of empty shapes)"); + } + mOutputs[0] -> resize(getInput(0)->dims()); + return true; +} +void Aidge::Clip_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Clip_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} +std::set<std::string> Aidge::Clip_Op::getAvailableBackends() const { + return Registrar<Clip_Op>::getKeys(); +} +std::shared_ptr<Aidge::Node> Aidge::Clip(const std::string &name,float min,float max) +{ + return std::make_shared<Node>(std::make_shared<Clip_Op>(min, max), name); +} \ No newline at end of file diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp index e055c7e5ebb9a6cff9f774da444cc582ed7de34c..836c47645c20ff23539b836af8593cddfbb48498 100644 --- a/src/operator/Conv.cpp +++ b/src/operator/Conv.cpp @@ 
-24,7 +24,7 @@ #include "aidge/utils/Types.h" template <Aidge::DimIdx_t DIM> -const std::string Aidge::Conv_Op<DIM>::Type = "Conv"; +const std::string Aidge::Conv_Op<DIM>::Type = "Conv" + std::to_string(DIM) + "D"; template <Aidge::DimIdx_t DIM> Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op) diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp index f4d524356bd207a7ed101c2887c2fcda53f3bb83..d2a1c9e3d08d8e2c0400d436c6123aeb5f7ce66b 100644 --- a/src/operator/ConvDepthWise.cpp +++ b/src/operator/ConvDepthWise.cpp @@ -25,7 +25,7 @@ #include "aidge/utils/Types.h" template <Aidge::DimIdx_t DIM> -const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise"; +const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise" + std::to_string(DIM) + "D"; template <Aidge::DimIdx_t DIM> Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op) diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp index 99ccb7505cd959178e4bd7132e32552ea5a72ecf..1e1db2f94948dfd1dd4c6219419b7989eeac8b3a 100644 --- a/src/operator/Fold.cpp +++ b/src/operator/Fold.cpp @@ -24,7 +24,7 @@ #include "aidge/utils/Types.h" template <Aidge::DimIdx_t DIM> -const std::string Aidge::Fold_Op<DIM>::Type = "Fold"; +const std::string Aidge::Fold_Op<DIM>::Type = "Fold" + std::to_string(DIM) + "D"; template <Aidge::DimIdx_t DIM> Aidge::Fold_Op<DIM>::Fold_Op(const Aidge::Fold_Op<DIM> &op) @@ -102,4 +102,4 @@ std::shared_ptr<Aidge::Node> Aidge::Fold(const std::array<Aidge::DimSize_t, DIM> return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name); } -template std::shared_ptr<Aidge::Node> Aidge::Fold<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&); \ No newline at end of file +template std::shared_ptr<Aidge::Node> Aidge::Fold<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&); diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp index e8c66085de5bc7c808b7f2307a9a82b22a426bb2..0f90a5a58dbd8d69b847022c336075ecc3f3d553 100644 --- a/src/operator/GenericOperator.cpp +++ b/src/operator/GenericOperator.cpp @@ -43,6 +43,7 @@ Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type, Aidge::GenericOperator_Op::GenericOperator_Op(const Aidge::GenericOperator_Op& op) : OperatorTensor(op), + mForwardDims(op.mForwardDims), mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>()) { mImpl = std::make_shared<OperatorImpl>(*this, op.backend()); diff --git a/src/operator/Heaviside.cpp b/src/operator/Heaviside.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9ecb3b436d8312ef479d6bc0592cfe372235fa25 --- /dev/null +++ b/src/operator/Heaviside.cpp @@ -0,0 +1,64 @@ +/******************************************************************************** + * Copyright (c) 2024 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <cstddef> // std::size_t +#include <memory> +#include <stdexcept> // std::runtime_error +#include <string> +#include <vector> + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Heaviside.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Types.h" + +namespace Aidge { +// ----------------------------------------------------------- Heaviside_Op +// class + +const std::string Heaviside_Op::Type = "Heaviside"; + +Heaviside_Op::Heaviside_Op(float value) + : OperatorTensor(Type, {InputCategory::Data}, 1), + mAttributes( + std::make_shared<Attributes_>(attr<HeavisideAttr::Value>(value))) {} + +Heaviside_Op::Heaviside_Op(const Heaviside_Op &op) + : OperatorTensor(op), mAttributes(op.mAttributes) { + if (op.mImpl) { + SET_IMPL_MACRO(Heaviside_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } +} + +std::shared_ptr<Aidge::Operator> Aidge::Heaviside_Op::clone() const { + return std::make_shared<Heaviside_Op>(*this); +} + +void Heaviside_Op::setBackend(const std::string &name, DeviceIdx_t device) { + SET_IMPL_MACRO(Heaviside_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} + +std::set<std::string> Aidge::Heaviside_Op::getAvailableBackends() const { + return Registrar<Heaviside_Op>::getKeys(); +} + +// --------------------------------------------------------------- Free +// functions + +NodePtr Heaviside(float value, const std::string &name) { + return std::make_shared<Node>(std::make_shared<Heaviside_Op>(value), name); +} + +} // namespace Aidge diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp index 5ce137fe6b6c0e4b7150bfc0f1182f6f8ee94850..535b53749caeffca34eb0bf541f06dee30a3a333 100644 --- a/src/operator/MaxPooling.cpp +++ b/src/operator/MaxPooling.cpp @@ -20,7 +20,7 @@ #include "aidge/utils/ErrorHandling.hpp" template <Aidge::DimIdx_t DIM> -const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling"; +const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling" + std::to_string(DIM) + "D"; template <Aidge::DimIdx_t DIM> Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM> &kernel_dims, diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp index e3acba9b4cccdf525d80f85344ba500cc7ac885f..d93d7d320f0f508b20714943ae3c8ed7fc561ec8 100644 --- a/src/operator/MetaOperator.cpp +++ b/src/operator/MetaOperator.cpp @@ -20,17 +20,22 @@ #include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/DynamicAttributes.hpp" -Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph) - : OperatorTensor(type, [graph]() { +Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph, const std::vector<InputCategory>& forcedInputsCategory) + : OperatorTensor(type, [graph, forcedInputsCategory]() { + IOIndex_t inputIdx = 0; std::vector<InputCategory> inputsCategory; for (const auto& in : graph->getOrderedInputs()) { - if (in.first) { + if (inputIdx < forcedInputsCategory.size()) { + inputsCategory.push_back(forcedInputsCategory[inputIdx]); + } + else if (in.first) { inputsCategory.push_back(in.first->getOperator()->inputCategory(in.second)); } else { // Dummy input, default to OptionalData inputsCategory.push_back(InputCategory::OptionalData); } + ++inputIdx; } return inputsCategory; }(), 
graph->getOrderedOutputs().size()), @@ -46,7 +51,7 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar } std::shared_ptr<Aidge::Operator> Aidge::MetaOperator_Op::clone() const { - return std::make_shared<MetaOperator_Op>(*this); + return std::make_shared<MetaOperator_Op>(type(), mGraph->clone()); } void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) { @@ -54,6 +59,7 @@ void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std: AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx); const auto& inputOp = mGraph->getOrderedInputs()[inputIdx]; + AIDGE_ASSERT(inputOp.first, "associateInput(): inputIdx ({}) is a dummy input for this MetaOperator, cannot associate data!", inputIdx); inputOp.first->getOperator()->associateInput(inputOp.second, data); // Associate inputs for custom implementation @@ -64,6 +70,7 @@ void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, const std AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type()); const auto& inputOp = mGraph->getOrderedInputs()[inputIdx]; + AIDGE_ASSERT(inputOp.first, "setInput(): inputIdx ({}) is a dummy input for this MetaOperator, cannot associate data!", inputIdx); inputOp.first->getOperator()->setInput(inputOp.second, data); // Associate inputs for custom implementation @@ -243,10 +250,11 @@ void Aidge::MetaOperator_Op::forward() { std::shared_ptr<Aidge::Node> Aidge::MetaOperator(const char *type, const std::shared_ptr<Aidge::GraphView>& graph, + const std::vector<InputCategory>& forcedInputsCategory, const std::string& name) { - auto op = std::make_shared<MetaOperator_Op>(type, graph); + auto op = std::make_shared<MetaOperator_Op>(type, graph, forcedInputsCategory); auto node = std::make_shared<Node>(op, name); op->setUpperNode(node); return node; -} \ No newline at end of file +} diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp index 910e7c67aad0068679ca2d240b23312add3e42d7..2ed548805010a6cc87950c4d1f7b89edbea4f75c 100644 --- a/src/operator/MetaOperatorDefs/LSTM.cpp +++ b/src/operator/MetaOperatorDefs/LSTM.cpp @@ -35,14 +35,14 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel, auto input = Identity((!name.empty()) ? name + "_input" : ""); auto hiddenState = Memorize(seqLength, (!name.empty()) ? name + "_hidden_state" : ""); auto cellState = Memorize(seqLength, (!name.empty()) ? name + "_cell_state" : ""); - auto add = Add(2, (!name.empty()) ? name + "_add" : ""); + auto add = Add((!name.empty()) ? name + "_add" : ""); // Forget gate auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateX" : ""); input->addChild(forgetGateX, 0, 0); auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateH" : ""); hiddenState->addChild(forgetGateH, 1, 0); - auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : ""); + auto forgetGate = Add((!name.empty()) ? name + "_forgetGate" : ""); forgetGateX->addChild(forgetGate, 0, 0); forgetGateH->addChild(forgetGate, 0, 1); auto forgetGateAct = Sigmoid((!name.empty()) ? name + "_forgetGateAct" : ""); @@ -57,7 +57,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel, input->addChild(inputGateX, 0, 0); auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? 
name + "_inputGateH" : ""); hiddenState->addChild(inputGateH, 1, 0); - auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : ""); + auto inputGate = Add((!name.empty()) ? name + "_inputGate" : ""); inputGateX->addChild(inputGate, 0, 0); inputGateH->addChild(inputGate, 0, 1); auto inputGateAct = Sigmoid((!name.empty()) ? name + "_inputGateAct" : ""); @@ -71,7 +71,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel, input->addChild(cellCandidateX, 0, 0); auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateH" : ""); hiddenState->addChild(cellCandidateH, 1, 0); - auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : ""); + auto cellCandidate = Add((!name.empty()) ? name + "_cellCandidate" : ""); cellCandidateX->addChild(cellCandidate, 0, 0); cellCandidateH->addChild(cellCandidate, 0, 1); auto cellCandidateAct = Tanh((!name.empty()) ? name + "_cellCandidateAct" : ""); @@ -83,7 +83,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel, input->addChild(outputGateX, 0, 0); auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateH" : ""); hiddenState->addChild(outputGateH, 1, 0); - auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : ""); + auto outputGate = Add((!name.empty()) ? name + "_outputGate" : ""); outputGateX->addChild(outputGate, 0, 0); outputGateH->addChild(outputGate, 0, 1); auto outputGateAct = Sigmoid((!name.empty()) ? name + "_outputGateAct" : ""); @@ -115,7 +115,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel, {hiddenState, 1}, {cellState, 1}}); microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}}); - auto metaOp = MetaOperator("LSTM", microGraph, name); + auto metaOp = MetaOperator("LSTM", microGraph, {}, name); addProducer(metaOp, 1, {hiddenChannel, inChannel}, "wi"); addProducer(metaOp, 2, {hiddenChannel, inChannel}, "wo"); addProducer(metaOp, 3, {hiddenChannel, inChannel}, "wf"); @@ -143,14 +143,14 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength) auto input = Identity(""); auto hiddenState = Memorize(seqLength, ""); auto cellState = Memorize(seqLength, ""); - auto add = Add(2, ""); + auto add = Add(""); // Forget gate auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), ""); input->addChild(forgetGateX, 0, 0); auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), ""); hiddenState->addChild(forgetGateH, 1, 0); - auto forgetGate = Add(2, ""); + auto forgetGate = Add(""); forgetGateX->addChild(forgetGate, 0, 0); forgetGateH->addChild(forgetGate, 0, 1); auto forgetGateAct = Sigmoid(""); @@ -165,7 +165,7 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength) input->addChild(inputGateX, 0, 0); auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), ""); hiddenState->addChild(inputGateH, 1, 0); - auto inputGate = Add(2, ""); + auto inputGate = Add(""); inputGateX->addChild(inputGate, 0, 0); inputGateH->addChild(inputGate, 0, 1); auto inputGateAct = Sigmoid(""); @@ -179,7 +179,7 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength) input->addChild(cellCandidateX, 0, 0); auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), ""); hiddenState->addChild(cellCandidateH, 1, 0); - auto cellCandidate = Add(2, ""); + auto cellCandidate = Add(""); cellCandidateX->addChild(cellCandidate, 0, 0); cellCandidateH->addChild(cellCandidate, 0, 1); auto cellCandidateAct = Tanh(""); @@ -191,7 +191,7 @@ 
std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength) input->addChild(outputGateX, 0, 0); auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), ""); hiddenState->addChild(outputGateH, 1, 0); - auto outputGate = Add(2,""); + auto outputGate = Add(""); outputGateX->addChild(outputGate, 0, 0); outputGateH->addChild(outputGate, 0, 1); auto outputGateAct = Sigmoid(""); diff --git a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp index ef319ef38ad18de9eaed0a1d4a92c3877ee7cf8e..bcda67d0ce4c43e4936739affb9d681942062cb1 100644 --- a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp +++ b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp @@ -41,7 +41,7 @@ std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_ AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims) }); - return MetaOperator("PaddedAvgPooling", graph, name); + return MetaOperator(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph, {}, name); } template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&); @@ -75,7 +75,7 @@ inline std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<Dim AvgPooling(kernel_dims, "", stride_dims) }); - return std::make_shared<MetaOperator_Op>("PaddedAvgPooling", graph); + return std::make_shared<MetaOperator_Op>(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph); } template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&); diff --git a/src/operator/MetaOperatorDefs/PaddedConv.cpp b/src/operator/MetaOperatorDefs/PaddedConv.cpp index 31b1c675e9d577002350ea11dd0b42601a91ef76..ca769026d900868d372bd7207ad616e07041e857 100644 --- a/src/operator/MetaOperatorDefs/PaddedConv.cpp +++ b/src/operator/MetaOperatorDefs/PaddedConv.cpp @@ -43,7 +43,7 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConv(Aidge::DimSize_t in_channels, Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""), std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? 
name + "_conv" : "") }); - auto metaOpNode = MetaOperator("PaddedConv", graph, name); + auto metaOpNode = MetaOperator(("PaddedConv" + std::to_string(DIM) + "D").c_str(), graph, {}, name); addProducer(metaOpNode, 1, append(out_channels, append(in_channels, kernel_dims)), "w"); if (!no_bias) { addProducer(metaOpNode, 2, {out_channels}, "b"); @@ -63,7 +63,7 @@ std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op( auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0); auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), ""); - return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv})); + return std::make_shared<MetaOperator_Op>(("PaddedConv" + std::to_string(DIM) + "D").c_str(), Sequential({pad, conv})); } template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&); template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&); diff --git a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp index 1c073b78a61763b46e330089cccfcc4bced352a4..b68794fb9b1ce76ddc5bca8e7d697a7b98d9f141 100644 --- a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp +++ b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp @@ -40,7 +40,7 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(const Aidge::DimSize_t n Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""), std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? 
name + "_conv_depth_wise" : "") }); - auto metaOpNode = MetaOperator("PaddedConvDepthWise", graph, name); + auto metaOpNode = MetaOperator(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), graph, {},name); addProducer(metaOpNode, 1, append(nb_channels, append(Aidge::DimSize_t(1), kernel_dims)), "w"); if (!no_bias) { addProducer(metaOpNode, 2, {nb_channels}, "b"); @@ -61,7 +61,7 @@ std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op( auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0); auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), ""); - return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv})); + return std::make_shared<MetaOperator_Op>(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), Sequential({pad, conv})); } template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&); template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&); diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp index f15a7dc3899a7bc864e8e76ff0946fb70584bf05..bd09e9d1297ec612b08634f59bfe33f0802ef3fd 100644 --- a/src/operator/Operator.cpp +++ b/src/operator/Operator.cpp @@ -65,20 +65,14 @@ void Aidge::Operator::resetConsummerProducer(){ mImpl->prodConso()->resetConsummerProducer(); } -void Aidge::Operator::runHooks() const { - for (auto& hook : mHooks) { - hook.second->call(); - } -} void Aidge::Operator::forward() { AIDGE_ASSERT(mImpl != nullptr, "forward(): an implementation is required for {}!", type()); mImpl->forward(); - runHooks(); } void Aidge::Operator::backward() { AIDGE_ASSERT(mImpl != nullptr, "backward(): an implementation is required for {}!", type()); - mImpl->backward(); + mImpl->backward(); } void Aidge::Operator::setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends) { diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp index 39f61e328bd3f98bc836604462bbfc064fbb93be..a0b5f2df52e373bd92dd57cc621318f2abbb45c9 100644 --- a/src/operator/Pad.cpp +++ b/src/operator/Pad.cpp @@ -20,7 +20,7 @@ #include "aidge/utils/Types.h" template <Aidge::DimIdx_t DIM> -const std::string Aidge::Pad_Op<DIM>::Type = "Pad"; +const std::string Aidge::Pad_Op<DIM>::Type = "Pad" + std::to_string(DIM) + "D"; template <Aidge::DimIdx_t DIM> std::shared_ptr<Aidge::Operator> Aidge::Pad_Op<DIM>::clone() const { @@ -34,9 +34,9 @@ bool Aidge::Pad_Op<DIM>::forwardDims(bool /*allowDataDependency*/) { const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>(); for (std::size_t dim = 0; dim < DIM; ++dim) { - outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim] + outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[dim] + inputDims[dim+2] - + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1]; + + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[DIM+dim]; } outputDims[1] = inputDims[1]; outputDims[0] = inputDims[0]; @@ -61,16 +61,18 @@ std::set<std::string> Aidge::Pad_Op<DIM>::getAvailableBackends() const { template class Aidge::Pad_Op<1>; template class 
Aidge::Pad_Op<2>; +//////////////////////////////////////////////////////////////////////////////// + template <std::array<Aidge::DimSize_t, 1>::size_type DIM> std::shared_ptr<Aidge::Node> Aidge::Pad(const std::array<Aidge::DimSize_t, 2*DIM> &beginEndTuples, const std::string& name, - const PadBorderType &borderType, + PadBorderType borderType, double borderValue) { AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", Pad_Op<DIM>::Type); return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name); } -template std::shared_ptr<Aidge::Node> Aidge::Pad<1>(const std::array<Aidge::DimSize_t, 2> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue); -template std::shared_ptr<Aidge::Node> Aidge::Pad<2>(const std::array<Aidge::DimSize_t, 4> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue); -template std::shared_ptr<Aidge::Node> Aidge::Pad<3>(const std::array<Aidge::DimSize_t, 6> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue); \ No newline at end of file +template std::shared_ptr<Aidge::Node> Aidge::Pad<1>(const std::array<Aidge::DimSize_t, 2> &beginEndTuples, const std::string&, PadBorderType, double borderValue); +template std::shared_ptr<Aidge::Node> Aidge::Pad<2>(const std::array<Aidge::DimSize_t, 4> &beginEndTuples, const std::string&, PadBorderType, double borderValue); +template std::shared_ptr<Aidge::Node> Aidge::Pad<3>(const std::array<Aidge::DimSize_t, 6> &beginEndTuples, const std::string&, PadBorderType, double borderValue); diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp index fdba4ac2e22d857a31779df2e5ff789c3eb92f5c..3d48b88ab400596d68cbfa34502e795766ff94f0 100644 --- a/src/operator/Producer.cpp +++ b/src/operator/Producer.cpp @@ -92,8 +92,6 @@ void Aidge::Producer_Op::forward() { if (!backend().empty()) { mImpl->forward(); } - - runHooks(); } void Aidge::Producer_Op::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const { diff --git a/src/operator/Round.cpp b/src/operator/Round.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ba4eff9d1e1cf06cc5a4bbda54010aec8c2f2f63 --- /dev/null +++ b/src/operator/Round.cpp @@ -0,0 +1,50 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include "aidge/operator/Round.hpp" + +#include <memory> +#include <string> + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Round_Op::Type = "Round"; + +Aidge::Round_Op::Round_Op(const Aidge::Round_Op& op) + : OperatorTensor(op) +{ + if (op.mImpl){ + SET_IMPL_MACRO(Round_Op, *this, op.backend()); + }else{ + mImpl = nullptr; + } +} + + +std::shared_ptr<Aidge::Operator> Aidge::Round_Op::clone() const { + return std::make_shared<Round_Op>(*this); +} + +void Aidge::Round_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Round_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} + +std::set<std::string> Aidge::Round_Op::getAvailableBackends() const { + return Registrar<Round_Op>::getKeys(); +} + +std::shared_ptr<Aidge::Node> Aidge::Round(const std::string& name) { + return std::make_shared<Node>(std::make_shared<Round_Op>(), name); +} \ No newline at end of file diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp index a8b20d21ae1f6c7bfba1a9e52d039f292b6aa62e..b51b4f346c92f778fe0a044df187cd8d0d0f7304 100644 --- a/src/operator/Squeeze.cpp +++ b/src/operator/Squeeze.cpp @@ -102,7 +102,7 @@ bool Squeeze_Op::forwardDims(bool allowDataDependency) { axis < static_cast<int8_t>(input_dims.size()), "{} : Axis index OutOfBounds error, expected value " "within size limits of input tensor : " - "[-{},{}), got {}.", + "[-{},{}], got {}.", type(), input_dims.size(), input_dims.size() - 1, axis); auto temp = static_cast<DimIdx_t>(axis >= 0 ? axis : axis + input_dims.size()); diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp index 0cb1717f1c96c393b8845db129eee1429966cd98..d24b9c90927830db6f1c1256d133d871ba3dc37f 100644 --- a/src/operator/Transpose.cpp +++ b/src/operator/Transpose.cpp @@ -59,6 +59,15 @@ std::shared_ptr<Aidge::Operator> Aidge::Transpose_Op::clone() const { bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) { if (inputsAssociated()) { AIDGE_ASSERT(!getInput(0)->empty(), "Not applicable on scalars."); + // If permutation vector is not given, reverse the dims of input tensor + if (outputDimsOrder().empty()) + { + this->outputDimsOrder().resize(getInput(0)->nbDims()); + std::iota(this->outputDimsOrder().rbegin(), this->outputDimsOrder().rend(), 0); + } + + AIDGE_ASSERT(outputDimsOrder().size() == getInput(0)->nbDims(), + "Permutation vector must have the same rank as input tensor."); std::vector<DimSize_t> outputDims; for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) { outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]); @@ -86,6 +95,6 @@ std::set<std::string> Aidge::Transpose_Op::getAvailableBackends() const { ////////////////////////////////////////////////// std::shared_ptr<Aidge::Node> Aidge::Transpose(const std::vector<Aidge::DimSize_t> &outputDimsOrder, - const std::string& name) { + const std::string& name) { return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name); } \ No newline at end of file diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp index 43afd160e03395c65c4dcbe5504cb865da4ed8d8..f3353b45cd6a732fa456ea0585ec5d040d53ef31 100644 --- a/src/operator/Unsqueeze.cpp +++ b/src/operator/Unsqueeze.cpp @@ -80,7 +80,7 @@ bool Unsqueeze_Op::forwardDims(bool 
allowDataDependency) { axis < static_cast<int8_t>(output_nb_dims), "{} : Axis index OutOfBounds enrror, expected value " "within size limits of input tensor : " - "[-{},{}), got {}.", + "[-{},{}], got {}.", type(), output_nb_dims, output_nb_dims - 1, axis); axes_rectified_idx.push_back( static_cast<DimIdx_t>(axis >= 0 ? axis : axis + output_nb_dims)); diff --git a/src/recipes/ConvToMatMul.cpp b/src/recipes/ConvToMatMul.cpp index 9b88ffc73204b44cf857213d1fdfff49b3191f73..70be33932295aab49653bdc2853f4411ded919b4 100644 --- a/src/recipes/ConvToMatMul.cpp +++ b/src/recipes/ConvToMatMul.cpp @@ -24,7 +24,7 @@ #include "aidge/recipes/Recipes.hpp" size_t Aidge::convToMatMul(std::shared_ptr<GraphView> graphView) { - const auto matches = SinglePassGraphMatching(graphView).match("Conv"); + const auto matches = SinglePassGraphMatching(graphView).match("Conv2D"); size_t nbReplaced = 0; for (const auto& match : matches) { @@ -75,7 +75,7 @@ size_t Aidge::convToMatMul(std::shared_ptr<GraphView> graphView) { // Handle bias if (convOp->getInput(2) && !convOp->getInput(2)->empty()) { - auto add = Add(2, (!convNode->name().empty()) ? convNode->name() + "_add" : ""); + auto add = Add((!convNode->name().empty()) ? convNode->name() + "_add" : ""); auto bReshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>{{1, static_cast<int64_t>(convOp->getInput(2)->size()), 1, 1}}), (!convNode->name().empty()) ? convNode->name() + "_b_reshape_shape_prod" : "", true); diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp index 34722c19f8c0fddaffa7357136f1512a027e1617..4c4de25282c487d023f9c184b015ac332e716b7b 100644 --- a/src/recipes/FuseBatchNorm.cpp +++ b/src/recipes/FuseBatchNorm.cpp @@ -190,9 +190,10 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode, } void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::GraphView> graphView) { - auto matches = SinglePassGraphMatching(graphView).match("(Conv|ConvDepthWise|PaddedConv|PaddedConvDepthWise)->BatchNorm"); + auto matches = SinglePassGraphMatching(graphView).match("(Conv2D|ConvDepthWise2D|PaddedConv2D|PaddedConvDepthWise2D)->BatchNorm2D"); for (auto match : matches) { + fmt::println("Match !"); auto rootNode = match.graph->rootNode(); fuseBatchNorm(rootNode, *rootNode->getChildren().begin()); } diff --git a/src/scheduler/ParallelScheduler.cpp b/src/scheduler/ParallelScheduler.cpp index 1d70646b70091e2e3ff6f03b8ee82ae62aeb1e43..2b9a1f5b62741d5f08dfc3e5aa45b1102d54b850 100644 --- a/src/scheduler/ParallelScheduler.cpp +++ b/src/scheduler/ParallelScheduler.cpp @@ -48,7 +48,7 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std:: // Sort static scheduling, the order will be the prefered threads scheduling // order for non critical nodes - std::deque<std::shared_ptr<StaticSchedulingElement>> staticSchedule(mStaticSchedule.at(mStaticScheduleStep).begin(), mStaticSchedule.at(mStaticScheduleStep).end()); + std::deque<StaticSchedulingElement*> staticSchedule(mStaticSchedule.at(mStaticScheduleStep).begin(), mStaticSchedule.at(mStaticScheduleStep).end()); std::stable_sort(staticSchedule.begin(), staticSchedule.end(), [](const auto& lhs, const auto& rhs) { return ((lhs->early < rhs->early) || (lhs->early == rhs->early && lhs->late < rhs->late)); }); @@ -59,12 +59,12 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std:: size_t latest = 0; std::mutex schedulingMutex; - std::map<std::shared_ptr<StaticSchedulingElement>, std::atomic<bool>> finished; + std::map<StaticSchedulingElement*, 
std::atomic<bool>> finished; while (!staticSchedule.empty()) { Log::debug("Step {}", latest); - std::vector<std::shared_ptr<StaticSchedulingElement>> mustFinish; + std::vector<StaticSchedulingElement*> mustFinish; // Run all nodes that must be run at this step: latest (critical nodes) for (size_t i = 0; i < staticSchedule.size(); ) { @@ -188,7 +188,7 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std:: } // Wait for all nodes that must finish at latest to be finished - // By scheduling construction, no other node can be started before all + // By scheduling construction, no other node can be started before all // nodes at latest step are finished while (true) { bool ready = true; diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp index 1613450508ea84a230f36ba6526a1322c6a70559..7af3c62c5d0af33b01e596ecf4c91c35ab3e17b7 100644 --- a/src/scheduler/Scheduler.cpp +++ b/src/scheduler/Scheduler.cpp @@ -37,7 +37,14 @@ #include "aidge/utils/Types.h" -Aidge::Scheduler::~Scheduler() noexcept = default; +Aidge::Scheduler::~Scheduler() { + for (auto& staticScheduleVec : mStaticSchedule) { + for (auto& staticScheduleElt : staticScheduleVec) { + delete staticScheduleElt; + } + staticScheduleVec.clear(); + } +} Aidge::Scheduler::PriorProducersConsumers::PriorProducersConsumers() = default; Aidge::Scheduler::PriorProducersConsumers::PriorProducersConsumers(const PriorProducersConsumers&) = default; Aidge::Scheduler::PriorProducersConsumers::~PriorProducersConsumers() noexcept = default; @@ -48,7 +55,7 @@ void Aidge::Scheduler::generateScheduling() { mStaticSchedule.push_back(schedule); } -std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::Scheduler::generateBaseScheduling() const { +std::vector<Aidge::Scheduler::StaticSchedulingElement*> Aidge::Scheduler::generateBaseScheduling() const { // 0) setup useful variables // map associating each node with string "name (type#rank)" @@ -60,25 +67,21 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S // producers-consumers model! std::set<std::shared_ptr<Node>> stillConsumers; - std::vector<std::shared_ptr<StaticSchedulingElement>> schedule; - + std::vector<StaticSchedulingElement*> schedule; - // 1) Initialize consumers list: - // 1.1) List of the GraphView's input nodes - std::set<std::shared_ptr<Node>> consumers = mGraphView->inputNodes(); - // 1.2) List of nodes inside the GraphView connected to an inner Producer + // 1) Initialize consumers list: start from the output nodes and + // find the required prior producers/consumers at step 2). + // Beware that generateBaseScheduling() can be called multiple times + // with some nodes having already produced some data. In this case, + // we should always consume available data first. This is ensured + // by setting the consumers list to the output nodes and then recursively + // finding the dependencies. + // The initial list may contain producer nodes, in which case + // getPriorProducersConsumers() at step 2 will have moved them to + // the requiredProducers list. + std::set<std::shared_ptr<Node>> consumers = mGraphView->outputNodes(); std::set<std::shared_ptr<Node>> producers; - for (const std::shared_ptr<Node>& nodePtr : mGraphView->getNodes()) { - if (nodePtr->type() == Producer_Op::Type) { - for (const auto& child : nodePtr->getChildren()) { - // Do not schedule childs outside current graph!
- if (mGraphView->inView(child)) { - consumers.insert(child); - } - } - } - } do { // 2) From the current consumers list, check if any prior consumer node @@ -131,7 +134,7 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S // Producers are special nodes that generate data on demand. for (const auto& requiredProducer : requiredProducers) { requiredProducer->getOperator()->updateConsummerProducer(); - schedule.push_back(std::make_shared<StaticSchedulingElement>(requiredProducer)); + schedule.push_back(new StaticSchedulingElement(requiredProducer)); } // 5) Find runnable consumers. @@ -185,7 +188,7 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S for (const auto& runnable : runnableConsumers) { Log::debug("Runnable: {}", namePtrTable.at(runnable)); runnable->getOperator()->updateConsummerProducer(); - schedule.push_back(std::make_shared<StaticSchedulingElement>(runnable)); + schedule.push_back(new StaticSchedulingElement(runnable)); } // 7) Update consumers list @@ -300,24 +303,28 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S void Aidge::Scheduler::summarizeConsumerState(const std::shared_ptr<Aidge::Node>& consumer, const std::string& nodeName) const { Log::debug("\t- consumer: {}", fmt::styled(nodeName, fg(fmt::color::orange))); std::string crLog = "\t\tC/R:\t"; - for (IOIndex_t inId = 0; inId < consumer->nbInputs() - 1; ++inId) { - crLog += fmt::format("{}/{}\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId), - consumer->getOperator()->getNbRequiredData(inId)); + if (consumer->nbInputs() > 0) { + for (IOIndex_t inId = 0; inId < consumer->nbInputs() - 1; ++inId) { + crLog += fmt::format("{}/{}\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId), + consumer->getOperator()->getNbRequiredData(inId)); + } + crLog += fmt::format("{}/{}", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1), + consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1)); } - crLog += fmt::format("{}/{}", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1), - consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1)); Log::debug("{}", crLog); std::string pLog = "\t\tP:\t"; - for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) { - pLog += fmt::format("{}\n\t\t\t", consumer->getOperator()->getNbProducedData(outId)); + if (consumer->nbOutputs() > 0) { + for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) { + pLog += fmt::format("{}\n\t\t\t", consumer->getOperator()->getNbProducedData(outId)); + } + pLog += fmt::format("{}", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1)); } - pLog += fmt::format("{}", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1)); Log::debug("{}", pLog); } -void Aidge::Scheduler::generateEarlyLateScheduling(std::vector<std::shared_ptr<StaticSchedulingElement>>& schedule) const { +void Aidge::Scheduler::generateEarlyLateScheduling(std::vector<StaticSchedulingElement*>& schedule) const { std::size_t latest = 0; // Calculate early (logical) start for (std::size_t elt = 0; elt < schedule.size(); ++elt) { @@ -397,15 +404,20 @@ void Aidge::Scheduler::resetScheduling() { for (auto node : mGraphView->getNodes()) { node->getOperator()->resetConsummerProducer(); } - + for (auto& staticScheduleVec : mStaticSchedule) 
{ + for (auto& staticScheduleElt : staticScheduleVec) { + delete staticScheduleElt; + } + staticScheduleVec.clear(); + } mStaticSchedule.clear(); mStaticScheduleStep = 0; mScheduling.clear(); } /** - * This version is a simplified version without special handling of concatenation. -*/ + * @warning This version is a simplified version without special handling of concatenation. + */ Aidge::MemoryManager Aidge::Scheduler::generateMemory(bool incProducers, bool wrapAroundBuffer) const { MemoryManager memManager; @@ -676,8 +688,8 @@ Aidge::Elts_t Aidge::Scheduler::getNbAvailableData(const std::shared_ptr<Node>& return Elts_t::NoneElts(); } -Aidge::Scheduler::PriorProducersConsumers Aidge::Scheduler::getPriorProducersConsumers( - const std::shared_ptr<Node>& node) const +Aidge::Scheduler::PriorProducersConsumers +Aidge::Scheduler::getPriorProducersConsumers(const std::shared_ptr<Node>& node) const { const auto priorCache = mPriorCache.find(node); if (priorCache != mPriorCache.end()) { @@ -714,6 +726,7 @@ Aidge::Scheduler::PriorProducersConsumers Aidge::Scheduler::getPriorProducersCon const auto& parentPrior = getPriorProducersConsumers(parent.first); if (!parentPrior.isPrior) { + // only happens in case of cyclic graphs return PriorProducersConsumers(); // not scheduled } else { @@ -727,7 +740,9 @@ Aidge::Scheduler::PriorProducersConsumers Aidge::Scheduler::getPriorProducersCon } prior.isPrior = true; - if (prior.priorConsumers.empty()) { + if (node->type() == Producer_Op::Type) { + prior.requiredProducers.insert(node); + } else if (prior.priorConsumers.empty()) { prior.priorConsumers.insert(node); } mPriorCache.insert(std::make_pair(node, prior)); diff --git a/src/scheduler/SequentialScheduler.cpp b/src/scheduler/SequentialScheduler.cpp index 88b5e98bc62456bd59dc235c3112396daaeddd24..4e6e91f51878ffce7d910a361c9a6e8fff9cb835 100644 --- a/src/scheduler/SequentialScheduler.cpp +++ b/src/scheduler/SequentialScheduler.cpp @@ -45,7 +45,7 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, const std::vector<std } // Sort static scheduling according to the policy - std::vector<std::shared_ptr<StaticSchedulingElement>> staticSchedule(mStaticSchedule.at(mStaticScheduleStep).begin(), mStaticSchedule.at(mStaticScheduleStep).end()); + std::vector<StaticSchedulingElement*> staticSchedule(mStaticSchedule.at(mStaticScheduleStep).begin(), mStaticSchedule.at(mStaticScheduleStep).end()); if (mSchedulingPolicy == SchedulingPolicy::AsSoonAsPossible) { std::stable_sort(staticSchedule.begin(), staticSchedule.end(), diff --git a/src/utils/DynamicAttributes.cpp b/src/utils/DynamicAttributes.cpp index 909d3bb2f5fda977ac497a19e1a1088eb52cfc88..6e64861605b3c853da0d754065e24ed43aba0f63 100644 --- a/src/utils/DynamicAttributes.cpp +++ b/src/utils/DynamicAttributes.cpp @@ -11,18 +11,40 @@ #include "aidge/utils/DynamicAttributes.hpp" -std::map<std::type_index, bool(*)(const future_std::any&, const future_std::any&)> Aidge::DynamicAttributes::mAnyCompare; +std::map<std::type_index, std::unique_ptr<Aidge::DynamicAttributes::AnyUtils_>> Aidge::DynamicAttributes::mAnyUtils = []() { + std::map<std::type_index, std::unique_ptr<Aidge::DynamicAttributes::AnyUtils_>> m; + m.emplace(typeid(DynamicAttributes), std::unique_ptr<AnyUtils<DynamicAttributes>>(new AnyUtils<DynamicAttributes>())); + return m; +}(); + +template<> void Aidge::DynamicAttributes::setAttr<future_std::any>(const std::string& name, const future_std::any& value) +{ + const auto dot = name.find('.'); + if (dot == name.npos) { + 
AIDGE_ASSERT(mAnyUtils.find(value.type()) != mAnyUtils.end(), "DynamicAttributes::setAttr(): cannot set value to std::any of never seen type."); + + auto res = mAttrs.emplace(std::make_pair(name, value)); + if (!res.second) + res.first->second = value; + } + else { + const auto ns = name.substr(0, dot); + const auto nsName = name.substr(dot + 1); + auto res = mAttrs.emplace(std::make_pair(ns, future_std::any(DynamicAttributes()))); + future_std::any_cast<DynamicAttributes&>(res.first->second).setAttr<future_std::any>(nsName, value); + } +} bool future_std::operator<(const future_std::any& lhs, const future_std::any& rhs) { if (lhs.type() == rhs.type()) { - return Aidge::DynamicAttributes::mAnyCompare.at(lhs.type())(lhs, rhs); + return Aidge::DynamicAttributes::mAnyUtils.at(lhs.type())->compare(lhs, rhs); } #ifdef PYBIND else if (lhs.type() == typeid(py::object)) { - return Aidge::DynamicAttributes::mAnyCompare.at(rhs.type())(lhs, rhs); + return Aidge::DynamicAttributes::mAnyUtils.at(rhs.type())->compare(lhs, rhs); } else if (rhs.type() == typeid(py::object)) { - return Aidge::DynamicAttributes::mAnyCompare.at(lhs.type())(lhs, rhs); + return Aidge::DynamicAttributes::mAnyUtils.at(lhs.type())->compare(lhs, rhs); } #endif else { diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp index a08808ee5e6c2657a76213dcff80cec53b23e7ee..2fa06cf23b3b681211208a3e5bbea9226f0930b8 100644 --- a/unit_tests/graph/Test_GraphView.cpp +++ b/unit_tests/graph/Test_GraphView.cpp @@ -447,10 +447,10 @@ TEST_CASE("[core/graph] GraphView(getOrderedNodes)", "[GraphView][getOrderedNode auto data1 = Producer({2}, "data1"); auto data2 = Producer({2}, "data2"); auto data3 = Producer({2}, "data3"); - auto add1 = Add(2, "add1"); - auto add2 = Add(2, "add2"); + auto add1 = Add("add1"); + auto add2 = Add("add2"); auto split1 = Split(2, 0, {1, 1}, "split1"); - auto add3 = Add(3, "add3"); + auto add3 = Add("add3"); auto g = std::make_shared<GraphView>("TestGraph"); data1->addChild(add1); data2->addChild(add1); @@ -508,9 +508,9 @@ TEST_CASE("[core/graph] GraphView(getOrderedNodes)", "[GraphView][getOrderedNode TEST_CASE("[core/graph] GraphView(getOrderedNodes) cyclic", "[GraphView][getOrderedNodes]") { auto data1 = Producer({2}, "data1"); auto data2 = Producer({2}, "data2"); - auto add1 = Add(2, "add1"); + auto add1 = Add("add1"); auto mem1 = Memorize(1, "mem1"); - auto add2 = Add(2, "add2"); + auto add2 = Add("add2"); auto g = std::make_shared<GraphView>("TestGraph"); data1->addChild(add1); data2->addChild(add1); diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp index 2fdcd611d378ceb6c3dbdc853920eecf92c31141..d6d98d4701cba900548d127879c9b3940cf1d739 100644 --- a/unit_tests/graph/Test_Matching.cpp +++ b/unit_tests/graph/Test_Matching.cpp @@ -51,10 +51,10 @@ TEST_CASE("[core/graph] Matching") { PaddedConv(8, 16, {3, 3}, "conv3", {1, 1}, {2, 2, 2, 2}), ReLU("relu3"), PaddedConv(8, 16, {5, 5}, "conv4", {1, 1}, {2, 2, 2, 2}), - Add(2, "add"), + Add("add"), PaddedConv(8, 16, {5, 5}, "conv5", {1, 1}, {2, 2, 2, 2}), ReLU("relu5"), - Add(2, "add2") + Add("add2") }); g1->getNode("relu3")->addChild(g1->getNode("add"), 0, 1); @@ -65,8 +65,8 @@ TEST_CASE("[core/graph] Matching") { expandMetaOps(g1); g1->save("Test_examples", true); - SECTION("Conv->(ReLU->Pad->Conv)*") { - const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU->Pad->Conv)*"); + SECTION("Conv2D->(ReLU->Pad2D->Conv2D)*") { + const auto results = 
SinglePassGraphMatching(g1).match("Conv2D->(ReLU->Pad2D->Conv2D)*"); checkMatches(results, { {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "relu1", "relu2"}}, @@ -77,24 +77,24 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Conv->ReLU;ReLU->Pad") { - REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv->ReLU;ReLU->Pad")); + SECTION("Conv2D->ReLU;ReLU->Pad2D") { + REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv2D->ReLU;ReLU->Pad2D")); } - SECTION("Conv->ReLU#1;ReLU#2->Pad") { - REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv->ReLU#1;ReLU#2->Pad")); + SECTION("Conv2D->ReLU#1;ReLU#2->Pad2D") { + REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv2D->ReLU#1;ReLU#2->Pad2D")); } - SECTION("Conv?->ReLU") { - REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv?->ReLU")); + SECTION("Conv2D?->ReLU") { + REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv2D?->ReLU")); } SECTION("(Add#<*~.)*") { REQUIRE_THROWS(SinglePassGraphMatching(g1).match("(Add#<*~.)*")); } - SECTION("Conv->(ReLU~>Pad->Conv)*") { - const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU~>Pad->Conv)*"); + SECTION("Conv2D->(ReLU~>Pad2D->Conv2D)*") { + const auto results = SinglePassGraphMatching(g1).match("Conv2D->(ReLU~>Pad2D->Conv2D)*"); checkMatches(results, { {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}}, @@ -105,8 +105,8 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Conv->(ReLU~>Pad->Conv)* [disjoint]") { - const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU~>Pad->Conv)*", true); + SECTION("Conv2D->(ReLU~>Pad2D->Conv2D)* [disjoint]") { + const auto results = SinglePassGraphMatching(g1).match("Conv2D->(ReLU~>Pad2D->Conv2D)*", true); checkMatches(results, { {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}}, @@ -114,8 +114,8 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Conv~>(ReLU~>Pad->Conv)*") { - const auto results = SinglePassGraphMatching(g1).match("Conv~>(ReLU~>Pad->Conv)*"); + SECTION("Conv~>(ReLU~>Pad2D->Conv2D)*") { + const auto results = SinglePassGraphMatching(g1).match("Conv2D~>(ReLU~>Pad2D->Conv2D)*"); checkMatches(results, { {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}}, @@ -126,8 +126,8 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Pad->Conv#->ReLU;Conv#<1-Producer;Conv#<2-Producer") { - const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;Conv#<1-Producer;Conv#<2-Producer"); + SECTION("Pad2D->Conv2D#->ReLU;Conv2D#<1-Producer;Conv2D#<2-Producer") { + const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#->ReLU;Conv2D#<1-Producer;Conv2D#<2-Producer"); checkMatches(results, { {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}}, @@ -135,8 +135,8 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Pad->Conv#~>ReLU;Conv#<1-Producer;Conv#<2-Producer") { - const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;Conv#<1-Producer;Conv#<2-Producer"); + SECTION("Pad2D->Conv2D#~>ReLU;Conv2D#<1-Producer;Conv2D#<2-Producer") { + const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#~>ReLU;Conv2D#<1-Producer;Conv2D#<2-Producer"); checkMatches(results, { {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}}, @@ -145,8 +145,8 @@ TEST_CASE("[core/graph] 
Matching") { }); } - SECTION("Pad->Conv#~>ReLU;(Conv#<*-Producer){2}") { - const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;(Conv#<*-Producer){2}"); + SECTION("Pad2D->Conv2D#~>ReLU;(Conv2D#<*-Producer){2}") { + const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#~>ReLU;(Conv2D#<*-Producer){2}"); checkMatches(results, { {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}}, @@ -155,8 +155,8 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Pad->Conv#->ReLU;(Conv#<*-Producer){2}") { - const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;(Conv#<*-Producer){2}"); + SECTION("Pad2D->Conv2D#->ReLU;(Conv2D#<*-Producer){2}") { + const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#->ReLU;(Conv2D#<*-Producer){2}"); checkMatches(results, { {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}}, @@ -164,8 +164,8 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Pad->Conv#~>ReLU;(Conv#<*-.){2}") { - const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;(Conv#<*-.){2}"); + SECTION("Pad2D->Conv2D#~>ReLU;(Conv2D#<*-.){2}") { + const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#~>ReLU;(Conv2D#<*-.){2}"); checkMatches(results, { {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}}, @@ -174,8 +174,8 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Pad->Conv#->ReLU;(Conv#<*-.){2}") { - const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;(Conv#<*-.){2}"); + SECTION("Pad2D->Conv2D#->ReLU;(Conv2D#<*-.){2}") { + const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#->ReLU;(Conv2D#<*-.){2}"); checkMatches(results, { {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}}, @@ -184,7 +184,7 @@ TEST_CASE("[core/graph] Matching") { } SECTION("Conv#~>ReLU*;Conv#<-Pad*") { - const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU*;Conv#<-Pad*"); + const auto results = SinglePassGraphMatching(g1).match("Conv2D#~>ReLU*;Conv2D#<-Pad2D*"); checkMatches(results, { {"conv1", {"conv1", "relu1"}}, @@ -195,8 +195,8 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Conv#->ReLU*;Conv#<-Pad*") { - const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU*;Conv#<-Pad*"); + SECTION("Conv2D#->ReLU*;Conv2D#<-Pad2D*") { + const auto results = SinglePassGraphMatching(g1).match("Conv2D#->ReLU*;Conv2D#<-Pad2D*"); checkMatches(results, { {"conv1", {"conv1", "relu1"}}, @@ -207,8 +207,8 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Conv#->ReLU?-*>Add#1?->ReLU?;Conv#<-Pad?;(Add#1<*-.)?") { - const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU?-*>Add#1?->ReLU?;Conv#<-Pad?;(Add#1<*-.)?"); + SECTION("Conv2D#->ReLU?-*>Add#1?->ReLU?;Conv2D#<-Pad2D?;(Add#1<*-.)?") { + const auto results = SinglePassGraphMatching(g1).match("Conv2D#->ReLU?-*>Add#1?->ReLU?;Conv2D#<-Pad2D?;(Add#1<*-.)?"); checkMatches(results, { {"conv1", {"conv1", "relu1"}}, @@ -219,8 +219,8 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Conv#~>ReLU?-*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*-.)?") { - const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?-*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*-.)?"); + SECTION("Conv2D#~>ReLU?-*>Add#1?~>ReLU?;Conv2D#<-Pad?;(Add#1<*-.)?") { + const auto results = SinglePassGraphMatching(g1).match("Conv2D#~>ReLU?-*>Add#1?~>ReLU?;Conv2D#<-Pad2D?;(Add#1<*-.)?"); checkMatches(results, { {"conv1", {"conv1", "relu1"}}, @@ -231,8 +231,8 @@ 
TEST_CASE("[core/graph] Matching") { }); } - SECTION("Conv#~>ReLU?~*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*~.)?") { - const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?~*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*~.)?"); + SECTION("Conv2D#~>ReLU?~*>Add#1?~>ReLU?;Conv2D#<-Pad2D?;(Add#1<*~.)?") { + const auto results = SinglePassGraphMatching(g1).match("Conv2D#~>ReLU?~*>Add#1?~>ReLU?;Conv2D#<-Pad2D?;(Add#1<*~.)?"); checkMatches(results, { {"conv1", {"conv1", "relu1"}}, @@ -243,8 +243,8 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Conv#->ReLU?;Conv#<-Pad?") { - const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU?;Conv#<-Pad?"); + SECTION("Conv2D#->ReLU?;Conv2D#<-Pad2D?") { + const auto results = SinglePassGraphMatching(g1).match("Conv2D#->ReLU?;Conv2D#<-Pad2D?"); checkMatches(results, { {"conv1", {"conv1", "relu1"}}, @@ -255,8 +255,8 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Conv#~>ReLU?;Conv#<-Pad?") { - const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?;Conv#<-Pad?"); + SECTION("Conv2D#~>ReLU?;Conv2D#<-Pad2D?") { + const auto results = SinglePassGraphMatching(g1).match("Conv2D#~>ReLU?;Conv2D#<-Pad2D?"); checkMatches(results, { {"conv1", {"conv1", "relu1"}}, @@ -267,8 +267,8 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("(Conv|ReLU)->Add") { - const auto results = SinglePassGraphMatching(g1).match("(Conv|ReLU)->Add"); + SECTION("(Conv2D|ReLU)->Add") { + const auto results = SinglePassGraphMatching(g1).match("(Conv2D|ReLU)->Add"); checkMatches(results, { {"conv4_conv", {"add", "conv4_conv"}}, @@ -294,33 +294,33 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Conv~*>(ReLU&Add)") { - const auto results = SinglePassGraphMatching(g1).match("Conv~*>(ReLU&Add)"); + SECTION("Conv2D~*>(ReLU&Add)") { + const auto results = SinglePassGraphMatching(g1).match("Conv2D~*>(ReLU&Add)"); checkMatches(results, { {"conv5_conv", {"add2", "conv5_conv", "relu5"}} }); } - SECTION("Conv~>(ReLU&Add)") { - const auto results = SinglePassGraphMatching(g1).match("Conv~>(ReLU&Add)"); + SECTION("Conv2D~>(ReLU&Add)") { + const auto results = SinglePassGraphMatching(g1).match("Conv2D~>(ReLU&Add)"); REQUIRE(results.size() == 0); } - SECTION("ReLU~*>((Pad->Conv-*>Add#)&Add#)") { - const auto results = SinglePassGraphMatching(g1).match("ReLU~*>((Pad->Conv-*>Add#)&Add#)"); + SECTION("ReLU~*>((Pad2D->Conv2D-*>Add#)&Add#)") { + const auto results = SinglePassGraphMatching(g1).match("ReLU~*>((Pad2D->Conv2D-*>Add#)&Add#)"); checkMatches(results, { {"relu3", {"add", "conv4_conv", "conv4_pad", "relu3"}} }); } - SECTION("ReLU-*>((Pad->Conv-*>Add)&Add)") { - const auto results = SinglePassGraphMatching(g1).match("ReLU-*>((Pad->Conv-*>Add)&Add)"); + SECTION("ReLU-*>((Pad2D->Conv2D-*>Add)&Add)") { + const auto results = SinglePassGraphMatching(g1).match("ReLU-*>((Pad2D->Conv2D-*>Add)&Add)"); REQUIRE(results.size() == 0); } - SECTION("Pad->Conv[3x3]->ReLU") { + SECTION("Pad2D->Conv2D[3x3]->ReLU") { auto gm = SinglePassGraphMatching(g1); gm.addNodeLambda("3x3", [](const NodePtr& node) { const std::shared_ptr<Conv_Op<2>> op = @@ -328,20 +328,20 @@ TEST_CASE("[core/graph] Matching") { return (op->kernelDims() == std::array<DimSize_t, 2>({3, 3})); }); - const auto results = gm.match("Pad->Conv[3x3]->ReLU"); + const auto results = gm.match("Pad2D->Conv2D[3x3]->ReLU"); checkMatches(results, { {"conv3_pad", {"conv3_conv", "conv3_pad", "relu3"}} }); } - SECTION(".[test]->Pad") { + SECTION(".[test]->Pad2D") { auto gm = SinglePassGraphMatching(g1); 
gm.addNodeLambda("test", [](const NodePtr& node) { return (node->type() == "Add" || (node->type() == "ReLU" && node->name() == "relu1")); }); - const auto results = gm.match(".[test]->Pad"); + const auto results = gm.match(".[test]->Pad2D"); checkMatches(results, { {"add", {"add", "conv5_pad"}}, @@ -364,16 +364,16 @@ TEST_CASE("[core/graph] Matching") { Conv(1, 4, {5, 5}, "conv4") }); - SECTION("((Conv#->(.[exBN]|$))|(FC#->(.[exFC])*->$))") { + SECTION("((Conv2D#->(.[exBN]|$))|(FC#->(.[exFC])*->$))") { auto gm = SinglePassGraphMatching(g2); gm.addNodeLambda("exBN", [](const NodePtr& node) { - return (node->type() != "BatchNorm"); + return (node->type() != "BatchNorm2D"); }); gm.addNodeLambda("exFC", [](const NodePtr& node) { return (node->type() != "FC"); }); - const auto results = gm.match("((Conv#->(.[exBN]|$))|(FC#->(.[exFC])*->$))"); + const auto results = gm.match("((Conv2D#->(.[exBN]|$))|(FC#->(.[exFC])*->$))"); checkMatches(results, { {"conv2", {"conv2", "relu2"}}, @@ -396,13 +396,13 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Conv#->(.[exConv])*->$") { + SECTION("Conv2D#->(.[exConv])*->$") { auto gm = SinglePassGraphMatching(g2); gm.addNodeLambda("exConv", [](const NodePtr& node) { - return (node->type() != "Conv"); + return (node->type() != "Conv2D"); }); - const auto results = gm.match("Conv#->(.[exConv])*->$"); + const auto results = gm.match("Conv2D#->(.[exConv])*->$"); checkMatches(results, { {"conv4", {"conv4"}} @@ -423,13 +423,13 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("(((FC#|Conv#)<-(.[exParam])*<-$)|((FC#|Conv#)->(.[exParam])*->$));(FC#|Conv#)<1-Producer#") { + SECTION("(((FC#|Conv2D#)<-(.[exParam])*<-$)|((FC#|Conv2D#)->(.[exParam])*->$));(FC#|Conv2D#)<1-Producer#") { auto gm = SinglePassGraphMatching(g2); gm.addNodeLambda("exParam", [](const NodePtr& node) { - return (node->type() != "FC" && node->type() != "Conv"); + return (node->type() != "FC" && node->type() != "Conv2D"); }); - const auto results = gm.match("(((FC#|Conv#)<-(.[exParam])*<-$)|((FC#|Conv#)->(.[exParam])*->$));(FC#|Conv#)<1-Producer#"); + const auto results = gm.match("(((FC#|Conv2D#)<-(.[exParam])*<-$)|((FC#|Conv2D#)->(.[exParam])*->$));(FC#|Conv2D#)<1-Producer#"); checkMatches(results, { {"conv1", {"conv1", "conv1_w", "dataProvider"}}, @@ -437,13 +437,13 @@ TEST_CASE("[core/graph] Matching") { }); } - SECTION("Conv->ReLU [perf]") { + SECTION("Conv2D->ReLU [perf]") { const size_t nbTests = 3; std::mt19937::result_type seed(1); for (int test = 0; test < nbTests; ++test) { RandomGraph randGraph; - randGraph.types = {"Conv", "ReLU", "Dummy"}; + randGraph.types = {"Conv2D", "ReLU", "Dummy"}; randGraph.typesWeights = {0.4, 0.4, 0.2}; randGraph.avgIn = 1; randGraph.maxIn = 1; @@ -460,7 +460,7 @@ TEST_CASE("[core/graph] Matching") { auto gm = SinglePassGraphMatching(g1); const auto start = std::chrono::system_clock::now(); - const auto results = gm.match("Conv->ReLU#;ReLU#->Dummy"); + const auto results = gm.match("Conv2D->ReLU#;ReLU#->Dummy"); const auto end = std::chrono::system_clock::now(); const auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start); diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp index 68ac509e79e347106a9a132249f125ebe6e39f6a..79e471d44a49dfb52fd5eb4aa1ed2dc4ab8dc0bb 100644 --- a/unit_tests/graphRegex/Test_GraphRegex.cpp +++ b/unit_tests/graphRegex/Test_GraphRegex.cpp @@ -153,9 +153,9 @@ TEST_CASE("GraphRegexUser") { // generate the original GraphView auto matmul0 = 
MatMul("matmul0"); - auto add0 = Add(2, "add0"); + auto add0 = Add("add0"); auto matmul1 = MatMul("matmul1"); - auto add1 = Add(2, "add1"); + auto add1 = Add("add1"); auto b0 = Producer({5}, "B0"); auto w0 = Producer({5, 5}, "W0"); diff --git a/unit_tests/graphRegex/Test_examples.cpp b/unit_tests/graphRegex/Test_examples.cpp index d85ae5c893a7ae4497125a62dad3cde97dac5195..0ccc05b5a957673167a16643b22bf047fc80f43f 100644 --- a/unit_tests/graphRegex/Test_examples.cpp +++ b/unit_tests/graphRegex/Test_examples.cpp @@ -40,7 +40,7 @@ TEST_CASE("Examples", "[GraphMatching]") { auto regex = std::make_shared<GraphRegex>(); regex->setKeyFromGraph(g1); - regex->addQuery("Pad->Conv->ReLU"); + regex->addQuery("Pad2D->Conv2D->ReLU"); // Won't work, wrong number of matches: //regex->addQuery("Pad*->Conv->ReLU*"); @@ -52,4 +52,4 @@ TEST_CASE("Examples", "[GraphMatching]") { } } -} // namespace Aidge \ No newline at end of file +} // namespace Aidge diff --git a/unit_tests/operator/Test_Clip_Op.cpp b/unit_tests/operator/Test_Clip_Op.cpp new file mode 100644 index 0000000000000000000000000000000000000000..51d6d248067c89be9703fd4d48f02347f285330a --- /dev/null +++ b/unit_tests/operator/Test_Clip_Op.cpp @@ -0,0 +1,110 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> +#include <catch2/generators/catch_generators_random.hpp> +#include <cstddef> // std::size_t +#include <memory> +#include <random> // std::mt19937, std::uniform_int_distribution +#include <vector> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Clip.hpp" +#include "aidge/operator/OperatorTensor.hpp" + +namespace Aidge { +TEST_CASE("[core/operator] Clip_Op(forwardDims)", "[Clip][forwardDims]") { + constexpr std::uint16_t NBTRIALS = 10; + + // Create a random number generator + auto rd = Catch::Generators::Detail::getSeed; + std::mt19937 gen(rd()); + std::uniform_int_distribution<std::size_t> dimsDist(1, 10); + std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5); + + // Create Clip Operator + std::shared_ptr<Node> myClip = Clip(); + auto op = std::static_pointer_cast<OperatorTensor>(myClip -> getOperator()); + + // Input tensor + std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(Array2D<int,2,2> { + { + {1, 2}, + {3, 4} + } + }); + op -> associateInput(0,T0); + // Tensor representign Min value + std::shared_ptr<Tensor> Tmin = std::make_shared<Tensor>(2.0); + op -> associateInput(1,Tmin); + + // Tensor representign Max value + std::shared_ptr<Tensor> Tmax = std::make_shared<Tensor>(4.0); + op -> associateInput(2,Tmax); + /** + * @todo Special case: scalar not handled yet by + * ``OperatorTensor::forwardDims()`` + */ + SECTION("Scalar") { + //We set every Input as a Scalar + T0->resize({}); + Tmin->resize({}); + Tmax->resize({}); + + REQUIRE_NOTHROW(op->forwardDims(true)); + REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>())); + } + SECTION("Normal Input") { + // a scalar is compatible with any other Tensor + // input_0 + std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(Array2D<int,2,2> { + { + {1, 2}, + {3, 4} + } + }); + const std::size_t nb_dims = nbDimsDist(gen); + 
std::vector<std::size_t> dims(nb_dims); + for (std::size_t i = 0; i < nb_dims; ++i) + { + dims[i] = dimsDist(gen); + } + T0->resize(dims); + op->associateInput(0,T0); + REQUIRE_NOTHROW(op->forwardDims(true)); + REQUIRE((op->getOutput(0)->dims()) == dims); + } + + SECTION("Min and max attributes ") + { + std::shared_ptr<Node> clip_attr = Clip("",-1.0,2.0); + auto opc = std::static_pointer_cast<OperatorTensor>(clip_attr -> getOperator()); + std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + opc -> associateInput(0,T0); + std::shared_ptr<Tensor> Tmin = std::make_shared<Tensor>(7); + opc-> associateInput(1,Tmin); + std::shared_ptr<Tensor> Tmax = std::make_shared<Tensor>(4); + opc -> associateInput(2,Tmax); + + REQUIRE_NOTHROW(opc->forwardDims(true)); + REQUIRE((opc->getOutput(0)->dims() == std::vector<std::size_t>())); + } + SECTION("Min and max attributes (No Input for min and max)") + { + std::shared_ptr<Node> clip = Clip("",-1.0,2.0); + auto opcl = std::static_pointer_cast<OperatorTensor>(clip -> getOperator()); + std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + opcl -> associateInput(0,T0); + REQUIRE_NOTHROW(opcl->forwardDims()); + REQUIRE((opcl->getOutput(0)->dims() == std::vector<std::size_t>())); + } +} +} // namespace Aidge diff --git a/unit_tests/operator/Test_ConstantOfShape.cpp b/unit_tests/operator/Test_ConstantOfShape.cpp index c10d97ce5fb774e051e75f051772e1cbcd41dbea..00013f86d9a7b357e409a483c80ca80cd332a17e 100644 --- a/unit_tests/operator/Test_ConstantOfShape.cpp +++ b/unit_tests/operator/Test_ConstantOfShape.cpp @@ -78,6 +78,7 @@ TEST_CASE("[core/operator] ConstantOfShape_Op(forwardDims)", for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) { CHECK(array_in[i] == op->getOutput(0)->dims().at(i)); } + delete[] array_in; } } } diff --git a/unit_tests/operator/Test_Heaviside_Op.cpp b/unit_tests/operator/Test_Heaviside_Op.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d53268d1213730dc6202193d26b0531e781518f7 --- /dev/null +++ b/unit_tests/operator/Test_Heaviside_Op.cpp @@ -0,0 +1,70 @@ +/******************************************************************************** + * Copyright (c) 2024 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> +#include <catch2/generators/catch_generators_random.hpp> +#include <cstddef> // std::size_t +#include <memory> +#include <random> // std::mt19937, std::uniform_int_distribution +#include <vector> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Heaviside.hpp" +#include "aidge/operator/OperatorTensor.hpp" + +namespace Aidge { + +TEST_CASE("[core/operator] Heaviside_Op(forwardDims)", + "[Heaviside][forwardDims]") { + + constexpr std::uint16_t NBTRIALS = 10; + + // Create a random number generator + auto rd = Catch::Generators::Detail::getSeed; + std::mt19937 gen(rd()); + std::uniform_int_distribution<std::size_t> dimsDist(1, 10); + std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5); + + // Create Heaviside Operator + std::shared_ptr<Node> myHeaviside = Heaviside(0.5); + auto op = + std::static_pointer_cast<OperatorTensor>(myHeaviside->getOperator()); + + // input_0 + std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + op->associateInput(0, T0); + + SECTION("Scalar") { + // input 0 + T0->resize({}); + + REQUIRE_NOTHROW(op->forwardDims()); + REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>())); + } + + SECTION("+1-D Tensor") { + + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + + const std::size_t nb_dims = nbDimsDist(gen); + std::vector<std::size_t> dims(nb_dims); + + for (std::size_t i = 0; i < nb_dims; ++i) { + dims[i] = dimsDist(gen); + } + T0->resize(dims); + + REQUIRE_NOTHROW(op->forwardDims()); + REQUIRE((op->getOutput(0)->dims()) == dims); + } + } +} +} // namespace Aidge diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp index d1b4e2e31e8c57e2c3eebd42019ba9f42c4d39e0..6711e1524fe0595b4effd68397f8cb684df590a9 100644 --- a/unit_tests/operator/Test_MetaOperator.cpp +++ b/unit_tests/operator/Test_MetaOperator.cpp @@ -34,10 +34,10 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") { REQUIRE(microGraph->getNodes().size() == 2); REQUIRE(microGraph->inputNodes().size() == 2); // 2 because Conv has inputs outside the meta-op (Producers for weight and bias) - REQUIRE(nodePtrTo(microGraph->getOrderedInputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"Pad", 0}, {"Conv", 1}, {"Conv", 2}})); - REQUIRE(nodePtrTo(microGraph->getOrderedOutputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"Conv", 0}})); + REQUIRE(nodePtrTo(microGraph->getOrderedInputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"Pad2D", 0}, {"Conv2D", 1}, {"Conv2D", 2}})); + REQUIRE(nodePtrTo(microGraph->getOrderedOutputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"Conv2D", 0}})); REQUIRE(microGraph->outputNodes().size() == 1); - REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv"); + REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv2D"); REQUIRE(op->nbInputs() == 3); REQUIRE(op->inputCategory(0) == InputCategory::Data); REQUIRE(op->inputCategory(1) == InputCategory::Param); diff --git a/unit_tests/operator/Test_Operator.cpp b/unit_tests/operator/Test_Operator.cpp index a050bbc4021b0c70a0d8faf6478eb2bd13ebdb58..6bd12c51ef367ad1cf1859afc56af8a21a706237 100644 --- a/unit_tests/operator/Test_Operator.cpp +++ b/unit_tests/operator/Test_Operator.cpp @@ -26,7 +26,7 @@ namespace Aidge { // TEST_CASE("[core/operator] Operator(computeReceptiveField)", 
"[Operator][computeReceptiveFiled]") { // auto dataProvider1 = Producer({16, 3, 224, 224}, "dataProvider1"); // auto dataProvider2 = Producer({16, 3, 224, 224}, "dataProvider2"); -// auto gen1 = Add(2); +// auto gen1 = Add(); // auto gen2 = ReLU(); // auto g = std::make_shared<GraphView>("TestGraph"); diff --git a/unit_tests/operator/Test_TransposeImpl.cpp b/unit_tests/operator/Test_TransposeImpl.cpp index 18f0d68d87ac1ee66ffb1f24c4c130f9b020d56e..02b338dd265ceb4d9e03bbf86398cc8db38045df 100644 --- a/unit_tests/operator/Test_TransposeImpl.cpp +++ b/unit_tests/operator/Test_TransposeImpl.cpp @@ -128,6 +128,75 @@ TEST_CASE("[cpu/operator] Transpose(forward)") { op->setBackend("cpu"); myTranspose->forward(); + REQUIRE(*(op->getOutput(0)) == *output); + } + SECTION("Default permutation") { + std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,2,3,1,4> { + { + { + { + {1, 2, 3, 4} + }, + { + {5, 6, 7, 8} + }, + { + {9, 10, 11, 12} + } + }, + { + { + {13, 14, 15, 16} + }, + { + {17, 18, 19, 20} + }, + { + {21, 22, 23, 24} + } + } + } + }); + + std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array4D<int,4,1,3,2> { + { + { + { + { 1, 13}, + { 5, 17}, + { 9, 21} + } + }, + { + { + { 2, 14}, + { 6, 18}, + {10, 22} + } + }, + { + { + { 3, 15}, + { 7, 19}, + {11, 23} + } + }, + { + { + { 4, 16}, + { 8, 20}, + {12, 24} + } + } + } + }); + std::shared_ptr<Node> myTranspose = Transpose({}); + auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator()); + op->associateInput(0,input); + op->setDataType(DataType::Int32); + op->setBackend("cpu"); + myTranspose->forward(); + REQUIRE(*(op->getOutput(0)) == *output); } } diff --git a/unit_tests/recipes/Test_FuseToMetaOps.cpp b/unit_tests/recipes/Test_FuseToMetaOps.cpp index 9fceedf2feef0a3ed79b83a8494a1a2b49f77291..7bb3ae63add6568f7de08a996b27a495a644cf46 100644 --- a/unit_tests/recipes/Test_FuseToMetaOps.cpp +++ b/unit_tests/recipes/Test_FuseToMetaOps.cpp @@ -35,7 +35,7 @@ TEST_CASE("[cpu/recipes] FuseToMetaOps", "[FuseToMetaOps][recipes]") { g1->save("FuseToMetaOps_before"); // FIXME: GraphRegex also matches the Conv Producers, which are not in the query! 
- const auto nbFused = fuseToMetaOps(g1, "Conv->ReLU", "ConvReLU"); + const auto nbFused = fuseToMetaOps(g1, "Conv2D->ReLU", "ConvReLU"); g1->save("FuseToMetaOps_after", true); REQUIRE(nbFused == 2); diff --git a/unit_tests/recipes/Test_LabelGraph.cpp b/unit_tests/recipes/Test_LabelGraph.cpp index 78f67d823a17454c1ecff40a2307556c990c4f53..82dae3c48c137b12b8c0816fadcf40a1251137d7 100644 --- a/unit_tests/recipes/Test_LabelGraph.cpp +++ b/unit_tests/recipes/Test_LabelGraph.cpp @@ -44,11 +44,11 @@ TEST_CASE("[LabelGraph] conv") { SECTION("Check resulting nodes") { REQUIRE(g2->getNodes().size() == 3); - REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling"); + REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling2D"); REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0)); - REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling"); + REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling2D"); REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0)); - REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling"); + REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling2D"); } } @@ -73,11 +73,11 @@ TEST_CASE("[LabelGraph] deleted node") { SECTION("Check resulting nodes") { REQUIRE(g2->getNodes().size() == 3); - REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling"); + REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling2D"); REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0)); - REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling"); + REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling2D"); REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0)); - REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling"); + REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling2D"); } SECTION("Check dimensions") { @@ -111,11 +111,11 @@ TEST_CASE("[LabelGraph] deleted nodes") { SECTION("Check resulting nodes") { REQUIRE(g2->getNodes().size() == 3); - REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling"); + REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling2D"); REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0)); - REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling"); + REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling2D"); REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0)); - REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling"); + REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling2D"); } } @@ -139,11 +139,11 @@ TEST_CASE("[LabelGraph] pooling") { SECTION("Check resulting nodes") { REQUIRE(g2->getNodes().size() == 3); - REQUIRE(g2->getNode("pool1")->getOperator()->type() == "MaxPooling"); + REQUIRE(g2->getNode("pool1")->getOperator()->type() == "MaxPooling2D"); REQUIRE(g2->getNode("pool1")->getOperator()->getRawOutput(0) == g2->getNode("pool2")->getOperator()->getRawInput(0)); - REQUIRE(g2->getNode("pool2")->getOperator()->type() == "MaxPooling"); + REQUIRE(g2->getNode("pool2")->getOperator()->type() == "MaxPooling2D"); REQUIRE(g2->getNode("pool2")->getOperator()->getRawOutput(0) == 
g2->getNode("pool3")->getOperator()->getRawInput(0)); - REQUIRE(g2->getNode("pool3")->getOperator()->type() == "MaxPooling"); + REQUIRE(g2->getNode("pool3")->getOperator()->type() == "MaxPooling2D"); } SECTION("Check dimensions") { diff --git a/unit_tests/recipes/Test_MatMulToFC.cpp b/unit_tests/recipes/Test_MatMulToFC.cpp index 2adf882ca69e0d5ca5f050d1b89cfb09d81b536b..28eae0be17297467a29eab4e868e074c336d4a12 100644 --- a/unit_tests/recipes/Test_MatMulToFC.cpp +++ b/unit_tests/recipes/Test_MatMulToFC.cpp @@ -27,9 +27,9 @@ TEST_CASE("[cpu/recipes] MatMulToFC", "[MatMulToFC][recipes]") { SECTION("with Add") { // generate the original GraphView auto matmul0 = MatMul("matmul0"); - auto add0 = Add(2, "add0"); + auto add0 = Add("add0"); auto matmul1 = MatMul("matmul1"); - auto add1 = Add(2, "add1"); + auto add1 = Add("add1"); auto b0 = Producer({5}, "B0"); auto w0 = Producer({5, 5}, "W0"); @@ -76,7 +76,7 @@ TEST_CASE("[cpu/recipes] MatMulToFC", "[MatMulToFC][recipes]") { // generate the original GraphView auto matmul0 = MatMul("matmul0"); auto matmul1 = MatMul("matmul1"); - auto add1 = Add(2, "add1"); + auto add1 = Add("add1"); auto w0 = Producer({5, 5}, "W0"); auto b1 = Producer({5}, "B1"); diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp index 3c3026ff09222f9623d886f9c4574bf23667cd9a..ec850d28109a2682bb762c89e814622de6eec3d8 100644 --- a/unit_tests/scheduler/Test_Scheduler.cpp +++ b/unit_tests/scheduler/Test_Scheduler.cpp @@ -26,6 +26,8 @@ #include "aidge/graph/Testing.hpp" #include "aidge/operator/GenericOperator.hpp" #include "aidge/operator/Producer.hpp" +#include "aidge/operator/Identity.hpp" +#include "aidge/operator/GenericOperator.hpp" #include "aidge/scheduler/SequentialScheduler.hpp" namespace Aidge { @@ -134,4 +136,58 @@ TEST_CASE("randomScheduling", "[Scheduler][randomGen]") { fmt::print("nbUnicity = {}/{}\n", nbUnicity, nbTests); } +TEST_CASE("someScheduling", "[Scheduler][someUseCases]") { + + SECTION("Identity Graph") { + auto data1 = Producer({1}, "data1"); + auto identity = Identity("id"); + auto g = std::make_shared<GraphView>("TestGraph"); + data1->addChild(identity); + g->add({data1, identity}); + auto scheduler = SequentialScheduler(g); + scheduler.generateScheduling(); + const auto sch = scheduler.getStaticScheduling(); + const auto nodes = g->getNodes(); + REQUIRE(sch.size() == nodes.size()); + REQUIRE(sch[0] == data1); + REQUIRE(sch[1] == identity); + } + + SECTION("Producer Graph") { + auto data1 = Producer({1}, "data1"); + auto g = std::make_shared<GraphView>("TestGraph"); + g->add({data1}); + auto scheduler = SequentialScheduler(g); + scheduler.generateScheduling(); + const auto sch = scheduler.getStaticScheduling(); + const auto nodes = g->getNodes(); + REQUIRE(sch.size() == nodes.size()); + REQUIRE(sch[0] == data1); + } + + SECTION("Generic producer Graph") { + auto gen1 = GenericOperator("Prod", 0, 0, 1, "gen1"); + auto g = std::make_shared<GraphView>("TestGraph"); + g->add({gen1}); + auto scheduler = SequentialScheduler(g); + scheduler.generateScheduling(); + const auto sch = scheduler.getStaticScheduling(); + const auto nodes = g->getNodes(); + REQUIRE(sch.size() == nodes.size()); + REQUIRE(sch[0] == gen1); + } + + SECTION("No output Graph") { + auto dead1 = GenericOperator("Dead", 1, 0, 0, "dead"); + auto g = std::make_shared<GraphView>("TestGraph"); + g->add({dead1}); + auto scheduler = SequentialScheduler(g); + scheduler.generateScheduling(); + const auto sch = scheduler.getStaticScheduling(); + const auto nodes = 
g->getNodes(); + REQUIRE(nodes.size() == 1); + REQUIRE(sch.size() == 0); + } +} + } // namespace Aidge diff --git a/version.txt b/version.txt index ee1372d33a29e27945406f0527f8af8e6ee119c9..0d91a54c7d439e84e3dd17d3594f1b2b6737f430 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.2.2 +0.3.0