diff --git a/aidge_onnx/node_export/generic_export.py b/aidge_onnx/node_export/generic_export.py index c5a27323417dffb45619e3cf8b5c47c27df1a094..40b8b9b2b97e5f2b6eced3f1e66aaaf49de0992e 100644 --- a/aidge_onnx/node_export/generic_export.py +++ b/aidge_onnx/node_export/generic_export.py @@ -12,6 +12,7 @@ from typing import List import onnx from onnx import helper import aidge_core +from aidge_core import Log from aidge_onnx.utils import _AIDGE_DOMAIN @@ -51,8 +52,7 @@ def generic_export( if issubclass(type(aidge_operator), aidge_core.Attributes): for key, val in aidge_operator.attr.dict(): - if verbose: - print(f"\t{key}: {val}") + Log.info(f"\t{key}: {val}") onnx_node.attribute.append(helper.make_attribute(key, val)) diff --git a/aidge_onnx/node_import/generic.py b/aidge_onnx/node_import/generic.py index 37b335a00f8464d09d3ac5d4ddac5101cd7015c8..79d2eac6c495ce71bc8709ed9532453c9880aaa6 100644 --- a/aidge_onnx/node_import/generic.py +++ b/aidge_onnx/node_import/generic.py @@ -8,9 +8,9 @@ http://www.eclipse.org/legal/epl-2.0. SPDX-License-Identifier: EPL-2.0 """ from typing import List, Tuple - import onnx import aidge_core +from aidge_core import Log def import_generic(onnx_node: onnx.NodeProto, input_nodes: List[Tuple[aidge_core.Node, int]], opset = None) -> aidge_core.Node: """ @@ -28,9 +28,7 @@ def import_generic(onnx_node: onnx.NodeProto, input_nodes: List[Tuple[aidge_core for onnx_attribute in onnx_node.attribute: operator.attr.add_attr(onnx_attribute.name, onnx.helper.get_attribute_value(onnx_attribute)) - # TODO : Add verbose parameter somewhere to avoid those logs ... - # TODO : Add a toString method to genericOperator - print(f"- {node_name} ({onnx_node.op_type} | GenericOperator)") + Log.notice(f"- {node_name} ({onnx_node.op_type} | GenericOperator)") for param_name in operator.attr.dict().keys(): - print(f"\t- {param_name} : {operator.attr.get_attr(param_name)}") + Log.notice(f"\t- {param_name} : {operator.attr.get_attr(param_name)}") return generic_node diff --git a/aidge_onnx/node_import/onnx_converters/add.py b/aidge_onnx/node_import/onnx_converters/add.py index d51d2d3dfe15914ca30b5bee6d59d3739c6f4d86..9a5888eb77b9616978227fcc76cc7359a4d6e61a 100644 --- a/aidge_onnx/node_import/onnx_converters/add.py +++ b/aidge_onnx/node_import/onnx_converters/add.py @@ -28,9 +28,9 @@ def import_add(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node, node_name = onnx_node.output[0] for attr in onnx_node.attribute: - print(f"Warning: Attribute {attr.name} is not supported for operator add.") + Log.warn(f"Attribute {attr.name} is not supported for operator add.") return None nb_inputs = len(input_nodes) my_node = aidge_core.Node(aidge_core.AddOp(nb_inputs), name=node_name) Log.notice(f"- {node_name} ({onnx_node.op_type})") - return my_node + return my_node \ No newline at end of file diff --git a/aidge_onnx/node_import/onnx_converters/averagepool.py b/aidge_onnx/node_import/onnx_converters/averagepool.py index 15eb6d1a4135f2ab3ebf7b5672498d6c5fca4492..b926e11ff55017f118f17b27b7a93d284c067603 100644 --- a/aidge_onnx/node_import/onnx_converters/averagepool.py +++ b/aidge_onnx/node_import/onnx_converters/averagepool.py @@ -50,7 +50,7 @@ def import_avg_pooling(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_co dilation_dims = [1] * len(kernel_dims) if np.count_nonzero(dilation_dims - np.array(1)) > 0: - print(f"Warning: Attribute 'dilations' value is not supported for operator averagepool.") + Log.warn(f"Attribute 'dilations' value is not supported for operator averagepool.") 
return None padding_dims = [0] * 2*len(kernel_dims) @@ -79,18 +79,18 @@ def import_avg_pooling(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_co if 'ceil_mode' in attrs: if attrs['ceil_mode'].i != 0: - print(f"Warning: Attribute 'ceil_mode' value {attrs['ceil_mode'].i} is not supported for operator averagepool.") + Log.warn(f"Attribute 'ceil_mode' value {attrs['ceil_mode'].i} is not supported for operator averagepool.") return None del attrs['ceil_mode'] if 'count_include_pad' in attrs: if attrs['count_include_pad'].i != 0: - print(f"Warning: Attribute 'count_include_pad' value {attrs['count_include_pad'].i} is not supported for operator averagepool.") + Log.warn(f"Attribute 'count_include_pad' value {attrs['count_include_pad'].i} is not supported for operator averagepool.") return None del attrs['count_include_pad'] if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator averagepool.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator averagepool.") return None if np.count_nonzero(padding_dims) > 0: @@ -100,7 +100,7 @@ def import_avg_pooling(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_co stride_dims=stride_dims, padding_dims=padding_dims) else: - print(f"Warning: PaddedAvgPooling{len(kernel_dims)}D is not supported.") + Log.warn(f"PaddedAvgPooling{len(kernel_dims)}D is not supported.") return None else: if f"AvgPooling{len(kernel_dims)}D" in dir(aidge_core): @@ -108,7 +108,7 @@ def import_avg_pooling(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_co kernel_dims, stride_dims=stride_dims) else: - print(f"Warning: AvgPooling{len(kernel_dims)}D is not supported.") + Log.warn(f"AvgPooling{len(kernel_dims)}D is not supported.") return None Log.notice(f"- {node_name} ({onnx_node.op_type})") diff --git a/aidge_onnx/node_import/onnx_converters/batchnorm.py b/aidge_onnx/node_import/onnx_converters/batchnorm.py index 3704417732f0dc2558166cf45a58cbcb13f12250..1b0fae5146396547a327971a90b77736ec36ded0 100644 --- a/aidge_onnx/node_import/onnx_converters/batchnorm.py +++ b/aidge_onnx/node_import/onnx_converters/batchnorm.py @@ -33,7 +33,7 @@ def import_batch_norm(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_cor if attrs['spatial'].i == 1: del attrs['spatial'] else: - print(f"Warning: Attribute 'spatial' value {attrs['spatial'].i} is not supported for operator batchnorm.") + Log.warn(f"Attribute 'spatial' value {attrs['spatial'].i} is not supported for operator batchnorm.") return None if 'epsilon' in attrs: epsilon = attrs['epsilon'].f @@ -49,12 +49,12 @@ def import_batch_norm(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_cor if 'training_mode' in attrs: if attrs['training_mode'].i != 0: - print(f"Warning: Attribute 'training_mode' value {attrs['training_mode'].i} is not supported for operator batchnorm.") + Log.warn(f"Attribute 'training_mode' value {attrs['training_mode'].i} is not supported for operator batchnorm.") return None del attrs['training_mode'] if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator batchnorm.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator batchnorm.") return None # Do not use BatchNorm2D helper here, because it requires nb_features argument diff --git a/aidge_onnx/node_import/onnx_converters/concat.py b/aidge_onnx/node_import/onnx_converters/concat.py index 6b413eab4f066e70632dc244ccba63f964df1321..618fc442df9f07855f0b5caf2d9b8a13e19593ae 100644 --- a/aidge_onnx/node_import/onnx_converters/concat.py +++ 
b/aidge_onnx/node_import/onnx_converters/concat.py @@ -54,8 +54,8 @@ def import_concat(onnx_node: onnx.NodeProto, input_nodes: List[Tuple[aidge_core. return None if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator 'concat' with opset {opset}.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator 'concat' with opset {opset}.") return None Log.notice(f"- {node_name} ({onnx_node.op_type})") - return aidge_core.Node(my_op, name = node_name) + return aidge_core.Node(my_op, name = node_name) \ No newline at end of file diff --git a/aidge_onnx/node_import/onnx_converters/constant.py b/aidge_onnx/node_import/onnx_converters/constant.py index 9e22707639d1b44692cc3f30c0d8ec326e3c6abd..4c3010d39f55852ee9d3425967f1860689a287dd 100644 --- a/aidge_onnx/node_import/onnx_converters/constant.py +++ b/aidge_onnx/node_import/onnx_converters/constant.py @@ -32,7 +32,7 @@ def import_constant(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core. if(len(onnx_node.attribute) == 1): values = numpy_helper.to_array(onnx_node.attribute[0].t) if onnx_node.attribute[0].name == "value": - print(f"val type: {values.dtype}") + Log.debug(f"val type: {values.dtype}") return aidge_core.Producer(aidge_core.Tensor(values) if values.shape != () else aidge_core.Tensor(np.array(values.item(), dtype=values.dtype)), node_name, True) elif onnx_node.attribute[0].name == "sparse_value": raise RuntimeError(f"The attribute {onnx_node.attribute[0].name} is not yet supported. Please create the conversion to Producer in node_converters/converters/contant.py or open an issue at: https://gitlab.eclipse.org/eclipse/aidge/aidge_onnx/-/issues") diff --git a/aidge_onnx/node_import/onnx_converters/conv.py b/aidge_onnx/node_import/onnx_converters/conv.py index d679f6f8f3504fc09645f1785ce75cab27c148d8..b14239d02612fa58a5f3a34c9aa65cb62bbac7a0 100644 --- a/aidge_onnx/node_import/onnx_converters/conv.py +++ b/aidge_onnx/node_import/onnx_converters/conv.py @@ -71,7 +71,7 @@ def import_conv(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node elif len(input_nodes) == 3: no_bias = False else: - print(f"Conv with {len(input_nodes)} inputs is not supported.") + Log.warn(f"Conv with {len(input_nodes)} inputs is not supported.") return None if 'kernel_shape' in attrs: @@ -127,7 +127,7 @@ def import_conv(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node del attrs['auto_pad'] if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator conv.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator conv.") return None # out_channels = input_nodes[1][0].get_operator().get_output(input_nodes[1][1]).dims()[0] @@ -156,7 +156,7 @@ def import_conv(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node else: raise RuntimeError(f"Unsupported node type: {node.type()} inside PaddedConv.") else: - print(f"Warning: PaddedConv{len(kernel_dims)}D is not supported.") + Log.warn(f"PaddedConv{len(kernel_dims)}D is not supported.") return None else: if f"Conv{len(kernel_dims)}D" in dir(aidge_core): @@ -168,12 +168,12 @@ def import_conv(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node dilation_dims=dilation_dims ) else: - print(f"Warning: Conv{len(kernel_dims)}D is not supported.") + Log.warn(f"Conv{len(kernel_dims)}D is not supported.") return None else: # ConvDepthWise # Let's continue like 'group' is always valid for now # if group != out_channels: - # print(f"Warning: 'group' attribute value {group} is not supported for 
operator conv.") + # Log.warn(f"'group' attribute value {group} is not supported for operator conv.") # return None # in_channels = out_channels @@ -188,7 +188,7 @@ def import_conv(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node dilation_dims=dilation_dims ) else: - print(f"Warning: PaddedConvDepthWise{len(kernel_dims)}D is not supported.") + Log.warn(f"PaddedConvDepthWise{len(kernel_dims)}D is not supported.") return None else: if f"ConvDepthWise{len(kernel_dims)}D" in dir(aidge_core): @@ -199,8 +199,8 @@ def import_conv(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node dilation_dims=dilation_dims ) else: - print(f"Warning: ConvDepthWise{len(kernel_dims)}D is not supported.") + Log.warn(f"ConvDepthWise{len(kernel_dims)}D is not supported.") return None Log.notice(f"- {node_name} ({onnx_node.op_type})") - return aidge_core.Node(aidge_op, name = node_name) + return aidge_core.Node(aidge_op, name = node_name) \ No newline at end of file diff --git a/aidge_onnx/node_import/onnx_converters/div.py b/aidge_onnx/node_import/onnx_converters/div.py index bfa63196ea43076a86d12a8d491421436294e958..5fef8996a66061ffd11a0d408338bfaeeb2bd28e 100644 --- a/aidge_onnx/node_import/onnx_converters/div.py +++ b/aidge_onnx/node_import/onnx_converters/div.py @@ -28,9 +28,9 @@ def import_div(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node, node_name = onnx_node.output[0] for attr in onnx_node.attribute: - print(f"Warning: Attribute {attr.name} is not supported for operator div.") + Log.warn(f"Attribute {attr.name} is not supported for operator div.") return None my_node = aidge_core.Div(name=node_name) Log.notice(f"- {node_name} ({onnx_node.op_type})") - return my_node + return my_node \ No newline at end of file diff --git a/aidge_onnx/node_import/onnx_converters/erf.py b/aidge_onnx/node_import/onnx_converters/erf.py index 514c724d1ab4a9e305a00e4dc704cb33bedef50f..c0c0407925dbe400c4af11fac8ad51f3c1bf6820 100644 --- a/aidge_onnx/node_import/onnx_converters/erf.py +++ b/aidge_onnx/node_import/onnx_converters/erf.py @@ -26,9 +26,9 @@ def import_erf(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node, node_name = onnx_node.output[0] for attr in onnx_node.attribute: - print(f"Warning: Attribute {attr.name} is not supported for operator erf.") + Log.warn(f"Attribute {attr.name} is not supported for operator erf.") return None my_node = aidge_core.Erf(name=node_name) Log.notice(f"- {node_name} ({onnx_node.op_type})") - return my_node + return my_node \ No newline at end of file diff --git a/aidge_onnx/node_import/onnx_converters/gather.py b/aidge_onnx/node_import/onnx_converters/gather.py index b574fdef2b3c01fa000f5604a935e4b07b48b59c..282f7d449e158cf81702debd26d646c3857f4b22 100644 --- a/aidge_onnx/node_import/onnx_converters/gather.py +++ b/aidge_onnx/node_import/onnx_converters/gather.py @@ -33,7 +33,7 @@ def import_gather(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.No del attrs['axis'] if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator gather.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator gather.") return None gather_node = aidge_core.Gather(axis, name=node_name) diff --git a/aidge_onnx/node_import/onnx_converters/gemm.py b/aidge_onnx/node_import/onnx_converters/gemm.py index 8e34a5ac0258473d7b5d6c1a171e524d782a2007..e820316b043cbb99efe588e60eee30da6a53a72e 100644 --- a/aidge_onnx/node_import/onnx_converters/gemm.py +++ b/aidge_onnx/node_import/onnx_converters/gemm.py @@ 
-29,30 +29,30 @@ def import_gemm(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node if 'transA' in attrs: if attrs['transA'].i != 0: - print(f"Warning: Attribute 'transA' value {attrs['transA'].i} is not supported for operator gemm.") + Log.warn(f"Attribute 'transA' value {attrs['transA'].i} is not supported for operator gemm.") return None del attrs['transA'] if 'transB' in attrs: if attrs['transB'].i != 1: - print(f"Warning: Attribute 'transB' value {attrs['transB'].i} is not supported for operator gemm.") + Log.warn(f"Attribute 'transB' value {attrs['transB'].i} is not supported for operator gemm.") return None del attrs['transB'] if 'alpha' in attrs: if attrs['alpha'].f != 1.0: - print(f"Warning: Attribute 'alpha' value {attrs['alpha'].f} is not supported for operator gemm.") + Log.warn(f"Attribute 'alpha' value {attrs['alpha'].f} is not supported for operator gemm.") return None del attrs['alpha'] if 'beta' in attrs: if attrs['beta'].f != 1.0: - print(f"Warning: Attribute 'beta' value {attrs['beta'].f} is not supported for operator gemm.") + Log.warn(f"Attribute 'beta' value {attrs['beta'].f} is not supported for operator gemm.") return None del attrs['beta'] if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator gemm.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator gemm.") return None # nb_outputs = input_nodes[1][0].get_operator().get_output(input_nodes[1][1]).dims()[0] @@ -60,4 +60,4 @@ def import_gemm(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node fc_node = aidge_core.Node(aidge_core.FCOp(), name=node_name) Log.notice(f"- {node_name} ({onnx_node.op_type})") - return fc_node + return fc_node \ No newline at end of file diff --git a/aidge_onnx/node_import/onnx_converters/identity.py b/aidge_onnx/node_import/onnx_converters/identity.py index 0a0767284485476f5946943dd0ce99847a652f0f..7d86ead249c6123625e0d2075d722a12c9f987f8 100644 --- a/aidge_onnx/node_import/onnx_converters/identity.py +++ b/aidge_onnx/node_import/onnx_converters/identity.py @@ -26,7 +26,7 @@ def import_identity(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core. 
node_name = onnx_node.output[0] for attr in onnx_node.attribute: - print(f"Warning: Attribute {attr.name} is not supported for operator identity.") + Log.warn(f"Attribute {attr.name} is not supported for operator identity.") return None my_node = aidge_core.Identity(name=node_name) diff --git a/aidge_onnx/node_import/onnx_converters/lstm.py b/aidge_onnx/node_import/onnx_converters/lstm.py index 95b49ec9173e12413a67857d796df37bb62b0259..ecb2cfe1768451a6bea8d88a565c03adfaf93cc3 100644 --- a/aidge_onnx/node_import/onnx_converters/lstm.py +++ b/aidge_onnx/node_import/onnx_converters/lstm.py @@ -32,7 +32,7 @@ def import_lstm(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node if attrs['direction'].s == b'forward': del attrs['direction'] else: - print(f"Warning: Attribute 'direction' value {attrs['direction'].s} is not supported for operator lstm.") + Log.warn(f"Attribute 'direction' value {attrs['direction'].s} is not supported for operator lstm.") return None if 'hidden_size' in attrs: @@ -43,11 +43,11 @@ def import_lstm(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node if attrs['input_forget'].i == 0: del attrs['input_forget'] else: - print(f"Warning: Attribute 'input_forget' value {attrs['input_forget'].i} is not supported for operator lstm.") + Log.warn(f"Attribute 'input_forget' value {attrs['input_forget'].i} is not supported for operator lstm.") return None if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator lstm.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator lstm.") return None #seq_length = input_nodes[0][0].get_operator().get_output(input_nodes[0][1]).dims()[0] diff --git a/aidge_onnx/node_import/onnx_converters/matmul.py b/aidge_onnx/node_import/onnx_converters/matmul.py index f77500b0475f80e66fb5572973bab990ba3db240..abff9ddcea3856b0fd0b70d92c2ea29fb6f67079 100644 --- a/aidge_onnx/node_import/onnx_converters/matmul.py +++ b/aidge_onnx/node_import/onnx_converters/matmul.py @@ -28,7 +28,7 @@ def import_matmul(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.No attrs = {attr.name : attr for attr in onnx_node.attribute} if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator matmul.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator matmul.") return None matmul_node = aidge_core.MatMul(name=node_name) diff --git a/aidge_onnx/node_import/onnx_converters/maxpool.py b/aidge_onnx/node_import/onnx_converters/maxpool.py index 74fb226374425d7dbba9390b86534413fcd681a8..7022f3c13a25aea27e68f2b1c3efbc613e5fe493 100644 --- a/aidge_onnx/node_import/onnx_converters/maxpool.py +++ b/aidge_onnx/node_import/onnx_converters/maxpool.py @@ -47,7 +47,7 @@ def import_max_pooling(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_co dilation_dims = [1] * len(kernel_dims) if np.count_nonzero(dilation_dims - np.array(1)) > 0: - print(f"Warning: Attribute 'dilations' value is not supported for operator maxpool.") + Log.warn(f"Attribute 'dilations' value is not supported for operator maxpool.") return None padding_dims = [0] * 2*len(kernel_dims) @@ -82,12 +82,12 @@ def import_max_pooling(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_co if 'storage_order' in attrs: if attrs['storage_order'].i != 0: - print(f"Warning: Attribute 'storage_order' value {attrs['storage_order'].i} is not supported for operator maxpool.") + Log.warn(f"Attribute 'storage_order' value {attrs['storage_order'].i} is not supported for operator maxpool.") return None del 
attrs['storage_order'] if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator maxpool.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator maxpool.") return None if (np.count_nonzero(padding_dims) > 0): @@ -99,7 +99,7 @@ def import_max_pooling(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_co padding_dims=padding_dims, ceil_mode=ceil_mode) else: - print(f"Warning: PaddedMaxPooling{len(kernel_dims)}D is not supported.") + Log.warn(f"PaddedMaxPooling{len(kernel_dims)}D is not supported.") return None else: if f"MaxPooling{len(kernel_dims)}D" in dir(aidge_core): @@ -109,7 +109,7 @@ def import_max_pooling(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_co stride_dims=stride_dims, ceil_mode=ceil_mode) else: - print(f"Warning: MaxPooling{len(kernel_dims)}D is not supported.") + Log.warn(f"MaxPooling{len(kernel_dims)}D is not supported.") return None Log.notice(f"- {node_name} ({onnx_node.op_type})") return max_pooling_node diff --git a/aidge_onnx/node_import/onnx_converters/mul.py b/aidge_onnx/node_import/onnx_converters/mul.py index 8a715cd5187db6f7feb26659d944442c6a621141..d23b02504539d86ef84652285198e9964422bbcf 100644 --- a/aidge_onnx/node_import/onnx_converters/mul.py +++ b/aidge_onnx/node_import/onnx_converters/mul.py @@ -28,7 +28,7 @@ def import_mul(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node, node_name = onnx_node.output[0] for attr in onnx_node.attribute: - print(f"Warning: Attribute {attr.name} is not supported for operator mul.") + Log.warn(f"Attribute {attr.name} is not supported for operator mul.") return None my_node = aidge_core.Mul(name=node_name) diff --git a/aidge_onnx/node_import/onnx_converters/pow.py b/aidge_onnx/node_import/onnx_converters/pow.py index d009b717e5d5a722eacc5c5ed82908f033212285..51e1fafa0938e827e88c147b4a2e2e20ace424ea 100644 --- a/aidge_onnx/node_import/onnx_converters/pow.py +++ b/aidge_onnx/node_import/onnx_converters/pow.py @@ -28,7 +28,7 @@ def import_pow(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node, node_name = onnx_node.output[0] for attr in onnx_node.attribute: - print(f"Warning: Attribute {attr.name} is not supported for operator pow.") + Log.warn(f"Attribute {attr.name} is not supported for operator pow.") return None my_node = aidge_core.Pow(name=node_name) diff --git a/aidge_onnx/node_import/onnx_converters/reducemean.py b/aidge_onnx/node_import/onnx_converters/reducemean.py index 3efcdff42471f20f8f18cd9bf2ab53ab0479eff1..d624c70480c34678f6716ae18a3d35738620c29a 100644 --- a/aidge_onnx/node_import/onnx_converters/reducemean.py +++ b/aidge_onnx/node_import/onnx_converters/reducemean.py @@ -103,11 +103,11 @@ def import_reducemean(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_cor # my_op = aidge_core.ReduceMeanOp(axes = rm_axes, reduce_other_axes = rm_reduce_other_axes, keepdims = rm_keepdims) # else: - # print(f"Changing 'axes' attribute for opset >= 18 in ReduceMean operator not supported yet") - print(f"Warning: unsupported operator 'reducemean' with opset {opset}.") + # Log.warn(f"Changing 'axes' attribute for opset >= 18 in ReduceMean operator not supported yet") + Log.warn(f"unsupported operator 'reducemean' with opset {opset}.") return None if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator 'reducemean' with opset {opset}.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator 'reducemean' with opset {opset}.") return None else: Log.notice(f"- {node_name} 
({onnx_node.op_type})") diff --git a/aidge_onnx/node_import/onnx_converters/relu.py b/aidge_onnx/node_import/onnx_converters/relu.py index 2ff1cf26fc0c7cbff437ae7eb261707aa042d0ad..d5f4b7cc9860e20a25b42801b06abe3925fab3a5 100644 --- a/aidge_onnx/node_import/onnx_converters/relu.py +++ b/aidge_onnx/node_import/onnx_converters/relu.py @@ -46,7 +46,7 @@ def import_relu(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node del attrs['consumed_inputs'] if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator 'relu' with opset {opset}.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator 'relu' with opset {opset}.") return None my_node = aidge_core.Node(aidge_core.ReLUOp(), name=node_name) diff --git a/aidge_onnx/node_import/onnx_converters/reshape.py b/aidge_onnx/node_import/onnx_converters/reshape.py index 5916b2d19117af65eb79c6fbe63b3c28ea24f978..f24de68c557f466173c40831e5f81f646158e990 100644 --- a/aidge_onnx/node_import/onnx_converters/reshape.py +++ b/aidge_onnx/node_import/onnx_converters/reshape.py @@ -41,11 +41,11 @@ def import_reshape(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.N input_nodes.append(intput_node) if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator transpose.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator transpose.") return None else: for attr in attrs: - print(f"Warning: Attribute {attr} is not supported for operator reshape.") + Log.warn(f"Attribute {attr} is not supported for operator reshape.") return None if input_nodes[1] is not None: diff --git a/aidge_onnx/node_import/onnx_converters/shape.py b/aidge_onnx/node_import/onnx_converters/shape.py index 9b099728d55dfd4312a1744d35a54daa886e0216..169bdc097b73925c1f7efe267d1d95143dd9368c 100644 --- a/aidge_onnx/node_import/onnx_converters/shape.py +++ b/aidge_onnx/node_import/onnx_converters/shape.py @@ -36,5 +36,5 @@ def import_shape(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Nod del attrs['end'] my_node = aidge_core.Shape(start=start, end=end, name=node_name) - print(f"- {node_name} ({onnx_node.op_type})") + Log.notice(f"- {node_name} ({onnx_node.op_type})") return my_node \ No newline at end of file diff --git a/aidge_onnx/node_import/onnx_converters/shape_new.py b/aidge_onnx/node_import/onnx_converters/shape_new.py index 7f50247feb8916441c9fc61a5bc4e2abc05b8606..408de837f188ff1db46731d7c60f89c90a39556a 100644 --- a/aidge_onnx/node_import/onnx_converters/shape_new.py +++ b/aidge_onnx/node_import/onnx_converters/shape_new.py @@ -57,8 +57,8 @@ def import_shape(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Nod del attrs[key] if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator 'shape' with opset {opset}.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator 'shape' with opset {opset}.") return None my_node = aidge_core.Node(aidge_core.Shape(onnx_attr_parsed['start'], onnx_attr_parsed['end']), name = onnx_node.output[0]) - print(f"- {node_name} ({onnx_node.op_type})") + Log.notice(f"- {node_name} ({onnx_node.op_type})") return my_node \ No newline at end of file diff --git a/aidge_onnx/node_import/onnx_converters/sigmoid.py b/aidge_onnx/node_import/onnx_converters/sigmoid.py index fad751c9a4deb1610eec1de42fdd0eeff659c669..093a46bbc01fca3fb60892c653d980a177d497c5 100644 --- a/aidge_onnx/node_import/onnx_converters/sigmoid.py +++ b/aidge_onnx/node_import/onnx_converters/sigmoid.py @@ -28,7 +28,7 @@ def 
import_sigmoid(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.N node_name = onnx_node.output[0] for attr in onnx_node.attribute: - print(f"Warning: Attribute {attr.name} is not supported for operator relu.") + Log.warn(f"Attribute {attr.name} is not supported for operator relu.") return None my_node = aidge_core.Sigmoid(name=node_name) diff --git a/aidge_onnx/node_import/onnx_converters/slice.py b/aidge_onnx/node_import/onnx_converters/slice.py index 58b7f154307a21867713d226f3416d5bf2c587b2..5fc80b65566453cf61e3fff87be2393ee8dcacd6 100644 --- a/aidge_onnx/node_import/onnx_converters/slice.py +++ b/aidge_onnx/node_import/onnx_converters/slice.py @@ -26,9 +26,9 @@ def import_slice(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Nod node_name = onnx_node.output[0] for attr in onnx_node.attribute: - print(f"Warning: Attribute {attr.name} is not supported for operator slice.") + Log.warn(f"Attribute {attr.name} is not supported for operator slice.") return None slice_node = aidge_core.Slice(name=node_name) - print(f"- {node_name} ({onnx_node.op_type})") + Log.notice(f"- {node_name} ({onnx_node.op_type})") return slice_node diff --git a/aidge_onnx/node_import/onnx_converters/split.py b/aidge_onnx/node_import/onnx_converters/split.py index 6a77d54053596e029d7fa3cc7f8e8afb1b4ff992..d2fbe4db411986d0fb8e3f3ee263b1d1aff5a72b 100644 --- a/aidge_onnx/node_import/onnx_converters/split.py +++ b/aidge_onnx/node_import/onnx_converters/split.py @@ -34,7 +34,7 @@ def import_split(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Nod splits = [] if 'num_outputs' in attrs: if attrs['num_outputs'].i != num_outputs: - print(f"Warning: 'num_outputs' attribute has different value {attrs['num_outputs'].i} from available output nodes {num_outputs}!") + Log.warn(f"'num_outputs' attribute has different value {attrs['num_outputs'].i} from available output nodes {num_outputs}!") return None del attrs['num_outputs'] @@ -42,11 +42,11 @@ def import_split(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Nod splits = attrs['split'].ints del attrs['split'] if len(splits) != num_outputs: - print(f"Warning: 'splits' attribute has different length {len(splits)} from available output nodes {num_outputs}!") + Log.warn(f"'splits' attribute has different length {len(splits)} from available output nodes {num_outputs}!") return None if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator Split.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator Split.") return None if len(input_nodes) > 1: # in case split is provided as input @@ -54,14 +54,14 @@ def import_split(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Nod splits = input_nodes[1][0].get_operator().get_output(input_nodes[1][1]) num_outputs= len(splits) if len(splits) != num_outputs: - print(f"Warning: 'splits' attribute has different length {len(splits)} from available output nodes {num_outputs}!") + Log.warn(f"'splits' attribute has different length {len(splits)} from available output nodes {num_outputs}!") return None if len(splits): my_node = aidge_core.Split(nb_outputs=num_outputs, axis=axis , split=splits, name=node_name) - print(f"- {node_name} ({onnx_node.op_type})") + Log.notice(f"- {node_name} ({onnx_node.op_type})") return my_node else: my_node = aidge_core.Split(nb_outputs=num_outputs, axis=axis, name=node_name) - print(f"- {node_name} ({onnx_node.op_type})") + Log.notice(f"- {node_name} ({onnx_node.op_type})") return my_node \ No newline at end of file diff 
--git a/aidge_onnx/node_import/onnx_converters/sqrt.py b/aidge_onnx/node_import/onnx_converters/sqrt.py index b35ff6af68bd59e7570f195496a3dc03e03fa14b..180a31c8e4af56808da0faaec129dda9ce0bcc4e 100644 --- a/aidge_onnx/node_import/onnx_converters/sqrt.py +++ b/aidge_onnx/node_import/onnx_converters/sqrt.py @@ -1,36 +1,36 @@ -""" -Copyright (c) 2023 CEA-List - -This program and the accompanying materials are made available under the -terms of the Eclipse Public License 2.0 which is available at -http://www.eclipse.org/legal/epl-2.0. - -SPDX-License-Identifier: EPL-2.0 -""" -from typing import List, Tuple - -import aidge_core -from aidge_core import Log -import onnx - -from aidge_onnx.node_import import auto_register_import - -@auto_register_import("sqrt") -def import_sqrt(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node, int]], opset=None) -> aidge_core.Node: - """ - :param onnx_node: ONNX node to convert - :type onnx_node: onnx.NodeProto - :param input_nodes: List of Aidge nodes which constitute the input of the current node - :type input_nodes: List[aidge_core.Node] - :param opset: Indicate opset version of the ONNX model, default=None - :type opset: int, optional - """ - node_name = onnx_node.output[0] - - for attr in onnx_node.attribute: - print(f"Warning: Attribute {attr.name} is not supported for operator sqrt.") - return None - - my_node = aidge_core.Sqrt(name=node_name) - Log.notice(f"- {node_name} ({onnx_node.op_type})") - return my_node +""" +Copyright (c) 2023 CEA-List + +This program and the accompanying materials are made available under the +terms of the Eclipse Public License 2.0 which is available at +http://www.eclipse.org/legal/epl-2.0. + +SPDX-License-Identifier: EPL-2.0 +""" +from typing import List, Tuple + +import aidge_core +from aidge_core import Log +import onnx + +from aidge_onnx.node_import import auto_register_import + +@auto_register_import("sqrt") +def import_sqrt(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node, int]], opset=None) -> aidge_core.Node: + """ + :param onnx_node: ONNX node to convert + :type onnx_node: onnx.NodeProto + :param input_nodes: List of Aidge nodes which constitute the input of the current node + :type input_nodes: List[aidge_core.Node] + :param opset: Indicate opset version of the ONNX model, default=None + :type opset: int, optional + """ + node_name = onnx_node.output[0] + + for attr in onnx_node.attribute: + Log.warn(f"Attribute {attr.name} is not supported for operator sqrt.") + return None + + my_node = aidge_core.Sqrt(name=node_name) + Log.notice(f"- {node_name} ({onnx_node.op_type})") + return my_node \ No newline at end of file diff --git a/aidge_onnx/node_import/onnx_converters/sub.py b/aidge_onnx/node_import/onnx_converters/sub.py index 87b2b9650d599e73fd28fe0ae838c537e606f50c..55108b73db772f727dfc9ecb540e9a8155602782 100644 --- a/aidge_onnx/node_import/onnx_converters/sub.py +++ b/aidge_onnx/node_import/onnx_converters/sub.py @@ -30,10 +30,10 @@ def import_sub(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node, for attr in onnx_node.attribute: if opset < 7: # TODO : add broadcasting attr for opset < 6 - print(f"Warning: broadcasting attributes for operator sub under opset {opset} are not yet supported.") + Log.warn(f"broadcasting attributes for operator sub under opset {opset} are not yet supported.") return None else: - print(f"Warning: Attribute {attr.name} is not supported for operator sub.") + Log.warn(f"Attribute {attr.name} is not supported for operator sub.") return None 
my_node = aidge_core.Sub(name=node_name) diff --git a/aidge_onnx/node_import/onnx_converters/tanh.py b/aidge_onnx/node_import/onnx_converters/tanh.py index da6a6dabff786184a8630393e8242291498d664c..64e558988811a099ef85cf879aeb7a45eabed678 100644 --- a/aidge_onnx/node_import/onnx_converters/tanh.py +++ b/aidge_onnx/node_import/onnx_converters/tanh.py @@ -28,9 +28,9 @@ def import_tanh(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core.Node node_name = onnx_node.output[0] for attr in onnx_node.attribute: - print(f"Warning: Attribute {attr.name} is not supported for operator relu.") + Log.warn(f"Attribute {attr.name} is not supported for operator tanh.") return None my_node = aidge_core.Tanh(name=node_name) Log.notice(f"- {node_name} ({onnx_node.op_type})") - return my_node + return my_node \ No newline at end of file diff --git a/aidge_onnx/node_import/onnx_converters/transpose.py b/aidge_onnx/node_import/onnx_converters/transpose.py index 25468dbb8f3a136cc3254f0894c1e341723b1eb5..f027d74f35ea55eef940ddc2c61a605a8685f658 100644 --- a/aidge_onnx/node_import/onnx_converters/transpose.py +++ b/aidge_onnx/node_import/onnx_converters/transpose.py @@ -30,7 +30,7 @@ def import_transpose(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core del attrs['perm'] if len(attrs) > 0: - print(f"Warning: unsupported attribute(s): {attrs.keys()} for operator transpose.") + Log.warn(f"unsupported attribute(s): {attrs.keys()} for operator transpose.") return None transpose_node = aidge_core.Transpose( @@ -38,4 +38,4 @@ def import_transpose(onnx_node:onnx.NodeProto, input_nodes:List[Tuple[aidge_core name=node_name) Log.notice(f"- {node_name} ({onnx_node.op_type})") - return transpose_node + return transpose_node \ No newline at end of file diff --git a/aidge_onnx/onnx_export.py b/aidge_onnx/onnx_export.py index 7873c5e886496f96e7a3bf18eeaa3b609193c4a5..ba14e432ec13cf8cf416abd06d6ee37698bd8760 100644 --- a/aidge_onnx/onnx_export.py +++ b/aidge_onnx/onnx_export.py @@ -20,6 +20,7 @@ from onnx import helper from onnx import numpy_helper from onnx import TensorProto from aidge_onnx.utils import _AIDGE_DOMAIN +from aidge_core import Log from .node_export.aidge_converter import AIDGE_NODE_CONVERTER_ @@ -84,7 +85,7 @@ def export_onnx(graph_view: aidge_core.GraphView, """ major, minor = onnx.__version__.split(".")[:2] if enable_custom_op and (int(major)*100 + int(minor) < 114): - print("Warning: Cannot enable custom operator with onnx < 1.14, update onnx library with:" + Log.warn("Cannot enable custom operator with onnx < 1.14, update onnx library with:" "\n\t> pip install --upgrade onnx\nDefaulting to enable_custom_op = False") enable_custom_op = False if opset is None: @@ -137,7 +138,7 @@ def export_onnx(graph_view: aidge_core.GraphView, if aidge_operator.get_output(0).has_impl(): if not aidge_operator.attr.constant: if verbose: - print(f"Creating initializer: {aidge_node.name()}") + Log.info(f"Creating initializer: {aidge_node.name()}") onnx_initializers.append( numpy_helper.from_array( np.array(aidge_operator.get_output(0)), @@ -166,8 +167,7 @@ def export_onnx(graph_view: aidge_core.GraphView, continue # Next nodes to treat are children of current node open_nodes += list(aidge_node.get_children()) - if verbose: - print(aidge_node.name() + "[" + aidge_node.type() + "]" + "\n" + + Log.notice(aidge_node.name() + "[" + aidge_node.type() + "]" + "\n" + "="*(len(aidge_node.name()) + 2 + len(aidge_node.type()))) aidge_operator = aidge_node.get_operator() @@ -266,9 +266,8 @@ def export_onnx(graph_view:
aidge_core.GraphView, ) ) - if verbose: - print(f"\tInputs: {node_inputs_name}") - print(f"\tOutputs: {node_outputs_name}") + Log.notice(f"\tInputs: {node_inputs_name}") + Log.notice(f"\tOutputs: {node_outputs_name}") new_nodes = AIDGE_NODE_CONVERTER_[aidge_node.type()]( aidge_node, diff --git a/aidge_onnx/onnx_import.py b/aidge_onnx/onnx_import.py index bfaa930e03b8cbad2889684ea6df8c7498992b8d..59b4f42ea618d8b8c4d4d21277fe64f859f6d30b 100644 --- a/aidge_onnx/onnx_import.py +++ b/aidge_onnx/onnx_import.py @@ -28,8 +28,8 @@ def load_onnx(filename: str, verbose: bool = False): :returns: Aidge :py:class:`aidge_core.GraphView` corresponding to the ONNX model described by the onnx file ``filename`` :rtype: :py:class:`aidge_core.GraphView` """ - if verbose : print(f"Loading ONNX {filename}") - + Log.notice(f"Loading ONNX {filename}") + # Load the ONNX model model = onnx.load(filename) return _load_onnx2graphview(model, verbose) @@ -70,14 +70,14 @@ def native_coverage_report(graph: aidge_core.GraphView): nb_native_nodes = sum(native_node_types.values()) nb_generic_nodes = sum(generic_node_types.values()) - print(f"Native operators: {nb_native_nodes} ({len(native_node_types)} types)") + Log.notice(f"Native operators: {nb_native_nodes} ({len(native_node_types)} types)") for op, nb in sorted(native_node_types.items()): - print(f"- {op}: {nb}") - print(f"Generic operators: {nb_generic_nodes} ({len(generic_node_types)} types)") + Log.notice(f"- {op}: {nb}") + Log.notice(f"Generic operators: {nb_generic_nodes} ({len(generic_node_types)} types)") for op, nb in sorted(generic_node_types.items()): - print(f"- {op}: {nb}") - print(f"Native types coverage: {100 * len(native_node_types) / (len(native_node_types) + len(generic_node_types)):.1f}% ({len(native_node_types)}/{len(native_node_types) + len(generic_node_types)})") - print(f"Native operators coverage: {100 * nb_native_nodes / (nb_native_nodes + nb_generic_nodes):.1f}% ({nb_native_nodes}/{nb_native_nodes + nb_generic_nodes})") + Log.notice(f"- {op}: {nb}") + Log.notice(f"Native types coverage: {100 * len(native_node_types) / (len(native_node_types) + len(generic_node_types)):.1f}% ({len(native_node_types)}/{len(native_node_types) + len(generic_node_types)})") + Log.notice(f"Native operators coverage: {100 * nb_native_nodes / (nb_native_nodes + nb_generic_nodes):.1f}% ({nb_native_nodes}/{nb_native_nodes + nb_generic_nodes})") return (native_node_types, generic_node_types) diff --git a/aidge_onnx/onnx_test.py b/aidge_onnx/onnx_test.py index 97bcec33e349af5567db9592e77bca40c5af5cde..853b1cfd9862783c593b843aa21b7cd69849524d 100644 --- a/aidge_onnx/onnx_test.py +++ b/aidge_onnx/onnx_test.py @@ -1,4 +1,5 @@ import onnx +from aidge_core import Log def check_onnx_validity(onnx_file_path): """Check if an onnx file is valid with the ONNX standard. 
@@ -7,18 +8,18 @@ def check_onnx_validity(onnx_file_path): # Load the ONNX model model = onnx.load(onnx_file_path) except Exception as e: - print(f"An error occurred while loading the ONNX file:\n{e}") + Log.error(f"An error occurred while loading the ONNX file:\n{e}") return False try: # Check the model for errors onnx.checker.check_model(model) except onnx.onnx_cpp2py_export.checker.ValidationError as e: - print(f"The ONNX file is invalid:\n{e}") + Log.error(f"The ONNX file is invalid:\n{e}") return False except Exception as e: - print(f"An error occurred while checking the ONNX file: {e}") + Log.fatal(f"An error occurred while checking the ONNX file: {e}") return False - print("The ONNX file is valid.") + Log.info("The ONNX file is valid.") return True def _compare_onnx_attributes(attr_list_1, attr_list_2): @@ -65,7 +66,7 @@ def check_isomorphism(file_path1, file_path2): # Easy check on number of nodes. if len(nodes1) != len(nodes2): - print(f"Graph are not isomorphic, number of nodes differs ({len(nodes1)} != {len(nodes2)})") + Log.warn(f"Graph are not isomorphic, number of nodes differs ({len(nodes1)} != {len(nodes2)})") return False # Set of index of nodes from graph2 which has been matched in graph1 @@ -80,6 +81,6 @@ def check_isomorphism(file_path1, file_path2): matched_nodes.add(i) node_not_found = False if node_not_found: - print(f"Cannot find equivalent of node: {node1.name} in {file_path2}") + Log.warn(f"Cannot find equivalent of node: {node1.name} in {file_path2}") return False return True diff --git a/aidge_onnx/unit_tests/compare_layers_aidge_onnx.py b/aidge_onnx/unit_tests/compare_layers_aidge_onnx.py index cd6032041fb190b38280c76ddd1369b4d9c09617..1b24e17efbc201ab6ec25453e2d9f3b674ffd367 100644 --- a/aidge_onnx/unit_tests/compare_layers_aidge_onnx.py +++ b/aidge_onnx/unit_tests/compare_layers_aidge_onnx.py @@ -108,8 +108,8 @@ def compute_error( estimated_output.shape != groundtruth_output.shape ): # try to flatten the output bc if size differ it means that removeFlatten recipe has been applied print("Warn: shape of output differ even trying to flatten them to match.") - print(f"aidge output shape : {estimated_output.shape}") - print(f"onnx output shape : {estimated_output.shape}") + Log.notice(f"aidge output shape : {estimated_output.shape}") + Log.notice(f"onnx output shape : {estimated_output.shape}") estimated_output = estimated_output.flatten() groundtruth_output = groundtruth_output.flatten() @@ -123,8 +123,8 @@ def compute_error( print( "ERROR : shape of output differ even after trying to flatten them, aborting." 
) - print(f"aidge output shape : {estimated_output.shape}") - print(f"onnx output shape : {groundtruth_output.shape}") + Log.notice(f"aidge output shape : {estimated_output.shape}") + Log.notice(f"onnx output shape : {groundtruth_output.shape}") exit(-1) return (error, err_threshold) diff --git a/aidge_onnx/unit_tests/test_import_export.py b/aidge_onnx/unit_tests/test_import_export.py index f40e875a4875785d7677f289dfd6bee486e2d656..d40358a49ac13dccb391dcf8be468fab1d62c0e0 100644 --- a/aidge_onnx/unit_tests/test_import_export.py +++ b/aidge_onnx/unit_tests/test_import_export.py @@ -38,7 +38,7 @@ def download_onnx_model(url, destination): Log.notice(f"Downloaded ONNX model {destination}") except Exception as e: - print(f"Error downloading ONNX model: {e}") + Log.error("Error while downloading ONNX model: {e}") class TestLoadSave(unittest.TestCase): @@ -64,7 +64,7 @@ class TestLoadSave(unittest.TestCase): pass def test_import_export(self): - Log.set_console_level(Level.Notice) + Log.set_console_level(Level.Info) for index, test in enumerate(self.test_runs): Log.notice(f"Test {index}\n{test}") Log.notice("\nDownloading model") diff --git a/aidge_onnx/unit_tests/test_models.py b/aidge_onnx/unit_tests/test_models.py index 7ce14befea8877b011e5a0e52650fe970e7654a2..d6b76c0c1b6227464043b25dd905f2f49886cabc 100644 --- a/aidge_onnx/unit_tests/test_models.py +++ b/aidge_onnx/unit_tests/test_models.py @@ -8,13 +8,6 @@ import aidge_core from aidge_core import Log, Level import aidge_backend_cpu -import numpy as np -import onnx -from onnx import helper -from onnx import numpy_helper -from onnx import TensorProto -from test_import_export import download_onnx_model - class test_models(unittest.TestCase):