diff --git a/aidge_export_cpp/export.py b/aidge_export_cpp/export.py
index de20f3b4eee728a48cdef7849cd5e4894ee0e3d1..b8fe28a54fd5ccc2cd8e380722b431b51a3e1aa8 100644
--- a/aidge_export_cpp/export.py
+++ b/aidge_export_cpp/export.py
@@ -77,7 +77,7 @@ def export(export_folder_name, graphview, scheduler):
     list_inputs_name = []
     for node in graphview.get_nodes():
         if node.type() == "Producer":
-            if not node.get_operator().get_attr("Constant"):
+            if not node.get_operator().attr.constant:
                 export_type = aidge_datatype2ctype(node.get_operator().get_output(0).dtype())
                 list_inputs_name.append((export_type, node.name()))
 
diff --git a/aidge_export_cpp/operators.py b/aidge_export_cpp/operators.py
index 86f61f22607558231bc192a35bc5bc8a4644bd32..6dba9a8f37995b4bb387689e89c4841a5311b5c5 100644
--- a/aidge_export_cpp/operators.py
+++ b/aidge_export_cpp/operators.py
@@ -71,7 +71,7 @@ class ProducerCPP(ExportNode):
 
     def __init__(self, node):
         super().__init__(node)
-        self.constant = self.operator.get_attr("Constant")
+        self.constant = self.operator.attr.constant
         self.values = np.array(self.operator.get_output(0))
 
         if len(self.values.shape) == 4:
@@ -130,7 +130,7 @@ class ReLUCPP(ExportNode):
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "activation_forward.jinja"),
             name=self.name,
-            input_name=self.inputs[0].name(),
+            input_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
             output_name=self.name
         ))
         return list_actions
@@ -141,14 +141,17 @@ class ConvCPP(ExportNode):
 
     def __init__(self, node):
         super().__init__(node)
-        self.kernel = node.get_operator().get_attr("KernelDims")
-        self.stride = node.get_operator().get_attr("StrideDims")
-        self.dilation = node.get_operator().get_attr("DilationDims")
+        self.kernel = node.get_operator().attr.kernel_dims
+        self.stride = node.get_operator().attr.stride_dims
+        self.dilation = node.get_operator().attr.dilation_dims
 
         # No padding with Conv
         # Use PaddedConv to add padding attribute
         self.padding = [0, 0]
 
+        self.nb_channels = node.get_operator().in_channels()
+        self.nb_outputs = node.get_operator().out_channels()
+
         if len(self.inputs_dims[0]) == 4:
             # if dims == [batch, nb_channels, height, width]
             # transform to [nb_channels, height, width]
@@ -193,7 +196,7 @@ class ConvCPP(ExportNode):
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja"),
             name=self.name,
-            input_name=self.inputs[0].name(),
+            input_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
             output_name=self.name,
             weights_name=self.inputs[1].name(),
             biases_name=self.inputs[2].name()
@@ -208,11 +211,11 @@ class PaddedConvCPP(ConvCPP):
 
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad":
-                self.padding = n.get_operator().get_attr("BeginEndBorders")
+                self.padding = n.get_operator().attr.begin_end_borders
             if n.type() == "Conv":
-                self.kernel = n.get_operator().get_attr("KernelDims")
-                self.stride = n.get_operator().get_attr("StrideDims")
-                self.dilation = n.get_operator().get_attr("DilationDims")
+                self.kernel = n.get_operator().attr.kernel_dims
+                self.stride = n.get_operator().attr.stride_dims
+                self.dilation = n.get_operator().attr.dilation_dims
 
         if len(self.inputs_dims[0]) == 4:
             # if dims == [batch, nb_channels, height, width]
@@ -253,8 +256,8 @@ class AddCPP(ExportNode):
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja"),
             name=self.name,
-            inputs1_name=self.inputs[0].name(),
-            inputs2_name=self.inputs[1].name(),
+            inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
+            inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
             output_name=self.name
         ))
         return list_actions
@@ -286,8 +289,8 @@ class SubCPP(ExportNode):
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja"),
             name=self.name,
-            inputs1_name=self.inputs[0].name(),
-            inputs2_name=self.inputs[1].name(),
+            inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
+            inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
             output_name=self.name
         ))
         return list_actions
@@ -297,8 +300,8 @@ class MaxPoolCPP(ExportNode):
 
     def __init__(self, node):
         super().__init__(node)
-        self.kernel = node.get_operator().get_attr("KernelDims")
-        self.stride = node.get_operator().get_attr("StrideDims")
+        self.kernel = node.get_operator().attr.kernel_dims
+        self.stride = node.get_operator().attr.stride_dims
 
         # No padding with MaxPooling
         # Use PaddedMaxPooling to add padding attribute
@@ -344,7 +347,7 @@ class MaxPoolCPP(ExportNode):
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja"),
             name=self.name,
-            input_name=self.inputs[0].name(),
+            input_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
             output_name=self.name
         ))
         return list_actions
@@ -397,11 +400,10 @@ class FcCPP(ExportNode):
     def forward(self, list_actions:list):
         if not self.is_last:
             list_actions.append(set_up_output(self.name, "float"))
-
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "fullyconnected_forward.jinja"),
             name=self.name,
-            inputs_name=self.inputs[0].name(),
+            inputs_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
             weights_name=self.inputs[1].name(),
             biases_name=self.inputs[2].name(),
             outputs_name=self.name
diff --git a/aidge_export_cpp/utils/converter.py b/aidge_export_cpp/utils/converter.py
index d706d5a26f103316adfca0cd16f1146284e8177c..0fe7a675a9a00aad6d5a9447097e744b6493bc5c 100644
--- a/aidge_export_cpp/utils/converter.py
+++ b/aidge_export_cpp/utils/converter.py
@@ -17,18 +17,18 @@ def numpy_dtype2ctype(dtype):
     # Add more dtype mappings as needed
     else:
         raise ValueError(f"Unsupported {dtype} dtype")
-    
+
 
 def aidge_datatype2ctype(datatype):
-    if datatype == aidge_core.DataType.Int8:
+    if datatype == aidge_core.dtype.int8:
         return "int8_t"
-    elif datatype == aidge_core.DataType.Int32:
+    elif datatype == aidge_core.dtype.int32:
         return "int32_t"
-    elif datatype == aidge_core.DataType.Int64:
+    elif datatype == aidge_core.dtype.int64:
         return "int64_t"
-    elif datatype == aidge_core.DataType.Float32:
+    elif datatype == aidge_core.dtype.float32:
         return "float"
-    elif datatype == aidge_core.DataType.Float64:
+    elif datatype == aidge_core.dtype.float64:
         return "double"
     # Add more dtype mappings as needed
     else:
diff --git a/examples/add_custom_operator/add_custom_operator.ipynb b/examples/add_custom_operator/add_custom_operator.ipynb
index 0687e322831f045a17e729b2706ef518f3db6a7a..5477cdaefa00334a64472810e71c04031c06ab0f 100644
--- a/examples/add_custom_operator/add_custom_operator.ipynb
+++ b/examples/add_custom_operator/add_custom_operator.ipynb
@@ -89,7 +89,7 @@
     "model.add(input_node)\n",
     "\n",
     "# Configuration for the model + forward dimensions\n",
-    "model.compile(\"cpu\", aidge_core.DataType.Float32)"
+    "model.compile(\"cpu\", aidge_core.dtype.float32)"
    ]
   },
   {
@@ -231,9 +231,9 @@
     "    def __init__(self, node):\n",
     "        super().__init__(node)\n",
     "\n",
-    "        self.betas = self.operator.get_attr(\"betas\")\n",
+    "        self.betas: float = self.operator.get_attr(\"betas\")\n",
     "\n",
-    "    def export(self, export_folder:str, list_configs:list):\n",
+    "    def export(self, export_folder: str, list_configs: list[str]) -> list[str]:\n",
     "\n",
     "        copyfile(\"for_export/swish_kernel.hpp\",\n",
     "                 f\"{export_folder}/include/kernels/\")\n",
@@ -248,7 +248,7 @@
     "\n",
     "        return list_configs\n",
     "\n",
-    "    def forward(self, list_actions:list):\n",
+    "    def forward(self, list_actions:list[str]) -> list[str]:\n",
     "\n",
     "        if not self.is_last:\n",
     "            list_actions.append(set_up_output(self.name, \"float\"))\n",
@@ -296,7 +296,7 @@
    "outputs": [],
    "source": [
     "digit = np.load(\"digit.npy\")\n",
-    "cpp.generate_input_file(\"inputs\", digit.reshape(-1), \"myexport/inputs.h\")"
+    "cpp.generate_input_file(array_name=\"inputs\", array=digit.reshape(-1), folder_path=\"myexport\")"
    ]
   },
   {
@@ -334,7 +334,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.10.14"
   }
  },
  "nbformat": 4,
diff --git a/examples/export_LeNet/export_lenet_fp32.ipynb b/examples/export_LeNet/export_lenet_fp32.ipynb
index 24fe205a9d781bc6f6adfbe8c9a13a996ca674e7..b10ca6b9dbe4fecf1e56ec04ead0c6c774aaf66c 100644
--- a/examples/export_LeNet/export_lenet_fp32.ipynb
+++ b/examples/export_LeNet/export_lenet_fp32.ipynb
@@ -98,7 +98,7 @@
     "model.add(input_node)\n",
     "\n",
     "# Configuration for the model + forward dimensions\n",
-    "model.compile(\"cpu\", aidge_core.DataType.Float32)"
+    "model.compile(\"cpu\", aidge_core.dtype.float32)"
    ]
   },
   {
@@ -290,7 +290,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.10.14"
   }
  },
  "nbformat": 4,