Commit 341d5dab authored by Axel Farrugia

[Feat](Exports) Add label export in generate_main_cpp() function and change the default inputs and labels destination folder from "ROOT" to "ROOT/data"
parent 7c62b7fb
3 merge requests: !414 Update version 0.5.1 -> 0.6.0, !408 [Add] Dropout Operator, !369 [Feat](Exports) Minor features for export generation
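As context for the diff below, here is a hedged usage sketch of the updated generate_main_cpp(). The graph, label value, and folder name are illustrative assumptions; it also assumes that aidge_core.Tensor can wrap a NumPy array and that generate_main_cpp is re-exported from aidge_core.export_utils.

```python
import numpy as np
import aidge_core
from aidge_core.export_utils import generate_main_cpp

# graph_view: a GraphView already prepared for export (inputs set, scheduled);
# shown here only as an empty placeholder.
graph_view = aidge_core.GraphView()

# One expected class index for the exported input sample (hypothetical value).
labels = aidge_core.Tensor(np.array([7], dtype=np.int32))

# With this commit, input headers and "labels.h" are written to
# "export_cpp/data/" instead of the export root.
generate_main_cpp("export_cpp", graph_view, labels=labels)
```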
 import aidge_core
 from pathlib import Path
-from aidge_core.export_utils import generate_file, data_conversion
+from aidge_core.export_utils import generate_file, data_conversion, generate_input_file

-def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView, inputs_tensor=None) -> None:
+def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView, inputs_tensor=None, labels=None) -> None:
     """
     Generate a C++ file to manage the forward pass of a model using the given graph structure.
@@ -18,7 +18,10 @@ def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView, inpu
         ordered input/output data within the computational graph.
     :type graph_view: aidge_core.graph_view
     :param inputs_tensor: **For future** argument to provide tensor to use in the main function, not implemented yet!
-    :type inputs_tensor: None
+        By default, the input of the given graph will be exported.
+    :type inputs_tensor: aidge_core.Tensor
+    :param labels: Argument to provide labels tensor to generate and use in the main function.
+    :type labels: aidge_core.Tensor
     :raises RuntimeError: If there is an inconsistency in the output arguments (names, data types, sizes),
         indicating an internal bug in the graph representation.
     """
@@ -41,7 +44,18 @@ def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView, inpu
             else:
                 aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functionnal after code generation.")
         else:
-            aidge_core.export_utils.generate_input_file(export_folder=export_folder, array_name=in_name, tensor=input_tensor)
+            # Generate input file
+            generate_input_file(
+                export_folder=str(Path(export_folder) / "data"),
+                array_name=in_name,
+                tensor=input_tensor)
+
+    if labels is not None:
+        # Generate labels
+        generate_input_file(
+            export_folder=str(Path(export_folder) / "data"),
+            array_name="labels",
+            tensor=labels
+        )

     for out_node, out_id in gv_outputs:
         outputs_name.append(f"{out_node.name()}_output_{out_id}")
@@ -60,7 +74,8 @@ def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView, inpu
         inputs_name=inputs_name,
         outputs_name=outputs_name,
         outputs_dtype=outputs_dtype,
-        outputs_size=outputs_size
+        outputs_size=outputs_size,
+        labels=(labels is not None)
     )
@@ -103,7 +118,7 @@ def generate_main_compare_cpp(export_folder: str, graph_view: aidge_core.GraphVi
             else:
                 aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functionnal after code generation.")
         else:
-            aidge_core.export_utils.generate_input_file(export_folder=export_folder, array_name=in_name, tensor=input_tensor)
+            generate_input_file(export_folder=export_folder, array_name=in_name, tensor=input_tensor)

     for out_node, out_id in gv_outputs:
         out_name = f"{out_node.name()}_output_{out_id}"
@@ -114,7 +129,7 @@ def generate_main_compare_cpp(export_folder: str, graph_view: aidge_core.GraphVi
         if out_tensor is None or out_tensor.undefined() or not out_tensor.has_impl():
             aidge_core.Log.notice(f"No input tensor set for {out_name}, main generated will not be functionnal after code generation.")
         else:
-            aidge_core.export_utils.generate_input_file(export_folder=export_folder, array_name=out_name+"_expected", tensor=out_tensor)
+            generate_input_file(export_folder=export_folder, array_name=out_name+"_expected", tensor=out_tensor)

     if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
         raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")
...
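Before the template changes below, a short sketch of the on-disk layout implied by the new destination folder. The input header name and the location of the generated main.cpp at the export root are assumptions, not part of the commit.

```python
from pathlib import Path

export_folder = Path("export_cpp")                  # hypothetical export root
print(export_folder / "main.cpp")                   # generated entry point (assumed location)
print(export_folder / "data" / "fc1_input_0.h")     # hypothetical input header
print(export_folder / "data" / "labels.h")          # written only when labels is not None
```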
 #include <iostream>
 #include "forward.hpp"
-{% for name in inputs_name %}
-#include "{{ name }}.h"
-{% endfor %}
+{%- for name in inputs_name %}
+#include "data/{{ name }}.h"
+{%- endfor %}
+{%- if labels %}
+#include "data/labels.h"
+{%- endif %}
-{% set printf_formats = {
+{%- set printf_formats = {
     "double": "%lf",
     "float": "%f",
     "int8_t": "%hhd",
@@ -28,13 +31,36 @@ int main()
     // Call the forward function
     {{ func_name }}({{ inputs_name|join(", ") }}{% if inputs_name %}, {% endif %}&{{ outputs_name|join(", &") }});

-    // Print the results of each output
+    // Print the results
+    {%- if labels %}
+    int prediction;
+    int confidence;
     {%- for o in range(outputs_name | length) %}
+    prediction = 0;
+    confidence = {{ outputs_name[o] }}[0];
+    for (int o = 0; o < {{ outputs_size[0] }}; ++o) {
+        if ({{ outputs_name[0] }}[o] > confidence) {
+            prediction = o;
+            confidence = {{ outputs_name[0] }}[o];
+        }
+    }
+    printf("Prediction : %d (%d)\n", prediction, confidence);
+    printf("Label : %d\n", labels[{{ o }}]);
+    {%- endfor %}
+    {%- else %}
+    {%- for o in range(outputs_name | length) %}
     printf("{{ outputs_name[o] }}:\n");
     for (int o = 0; o < {{ outputs_size[o] }}; ++o) {
         printf("{{ printf_formats[outputs_dtype[o]] }} ", {{ outputs_name[o] }}[o]);
     }
     printf("\n");
-    {% endfor %}
+    {%- endfor %}
+    {%- endif %}

     return 0;
 }
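To illustrate how the boolean labels variable passed by generate_main_cpp() drives this template, here is a minimal Jinja2 rendering sketch. It is not the shipped template; the output name is a placeholder.

```python
from jinja2 import Template

# Reduced stand-in for the branch added above: render the label printout when
# labels is set, otherwise fall back to the plain value dump.
snippet = Template(
    "{% if labels %}"
    'printf("Label : %d\\n", labels[0]);'
    "{% else %}"
    'printf("{{ outputs_name[0] }}:\\n");'
    "{% endif %}"
)

print(snippet.render(labels=True, outputs_name=["node_output_0"]))   # label branch
print(snippet.render(labels=False, outputs_name=["node_output_0"]))  # value dump branch
```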
@@ -16,12 +16,12 @@
 // Inputs
 {% for name in inputs_name %}
-#include "{{ name }}.h"
+#include "data/{{ name }}.h"
 {% endfor %}

 // Outputs
 {% for name in outputs_name %}
-#include "{{ name }}_expected.h"
+#include "data/{{ name }}_expected.h"
 {% endfor %}

 int main()
...