Skip to content
Snippets Groups Projects
Commit c132a8a9 authored by Cyril Moineau's avatar Cyril Moineau
Browse files

Merge branch 'AddUnitTestExport' into 'dev'

Add unit test export

See merge request eclipse/aidge/aidge_core!265
parents 00f9c79b b5472812
No related branches found
No related tags found
2 merge requests!279v0.4.0,!265Add unit test export
Pipeline #60379 passed
...@@ -3,4 +3,4 @@ from .code_generation import generate_file, generate_str, copy_file ...@@ -3,4 +3,4 @@ from .code_generation import generate_file, generate_str, copy_file
from .export_registry import ExportLib from .export_registry import ExportLib
from .scheduler_export import scheduler_export from .scheduler_export import scheduler_export
from .tensor_export import tensor_to_c, generate_input_file from .tensor_export import tensor_to_c, generate_input_file
from .generate_main import generate_main_cpp from .generate_main import generate_main_cpp, generate_main_compare_cpp
...@@ -62,3 +62,70 @@ def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView, inpu ...@@ -62,3 +62,70 @@ def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView, inpu
outputs_dtype=outputs_dtype, outputs_dtype=outputs_dtype,
outputs_size=outputs_size outputs_size=outputs_size
) )
def generate_main_compare_cpp(export_folder: str, graph_view: aidge_core.GraphView, inputs_tensor=None) -> None:
    """Generate a C++ ``main.cpp`` that runs the exported model and compares its outputs.

    Walks the ordered inputs and outputs of *graph_view* to collect the array
    names, C data types and tensor sizes required by the ``main_compare.jinja``
    template, dumps every defined input tensor (and every defined output
    tensor, suffixed ``_expected``) to a header file via
    :py:func:`aidge_core.export_utils.generate_input_file`, then renders the
    template into ``<export_folder>/main.cpp``. The generated ``main`` calls
    ``model_forward`` and compares each produced output against the expected
    arrays.

    :param export_folder: Path to the folder where the generated C++ file
        (``main.cpp``) and the tensor header files will be saved.
    :type export_folder: str
    :param graph_view: An instance of :py:class:`aidge_core.GraphView`,
        providing access to nodes and ordered input/output data within the
        computational graph.
    :type graph_view: aidge_core.GraphView
    :param inputs_tensor: **For future** argument to provide tensors to use in
        the main function, not implemented yet!
    :type inputs_tensor: None
    :raises RuntimeError: If a graph output has no tensor attached, or if the
        collected output name/dtype/size lists end up with inconsistent
        lengths (an internal bug).
    """
    inputs_name: list[str] = []
    outputs_name: list[str] = []
    outputs_dtype: list[str] = []
    outputs_size: list[int] = []

    # Inputs: a connected input is named after its producer's output slot,
    # a dangling one after the consuming node's input slot.
    for in_node, in_idx in graph_view.get_ordered_inputs():
        in_node_input, in_node_input_idx = in_node.input(in_idx)
        in_name = (f"{in_node.name()}_input_{in_idx}" if in_node_input is None
                   else f"{in_node_input.name()}_output_{in_node_input_idx}")
        inputs_name.append(in_name)
        input_tensor = in_node.get_operator().get_input(in_idx)
        if input_tensor is None or input_tensor.undefined() or not input_tensor.has_impl():
            if inputs_tensor is not None:
                aidge_core.Log.notice("No support for inputs_tensor argument yet.")
            aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functionnal after code generation.")
        else:
            aidge_core.export_utils.generate_input_file(export_folder=export_folder, array_name=in_name, tensor=input_tensor)

    # Outputs: record dtype/size for the template and dump the expected
    # values when the tensor is defined.
    for out_node, out_id in graph_view.get_ordered_outputs():
        out_name = f"{out_node.name()}_output_{out_id}"
        outputs_name.append(out_name)
        out_tensor = out_node.get_operator().get_output(out_id)
        if out_tensor is None:
            # Check BEFORE dereferencing: the previous code called
            # out_tensor.dtype() first and crashed with AttributeError here.
            raise RuntimeError(f"Graph output {out_name} has no tensor attached, cannot generate main.cpp.")
        outputs_dtype.append(data_conversion.aidge2c(out_tensor.dtype()))
        outputs_size.append(out_tensor.size())
        if out_tensor.undefined() or not out_tensor.has_impl():
            aidge_core.Log.notice(f"No output tensor set for {out_name}, main generated will not be functionnal after code generation.")
        else:
            aidge_core.export_utils.generate_input_file(export_folder=export_folder, array_name=out_name + "_expected", tensor=out_tensor)

    if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
        raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")

    ROOT = Path(__file__).resolve().parent
    generate_file(
        str(Path(export_folder) / "main.cpp"),
        str(ROOT / "templates" / "main_compare.jinja"),
        func_name="model_forward",
        inputs_name=inputs_name,
        outputs_name=outputs_name,
        outputs_dtype=outputs_dtype,
        outputs_size=outputs_size
    )
{# main_compare.jinja: rendered by generate_main_compare_cpp into main.cpp.
   Context variables: func_name, inputs_name, plus the parallel lists
   outputs_name / outputs_dtype / outputs_size. #}
{# printf conversion specifier for each supported C scalar type. #}
{% set printf_formats = {
    "double": "%lf",
    "float": "%f",
    "int8_t": "%hhd",
    "int16_t": "%hd",
    "int32_t": "%d",
    "int64_t": "%lld",
    "uint8_t": "%hhu",
    "uint16_t": "%hu",
    "uint32_t": "%u",
    "uint64_t": "%llu"
} %}
#include <cstdio> // printf
#include <cmath> // std::abs
#include "forward.hpp" // Exported forward
// Inputs
{% for name in inputs_name %}
#include "{{ name }}.h"
{% endfor %}
// Outputs
{# One `<name>_expected.h` header per output, written by
   generate_main_compare_cpp from the graph's output tensors. #}
{% for name in outputs_name %}
#include "{{ name }}_expected.h"
{% endfor %}
int main()
{
    // Initialize the output arrays
    {# NOTE(review): outputs are passed by address to the forward call below;
       presumably the generated forward points them at its buffers — confirm. #}
    {%- for o in range(outputs_name | length) %}
    {{ outputs_dtype[o] }}* {{ outputs_name[o] }} = nullptr;
    {% endfor %}
    {# Mixed tolerance: pass if |expected - predicted| <= abs_err + rel_err*|expected|. #}
    const float abs_err = 0.001f;
    const float rel_err = 0.0001f;
    // Call the forward function
    {{ func_name }}({{ inputs_name|join(", ") }}{% if inputs_name %}, {% endif %}&{{ outputs_name|join(", &") }});
    int nb_identical;
    int nb_out;
    // Print the results of each output
    {# Template-time `o` indexes the output lists; the emitted C code reuses
       `o` as its element-loop counter, but the two never coexist at runtime. #}
    {%- for o in range(outputs_name | length) %}
    nb_identical = 0;
    nb_out = 0;
    printf("{{ outputs_name[o] }}:\n");
    for (int o = 0; o < {{ outputs_size[o] }}; ++o) {
        printf("Expected {{ printf_formats[outputs_dtype[o]] }} <-> Predicted {{ printf_formats[outputs_dtype[o]] }}\n", {{ outputs_name[o] }}_expected[o], {{ outputs_name[o] }}[o]);
        if (std::abs({{ outputs_name[o] }}_expected[o] - {{ outputs_name[o] }}[o]) <= abs_err + rel_err * std::abs({{ outputs_name[o] }}_expected[o]))
            nb_identical++;
        nb_out++;
    }
    printf("\nNumber of equal outputs: %d / %d\n", nb_identical, nb_out);
    {% endfor %}
    return 0;
}
...@@ -41,7 +41,7 @@ def compute_default_mem_info(scheduler: aidge_core.Scheduler) -> Tuple[int, List ...@@ -41,7 +41,7 @@ def compute_default_mem_info(scheduler: aidge_core.Scheduler) -> Tuple[int, List
return mem_size, mem_info return mem_size, mem_info
def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder: Path, wrapping: bool = False) -> Tuple[int, List[dict]]: def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder: Path = None, wrapping: bool = False) -> Tuple[int, List[dict]]:
"""Generates optimized memory information for a computation graph managed by a scheduler. """Generates optimized memory information for a computation graph managed by a scheduler.
This function analyzes the memory usage of a computation graph, determining the memory peak This function analyzes the memory usage of a computation graph, determining the memory peak
...@@ -52,8 +52,8 @@ def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder ...@@ -52,8 +52,8 @@ def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder
nodes and facilitates memory planning by invoking its `generate_memory` method. nodes and facilitates memory planning by invoking its `generate_memory` method.
:type scheduler: aidge_core.Scheduler :type scheduler: aidge_core.Scheduler
:param stats_folder: Directory path to store memory statistics and plots generated by `mem_manager`. :param stats_folder: Directory path to store memory statistics and plots generated by `mem_manager`.
If provided as a string, it is converted to a `Path` object. If provided as a string, it is converted to a `Path` object, default=None.
:type stats_folder: Path :type stats_folder: Path, optional
:param wrapping: Boolean flag to enable or disable wrap-around buffer optimization. :param wrapping: Boolean flag to enable or disable wrap-around buffer optimization.
Defaults to `False`. Defaults to `False`.
:type wrapping: bool, optional :type wrapping: bool, optional
...@@ -73,17 +73,21 @@ def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder ...@@ -73,17 +73,21 @@ def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder
# List of nodes which are connected at the input of the graph (None if input is not connected) # List of nodes which are connected at the input of the graph (None if input is not connected)
nodes_at_input = [n[0] for n in scheduler.graph_view().inputs()] nodes_at_input = [n[0] for n in scheduler.graph_view().inputs()]
# Use gnuplot to generate the log # Return 0 ==> gnuplot installed
if isinstance(stats_folder, str): if os.system("gnuplot --version > /dev/null 2>&1") == 0:
stats_folder = Path(stats_folder) # Use gnuplot to generate the log
os.makedirs(str(Path(stats_folder) / "graph"), exist_ok=True) if isinstance(stats_folder, str):
mem_manager.log("memory_info") stats_folder = Path(stats_folder)
os.chmod("memory_info_plot.gnu", 0o777) os.makedirs(str(Path(stats_folder) / "graph"), exist_ok=True)
os.system("./memory_info_plot.gnu") mem_manager.log("memory_info")
shutil.move("memory_info", str(Path(stats_folder) / "graph" / "memory_info")) os.chmod("memory_info_plot.gnu", 0o777)
shutil.move("memory_info_plot.png", str( os.system("./memory_info_plot.gnu")
Path(stats_folder) / "graph" / "memory_info_plot.png")) shutil.move("memory_info", str(Path(stats_folder) / "graph" / "memory_info"))
os.remove("memory_info_plot.gnu") shutil.move("memory_info_plot.png", str(
Path(stats_folder) / "graph" / "memory_info_plot.png"))
os.remove("memory_info_plot.gnu")
elif stats_folder is not None:
aidge_core.Log.warn("Warning: gnuplot is not installed, could not generate stat folder.")
# In the export, we currently use an unified memory buffer whose size # In the export, we currently use an unified memory buffer whose size
# is determined by the memory peak usage # is determined by the memory peak usage
mem_size = mem_manager.get_peak_usage() mem_size = mem_manager.get_peak_usage()
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment