Commit cda46ffd authored by Maxence Naud

Merge branch 'dev' into 'main'

v0.4.0

Closes #196 and #160

See merge request eclipse/aidge/aidge_core!279
parents 577e654a cc6b7704
Showing with 1769 additions and 77 deletions
import aidge_core
import os
import shutil
from pathlib import Path
from aidge_core.export_utils import ExportLib, generate_file, copy_file
from typing import List, Tuple
def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib = None, memory_manager=None, memory_manager_args=None, test_mode=False) -> None:
"""Exports an aidge_core.Scheduler to C++ code.
This function generates files for a given computation graph, including forward-pass functions,
configuration headers, and the main API entry point for the exported model. It requires a
memory manager to allocate resources, and optionally an `ExportLib` instance to handle backend
configurations for node operators.
1. **Export Preparation**:
- Initializes export and DNN folders, checking that required memory management functions are defined.
- Retrieves peak memory usage and memory details for each node using the `memory_manager`.
2. **Configuration Generation**:
- Iterates over nodes scheduled by `scheduler`, configuring backends if `export_lib` is specified.
- Exports configuration headers and forward-pass actions for each node by invoking `op.export()`
and `op.forward()`, appending these to `list_configs` and `list_actions`, respectively.
- Collects information on input and output nodes, including their names, data types, and sizes.
3. **Code Generation**:
- Defines the forward-pass function, `model_forward`, with inputs and outputs based on node attributes.
- Generates the following files:
- **forward.cpp**: Implements the model forward pass using templates, applying configurations
and actions for each node.
- **forward.hpp**: Exports the forward API, defining inputs and outputs.
- **main.cpp**: Main entry file, serving as the model's forward-pass interface.
4. **Static File Export (Optional)**:
- If `export_lib` is specified, static files are copied to the export folder based on `export_lib`
specifications.
:param scheduler: Scheduler instance managing the computation graph.
Uses `graph_view` and `get_static_scheduling` methods
to retrieve the computation graph layout and ordered nodes.
:type scheduler: aidge_core.Scheduler
:param export_folder_path: Path to the folder where the generated export files will be saved.
Creates this folder, along with subdirectories for model and source files.
:type export_folder_path: str
:param export_lib: Library providing the backend implementation for node operators.
Defaults to None. If provided, each node's backend is set to the library's name.
:type export_lib: ExportLib, optional
:param memory_manager: Required function for managing memory allocation. It should take
`scheduler` and optional `memory_manager_args` as parameters, returning
`peak_mem` (peak memory usage) and `mem_info` (memory details for each node).
:type memory_manager: callable
:param memory_manager_args: Additional arguments passed to `memory_manager`. Defaults to an empty dictionary.
:type memory_manager_args: dict, optional
:param test_mode: Additional argument which may be used during forward generation.
:type test_mode: bool, optional
"""
graphview = scheduler.graph_view()
export_folder = Path().absolute() / export_folder_path
os.makedirs(str(export_folder), exist_ok=True)
dnn_folder = export_folder / "dnn"
os.makedirs(str(dnn_folder), exist_ok=True)
if memory_manager_args is None:
memory_manager_args = {}
if memory_manager is None:
raise ValueError("A memory manager is required (no default value yet).")
peak_mem, mem_info = memory_manager(
scheduler, **memory_manager_args)
    # List of function calls for forward.cpp
list_actions: List[str] = []
# List of headers for forward.cpp
list_configs: List[str] = []
inputs_name: List[str] = []
inputs_dtype: List[str] = []
outputs_name: List[str] = []
outputs_dtype: List[str] = []
outputs_size: List[int] = []
# List of aidge_core.Node ordered by scheduler
list_forward_nodes: List[aidge_core.Node] = scheduler.get_static_scheduling()
    # If an ExportLib is defined, use it;
    # otherwise parse the components available on the platform.
# if export_lib is None:
# raise ValueError("Export need an ExportLib.")
for node in list_forward_nodes:
if export_lib is not None:
aidge_core.Log.debug(f"Setting backend {export_lib._name} to {node.name()}[{node.type()}].")
node.get_operator().set_backend(export_lib._name)
op_impl = node.get_operator().get_impl()
if op_impl is None:
raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an implementation.")
if not isinstance(op_impl, ExportLib):
raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an exportable backend ({op_impl}).")
is_input:bool = node in graphview.get_input_nodes()
is_output:bool = node in graphview.get_output_nodes()
if is_input:
            # GraphView.get_input_nodes() returns the nodes that have an input set to None or not in the graph.
            # However, some inputs are optional, so such a node may not actually be an input of the graph!
            # We therefore check that every input of the node is either in the graph or optional.
            # This is what the following code block does.
for idx, node_in in enumerate(node.inputs()):
optional:bool = node.get_operator().input_category(idx) == aidge_core.InputCategory.OptionalData
# Note: node_in is a Tuple(Node, out_idx)
in_graph:bool = node_in[0] in graphview.get_nodes()
is_input &= (in_graph or not optional)
# Get operator current specs
required_specs = op_impl.get_required_spec()
# Get specs of the implementation that match current specs
specs = op_impl.get_best_match(required_specs)
# Retrieve said implementation
export_node = op_impl.get_export_node(specs)
if export_node is None:
raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].")
        # Instantiate ExportNode
op = export_node(node, mem_info[node])
# For configuration files
list_configs += op.export(dnn_folder)
# For forward file
list_actions += op.forward()
if is_input:
for idx, node_in in enumerate(node.inputs()):
if node_in[0] not in graphview.get_nodes():
inputs_name.append(op.attributes["in_name"][idx])
inputs_dtype.append(
op.attributes["in_cdtype"][idx]
)
if is_output:
for idx in range(len(node.outputs())):
outputs_name.append(op.attributes["out_name"][idx])
outputs_dtype.append(
op.attributes["out_cdtype"][idx]
)
outputs_size.append(op.attributes["out_size"][idx])
func_name = "model_forward"
ROOT = Path(__file__).resolve().parents[0]
forward_template = str(ROOT / "templates" / "forward.jinja")
    if export_lib is not None and export_lib.forward_template is not None:
        forward_template = export_lib.forward_template
list_node_names = []
for node in list_forward_nodes:
if node.type() != "Producer":
list_node_names.append(node.name())
generate_file(
str(dnn_folder / "src" / "forward.cpp"),
forward_template,
func_name=func_name,
headers=set(list_configs),
actions=list_actions,
        # Note: the graph may not have inputs, so we check the outputs instead.
        # In the future this should be removed, as it is not compatible
        # with a mixed-precision approach.
        mem_ctype=outputs_dtype[0], # Legacy behavior ...
        mem_section=export_lib.mem_section if export_lib is not None else None,
peak_mem=peak_mem,
inputs_name=inputs_name,
inputs_dtype=inputs_dtype,
outputs_name=outputs_name,
outputs_dtype=outputs_dtype,
test_mode=test_mode,
list_node_names=list_node_names
)
forward_header_template = str(ROOT / "templates" / "forward_header.jinja")
    if export_lib is not None and export_lib.forward_header_template is not None:
        forward_header_template = export_lib.forward_header_template
# Generate dnn API
generate_file(
str(dnn_folder / "include" / "forward.hpp"),
forward_header_template,
libraries=[],
func_name=func_name,
inputs_name=inputs_name,
inputs_dtype=inputs_dtype,
outputs_name=outputs_name,
outputs_dtype=outputs_dtype,
test_mode=test_mode
)
    if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
        raise RuntimeError("FATAL: output argument lists do not have the same length; this is an internal bug.")
if export_lib is not None:
# Copy all static files in the export
for source, destination in export_lib.static_files.items():
copy_file(source, str(export_folder / destination))
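A minimal usage sketch of the function above, assuming scheduler_export and compute_default_mem_info (defined further down in this commit) are importable from aidge_core.export_utils and aidge_core.mem_info respectively; the tiny model is for illustration only, and with export_lib=None each operator's backend must already resolve to an ExportLib:

import aidge_core
from aidge_core.export_utils import scheduler_export
from aidge_core.mem_info import compute_default_mem_info  # module path is an assumption

# Build a small model and propagate dimensions, as the unit tests below do
model = aidge_core.sequential([
    aidge_core.FC(in_channels=4, out_channels=2, name="fc0"),
    aidge_core.ReLU(name="relu0"),
])
model.forward_dims([[1, 4]])

# Schedule the graph, then export it
scheduler = aidge_core.SequentialScheduler(model)
scheduler.generate_scheduling()
scheduler_export(
    scheduler,
    "my_export",                             # creates my_export/dnn/{src,include}
    export_lib=None,                         # assumes backends already are ExportLib instances
    memory_manager=compute_default_mem_info, # simple concatenation, no reuse
)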
{#- For libraries #}
#include <stdint.h>
{# Design header of the array -#}
static const {{ data_t }} {{ name }}[{{ dims |join("*") }}] __attribute__((section("nn_data"))) = {
{{ values |join(", ") }}
};
#include <stdint.h>
#ifdef SAVE_OUTPUTS
#include <sys/types.h>
#include <sys/stat.h>
#endif
#include "include/forward.hpp"
// Layer & memory configurations
{%- for header in headers %}
#include "{{ header }}"
{%- endfor %}
// Memory block
{%- if mem_section == None %}
static {{mem_ctype}} mem[{{peak_mem}}];
{%- else %}
static {{mem_ctype}} mem[{{peak_mem}}] __attribute__((section("{{ mem_section }}")));
{%- endif %}
{# Forward function #}
{#- Support multiple inputs with different datatypes and multiple outputs with different datatypes -#}
void {{ func_name }} (
{%- for i in range(inputs_name | length) -%}
const {{ inputs_dtype[i] }}* {{ inputs_name[i] }},
{%- endfor -%}
{%- for o in range(outputs_name | length) -%}
{{ outputs_dtype[o] }}** {{ outputs_name[o] }}_ptr{% if not loop.last %}, {% endif %}
{%- endfor -%})
{
{%- for action in actions %}
{{ action }}
{%- endfor %}
{%- for output_name in outputs_name %}
*{{ output_name }}_ptr = {{ output_name }};
{%- endfor %}
}
#ifndef DNN_HPP
#define DNN_HPP
#ifdef __cplusplus
extern "C" {
#endif
{#- For libraries #}
{% for lib in libraries %}
#include <{{ lib }}>
{%- endfor %}
void {{ func_name }} (
{%- for i in range(inputs_name | length) -%}
const {{ inputs_dtype[i] }}* {{ inputs_name[i] }},
{%- endfor -%}
{%- for o in range(outputs_name | length) %}
{{ outputs_dtype[o] }}** {{ outputs_name[o] }}{% if not loop.last %}, {% endif %}
{%- endfor -%});
#ifdef __cplusplus
}
#endif
#endif /* DNN_HPP */
#include <cstdio>  // printf
#include "forward.hpp"
{% for name in inputs_name %}
#include "{{ name }}.h"
{% endfor %}
{% set printf_formats = {
"double": "%lf",
"float": "%f",
"int8_t": "%hhd",
"int16_t": "%hd",
"int32_t": "%d",
"int64_t": "%lld",
"uint8_t": "%hhu",
"uint16_t": "%hu",
"uint32_t": "%u",
"uint64_t": "%llu"
} %}
int main()
{
// Initialize the output arrays
{%- for o in range(outputs_name | length) %}
{{ outputs_dtype[o] }}* {{ outputs_name[o] }} = nullptr;
{% endfor %}
// Call the forward function
{{ func_name }}({{ inputs_name|join(", ") }}{% if inputs_name %}, {% endif %}&{{ outputs_name|join(", &") }});
// Print the results of each output
{%- for o in range(outputs_name | length) %}
    printf("{{ outputs_name[o] }}:\n");
    for (int i = 0; i < {{ outputs_size[o] }}; ++i) {
        printf("{{ printf_formats[outputs_dtype[o]] }} ", {{ outputs_name[o] }}[i]);
    }
    printf("\n");
{% endfor %}
return 0;
}
{% set printf_formats = {
"double": "%lf",
"float": "%f",
"int8_t": "%hhd",
"int16_t": "%hd",
"int32_t": "%d",
"int64_t": "%lld",
"uint8_t": "%hhu",
"uint16_t": "%hu",
"uint32_t": "%u",
"uint64_t": "%llu"
} %}
#include <cstdio> // printf
#include <cmath> // std::abs
#include "forward.hpp" // Exported forward
// Inputs
{% for name in inputs_name %}
#include "{{ name }}.h"
{% endfor %}
// Outputs
{% for name in outputs_name %}
#include "{{ name }}_expected.h"
{% endfor %}
int main()
{
// Initialize the output arrays
{%- for o in range(outputs_name | length) %}
{{ outputs_dtype[o] }}* {{ outputs_name[o] }} = nullptr;
{% endfor %}
const float abs_err = 0.001f;
const float rel_err = 0.0001f;
// Call the forward function
{{ func_name }}({{ inputs_name|join(", ") }}{% if inputs_name %}, {% endif %}&{{ outputs_name|join(", &") }});
int nb_identical;
int nb_out;
// Print the results of each output
{%- for o in range(outputs_name | length) %}
    nb_identical = 0;
    nb_out = 0;
    printf("{{ outputs_name[o] }}:\n");
    for (int i = 0; i < {{ outputs_size[o] }}; ++i) {
        printf("Expected {{ printf_formats[outputs_dtype[o]] }} <-> Predicted {{ printf_formats[outputs_dtype[o]] }}\n", {{ outputs_name[o] }}_expected[i], {{ outputs_name[o] }}[i]);
        if (std::abs({{ outputs_name[o] }}_expected[i] - {{ outputs_name[o] }}[i]) <= abs_err + rel_err * std::abs({{ outputs_name[o] }}_expected[i]))
            nb_identical++;
        nb_out++;
    }
printf("\nNumber of equal outputs: %d / %d\n", nb_identical, nb_out);
{% endfor %}
return 0;
}
import os
from aidge_core.export_utils.code_generation import generate_file
from aidge_core.export_utils.data_conversion import aidge2c
from aidge_core import Tensor
from pathlib import Path
def tensor_to_c(tensor:Tensor)->str:
"""Given a :py:class:``aigd_core.Tensor``, return a C description of the tensor.
For example:
{
{1, 2},
{3, 4}
}
:param tensor: Tensor to transform to a string
:type tensor: Tensor
:return: String representation of a C array
:rtype: str
"""
return str(tensor)
def generate_input_file(export_folder:str,
array_name:str,
tensor:Tensor):
# If directory doesn't exist, create it
if not os.path.exists(export_folder):
os.makedirs(export_folder)
print(f"gen : {export_folder}/{array_name}.h")
ROOT = Path(__file__).resolve().parents[0]
generate_file(
file_path=f"{export_folder}/{array_name}.h",
template_path=str(ROOT / "templates" / "c_data.jinja"),
dims = tensor.dims(),
data_t = aidge2c(tensor.dtype()),
name = array_name,
values = list(tensor)
)
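A short sketch of how generate_input_file might be called; the Tensor-from-numpy construction mirrors the unit tests further down, and the emitted header follows the c_data.jinja template above:

import numpy as np
import aidge_core

tensor = aidge_core.Tensor(np.arange(12, dtype=np.float32).reshape(3, 4))
generate_input_file("my_export/data", "input_0", tensor)
# Writes my_export/data/input_0.h declaring roughly:
#   static const float input_0[3*4] __attribute__((section("nn_data"))) = { ... };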
import os
import subprocess
import shutil
from pathlib import Path
import aidge_core
from typing import Dict, List, Tuple
# Default memory management, which can be used for development
def compute_default_mem_info(scheduler: aidge_core.Scheduler) -> Tuple[int, Dict]:
    """Basic memory management: concatenates memory blocks, with no memory reuse!
    :param scheduler: Aidge scheduler
    :type scheduler: :py:class:`aidge_core.Scheduler`
    :return: The total memory size (in number of elements) and a dictionary mapping each node to a list (one entry per output) of dictionaries (size, offset)
    :rtype: Tuple[int, Dict]
    """
mem_info = {}
mem_size = 0
    # Exclude Producers (their results are stored outside the export memory)
for i, node in enumerate(scheduler.get_static_scheduling()):
if node.type() != "Producer":
node_mem_info = []
for out_id in range(node.get_nb_outputs()):
dims = node.get_operator().get_output(out_id).dims()
mem = 1
for dim in dims:
mem *= dim
                # Add memory info
node_mem_info.append({
"size": mem,
"offset": mem_size
})
# Increment offset for the next layer
mem_size += mem
mem_info[node] = node_mem_info
else:
mem_info[node] = [] # No meminfo for producer
return mem_size, mem_info
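For illustration, the returned pair can be inspected as follows (a sketch, assuming a scheduler has been built and scheduled beforehand):

peak, mem_info = compute_default_mem_info(scheduler)
print(f"Total buffer size: {peak} elements")
for node, planes in mem_info.items():
    for out_id, plane in enumerate(planes):  # Producers map to an empty list
        print(node.name(), out_id, plane["size"], plane["offset"])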
def _gnuplot_installed():
try:
        # Run gnuplot with the --version flag and capture the output
        subprocess.run(["gnuplot", "--version"], check=True, capture_output=True)
return True
except FileNotFoundError:
aidge_core.Log.warn("Gnuplot is not installed.")
return False
except subprocess.CalledProcessError:
aidge_core.Log.warn("Gnuplot command found but failed to run.")
return False
def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder: Path = None, wrapping: bool = False) -> Tuple[int, Dict]:
"""Generates optimized memory information for a computation graph managed by a scheduler.
This function analyzes the memory usage of a computation graph, determining the memory peak
and detailed memory management information for each node in the scheduler. It supports optional
wrapping of memory buffers and logs memory usage statistics to facilitate memory optimization.
:param scheduler: Scheduler instance that organizes the computation graph. It manages the
nodes and facilitates memory planning by invoking its `generate_memory` method.
:type scheduler: aidge_core.Scheduler
:param stats_folder: Directory path to store memory statistics and plots generated by `mem_manager`.
If provided as a string, it is converted to a `Path` object, default=None.
:type stats_folder: Path, optional
:param wrapping: Boolean flag to enable or disable wrap-around buffer optimization.
Defaults to `False`.
:type wrapping: bool, optional
    :return: A tuple containing the peak memory size and a dictionary mapping each
             scheduled node to its memory information. The memory information for each node
             includes details such as size, offset, stride, length, count, and optional wrap-around details.
    :rtype: Tuple[int, Dict]
"""
    # forward_dims() must have been called outside this function.
    # The scheduling is also assumed to have been generated beforehand;
    # otherwise, uncomment the following line:
    # scheduler.generate_scheduling()
    # Generate the memory manager.
    # So far, Producers are not taken into account by the memory manager => inc_producers=False
mem_manager = scheduler.generate_memory(
inc_producers=False, wrap_around_buffer=wrapping)
# List of nodes which are connected at the input of the graph (None if input is not connected)
nodes_at_input = [n[0] for n in scheduler.graph_view().inputs()]
if stats_folder is not None:
if _gnuplot_installed():
# Use gnuplot to generate the log
os.makedirs(str(Path(stats_folder) / "graph"), exist_ok=True)
mem_manager.log("memory_info")
os.chmod("memory_info_plot.gnu", 0o777)
os.system("./memory_info_plot.gnu")
shutil.move("memory_info", str(Path(stats_folder) / "graph" / "memory_info"))
shutil.move("memory_info_plot.png", str(
Path(stats_folder) / "graph" / "memory_info_plot.png"))
os.remove("memory_info_plot.gnu")
else:
aidge_core.Log.warn("Warning: gnuplot is not installed, could not generate stat folder.")
    # In the export, we currently use a unified memory buffer whose size
    # is determined by the peak memory usage
mem_size = mem_manager.get_peak_usage()
mem_info = {}
mem_planes = mem_manager.get_planes()
for node in scheduler.get_static_scheduling():
node_mem_info = []
if node.type() == "Producer":
pass
elif node in nodes_at_input:
            # Input memory management (assumes tensor dims end with [:, channel, height, width])
            tensor = node.get_operator().get_output(0)
            if tensor is None:
                raise RuntimeError("Input producer not provided")
            if len(tensor.dims()) < 3:
                raise RuntimeError(
                    f"Input producer dimensions must end with [channel, height, width], got {tensor.dims()} instead")
            # TODO: use the get_chan, get_height and get_width functions!
            node_mem_info.append({
                "size": tensor.dims()[-3],    # Should be nb_channels
                "offset": 0,                  # Input data is stored outside the export
                                              # function, so the offset is irrelevant here
                "stride": tensor.dims()[-3],  # Should be nb_channels
                "length": tensor.dims()[-1],  # Should be width
                "count": tensor.dims()[-2],   # Should be height
                "cont_offset": 0,             # Stored outside the export function,
                                              # so the contiguous offset is irrelevant too
                "cont_size": tensor.dims()[-1] * \
                             tensor.dims()[-2] * \
                             tensor.dims()[-3], # Total size of the input
                "wrap_offset": 0, # No wrapping
                "wrap_size": 0    # No wrapping
            })
else:
for out_id in range(node.get_nb_outputs()):
plane = mem_planes[node][out_id]
node_mem_info.append({
"size": plane.size,
"offset": plane.get_contiguous_offset(),
"stride": plane.stride,
"length": plane.length,
"count": plane.count,
"cont_offset": plane.get_contiguous_offset(),
"cont_size": plane.get_contiguous_size(),
"wrap_offset": plane.get_wrapped_offset(),
"wrap_size": plane.get_wrapped_size()
})
mem_info[node] = node_mem_info
return mem_size, mem_info
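A hedged usage sketch of the optimized variant; the stats folder is optional and its plots require gnuplot on the PATH:

from pathlib import Path

peak_mem, mem_info = generate_optimized_memory_info(
    scheduler,                          # assumed scheduled beforehand
    stats_folder=Path("export_stats"),  # omit to skip the gnuplot log
    wrapping=False,
)
# Each non-Producer node maps to one dict per output with the keys
# size, offset, stride, length, count, cont_offset, cont_size,
# wrap_offset and wrap_size, as filled in above.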
import os
import json
import builtins
import aidge_core
import numpy as np
from pathlib import Path
from typing import Any, Dict, List, Optional
def _retrieve_operator_attrs(node : aidge_core.Node) -> Dict[str, Optional[Any]]:
"""
Returns the dictionary containing the attributes of a given Node.
    :param node: A Node in the list of ordered nodes.
    :type node: aidge_core.Node
:return: A dictionary with the Node's attributes.
:rtype: Dict[str, Optional[Any]]
"""
if node.get_operator().attr is not None:
node_attr_dict = node.get_operator().attr.dict()
for key,value in node_attr_dict.items():
            if type(value).__name__ not in dir(builtins):
node_attr_dict[key] = value.name
else:
node_attr_dict = {}
return node_attr_dict
def _create_dict(ordered_nodes : List[aidge_core.Node], write_trainable_params_embed : bool, write_trainable_params_ext : bool, path_trainable_params : Path, params_file_format : str) -> Dict[str, Optional[Any]]:
"""
Creates a dictionary to store the information of a given ordered GraphView.
    :param ordered_nodes: A list with the GraphView's ordered nodes.
    :type ordered_nodes: List[aidge_core.Node]
:param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed).
:type write_trainable_params_embed: bool
:param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file.
:type write_trainable_params_ext: bool
:param path_trainable_params: Path of the external file used to store the Nodes' trainable parameters.
:type path_trainable_params: Path
:param params_file_format: Format of the external file used to store the Nodes' trainable parameters. Options: ``npz`` or ``json``. Default : ``json``. Requires ``write_trainable_params_ext``.
:type params_file_format: str
:return: A dictionary with the GraphView description.
:rtype: Dict[str, Optional[Any]]
"""
graphview_dict = {'graph': []}
for node in ordered_nodes:
if node is not None:
node_dict = {'name' : node.name(),
'optype' : node.get_operator().type(),
'nb_inputs' : node.get_operator().nb_inputs(),
'nb_outputs' : node.get_operator().nb_outputs()}
inputs = []
for input_idx in range(node.get_operator().nb_inputs()):
input_dict = {'dims' : node.get_operator().get_input(input_idx).dims(),
'data_type' : str(node.get_operator().get_input(input_idx).dtype()),
'data_format' : str(node.get_operator().get_input(input_idx).dformat())}
inputs.append(input_dict)
node_dict['inputs'] = inputs
outputs = []
for output_idx in range(node.get_operator().nb_outputs()):
output_dict = {'dims' : node.get_operator().get_output(output_idx).dims(),
'data_type' : str(node.get_operator().get_output(output_idx).dtype()),
'data_format' : str(node.get_operator().get_output(output_idx).dformat())}
outputs.append(output_dict)
node_dict['outputs'] = outputs
parents = node.get_parents()
            if parents and parents[0] is None:
                # Move the None parent to the end of the list
                parents.append(parents.pop(0))
parents_inputs = []
input_idx = 0
for parent in node.get_parents():
if parent is not None:
for children in parent.outputs():
for child in children:
if child[0] == node and child[1] == input_idx:
parents_inputs.append((parent.name(), input_idx))
elif parent is None:
if input_idx not in [item[1] for item in parents_inputs]:
parents_inputs.append((None, input_idx))
input_idx += 1
node_dict['parents'] = parents_inputs
children_outputs = []
output_idx = 0
for children in node.get_ordered_children():
for child in children:
if child is not None:
for parent in child.inputs():
if parent[0] == node and parent[1] == output_idx:
children_outputs.append((child.name(), output_idx))
output_idx += 1
node_dict['children'] = children_outputs
            # Check if the node is a meta-operator
attributes_dict = {}
if isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
attributes_dict['micro_graph'] = []
for micro_node in node.get_operator().get_micro_graph().get_nodes():
micro_node_dict = {'name' : micro_node.name(),
'optype' : micro_node.type()}
micro_node_attr_dict = _retrieve_operator_attrs(micro_node)
micro_node_dict['attributes'] = micro_node_attr_dict
attributes_dict['micro_graph'].append(micro_node_dict)
else:
node_attr_dict = _retrieve_operator_attrs(node)
attributes_dict.update(node_attr_dict)
node_dict['attributes'] = attributes_dict
if node.type() == 'Producer':
if write_trainable_params_ext:
                    params_file_format = params_file_format.casefold()
if params_file_format=='npz':
np.savez_compressed(Path(path_trainable_params, node.name()), **{node.name() : node.get_operator().get_output(0)})
node_dict['tensor_data'] = str(Path(path_trainable_params, node.name() + '.npz'))
elif params_file_format=='json':
tensor = np.array(node.get_operator().get_output(0))
tensor_dict = {
node.name() :
{
'dims' : tensor.shape,
'data_type' : str(tensor.dtype),
'tensor_data' : tensor.tolist()
}
}
with open(Path(path_trainable_params, node.name() + '.json'), 'w') as fp:
json.dump(tensor_dict, fp, indent=4)
node_dict['tensor_data'] = str(Path(path_trainable_params, node.name() + '.json'))
                else:
                    raise ValueError(f"Unrecognized file format '{params_file_format}' for writing trainable parameters.")
                if write_trainable_params_embed:
                    node_dict['tensor_data'] = np.array(node.get_operator().get_output(0)).tolist()
            graphview_dict['graph'].append(node_dict)
return graphview_dict
def _write_dict_json(graphview_dict : Dict[str, Optional[Any]], json_path : str) -> None:
"""
Writes dictionary containing GraphView description to a JSON file.
:param graphview_dict: A dictionary with the GraphView description.
:type graphview_dict: dict[str, int, float, bool, None]
:param json_path: Path to write JSON file.
:type json_path: str
"""
with open(json_path, 'w') as fp:
json.dump(graphview_dict, fp, indent=4)
return None
def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainable_params_embed : bool = False, write_trainable_params_ext : bool = False, params_file_format : str = 'json') -> None:
"""
Generates the description for a GraphView in the JSON format.
    :param gview: A GraphView of Aidge.
    :type gview: aidge_core.GraphView
:param json_path: Path to write JSON file.
:type json_path: Path
:param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed).
:type write_trainable_params_embed: bool, optional
:param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file.
:type write_trainable_params_ext: bool, optional
:param params_file_format: Format of the external file used to store the Nodes' trainable parameters. Options: ``npz`` or ``json``. Default : ``json``. Requires ``write_trainable_params_ext``.
:type params_file_format: str, optional
"""
if not json_path.suffix:
if not json_path.is_dir():
json_path.mkdir(parents=True, exist_ok=True)
json_path = json_path.joinpath('model.json')
else:
if json_path.suffix != '.json':
raise Exception('If ``json_path`` contains a filename, it must be of JSON format.')
if not json_path.parent.is_dir():
json_path.parent.mkdir(parents=True, exist_ok=True)
if write_trainable_params_ext:
path_trainable_params = (json_path.parent).joinpath(json_path.stem + '_trainable_params/')
path_trainable_params.mkdir(parents=True, exist_ok=True)
else:
path_trainable_params = Path()
if isinstance(gview, aidge_core.GraphView):
# Sort GraphView in topological order
ordered_nodes = gview.get_ordered_nodes()
# Create dict from GraphView
graphview_dict = _create_dict(ordered_nodes, write_trainable_params_embed, write_trainable_params_ext, path_trainable_params, params_file_format)
# Write dict to JSON
_write_dict_json(graphview_dict, json_path)
else:
raise Exception("Graph must be an instance of aidge_core.GraphView.")
return None
\ No newline at end of file
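A usage sketch for gview_to_json: when json_path is a directory, the file becomes <dir>/model.json, and external parameters land in a sibling <stem>_trainable_params/ folder (model is assumed to be a GraphView):

from pathlib import Path

gview_to_json(
    gview=model,
    json_path=Path("model_dump"),     # resolved to model_dump/model.json
    write_trainable_params_ext=True,  # Producer tensors saved separately
    params_file_format="npz",
)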
import numpy as np
import aidge_core
def simplify_graph(graph: aidge_core.GraphView):
"""
Simplify a graph loaded from ONNX.
:param graph: The GraphView to simplify.
:type graph: aidge_core.GraphView
"""
def check_constant_producer(value):
def _check_constant_producer(node):
out = node.get_operator().get_output(0)
return (len(out) == 1 and np.isclose(out[0], value))
return _check_constant_producer
gm = aidge_core.SinglePassGraphMatching(graph)
gm.add_node_lambda("Constant_sqrt2", check_constant_producer(np.sqrt(2)))
gm.add_node_lambda("Constant_1", check_constant_producer(1))
gm.add_node_lambda("Constant_0_5", check_constant_producer(0.5))
# Linear [from PyTorch ONNX]
aidge_core.fuse_to_metaops(gm, "MatMul-*>Add", "Linear")
# LayerNorm [from PyTorch ONNX]
aidge_core.fuse_to_metaops(gm, "ReduceMean-*>Sub#1~>(Pow#1->ReduceMean-*>Add#1->Sqrt)-*>Div#1-*>Mul#1-*>Add#2;"
"Sub#1~*>Div#1;"
"Pow#1<1~Producer;"
"Add#1<*~Producer;"
"Mul#1<*~Producer;"
"Add#2<*~Producer;"
"Sub#1~>$", "LayerNorm")
# ScaledDotProductAttention [from PyTorch ONNX]
aidge_core.fuse_to_metaops(gm, "MatMul->Div#1->Softmax-*>MatMul;"
"Div#1<1~Producer", "ScaledDotProductAttention")
# MultiHeadAttention [from PyTorch ONNX]
aidge_core.fuse_to_metaops(gm, "ScaledDotProductAttention#1->Transpose->Reshape#1->Linear;"
"Reshape#1<1~Producer;"
"ScaledDotProductAttention#1<0-(Transpose<-Reshape#2<-Add#1);"
"ScaledDotProductAttention#1<1-(Transpose<-Reshape#3<-Add#2);"
"ScaledDotProductAttention#1<2-(Transpose<-Reshape#4<-Add#3);"
"Reshape#2<1~Producer;"
"Add#1<*-0-Split#1;"
"Add#2<*-1-Split#1;"
"Add#3<*-2-Split#1;"
"Split#1<-MatMul;"
"Split#1<1~Producer", "MultiHeadAttention")
# GeLU [from PyTorch ONNX]
aidge_core.fuse_to_metaops(gm, "Div#1->Erf->Add#1-*>Mul->Mul#2;"
"Div#1<1~Producer[Constant_sqrt2];"
"Add#1<*~Producer[Constant_1];"
"Mul#2<*~Producer[Constant_0_5]", "GeLU")
import matplotlib
import matplotlib.pyplot as plt
from functools import partial
import numpy as np
import aidge_core
class StaticAnalysisExt(aidge_core.StaticAnalysis):
def log_nb_params(self, filename, title=None, log_scale=False):
        namePtrTable = self.get_graph().get_ranked_nodes_name("{0} ({1}#{3})")
nodes = self.get_graph().get_ordered_nodes()
series = []
legend = None
for node in nodes:
if node.type() == "Producer":
continue
name = namePtrTable[node]
series.append([name, self.get_nb_params(node)])
if title is None: title = "log_nb_params"
if filename is not None:
self._log_bar(series, filename, title, legend, log_scale)
return series
def log_params_size(self, filename, title=None, log_scale=False):
        namePtrTable = self.get_graph().get_ranked_nodes_name("{0} ({1}#{3})")
nodes = self.get_graph().get_ordered_nodes()
series = []
legend = None
for node in nodes:
if node.type() == "Producer":
continue
name = namePtrTable[node]
            series.append([name, self.get_params_size(node)])
if title is None: title = "log_params_size"
if filename is not None:
self._log_bar(series, filename, title, legend, log_scale)
return series
def log_nb_arithm_ops(self, filename, title=None, log_scale=False):
return self._log_callback(aidge_core.OperatorStats.get_nb_arithm_ops, filename, title, log_scale)
def log_nb_logic_ops(self, filename, title=None, log_scale=False):
return self._log_callback(aidge_core.OperatorStats.get_nb_logic_ops, filename, title, log_scale)
def log_nb_comp_ops(self, filename, title=None, log_scale=False):
return self._log_callback(aidge_core.OperatorStats.get_nb_comp_ops, filename, title, log_scale)
def log_nb_nl_ops(self, filename, title=None, log_scale=False):
return self._log_callback(aidge_core.OperatorStats.get_nb_nl_ops, filename, title, log_scale)
def log_nb_mac_ops(self, filename, title=None, log_scale=False):
return self._log_callback(aidge_core.OperatorStats.get_nb_mac_ops, filename, title, log_scale)
def log_nb_ops(self, filename, title=None, log_scale=False):
return self._log_callback(aidge_core.OperatorStats.get_nb_ops, filename, title, log_scale)
def log_nb_arithm_int_ops(self, filename, title=None, log_scale=False):
return self._log_callback(aidge_core.OperatorStats.get_nb_arithm_int_ops, filename, title, log_scale)
def log_nb_arithm_fp_ops(self, filename, title=None, log_scale=False):
return self._log_callback(aidge_core.OperatorStats.get_nb_arithm_fp_ops, filename, title, log_scale)
def log_nb_ops_by_type(self, filename, title=None, log_scale=False):
return self._log_callback([aidge_core.OperatorStats.get_nb_arithm_int_ops,
aidge_core.OperatorStats.get_nb_arithm_fp_ops,
aidge_core.OperatorStats.get_nb_logic_ops,
aidge_core.OperatorStats.get_nb_comp_ops,
aidge_core.OperatorStats.get_nb_nl_ops], filename, title, log_scale)
def _log_callback(self, callback, filename, title=None, log_scale=False):
"""
Log a statistic given by an OperatorStats callback member function.
        Usage:
            stats = StaticAnalysisExt(model)
            stats._log_callback(aidge_core.OperatorStats.get_nb_params, "stats.png", "Nb params per operator")
        :param callback: OperatorStats member function to call.
:param filename: Output graph file name.
:type filename: str
:param title: Title of the graph.
:type title: str
"""
        namePtrTable = self.get_graph().get_ranked_nodes_name("{0} ({1}#{3})")
nodes = self.get_graph().get_ordered_nodes()
series = []
legend = None
for node in nodes:
if node.type() == "Producer":
continue
stats = self.get_op_stats(node)
name = namePtrTable[node]
attr = {}
if type(node.get_operator()) is aidge_core.GenericOperatorOp:
# Display Generic Op in orange
attr = {'color': 'orange'}
elif not node.get_operator().is_atomic():
# Display Meta Op in bold
attr = {'fontweight': 'bold'}
elif node.type() not in aidge_core.get_keys_OperatorStats():
# Display unsupported operator in red labels
attr = {'color': 'red'}
if attr:
name = (name, attr)
            if isinstance(callback, list):
                series.append([name, [partial(cb, stats)() for cb in callback]])
                legend = [cb.__name__ for cb in callback]
                if title is None: title = str(legend)
            else:
                series.append([name, partial(callback, stats)()])
                if title is None: title = callback.__name__
if filename is not None:
self._log_bar(series, filename, title, legend, log_scale)
return series
def _log_bar(self, series, filename, title=None, legend=None, log_scale=False):
names, values = zip(*series)
names_only = [item[0] if isinstance(item, tuple) else item for item in names]
fig, ax = plt.subplots(figsize=(max(5, len(names)/4), 5))
plt.xlim(-0.5, len(names) - 0.5)
if isinstance(values[0], list):
series = [list(i) for i in zip(*values)]
bot = np.zeros(len(series[0]))
for i, serie in enumerate(series):
plt.bar(names_only, serie, bottom=bot)
bot += serie
else:
plt.bar(names_only, values)
ax.yaxis.minorticks_on()
plt.grid(axis='y', which='major', linestyle='--', color='gray')
plt.grid(axis='y', which='minor', linestyle=':', color='lightgray')
formatter0 = matplotlib.ticker.EngFormatter(unit='')
ax.yaxis.set_major_formatter(formatter0)
plt.gca().set_axisbelow(True)
labels = plt.gca().get_xticks()
tick_labels = plt.gca().get_xticklabels()
for i, label in enumerate(labels):
if isinstance(names[i], tuple):
if 'color' in names[i][1]:
tick_labels[i].set_color(names[i][1]['color'])
elif 'fontweight' in names[i][1]:
tick_labels[i].set_fontweight(names[i][1]['fontweight'])
plt.xticks(rotation='vertical')
if log_scale: plt.yscale('log')
if title is not None: plt.title(title)
if legend is not None: plt.legend(legend)
plt.savefig(filename, bbox_inches='tight')
def _log_barh(self, series, filename, title=None, legend=None, log_scale=False):
names, values = zip(*series)
names_only = [item[0] if isinstance(item, tuple) else item for item in names]
fig, ax = plt.subplots(figsize=(10, max(5, len(names)/4)))
plt.ylim(-0.5, len(names) - 0.5)
if isinstance(values[0], list):
series = [list(i) for i in zip(*values)]
left = np.zeros(len(series[0]))
for i, serie in enumerate(series):
plt.barh(names_only, serie, left=left)
left += serie
else:
plt.barh(names_only, values)
ax.xaxis.minorticks_on()
plt.grid(axis='x', which='major', linestyle='--', color='gray')
plt.grid(axis='x', which='minor', linestyle=':', color='lightgray')
formatter0 = matplotlib.ticker.EngFormatter(unit='')
ax.xaxis.set_major_formatter(formatter0)
plt.gca().set_axisbelow(True)
plt.gca().xaxis.set_label_position('top')
plt.gca().xaxis.tick_top()
labels = plt.gca().get_yticks()
tick_labels = plt.gca().get_yticklabels()
for i, label in enumerate(labels):
if isinstance(names[i], tuple):
if 'color' in names[i][1]:
tick_labels[i].set_color(names[i][1]['color'])
elif 'fontweight' in names[i][1]:
tick_labels[i].set_fontweight(names[i][1]['fontweight'])
if log_scale: plt.xscale('log')
if title is not None: plt.title(title)
if legend is not None: plt.legend(legend)
plt.savefig(filename, bbox_inches='tight')
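Following the usage shown in the _log_callback docstring, a typical session might look like this (model is assumed to be an aidge_core.GraphView with dims forwarded):

stats = StaticAnalysisExt(model)
stats.log_nb_params("nb_params.png")
stats.log_params_size("params_size.png", log_scale=True)
stats.log_nb_ops_by_type("ops_by_type.png", log_scale=True)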
#
# Do not add auto-import of submodules here.
#
# The testing module contains utils and other tools
# related to tests, possibly reusable by other aidge
# components' unit_tests.
#
# Import a specific module explicitly, for instance:
#     import aidge_core.testing.utils
# or
#     from aidge_core.testing.utils import (....,)
#
#
# Should provide some general utility functions for testing.
# For instance:
# - filesystem
# - os dependencies
# - unit tests setup
#
from .tree_cache import tree_update_from_cache
from .tree_utils import tree_move, tree_remove
"""
Provides the tree_update_from_cache(path) function, which
minimizes changes in a generated tree when files are
re-generated but identical.
It takes as argument a generated tree, and optionally a cache path.
Then it will update both the generated tree and the cache tree
to take the cache version of the files when identical, or the newly
generated one otherwise.
This is in particular useful for speeding up iterative compilation
when generating a source/build system tree.
For instance:
- first time, one generates a tree of files:
  - generated: path/{t1,t2,t3}
  - then call tree_update_from_cache("path")
  - will generate: __cache_path/{t1,t2,t3}
  - and leave untouched: path/{t1,t2,t3}
- second time, one re-generates the tree of files:
  - say the generated files are identical: path/{t1,t2,t3}
  - then call tree_update_from_cache("path")
  - will leave the cache untouched: __cache_path/{t1,t2,t3}
  - and reset files to their previous timestamps: path/{t1,t2,t3}
- third time, one re-generates again with some changes:
  - say t1 is identical, t2's content has changed, and t3 is gone: path/{t1,t2'}
  - then call tree_update_from_cache("path")
  - will update t2' and remove t3 in the cache: __cache_path/{t1,t2'}
  - and reset t1 to its previous timestamp: path/{t1,t2'}
Note that by default the `dir`/__cache_`name` cache path is used
for a given path `dir`/`name`.
Though it is also possible to have the cache path inside the generated tree,
in this case use for instance:
tree_update_from_cache(path, Path(path) / "__cache_src")
For more evolved scenarios, specialize the provided FileTreeCache class.
"""
from pathlib import Path
import shutil
import sys
import filecmp
from typing import Optional, Union, List
from .tree_utils import tree_move, tree_remove
__all__ = [
"FileTreeCache",
"tree_update_from_cache",
]
def is_relative_to(path: Path, other: Path) -> bool:
"""
Dynamically choose implementation based on Python version
"""
# Python 3.9+
if sys.version_info >= (3, 9):
return path.is_relative_to(other)
# Python 3.8 and earlier
try:
path.relative_to(other)
return True
except ValueError:
return False
class FileTreeCache():
"""
Class for implementation of the file tree cache.
    Can be subclassed to change, for instance, the default cache/tmp name prefixes,
    or to specialize it for other contexts.
"""
default_cache_prefix = "__cache_"
default_tmp_cache_prefix = "__tmp_cache_"
default_tmp_prefix = "__tmp_"
def __init__(self,
src_path: Union[str, Path],
cache_path: Optional[Union[str, Path]] = None
) -> None:
self.src_path = Path(src_path).absolute()
self.cache_path = (
Path(cache_path).absolute()
if cache_path is not None else
(self.src_path.parent /
f"{self.default_cache_prefix}{self.src_path.name}")
)
ctx_msg = f"tree_cache: {src_path = }, {cache_path = }"
assert self.src_path != self.cache_path, f"src_path and cache_path must differ on {ctx_msg}"
assert not is_relative_to(self.src_path, self.cache_path), f"src_path must not be relative to cache_path on {ctx_msg}"
self._tmp_path = (
self.src_path.parent /
f"{self.default_tmp_prefix}{self.src_path.name}")
self._tmp_cache_path = (
self.src_path.parent /
f"{self.default_tmp_cache_prefix}{self.src_path.name}")
@classmethod
def _copytree_or_cache(cls, src_dir: Path, dst_dir: Path, cache_dir: Path, dst_cache_dir: Path) -> None:
assert not dst_dir.exists()
assert not dst_cache_dir.exists()
assert src_dir.is_dir()
assert not cache_dir.exists() or cache_dir.is_dir()
assert not is_relative_to(cache_dir, src_dir)
def copy_or_cache(src, dst):
base_src = Path(src).relative_to(src_dir)
cache_src = cache_dir / base_src
base_dst = Path(dst).relative_to(dst_dir)
cache_dst = dst_cache_dir / base_dst
cache_dst.parent.mkdir(parents=True, exist_ok=True)
if cache_src.exists() and filecmp.cmp(str(src), str(cache_src), shallow=False):
shutil.copy2(str(cache_src), str(cache_dst))
shutil.copy2(str(cache_src), dst)
else:
shutil.copy2(src, str(cache_dst))
shutil.copy2(src, dst)
shutil.copytree(str(src_dir), str(dst_dir), copy_function=copy_or_cache)
def update_from_cache(self) -> None:
assert self.src_path.exists(), f"src path must exist before swapping with cache"
# Move cache path apart first as it may be relative to source path
tree_move(self.cache_path, self._tmp_cache_path, ignore_missing=True, exist_ok=True)
# Move source path apart before recreating merged source tree
tree_move(self.src_path, self._tmp_path, exist_ok=True)
        # Merge the source and cache trees into dst/dst_cache with a
        # variant of copytree.
self._copytree_or_cache(
src_dir=self._tmp_path,
dst_dir=self.src_path,
cache_dir=self._tmp_cache_path,
dst_cache_dir=self.cache_path,
)
# Remove tmp source path
tree_remove(self._tmp_path)
# Note that the tmp cache path may not exist
tree_remove(self._tmp_cache_path, ignore_missing=True)
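As the class docstring notes, the prefixes can be specialized by deriving; a minimal sketch with hypothetical dot-prefixed names:

class HiddenFileTreeCache(FileTreeCache):
    # Hypothetical specialization: hide the work directories with a dot prefix
    default_cache_prefix = ".cache_"
    default_tmp_cache_prefix = ".tmp_cache_"
    default_tmp_prefix = ".tmp_"

HiddenFileTreeCache("generated_tree").update_from_cache()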
def tree_update_from_cache(
src_path: Union[str, Path],
cache_path: Optional[Union[str, Path]] = None) -> None:
"""
    Update the current generation of a tree from the cache of older
    generations, preserving file timestamps when file contents are identical.
:param src_path: str or Path object to the generated tree
:param cache_path: optional str or Path object to the cache path,
or defaults to: `cache_path = src_path.parent / f"__cache_{src_path.name}"`
"""
FileTreeCache(src_path, cache_path).update_from_cache()
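Tying the module docstring's scenario to code: an iterative generation loop might look as follows, where generate_tree is a hypothetical generator writing path/{t1,t2,t3}:

from pathlib import Path

for _ in range(3):
    generate_tree(Path("export"))     # hypothetical: (re)writes export/{t1,t2,t3}
    tree_update_from_cache("export")  # identical files get their old timestamps back
# The cache lives at __cache_export next to "export" by default; pass
# cache_path=Path("export") / "__cache_src" to keep it inside the tree.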
"""
Provide utility function for file trees manipulations.
"""
import shutil
import sys
from pathlib import Path
from typing import Union
__all__ = [
"tree_move",
"tree_remove",
]
def is_relative_to(path: Path, other: Path) -> bool:
"""
Dynamically choose implementation based on Python version
"""
# Python 3.9+
if sys.version_info >= (3, 9):
return path.is_relative_to(other)
# Python 3.8 and earlier
try:
path.relative_to(other)
return True
except ValueError:
return False
def tree_remove(
path: Union[str, Path],
ignore_missing: bool = False,
) -> None:
"""
Remove the full tree at path.
Optionally ignore if the path does not exist when ignore_missing is True.
:param path: str or Path object to the directory path
    :param ignore_missing: if True, returns early if path does not exist
"""
path = Path(path)
ctx_msg = f"tree_remove: : {path = }"
assert ignore_missing or path.exists(), f"path must exists when ignore_missing is False on {ctx_msg}"
if ignore_missing and not path.exists():
return
shutil.rmtree(path)
def tree_move(
src_path: Union[str, Path],
dst_path: Union[str, Path],
ignore_missing: bool = False,
exist_ok: bool = False,
) -> None:
"""
Move the whole src_path file tree to dst_path.
    Optionally does nothing if src_path does not exist and ignore_missing is True.
    Optionally the full dst_path is removed first when exist_ok is True.
    :param src_path: str or Path object to the source directory path
    :param dst_path: str or Path object to the new path name for the source directory
    :param ignore_missing: if True, returns early if src_path does not exist
    :param exist_ok: if True, first erases the new path name if it exists
"""
src_path = Path(src_path)
dst_path = Path(dst_path)
ctx_msg = f"tree_move: : {src_path = }, {dst_path = }"
assert ignore_missing or src_path.exists(), f"src_path must exists when ignore_missing is False on {ctx_msg}"
assert exist_ok or not dst_path.exists(), f"dst_path must not exists when exist_ok is False on {ctx_msg}"
assert src_path != dst_path, f"paths must not be identical on {ctx_msg}"
assert not is_relative_to(dst_path, src_path), f"dst_path must not be relative to src_path on {ctx_msg}"
assert not is_relative_to(src_path, dst_path), f"src_path must not be relative to dst_path on {ctx_msg}"
if ignore_missing and not src_path.exists():
return
if exist_ok and dst_path.exists():
shutil.rmtree(dst_path)
shutil.move(src_path, dst_path)
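A short sketch mirroring how the updated unit test below uses these helpers to preserve a build directory across a clean:

from aidge_core.testing.utils import tree_move, tree_remove

tree_move("export/build", "__tmp_build", ignore_missing=True, exist_ok=True)
tree_remove("export", ignore_missing=True)  # wipe the generated tree
# ... re-generate "export" here ...
tree_move("__tmp_build", "export/build", ignore_missing=True)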
@@ -8,8 +8,6 @@ http://www.eclipse.org/legal/epl-2.0.
SPDX-License-Identifier: EPL-2.0
"""
import aidge_core
from aidge_core.utils import run_command
import unittest
import os
import pathlib
@@ -18,6 +16,10 @@ import subprocess
import sys
import aidge_core
from aidge_core.utils import run_command
from aidge_core.testing.utils import tree_update_from_cache, tree_move, tree_remove
def initFiller(model):
# Initialize parameters (weights and biases)
for node in model.get_nodes():
@@ -26,6 +28,8 @@ def initFiller(model):
value = prod_op.get_output(0)
value.set_backend("cpu")
tuple_out = node.output(0)[0]
# Force seed before filler for reproducibility
aidge_core.random.Generator.set_seed(0)
# No conv in current network
if tuple_out[0].type() == "Conv" and tuple_out[1] == 1:
# Conv weight
@@ -43,22 +47,6 @@
pass
def clean_dir(dir: pathlib.Path) -> None:
if not dir.is_dir():
print(f"Error : directory {dir} doesn't exist. Exiting clean_dir().")
return
for filename in os.listdir(dir):
file_path = os.path.join(dir, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(f"Failed to delete {file_path}. Reason: {e}")
return
class test_export(unittest.TestCase):
"""Test aidge export"""
@@ -66,6 +54,10 @@ class test_export(unittest.TestCase):
self.EXPORT_PATH: pathlib.Path = pathlib.Path("dummy_export")
self.BUILD_DIR: pathlib.Path = self.EXPORT_PATH / "build"
self.INSTALL_DIR: pathlib.Path = (self.EXPORT_PATH / "install").absolute()
self.TMP_BUILD_DIR: pathlib.Path = (
self.EXPORT_PATH.parent /
f"__tmp_{self.EXPORT_PATH.name}_build"
)
def tearDown(self):
pass
@@ -76,28 +68,41 @@ class test_export(unittest.TestCase):
model = aidge_core.sequential(
[
aidge_core.FC(
in_channels=32 * 32 * 3, out_channels=512, name="InputNode"
in_channels=32 * 32 * 3, out_channels=64, name="InputNode"
),
aidge_core.ReLU(name="Relu0"),
aidge_core.FC(in_channels=512, out_channels=256, name="FC1"),
aidge_core.FC(in_channels=64, out_channels=32, name="FC1"),
aidge_core.ReLU(name="Relu1"),
aidge_core.FC(in_channels=256, out_channels=128, name="FC2"),
aidge_core.FC(in_channels=32, out_channels=16, name="FC2"),
aidge_core.ReLU(name="Relu2"),
aidge_core.FC(in_channels=128, out_channels=10, name="OutputNode"),
aidge_core.FC(in_channels=16, out_channels=10, name="OutputNode"),
]
)
initFiller(model)
model.forward_dims([[1, 32*32*3]])
# Export model
aidge_core.export(self.EXPORT_PATH, model)
# Preserve previously generated build if present
tree_move(self.BUILD_DIR, self.TMP_BUILD_DIR, ignore_missing=True, exist_ok=True)
# Clean install dir
tree_remove(self.INSTALL_DIR, ignore_missing=True)
self.assertTrue(
self.EXPORT_PATH.is_dir(), "Export folder has not been generated"
# Export model
aidge_core.serialize_to_cpp(self.EXPORT_PATH, model)
self.assertTrue(self.EXPORT_PATH.is_dir(), "Export folder has not been generated")
# Add other source files
shutil.copyfile(pathlib.Path(__file__).parent / "static/main.cpp", self.EXPORT_PATH / "main.cpp")
# Use cache if any, put cache inside export dir
# such that cleaning export dir also cleans the cache
tree_update_from_cache(
self.EXPORT_PATH,
cache_path=self.EXPORT_PATH / "__cache_export"
)
os.makedirs(self.BUILD_DIR, exist_ok=True)
clean_dir(self.BUILD_DIR) # if build dir existed already ensure its emptyness
clean_dir(self.INSTALL_DIR)
# Move back preserved build dir if any and ensure build dir exists
tree_move(self.TMP_BUILD_DIR, self.BUILD_DIR, ignore_missing=True)
self.BUILD_DIR.mkdir(exist_ok=True)
# Test compilation of export
search_path = (
@@ -106,11 +111,6 @@ class test_export(unittest.TestCase):
else os.environ["AIDGE_INSTALL"]
)
shutil.copyfile(
pathlib.Path(__file__).parent / "static/main.cpp",
self.EXPORT_PATH / "main.cpp",
)
##########################
# CMAKE EXPORT
try:
......
@@ -49,8 +49,8 @@ class test_OperatorImpl(unittest.TestCase):
"""Test registering an implementation
"""
global GLOBAL_CPT
aidge_core.register_ConvOp2D("cpu", testImpl)
self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
aidge_core.register_Conv2DOp("cpu", testImpl)
self.assertTrue("cpu" in aidge_core.get_keys_Conv2DOp())
conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
conv.get_operator().set_backend("cpu")
conv.get_operator().set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
@@ -61,9 +61,9 @@ class test_OperatorImpl(unittest.TestCase):
"""Test registering an implementation
"""
global GLOBAL_CPT
aidge_core.register_ConvOp2D("cpu", testImpl)
aidge_core.register_Conv2DOp("cpu", testImpl)
aidge_core.register_ProducerOp("cpu", testImpl)
self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
self.assertTrue("cpu" in aidge_core.get_keys_Conv2DOp())
conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
model = aidge_core.sequential([conv])
model.set_backend("cpu")
......
@@ -101,7 +101,7 @@ class test_operator_binding(unittest.TestCase):
self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
op = aidge_core.GenericOperatorOp("any_type", 1,0,1)
with self.assertRaises(RuntimeError):
with self.assertRaises(IndexError):
op.attr.something
op.attr.something = aidge_core.DynamicAttributes()
......
@@ -46,9 +46,9 @@ class test_recipes(unittest.TestCase):
def test_fuse_matmul_add(self):
matmul0 = aidge_core.MatMul(name="MatMul0")
add0 = aidge_core.Add(2, name="Add0")
add0 = aidge_core.Add(name="Add0")
matmul1 = aidge_core.MatMul(name="MatMul1")
add1 = aidge_core.Add(2, name="Add1")
add1 = aidge_core.Add(name="Add1")
w0 = aidge_core.Producer([1, 1], name="W0")
w0.add_child(matmul0, 0, 0)
b0 = aidge_core.Producer([1], name="B0")
......