Skip to content
Snippets Groups Projects
Commit cda46ffd authored by Maxence Naud's avatar Maxence Naud
Browse files

Merge branch 'dev' into 'main'

v0.4.0

Closes #196 and #160

See merge request !279
parents 577e654a cc6b7704
No related branches found
No related tags found
1 merge request!279v0.4.0
Pipeline #63431 failed
Showing
with 515 additions and 55 deletions
#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H #ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
#define EXPORT_ATTRIBUTES_{{name|upper}}_H #define EXPORT_ATTRIBUTES_{{name|upper}}_H
#define _{{name|upper}}_IN_CHANNELS {{InChannels}} #define _{{name|upper}}_IN_CHANNELS {{in_chan[0]}}
#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}} #define _{{name|upper}}_OUT_CHANNELS {{out_chan[0]}}
{% for i in range(KernelDims|length) %} {% for i in range(kernel_dims|length) %}
#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}} #define _{{name|upper}}_KERNEL_{{i}} {{kernel_dims[i]}}
{%- endfor %} {%- endfor %}
{% for i in range(StrideDims|length) %} {% for i in range(stride_dims|length) %}
#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}} #define _{{name|upper}}_STRIDE_{{i}} {{stride_dims[i]}}
{%- endfor %} {%- endfor %}
{% for i in range(DilationDims|length) %} {% for i in range(dilation_dims|length) %}
#define _{{name|upper}}_DILATION_{{i}} {{DilationDims[i]}} #define _{{name|upper}}_DILATION_{{i}} {{dilation_dims[i]}}
{%- endfor %} {%- endfor %}
#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */ #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
{# Emits the C attribute header for a depthwise-convolution export node:
   channel count plus one KERNEL / STRIDE / DILATION macro per spatial
   dimension, include-guarded per node name. Comments use `-#}` so the
   rendered output is byte-identical. -#}
#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
#define EXPORT_ATTRIBUTES_{{name|upper}}_H
#define _{{name|upper}}_CHANNELS {{out_chan[0]}}
{# One macro per kernel dimension. -#}
{% for i in range(kernel_dims|length) %}
#define _{{name|upper}}_KERNEL_{{i}} {{kernel_dims[i]}}
{%- endfor %}
{# One macro per stride dimension. -#}
{% for i in range(stride_dims|length) %}
#define _{{name|upper}}_STRIDE_{{i}} {{stride_dims[i]}}
{%- endfor %}
{# One macro per dilation dimension. -#}
{% for i in range(dilation_dims|length) %}
#define _{{name|upper}}_DILATION_{{i}} {{dilation_dims[i]}}
{%- endfor %}
#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H #ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
#define EXPORT_ATTRIBUTES_{{name|upper}}_H #define EXPORT_ATTRIBUTES_{{name|upper}}_H
#define _{{name|upper}}_IN_CHANNELS {{InChannels}} #define _{{name|upper}}_IN_CHANNELS {{in_chan[0]}}
#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}} #define _{{name|upper}}_OUT_CHANNELS {{out_chan[0]}}
#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */ #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H #ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
#define EXPORT_ATTRIBUTES_{{name|upper}}_H #define EXPORT_ATTRIBUTES_{{name|upper}}_H
{% for i in range(KernelDims|length) %} {% for i in range(kernel_dims|length) %}
#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}} #define _{{name|upper}}_KERNEL_{{i}} {{kernel_dims[i]}}
{%- endfor %} {%- endfor %}
{% for i in range(StrideDims|length) %} {% for i in range(stride_dims|length) %}
#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}} #define _{{name|upper}}_STRIDE_{{i}} {{stride_dims[i]}}
{%- endfor %} {%- endfor %}
#define _{{name|upper}}_CEIL_MODE {{CeilMode|int}} #define _{{name|upper}}_CEIL_MODE {{ceil_mode|int}}
#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */ #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
{# Emits the C attribute header for a Pad export node: one BEGIN/END border
   macro pair per spatial dimension, plus border type and value.
   begin_end_borders is interleaved as [begin_0, end_0, begin_1, end_1, ...]
   (shown by the 2*i / 2*i+1 indexing below). -#}
#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
#define EXPORT_ATTRIBUTES_{{name|upper}}_H
{%- set half_length = (begin_end_borders|length / 2)|int -%}
{% for i in range(half_length) %}
#define _{{name|upper}}_BEGIN_BORDERS_{{i}} {{begin_end_borders[2*i]}}
#define _{{name|upper}}_END_BORDERS_{{i}} {{begin_end_borders[2*i+1]}}
{%- endfor %}
#define _{{name|upper}}_BORDER_TYPE {{border_type|int}}
#define _{{name|upper}}_BORDER_VALUE {{border_value}}
#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
/*** OPERATOR ATTRIBUTES & PARAMETERS ***/ /*** OPERATOR ATTRIBUTES & PARAMETERS ***/
{%- for header in headers %} {%- for header in headers %}
#include "{{ header }}" #include "{{ header | replace('include/', '') }}"
{%- endfor %} {%- endfor %}
/*** HEADER ***/ /*** HEADER ***/
......
{# NOTE: Trying a shorter notation like {%- for input in inputs if input[0] %} {# NOTE: Trying a shorter notation like {%- for input in inputs if input[0] %}
will mess up loop.index as the input set up at None will not increment ! #} will mess up loop.index as the input set up at None will not increment ! #}
{%- for input in inputs %} {%- for input_node, out_id in node.inputs() %}
{%- if input[0] %} {%- if input_node %}
{{input[0]}}->addChild({{name}}, {{input[1]}}, {{loop.index - 1}}); {# NOTE: loop.index begin at 1 #} {{input_node.name()}}->addChild({{name}}, {{out_id}}, {{loop.index - 1}}); {# NOTE: loop.index begin at 1 #}
{%- endif %} {%- endif %}
{%- endfor %} {%- endfor %}
{# Instantiates an Add node in the generated C++ graph builder. Output is
   indented 4 spaces to sit inside the generated function body. -#}
{% filter indent(width=4, first=False) %}
/*** {{name|upper}} ***/
std::shared_ptr<Aidge::Node> {{name}} =
Aidge::Add(
"{{name}}"
);
{# Wire this node's inputs, then register it in the graph. -#}
{% include "./_set_input.jinja" %}
graph->add({{name}});
{% endfilter %}
...@@ -5,18 +5,18 @@ std::shared_ptr<Aidge::Node> {{name}} = ...@@ -5,18 +5,18 @@ std::shared_ptr<Aidge::Node> {{name}} =
_{{name|upper}}_IN_CHANNELS, _{{name|upper}}_IN_CHANNELS,
_{{name|upper}}_OUT_CHANNELS, _{{name|upper}}_OUT_CHANNELS,
{ {
{%- for i in range(KernelDims|length) -%} {%- for i in range(kernel_dims|length) -%}
_{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%} _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
{%- endfor -%} {%- endfor -%}
}, },
"{{name}}", "{{name}}",
{ {
{%- for i in range(StrideDims|length) -%} {%- for i in range(stride_dims|length) -%}
_{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%} _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
{%- endfor -%} {%- endfor -%}
}, },
{ {
{%- for i in range(DilationDims|length) -%} {%- for i in range(dilation_dims|length) -%}
_{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%} _{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%}
{%- endfor -%} {%- endfor -%}
} }
......
{# Instantiates a ConvDepthWise node in the generated C++ graph builder,
   reading channel / kernel / stride / dilation values from the macros
   emitted by the matching attributes-header template. Output is indented
   4 spaces to sit inside the generated function body. -#}
{% filter indent(width=4, first=False) %}
/*** {{name|upper}} ***/
std::shared_ptr<Aidge::Node> {{name}} =
Aidge::ConvDepthWise(
_{{name|upper}}_CHANNELS,
{
{%- for i in range(kernel_dims|length) -%}
_{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
{%- endfor -%}
},
"{{name}}",
{
{%- for i in range(stride_dims|length) -%}
_{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
{%- endfor -%}
},
{
{%- for i in range(dilation_dims|length) -%}
_{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%}
{%- endfor -%}
}
);
{# Wire this node's inputs, then register it in the graph. -#}
{% include "./_set_input.jinja" %}
graph->add({{name}});
{% endfilter %}
...@@ -3,13 +3,13 @@ ...@@ -3,13 +3,13 @@
std::shared_ptr<Aidge::Node> {{name}} = std::shared_ptr<Aidge::Node> {{name}} =
Aidge::MaxPooling( Aidge::MaxPooling(
{ {
{%- for i in range(KernelDims|length) -%} {%- for i in range(kernel_dims|length) -%}
_{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%} _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
{%- endfor -%} {%- endfor -%}
}, },
"{{name}}", "{{name}}",
{ {
{%- for i in range(StrideDims|length) -%} {%- for i in range(stride_dims|length) -%}
_{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%} _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
{%- endfor -%} {%- endfor -%}
}, },
......
{# Instantiates a Pad<N> node (N = number of spatial dims) in the generated
   C++ graph builder. begin_end_borders macros come interleaved per dim; the
   integer border-type macro is cast back to Aidge::PadBorderType. -#}
{% filter indent(width=4, first=False) %}
/*** {{name|upper}} ***/
{%- set half_length = (begin_end_borders|length / 2)|int -%}
std::shared_ptr<Aidge::Node> {{name}} =
Aidge::Pad<{{half_length}}>(
{
{%- for i in range(half_length) -%}
_{{name|upper}}_BEGIN_BORDERS_{{i}}, _{{name|upper}}_END_BORDERS_{{i}} {%- if not loop.last %}, {% endif -%}
{%- endfor -%}
},
"{{name}}",
static_cast<Aidge::PadBorderType>(_{{name|upper}}_BORDER_TYPE),
_{{name|upper}}_BORDER_VALUE
);
{# Wire this node's inputs, then register it in the graph. -#}
{% include "./_set_input.jinja" %}
graph->add({{name}});
{% endfilter %}
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
#include <aidge/data/Tensor.hpp> #include <aidge/data/Tensor.hpp>
#include <memory> #include <memory>
std::shared_ptr<Aidge::Tensor> {{name}} = std::make_shared<Aidge::Tensor>(Aidge::Array{{dims|length}}D<{{data_t}}, {{ dims|join(", ") }}> { std::shared_ptr<Aidge::Tensor> {{tensor_name}} = std::make_shared<Aidge::Tensor>(Aidge::Array{{out_dims[0]|length}}D<{{out_cdtype[0]}}, {{ out_dims[0]|join(", ") }}> {
{{ values }} {{ values }}
}); });
......
from .operator_registry import *
def parse_node_input(node_inputs: list) -> list: def parse_node_input(node_inputs: list) -> list:
"""Parse node intputs in order to adapt the list for Jinja. """Parse node intputs in order to adapt the list for Jinja.
......
# Global mapping from operator-type key to the registered operator callable.
OPERATORS_REGISTRY = {}

def operator_register(*args):
    """Decorator factory that registers an operator under one or more keys.

    :param args: Registry keys (operator type names) to associate with the
        decorated operator.
    :return: A decorator that stores its target in ``OPERATORS_REGISTRY``
        and returns a transparent wrapper around it.
    """
    import functools  # local import: keeps this module dependency-light
    keys = list(args)
    def decorator(operator):
        # functools.wraps preserves __name__/__doc__ on the returned wrapper
        # (the original wrapper silently stripped the operator's metadata).
        @functools.wraps(operator)
        def wrapper(*w_args, **w_kwargs):
            return operator(*w_args, **w_kwargs)
        # NOTE: the registry stores the bare operator, not the wrapper,
        # matching the original behavior.
        for key in keys:
            OPERATORS_REGISTRY[key] = operator
        return wrapper
    return decorator

def supported_operators():
    """Return the list of operator keys currently registered."""
    return list(OPERATORS_REGISTRY.keys())
from .node_export import * from .node_export import ExportNode, ExportNodeCpp
from .code_generation import * from .code_generation import generate_file, generate_str, copy_file
from .export_registry import ExportLib
from .scheduler_export import scheduler_export
from .tensor_export import tensor_to_c, generate_input_file
from .generate_main import generate_main_cpp, generate_main_compare_cpp
from pathlib import Path from pathlib import Path
from jinja2 import Environment, FileSystemLoader from jinja2 import Environment, FileSystemLoader, StrictUndefined
from typing import Union from typing import Union
import os
import shutil
def generate_file(file_path: Union[Path, str], template_path: Union[Path, str], **kwargs) -> None: def generate_file(file_path: Union[Path, str], template_path: Union[Path, str], **kwargs) -> None:
"""Generate a file at `file_path` using the jinja template located at `file_path`. """Generate a file at `file_path` using the jinja template located at `file_path`.
...@@ -18,16 +19,14 @@ def generate_file(file_path: Union[Path, str], template_path: Union[Path, str], ...@@ -18,16 +19,14 @@ def generate_file(file_path: Union[Path, str], template_path: Union[Path, str],
file_path = Path(file_path) file_path = Path(file_path)
if isinstance(template_path, str): if isinstance(template_path, str):
template_path = Path(template_path) template_path = Path(template_path)
if not template_path.exists():
raise ValueError(f"Path to template {template_path} is not valid !")
# Make dir # Make dir
file_path.parent.mkdir(parents=True, exist_ok=True) file_path.parent.mkdir(parents=True, exist_ok=True)
# Select template
template = Environment(loader=FileSystemLoader(
template_path.parent)).get_template(template_path.name)
# Generate file # Generate file
with open(file_path, mode="w", encoding="utf-8") as file: with open(file_path, mode="w", encoding="utf-8") as file:
file.write(template.render(kwargs)) file.write(generate_str(template_path, **kwargs))
def generate_str(template_path: Union[Path, str], **kwargs) -> str: def generate_str(template_path: Union[Path, str], **kwargs) -> str:
...@@ -43,4 +42,12 @@ def generate_str(template_path: Union[Path, str], **kwargs) -> str: ...@@ -43,4 +42,12 @@ def generate_str(template_path: Union[Path, str], **kwargs) -> str:
if isinstance(template_path, str): if isinstance(template_path, str):
template_path = Path(template_path) template_path = Path(template_path)
return Environment(loader=FileSystemLoader( return Environment(loader=FileSystemLoader(
template_path.parent)).get_template(template_path.name).render(kwargs) template_path.parent), undefined=StrictUndefined, keep_trailing_newline=True).get_template(template_path.name).render(kwargs)
def copy_file(filename, dst_folder):
    """Copy ``filename`` into ``dst_folder``, creating the folder if needed.

    :param filename: Path of the file to copy.
    :param dst_folder: Destination directory; created (with parents) when it
        does not already exist.
    """
    # exist_ok=True avoids the check-then-create race of the previous
    # ``if not exists: makedirs`` sequence and also creates parent dirs.
    os.makedirs(dst_folder, exist_ok=True)
    shutil.copy(filename, dst_folder)
import numpy as np
import aidge_core
# Mapping from Aidge dtypes to the corresponding C/C++ scalar type name used
# in generated code. Note: float16 maps to "half_float::half" (a C++ library
# type), not a builtin C type.
datatype_converter_aide2c = {
    aidge_core.dtype.float64 : "double",
    aidge_core.dtype.float32 : "float",
    aidge_core.dtype.float16 : "half_float::half",
    aidge_core.dtype.int8    : "int8_t",
    aidge_core.dtype.int16   : "int16_t",
    aidge_core.dtype.int32   : "int32_t",
    aidge_core.dtype.int64   : "int64_t",
    aidge_core.dtype.uint8   : "uint8_t",
    aidge_core.dtype.uint16  : "uint16_t",
    aidge_core.dtype.uint32  : "uint32_t",
    aidge_core.dtype.uint64  : "uint64_t"
}
def aidge2c(datatype):
    """Convert a aidge datatype to C type

    :param datatype: Aidge datatype to convert
    :type datatype: :py:object:`aidge_core.DataType`
    :return: A string representing the C type
    :rtype: string
    :raises ValueError: If the datatype has no C equivalent in the table.
    """
    # Guard-clause form: reject unknown dtypes up front, then look up.
    if datatype not in datatype_converter_aide2c:
        raise ValueError(f"Unsupported {datatype} aidge datatype")
    return datatype_converter_aide2c[datatype]
from typing import Dict, List
import aidge_core
from aidge_core.export_utils import ExportNode
class classproperty:
    """Descriptor exposing a method as a read-only class-level property.

    Combines the effect of ``@property`` and ``@classmethod``, which cannot
    be stacked together before Python 3.12.

    See discussion: https://discuss.python.org/t/add-a-supported-read-only-classproperty-decorator-in-the-stdlib/18090/12
    """

    def __init__(self, fget):
        """
        :param fget: Function to be wrapped as a class property.
        :type fget: Callable
        """
        self.fget = fget

    def __get__(self, instance, owner):
        # The owning class (never the instance) is handed to the getter, so
        # the value reads the same from the class or from an instance.
        getter = self.fget
        return getter(owner)
class ExportLib(aidge_core.OperatorImpl):
    """Aidge export library that manages a registry for operators and static files.

    This class provides a structure for registering different operator types and
    export nodes, facilitating access and management of these elements.

    :ivar _name: The name of the export library, used for namespacing.
    :ivar static_files: A dictionary mapping paths of static files to their target locations relative to the export root.
    """
    # PUBLIC
    # Lib name useful ?
    # Help define namespace
    _name: str = None
    # key: Path where static file is
    # Value: Path where to copy the file relative to the export root
    static_files: Dict[str, str] = {}
    # Main memory section
    mem_section = None
    # Custom forward generation jinja file
    forward_template: str = None
    forward_header_template: str = None
    # PRIVATE
    # Registry of exportNode, class level dictionary, shared across all ExportLib
    _cls_export_node_registry = {}

    def __init__(self, operator):
        super(ExportLib, self).__init__(operator, self._name)
        if self._name is None:
            # Bug fix: message was missing the f-prefix, so the class name was
            # printed literally as "{self.__class__.__name__}".
            raise ValueError(f"ExportLib {self.__class__.__name__} does not define the attribute ``_name``.")

        # TODO: This is a patch, setBackend method is used to set an ExportLib as an Implementation.
        # But it also set the backend of the Tensor and we don't define a backend for Tensor.
        # The following code block associate to the export backend the "cpu" implementation.
        # This is tracked by the issue :
        # https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/issues/178
        # Same dtype set as before, expressed as a loop instead of ten copies.
        for aidge_dtype in (aidge_core.dtype.float32,
                            aidge_core.dtype.float16,
                            aidge_core.dtype.int8,
                            aidge_core.dtype.int16,
                            aidge_core.dtype.int32,
                            aidge_core.dtype.int64,
                            aidge_core.dtype.uint8,
                            aidge_core.dtype.uint16,
                            aidge_core.dtype.uint32,
                            aidge_core.dtype.uint64):
            aidge_core.register_Tensor([self._name, aidge_dtype],
                                       aidge_core.get_key_value_Tensor(["cpu", aidge_dtype]))

    @classproperty
    def _export_node_registry(cls) -> Dict[str, List['ExportNode']]:
        """Define as a class property to access the registry at class level while keeping it at instance level.

        :return: The export node registry specific to the class
        :rtype: Dict[str, List[ExportNode]]
        """
        return cls._cls_export_node_registry.setdefault(cls, {})

    def get_available_impl_specs(self) -> List[aidge_core.ImplSpec]:
        """Override the virtual OperatorImpl method, in order to provide available
        implementation specifications.

        :return: List of implementation specification available for the type of operator.
        :rtype: List[aidge_core.ImplSpec]
        """
        op_type = self.get_operator().type()
        if op_type in self._export_node_registry:
            return [spec for spec, _ in self._export_node_registry[op_type]]
        return []

    def get_export_node(self, spec: aidge_core.ImplSpec) -> ExportNode:
        """Given an :py:class:`aidge_core.ImplSpec`, return the ExportNode that is the closest match.

        :param spec: Implementation specification to match
        :type spec: :py:class:`aidge_core.ImplSpec`
        :return: The class ExportNode that is the closest match, or ``None`` when no registered spec equals ``spec``.
        :rtype: ExportNode
        """
        for registered_spec, export_node in self._export_node_registry[self.get_operator().type()]:
            if registered_spec == spec:
                return export_node
        return None

    @staticmethod
    def _as_type_list(op_type) -> List[str]:
        """Normalize the ``op_type`` argument of the register decorators.

        :param op_type: A single operator type name or a list of them.
        :type op_type: Union[str, List[str]]
        :return: The operator type names as a list.
        :rtype: List[str]
        :raises TypeError: If ``op_type`` is neither a str nor a list.
        """
        if isinstance(op_type, list):
            return op_type
        if isinstance(op_type, str):
            return [op_type]
        # Bug fix: the original messages were not f-strings and formatted
        # ``type(type)`` (the builtin) instead of the argument's type.
        raise TypeError(f"Argument 'op_type' of register method should be of type 'List[str]' or 'str', got {type(op_type)}")

    @classmethod
    def register(cls, op_type, spec):
        """Decorator to register an operator implementation for a specified operator type.

        Registers an operator under a given operator type and specification,
        adding it to the export library registry. This method supports both
        single operator types (str) and lists of types (List[str]).

        :param op_type: The operator type(s) to register.
        :type op_type: Union[str, List[str]]
        :param spec: Implementation specification for the operator.
        :type spec: :py:class:``aidge_core.ImplSpec``
        :return: A wrapper class that initializes the registered operator.
        :rtype: Callable
        """
        def decorator(operator):
            # NOTE(review): Wrapper.__init__ returns a value, which would raise
            # TypeError if Wrapper were ever instantiated; preserved as-is from
            # the original API so the returned object's type is unchanged.
            class Wrapper(operator):
                def __init__(self, *args, **kwargs):
                    return operator(*args, **kwargs)
            for type_name in cls._as_type_list(op_type):
                cls._export_node_registry.setdefault(type_name, []).append((spec, operator))
                register_func: str = f"register_{type_name}Op"
                # If operator is not defined, then it means we try to register a MetaOperator
                if register_func not in dir(aidge_core):
                    raise ValueError(f"Operator of type: {type_name} is not declared as registrable!\nHint: If you try to register a MetaOperator use register_metaop instead.")
                # Equivalent to aidge_core.register_ConvOp("ExportLibX", ExportLibX)
                aidge_core.__getattribute__(register_func)(cls._name, cls)
            return Wrapper
        return decorator

    @classmethod
    def register_metaop(cls, op_type, spec):
        """Decorator to register a MetaOperator with the export library.

        Registers a MetaOperator under a given operator type and specification. This decorator
        is intended for operator types that are grouped as meta operators.

        :param op_type: Operator type(s) to register as a ``MetaOperator``.
        :type op_type: Union[str, List[str]]
        :param spec: Implementation specification for the MetaOperator.
        :type spec: aidge_core.ImplSpec
        :return: A wrapper class that initializes the registered MetaOperator.
        :rtype: Callable
        """
        def decorator(operator):
            class Wrapper(operator):
                def __init__(self, *args, **kwargs):
                    return operator(*args, **kwargs)
            for type_name in cls._as_type_list(op_type):
                cls._export_node_registry.setdefault(type_name, []).append((spec, operator))
                aidge_core.register_MetaOperatorOp([cls._name, type_name], cls)
                spec.attrs.add_attr("type", type_name)  # MetaOperator specs need to verify the type
            return Wrapper
        return decorator

    @classmethod
    def register_generic(cls, op_type, spec):
        """Decorator to register a GenericOperator with the export library.

        Registers a GenericOperator under a given operator type and specification. This decorator
        is intended for operator types that are grouped as meta operators.

        :param op_type: Operator type(s) to register as a ``GenericOperator``.
        :type op_type: Union[str, List[str]]
        :param spec: Implementation specification for the GenericOperator.
        :type spec: aidge_core.ImplSpec
        :return: A wrapper class that initializes the registered GenericOperator.
        :rtype: Callable
        """
        def decorator(operator):
            class Wrapper(operator):
                def __init__(self, *args, **kwargs):
                    return operator(*args, **kwargs)
            for type_name in cls._as_type_list(op_type):
                cls._export_node_registry.setdefault(type_name, []).append((spec, operator))
                aidge_core.register_GenericOperatorOp([cls._name, type_name], cls)
                spec.attrs.add_attr("type", type_name)  # GenericOperator specs need to verify the type
            return Wrapper
        return decorator
import aidge_core
from pathlib import Path
from aidge_core.export_utils import generate_file, data_conversion
def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView, inputs_tensor=None) -> None:
"""
Generate a C++ file to manage the forward pass of a model using the given graph structure.
This function extracts details from the :py:class:`aidge_core.graph_view` object, including input and output node names, data types,
and tensor sizes. It uses this data to populate a C++ file template (`main.jinja`), creating a file (`main.cpp`)
that call the `model_forward` function, which handles data flow and processing for the exported model.
This function also generate files containing input tensor if they have been set.
:param export_folder: Path to the folder where the generated C++ file (`main.cpp`) will be saved.
:type export_folder: str
:param graph_view: An instance of :py:class:`aidge_core.graph_view`, providing access to nodes and
ordered input/output data within the computational graph.
:type graph_view: aidge_core.graph_view
:param inputs_tensor: **For future** argument to provide tensor to use in the main function, not implemented yet!
:type inputs_tensor: None
:raises RuntimeError: If there is an inconsistency in the output arguments (names, data types, sizes),
indicating an internal bug in the graph representation.
"""
outputs_name: list[str] = []
outputs_dtype: list[str] = []
outputs_size: list[int] = []
inputs_name: list[str] = []
gv_inputs: list[tuple[aidge_core.Node, int]] = graph_view.get_ordered_inputs()
gv_outputs: list[tuple[aidge_core.Node, int]] = graph_view.get_ordered_outputs()
for in_node, in_idx in gv_inputs:
in_node_input, in_node_input_idx = in_node.input(in_idx)
in_name = f"{in_node.name()}_input_{in_idx}" if in_node_input is None else f"{in_node_input.name()}_output_{in_node_input_idx}"
inputs_name.append(in_name)
input_tensor = in_node.get_operator().get_input(in_idx)
if input_tensor is None or input_tensor.undefined() or not input_tensor.has_impl():
if inputs_tensor is not None:
aidge_core.Log.notice("No support for inputs_tensor argument yet.")
aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functionnal after code generation.")
else:
aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functionnal after code generation.")
else:
aidge_core.export_utils.generate_input_file(export_folder=export_folder, array_name=in_name, tensor=input_tensor)
for out_node, out_id in gv_outputs:
outputs_name.append(f"{out_node.name()}_output_{out_id}")
out_tensor = out_node.get_operator().get_output(out_id)
outputs_dtype.append(data_conversion.aidge2c(out_tensor.dtype()))
outputs_size.append(out_tensor.size())
if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")
ROOT = Path(__file__).resolve().parents[0]
generate_file(
str(Path(export_folder) / "main.cpp"),
str(ROOT / "templates" / "main.jinja"),
func_name="model_forward",
inputs_name=inputs_name,
outputs_name=outputs_name,
outputs_dtype=outputs_dtype,
outputs_size=outputs_size
)
def generate_main_compare_cpp(export_folder: str, graph_view: aidge_core.GraphView, inputs_tensor=None) -> None:
    """
    Generate a C++ file to manage the forward pass and compare the output of a model.

    This function extracts details from the :py:class:`aidge_core.graph_view` object, including input and output node names, data types,
    and tensor sizes. It uses this data to populate a C++ file template (`main.jinja`), creating a file (`main.cpp`)
    that call the `model_forward` function, which handles data flow and processing for the exported model.

    This function also generate files containing input tensor if they have been set.

    :param export_folder: Path to the folder where the generated C++ file (`main.cpp`) will be saved.
    :type export_folder: str
    :param graph_view: An instance of :py:class:`aidge_core.graph_view`, providing access to nodes and
                       ordered input/output data within the computational graph.
    :type graph_view: aidge_core.graph_view
    :param inputs_tensor: **For future** argument to provide tensor to use in the main function, not implemented yet!
    :type inputs_tensor: None
    :raises RuntimeError: If there is an inconsistency in the output arguments (names, data types, sizes),
                          indicating an internal bug in the graph representation.
    """
    outputs_name: list[str] = []
    outputs_dtype: list[str] = []
    outputs_size: list[int] = []
    inputs_name: list[str] = []
    gv_inputs: list[tuple[aidge_core.Node, int]] = graph_view.get_ordered_inputs()
    gv_outputs: list[tuple[aidge_core.Node, int]] = graph_view.get_ordered_outputs()

    for in_node, in_idx in gv_inputs:
        in_node_input, in_node_input_idx = in_node.input(in_idx)
        # Name the input after its producer when connected, after the node itself otherwise.
        in_name = f"{in_node.name()}_input_{in_idx}" if in_node_input is None else f"{in_node_input.name()}_output_{in_node_input_idx}"
        inputs_name.append(in_name)
        input_tensor = in_node.get_operator().get_input(in_idx)
        if input_tensor is None or input_tensor.undefined() or not input_tensor.has_impl():
            if inputs_tensor is not None:
                aidge_core.Log.notice("No support for inputs_tensor argument yet.")
            # This warning was duplicated verbatim in both branches of the
            # original if/else; emit it once unconditionally instead.
            aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functionnal after code generation.")
        else:
            aidge_core.export_utils.generate_input_file(export_folder=export_folder, array_name=in_name, tensor=input_tensor)

    for out_node, out_id in gv_outputs:
        out_name = f"{out_node.name()}_output_{out_id}"
        outputs_name.append(out_name)
        out_tensor = out_node.get_operator().get_output(out_id)
        outputs_dtype.append(data_conversion.aidge2c(out_tensor.dtype()))
        outputs_size.append(out_tensor.size())
        # NOTE(review): out_tensor.dtype() above would already have raised if
        # out_tensor were None, so the None arm of this check is effectively
        # dead; confirm the intended ordering with the graph representation.
        if out_tensor is None or out_tensor.undefined() or not out_tensor.has_impl():
            # Bug fix: the message previously said "No input tensor" although
            # it refers to an output tensor.
            aidge_core.Log.notice(f"No output tensor set for {out_name}, main generated will not be functionnal after code generation.")
        else:
            aidge_core.export_utils.generate_input_file(export_folder=export_folder, array_name=out_name+"_expected", tensor=out_tensor)

    if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
        raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")

    # Templates live next to this module.
    ROOT = Path(__file__).resolve().parent
    generate_file(
        str(Path(export_folder) / "main.cpp"),
        str(ROOT / "templates" / "main_compare.jinja"),
        func_name="model_forward",
        inputs_name=inputs_name,
        outputs_name=outputs_name,
        outputs_dtype=outputs_dtype,
        outputs_size=outputs_size
    )
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment