Commit 8800bafa authored by Cyril Moineau

Working export with ImplSpecs.

parent 5fccd193
3 merge requests: !279 v0.4.0, !253 v0.4.0, !163 Export refactor
Pipeline #55229 failed
@@ -10,5 +10,6 @@ SPDX-License-Identifier: EPL-2.0
from aidge_core.aidge_core import * # import so generated by PyBind
import aidge_core.export_utils
import aidge_core.utils
from aidge_core.aidge_export_aidge import serialize_to_cpp
# TODO: commented out while developing the new register system
# from aidge_core.aidge_export_aidge import serialize_to_cpp
from ._version import *
@@ -55,15 +55,15 @@ class ExportLib(aidge_core.OperatorImpl):
def get_available_impl_specs(self):
if self.get_operator().type() in self._export_node_registry:
spec_vec = [i for i, _ in self._export_node_registry[self.get_operator().type()]]
# print(spec_vec)
# return aidge_core.spec_vec_to_set(spec_vec)
return spec_vec
else:
return []
def get_export_node(self, spec: aidge_core.aidge_core.ImplSpec):
for registered_spec, export_node in self._export_node_registry[self.get_operator().type()]:
print(f"{registered_spec} vs {spec}")
if registered_spec == spec:
return export_node
return None
@@ -85,7 +85,7 @@ class ExportLib(aidge_core.OperatorImpl):
else:
# Equivalent to aidge_core.register_ConvOp("ExportLibX", ExportLibX)
aidge_core.__getattribute__(register_func)(cls._name, cls)
aidge_core.Log.debug(f"Registring operator {type} to {cls._name}")
aidge_core.Log.info(f"Registring operator {type} to {cls._name}")
return Wrapper
return decorator
@@ -100,8 +100,8 @@ class ExportLib(aidge_core.OperatorImpl):
cls._export_node_registry[type] = []
cls._export_node_registry[type].append((spec, operator))
aidge_core.Log.debug(f"Registring metaop {type} to {cls._name}")
aidge_core.Log.info(f"Registring metaop {type} to {cls._name}")
aidge_core.register_MetaOperatorOp([cls._name, type], cls)
spec.attrs.add_attr("type", type) # MetaOperator specs need to verify the type
return Wrapper
return decorator
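For readability, a hedged usage sketch of this registration machinery follows. The decorator names (register, register_metaop), the ExportNode base class, the library name and the ImplSpec/IOSpec/dtype constructors are illustrative assumptions; only the registry behaviour itself comes from the diff above.

import aidge_core
from aidge_core.export_utils import ExportLib, ExportNode  # ExportNode base assumed

class ExportLibX(ExportLib):
    _name = "export_x"  # backend name that scheduler_export will set on each node

# Plain operator: stored as a (spec, export node) pair in _export_node_registry["Conv2D"]
# and registered roughly like aidge_core.register_Conv2DOp("export_x", ExportLibX).
@ExportLibX.register("Conv2D",
                     aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class ConvExportX(ExportNode):
    pass  # a real export node emits configuration and forward code

# Meta-operator: registered under ["export_x", "PaddedConv2D"]; the decorator also
# adds a "type" attribute to the spec so getBestMatch can verify it.
@ExportLibX.register_metaop("PaddedConv2D",
                            aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
class PaddedConvExportX(ExportNode):
    pass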
@@ -37,21 +37,28 @@ def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib =
list_forward_nodes = scheduler.get_static_scheduling()
# If export_lib is defined, use it
# else parse components in the platform
if export_lib is None:
raise ValueError("Export needs an ExportLib.")
# if export_lib is None:
# raise ValueError("Export needs an ExportLib.")
for node in list_forward_nodes:
node.get_operator().set_backend(export_lib._name)
if export_lib is not None:
aidge_core.Log.debug(f"Setting backend {export_lib._name} to {node.name()}[{node.type()}].")
node.get_operator().set_backend(export_lib._name)
elif not isinstance(op_impl, ExportLib):
raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an exportable backend ({op_impl}).")
is_input = node in graphview.get_input_nodes()
is_output = node in graphview.get_output_nodes()
op_impl = node.get_operator().get_impl()
if op_impl is None:
raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an implementation.")
required_specs = op_impl.get_required_spec()
specs = op_impl.get_best_match(required_specs)
export_node = op_impl.get_export_node(specs)
if export_node is None:
raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].")
op = export_node(
node, mem_info[node], is_input, is_output)
if op is None:
raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].")
# For configuration files
list_configs += op.export(dnn_folder)
# For forward file
@@ -108,6 +115,8 @@ def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib =
outputs_dtype=outputs_dtype,
outputs_size=outputs_size
)
# Copy all static files in the export
for source, destination in export_lib.static_files.items():
copy_file(source, str(export_folder / destination))
if export_lib is not None:
# Copy all static files in the export
for source, destination in export_lib.static_files.items():
copy_file(source, str(export_folder / destination))
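A hedged end-to-end sketch of calling this refactored entry point with the hypothetical ExportLibX from the earlier sketch. The SequentialScheduler usage and the absence of extra keyword arguments are assumptions; dimension and data-type setup is elided.

import aidge_core
from aidge_core.export_utils import scheduler_export
# ExportLibX: hypothetical export library from the earlier sketch.

model = aidge_core.sequential([aidge_core.Conv2D(2, 2, [1, 1], name="Conv0")])
# ... forward_dims / dtype setup elided ...
scheduler = aidge_core.SequentialScheduler(model)
scheduler.generate_scheduling()

# With export_lib given, every scheduled node is switched to the "export_x" backend,
# its implementation's best-matching ImplSpec is resolved, the registered export
# node is instantiated, and configuration plus forward files are written.
scheduler_export(scheduler, "generated_export", export_lib=ExportLibX)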
@@ -49,7 +49,7 @@ class test_OperatorImpl(unittest.TestCase):
"""Test registering an implementation
"""
global GLOBAL_CPT
aidge_core.register_ConvOp2D("cpu", testImpl)
aidge_core.register_Conv2DOp("cpu", testImpl)
self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
conv.get_operator().set_backend("cpu")
@@ -61,9 +61,9 @@ class test_OperatorImpl(unittest.TestCase):
"""Test registering an implementation
"""
global GLOBAL_CPT
aidge_core.register_ConvOp2D("cpu", testImpl)
aidge_core.register_Conv2DOp("cpu", testImpl)
aidge_core.register_ProducerOp("cpu", testImpl)
self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
self.assertTrue("cpu" in aidge_core.get_keys_Conv2DOp())
conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
model = aidge_core.sequential([conv])
model.set_backend("cpu")
......
@@ -126,7 +126,7 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &
MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, ceil_mode)
});
return MetaOperator("PaddedMaxPooling", graph, name);
return MetaOperator(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph, name);
}
template <std::array<DimSize_t, 1>::size_type DIM>
@@ -140,7 +140,7 @@ inline std::shared_ptr<MetaOperator_Op> PaddedMaxPooling_Op(const std::array<Dim
MaxPooling(kernel_dims, "", stride_dims, ceil_mode)
});
return std::make_shared<MetaOperator_Op>("PaddedMaxPooling", graph);
return std::make_shared<MetaOperator_Op>(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph);
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
......
@@ -85,6 +85,9 @@ void init_OperatorImpl(py::module& m){
.def("__repr__", [](ImplSpec self){
return fmt::format("{}\n", self);
})
.def_readwrite("inputs", &ImplSpec::inputs)
.def_readwrite("outputs", &ImplSpec::outputs)
.def_readwrite("attrs", &ImplSpec::attrs)
;
py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr())
......
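The three new def_readwrite bindings expose an ImplSpec's fields to Python. A small hedged sketch of what that enables, assuming a "cpu" implementation is registered for Conv2D (e.g. by aidge_backend_cpu or a test impl as in the unit tests above):

import aidge_core

conv = aidge_core.Conv2D(2, 2, [1, 1], name="Conv0")
conv.get_operator().set_backend("cpu")    # assumes a registered "cpu" impl
impl = conv.get_operator().get_impl()

required = impl.get_required_spec()
print(required.inputs, required.outputs)  # IOSpec lists, now readable from Python
print(required.attrs)                     # attribute set, read-write as well
print(impl.get_best_match(required))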
@@ -124,10 +124,12 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
name = attrName.substr(0, (qualifierPos - attrName.begin()));
qualifier = attrName.substr((qualifierPos - attrName.begin())+1);
}
fmt::println("{} : {}", name, qualifier);
const bool mandatory = (qualifier == "!");
if (mandatory) {
// Required attribute:
if (!spec.attrs.hasAttr(name)) {
Log::debug("Could not find mandatory attribute {} value {}.", name);
// Missing attribute
match = false;
break;
@@ -135,6 +137,7 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
else if (requiredSpecs.attrs.getAny(attrName) < spec.attrs.getAny(name)
|| spec.attrs.getAny(name) < requiredSpecs.attrs.getAny(attrName))
{
Log::debug("Attribute ({}) value mismatch {} != {}.", name, requiredSpecs.attrs.getAttr<std::string>(attrName), spec.attrs.getAttr<std::string>(name));
// Attribute value mismatch
match = false;
break;
......
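To make the matching rule in this hunk easier to follow, a standalone Python sketch (plain dicts rather than ImplSpec attributes, with the mandatory flag passed explicitly instead of the "!" qualifier parsed above; purely illustrative):

def attrs_match(required: dict, candidate: dict) -> bool:
    """Mirror of the mandatory-attribute loop in OperatorImpl::getBestMatch.
    required maps name -> (value, mandatory); mandatory attributes must exist
    in candidate and compare equal (the C++ tests equality with two '<')."""
    for name, (value, mandatory) in required.items():
        if not mandatory:
            continue  # optional attributes are handled outside the lines shown
        if name not in candidate:
            return False  # missing mandatory attribute
        if value < candidate[name] or candidate[name] < value:
            return False  # attribute value mismatch
    return True

assert attrs_match({"type": ("PaddedConv2D", True)}, {"type": "PaddedConv2D"})
assert not attrs_match({"type": ("PaddedConv2D", True)}, {"type": "PaddedConv1D"})
assert not attrs_match({"type": ("PaddedConv2D", True)}, {})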
@@ -77,6 +77,7 @@ std::string Aidge::MetaOperator_Op::backend() const noexcept {
}
void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
fmt::println("Setting backend {} for {}", name, type());
if (Registrar<MetaOperator_Op>::exists({name, type()})) {
// A custom implementation exists for this meta operator
mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
@@ -249,4 +250,4 @@ std::shared_ptr<Aidge::Node> Aidge::MetaOperator(const char *type,
auto node = std::make_shared<Node>(op, name);
op->setUpperNode(node);
return node;
}
\ No newline at end of file
}
@@ -41,7 +41,7 @@ std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_
AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
});
return MetaOperator("PaddedAvgPooling", graph, name);
return MetaOperator(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph, name);
}
template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
@@ -75,7 +75,7 @@ inline std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<Dim
AvgPooling(kernel_dims, "", stride_dims)
});
return std::make_shared<MetaOperator_Op>("PaddedAvgPooling", graph);
return std::make_shared<MetaOperator_Op>(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph);
}
template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
......
@@ -43,7 +43,7 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConv(Aidge::DimSize_t in_channels,
Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "")
});
auto metaOpNode = MetaOperator("PaddedConv", graph, name);
auto metaOpNode = MetaOperator(("PaddedConv" + std::to_string(DIM) + "D").c_str(), graph, name);
addProducer(metaOpNode, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
if (!no_bias) {
addProducer(metaOpNode, 2, {out_channels}, "b");
@@ -63,7 +63,7 @@ std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op(
auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
return std::make_shared<MetaOperator_Op>(("PaddedConv" + std::to_string(DIM) + "D").c_str(), Sequential({pad, conv}));
}
template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&);
template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
......
@@ -40,7 +40,7 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(const Aidge::DimSize_t n
Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv_depth_wise" : "")
});
auto metaOpNode = MetaOperator("PaddedConvDepthWise", graph, name);
auto metaOpNode = MetaOperator(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), graph, name);
addProducer(metaOpNode, 1, append(nb_channels, append(Aidge::DimSize_t(1), kernel_dims)), "w");
if (!no_bias) {
addProducer(metaOpNode, 2, {nb_channels}, "b");
@@ -61,7 +61,7 @@ std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op(
auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
return std::make_shared<MetaOperator_Op>(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), Sequential({pad, conv}));
}
template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&);
template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
......
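Taken together, these renames mean the padded meta-operators now carry the spatial dimension in their type string, which is the type that register_metaop-style registrations must match. A hedged check; the PaddedConv2D Python factory and its argument order are assumptions:

import aidge_core

# 3 input channels, 16 output channels, 3x3 kernel; name is optional.
node = aidge_core.PaddedConv2D(3, 16, [3, 3], name="conv0")
assert node.type() == "PaddedConv2D"  # was "PaddedConv" before this commit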