From 8800bafab730da5d17d967fae5c3d61fcea8f11c Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Mon, 23 Sep 2024 06:17:05 +0000
Subject: [PATCH] Working export with ImplSpecs.

---
 aidge_core/__init__.py                        |  3 ++-
 aidge_core/export_utils/export_registry.py    | 10 ++++----
 aidge_core/export_utils/scheduler_export.py   | 25 +++++++++++++------
 aidge_core/unit_tests/test_impl.py            |  6 ++---
 include/aidge/operator/MetaOperatorDefs.hpp   |  4 +--
 .../backend/pybind_OperatorImpl.cpp           |  3 +++
 src/backend/OperatorImpl.cpp                  |  3 +++
 src/operator/MetaOperator.cpp                 |  3 ++-
 .../MetaOperatorDefs/PaddedAvgPooling.cpp     |  4 +--
 src/operator/MetaOperatorDefs/PaddedConv.cpp  |  4 +--
 .../MetaOperatorDefs/PaddedConvDepthWise.cpp  |  4 +--
 11 files changed, 43 insertions(+), 26 deletions(-)

diff --git a/aidge_core/__init__.py b/aidge_core/__init__.py
index 8544c5647..56c19a5fa 100644
--- a/aidge_core/__init__.py
+++ b/aidge_core/__init__.py
@@ -10,5 +10,6 @@ SPDX-License-Identifier: EPL-2.0
 from aidge_core.aidge_core import * # import so generated by PyBind
 import aidge_core.export_utils
 import aidge_core.utils
-from aidge_core.aidge_export_aidge import serialize_to_cpp
+# TODO: Commented for dev the new register system
+# from aidge_core.aidge_export_aidge import serialize_to_cpp
 from ._version import *
diff --git a/aidge_core/export_utils/export_registry.py b/aidge_core/export_utils/export_registry.py
index 66487a68f..fffa86a56 100644
--- a/aidge_core/export_utils/export_registry.py
+++ b/aidge_core/export_utils/export_registry.py
@@ -55,15 +55,15 @@ class ExportLib(aidge_core.OperatorImpl):
     def get_available_impl_specs(self):
         if self.get_operator().type() in self._export_node_registry:
             spec_vec = [i for i, _ in self._export_node_registry[self.get_operator().type()]]
-            # print(spec_vec)
-            # return aidge_core.spec_vec_to_set(spec_vec)
             return spec_vec
         else:
             return []
 
     def get_export_node(self, spec: aidge_core.aidge_core.ImplSpec):
         for registered_spec, export_node in self._export_node_registry[self.get_operator().type()]:
+            aidge_core.Log.debug(f"{registered_spec} vs {spec}")
             if registered_spec == spec:
+
                 return export_node
         return None
 
@@ -85,7 +85,7 @@ class ExportLib(aidge_core.OperatorImpl):
             else:
                 # Equivalent to aidge_core.register_ConvOp("ExportLibX", ExportLibX)
                 aidge_core.__getattribute__(register_func)(cls._name, cls)
-                aidge_core.Log.debug(f"Registring operator {type} to {cls._name}")
+                aidge_core.Log.info(f"Registering operator {type} to {cls._name}")
             return Wrapper
         return decorator
 
@@ -100,8 +100,8 @@ class ExportLib(aidge_core.OperatorImpl):
                 cls._export_node_registry[type] = []
 
             cls._export_node_registry[type].append((spec, operator))
-            aidge_core.Log.debug(f"Registring metaop {type} to {cls._name}")
+            aidge_core.Log.info(f"Registering metaop {type} to {cls._name}")
             aidge_core.register_MetaOperatorOp([cls._name, type], cls)
-
+            spec.attrs.add_attr("type", type) # MetaOperator specs need to verify the type
             return Wrapper
         return decorator
diff --git a/aidge_core/export_utils/scheduler_export.py b/aidge_core/export_utils/scheduler_export.py
index 2a7db393f..a109189cf 100644
--- a/aidge_core/export_utils/scheduler_export.py
+++ b/aidge_core/export_utils/scheduler_export.py
@@ -37,21 +37,28 @@ def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib =
         list_forward_nodes = scheduler.get_static_scheduling()
         # If exportLib define use it
         # else parse component in platform
-        if export_lib is None:
-            raise ValueError("Export need an ExportLib.")
+        # if export_lib is None:
+        #     raise ValueError("Export need an ExportLib.")
         for node in list_forward_nodes:
-            node.get_operator().set_backend(export_lib._name)
+            if export_lib is not None:
+                aidge_core.Log.debug(f"Setting backend {export_lib._name} to {node.name()}[{node.type()}].")
+                node.get_operator().set_backend(export_lib._name)
+            elif not isinstance(node.get_operator().get_impl(), ExportLib):
+                raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an exportable backend ({node.get_operator().get_impl()}).")
+
             is_input = node in graphview.get_input_nodes()
             is_output = node in graphview.get_output_nodes()
 
             op_impl = node.get_operator().get_impl()
+            if op_impl is None:
+                raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an implementation.")
             required_specs = op_impl.get_required_spec()
             specs = op_impl.get_best_match(required_specs)
             export_node = op_impl.get_export_node(specs)
+            if export_node is None:
+                raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].")
             op = export_node(
                 node, mem_info[node], is_input, is_output)
-            if op is None:
-                raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].")
             # For configuration files
             list_configs += op.export(dnn_folder)
             # For forward file
@@ -108,6 +115,8 @@ def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib =
             outputs_dtype=outputs_dtype,
             outputs_size=outputs_size
         )
-        # Copy all static files in the export
-        for source, destination in export_lib.static_files.items():
-            copy_file(source, str(export_folder / destination))
+
+        if export_lib is not None:
+            # Copy all static files in the export
+            for source, destination in export_lib.static_files.items():
+                copy_file(source, str(export_folder / destination))
diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py
index 26d60f2fb..6bfe122d2 100644
--- a/aidge_core/unit_tests/test_impl.py
+++ b/aidge_core/unit_tests/test_impl.py
@@ -49,7 +49,7 @@ class test_OperatorImpl(unittest.TestCase):
         """Test registering an implementation
         """
         global GLOBAL_CPT
-        aidge_core.register_ConvOp2D("cpu", testImpl)
+        aidge_core.register_Conv2DOp("cpu", testImpl)
-        self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
+        self.assertTrue("cpu" in aidge_core.get_keys_Conv2DOp())
         conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
         conv.get_operator().set_backend("cpu")
@@ -61,9 +61,9 @@ class test_OperatorImpl(unittest.TestCase):
         """Test registering an implementation
         """
         global GLOBAL_CPT
-        aidge_core.register_ConvOp2D("cpu", testImpl)
+        aidge_core.register_Conv2DOp("cpu", testImpl)
         aidge_core.register_ProducerOp("cpu", testImpl)
-        self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
+        self.assertTrue("cpu" in aidge_core.get_keys_Conv2DOp())
         conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
         model = aidge_core.sequential([conv])
         model.set_backend("cpu")
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index bc3348377..ac37d1d71 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -126,7 +126,7 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &
         MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, ceil_mode)
     });
 
-    return MetaOperator("PaddedMaxPooling", graph, name);
+    return MetaOperator(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph, name);
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
@@ -140,7 +140,7 @@ inline std::shared_ptr<MetaOperator_Op> PaddedMaxPooling_Op(const std::array<Dim
         MaxPooling(kernel_dims, "", stride_dims, ceil_mode)
     });
 
-    return std::make_shared<MetaOperator_Op>("PaddedMaxPooling", graph);
+    return std::make_shared<MetaOperator_Op>(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph);
 }
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp
index 0c5aac004..49e45ed7e 100644
--- a/python_binding/backend/pybind_OperatorImpl.cpp
+++ b/python_binding/backend/pybind_OperatorImpl.cpp
@@ -85,6 +85,9 @@ void init_OperatorImpl(py::module& m){
     .def("__repr__", [](ImplSpec self){
         return fmt::format("{}\n", self);
     })
+    .def_readwrite("inputs", &ImplSpec::inputs)
+    .def_readwrite("outputs", &ImplSpec::outputs)
+    .def_readwrite("attrs", &ImplSpec::attrs)
     ;
 
     py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr())
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index 876d3b2e9..598f8b798 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -124,10 +124,12 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
                 name = attrName.substr(0, (qualifierPos - attrName.begin()));
                 qualifier = attrName.substr((qualifierPos - attrName.begin())+1);
             }
+            Log::debug("{} : {}", name, qualifier);
             const bool mandatory = (qualifier == "!");
             if (mandatory) {
                 // Required attribute:
                 if (!spec.attrs.hasAttr(name)) {
+                    Log::debug("Could not find mandatory attribute {}.", name);
                     // Missing attribute
                     match = false;
                     break;
@@ -135,6 +137,7 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
                 else if (requiredSpecs.attrs.getAny(attrName) < spec.attrs.getAny(name)
                     || spec.attrs.getAny(name) < requiredSpecs.attrs.getAny(attrName))
                 {
+                    Log::debug("Attribute ({}) value mismatch.", name);
                     // Attribute value mismatch
                     match = false;
                     break;
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index e3acba9b4..96bdfd8e7 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -77,6 +77,7 @@ std::string Aidge::MetaOperator_Op::backend() const noexcept {
 }
 
 void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    Log::debug("Setting backend {} for {}", name, type());
     if (Registrar<MetaOperator_Op>::exists({name, type()})) {
         // A custom implementation exists for this meta operator
         mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
@@ -249,4 +250,4 @@ std::shared_ptr<Aidge::Node> Aidge::MetaOperator(const char *type,
     auto node = std::make_shared<Node>(op, name);
     op->setUpperNode(node);
     return node;
-}
\ No newline at end of file
+}
diff --git a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
index ef319ef38..34a32ded2 100644
--- a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
@@ -41,7 +41,7 @@ std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_
         AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
     });
 
-    return MetaOperator("PaddedAvgPooling", graph, name);
+    return MetaOperator(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph, name);
 }
 
 template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
@@ -75,7 +75,7 @@ inline std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<Dim
         AvgPooling(kernel_dims, "", stride_dims)
     });
 
-    return std::make_shared<MetaOperator_Op>("PaddedAvgPooling", graph);
+    return std::make_shared<MetaOperator_Op>(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph);
 }
 
 template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
diff --git a/src/operator/MetaOperatorDefs/PaddedConv.cpp b/src/operator/MetaOperatorDefs/PaddedConv.cpp
index 31b1c675e..ba163e04c 100644
--- a/src/operator/MetaOperatorDefs/PaddedConv.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedConv.cpp
@@ -43,7 +43,7 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConv(Aidge::DimSize_t in_channels,
         Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
         std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "")
     });
-    auto metaOpNode = MetaOperator("PaddedConv", graph, name);
+    auto metaOpNode = MetaOperator(("PaddedConv" + std::to_string(DIM) + "D").c_str(), graph, name);
     addProducer(metaOpNode, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
     if (!no_bias) {
         addProducer(metaOpNode, 2, {out_channels}, "b");
@@ -63,7 +63,7 @@ std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op(
     auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
     auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
 
-    return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
+    return std::make_shared<MetaOperator_Op>(("PaddedConv" + std::to_string(DIM) + "D").c_str(), Sequential({pad, conv}));
 }
 template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&);
 template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
diff --git a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
index 1c073b78a..c4c506c64 100644
--- a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
@@ -40,7 +40,7 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(const Aidge::DimSize_t n
         Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
         std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv_depth_wise" : "")
     });
-    auto metaOpNode = MetaOperator("PaddedConvDepthWise", graph, name);
+    auto metaOpNode = MetaOperator(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), graph, name);
     addProducer(metaOpNode, 1, append(nb_channels, append(Aidge::DimSize_t(1), kernel_dims)), "w");
     if (!no_bias) {
         addProducer(metaOpNode, 2, {nb_channels}, "b");
@@ -61,7 +61,7 @@ std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op(
     auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
     auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
 
-    return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
+    return std::make_shared<MetaOperator_Op>(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), Sequential({pad, conv}));
 }
 template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&);
 template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
-- 
GitLab