From cdff2b208b68b538444e37de6c8abb406586218d Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 27 Mar 2025 15:43:27 +0100
Subject: [PATCH 1/8] Added TopK operator

---
 include/aidge/operator/TopK.hpp | 117 ++++++++++++++++++++++++++++++++
 src/operator/TopK.cpp           | 102 ++++++++++++++++++++++++++++
 2 files changed, 219 insertions(+)
 create mode 100644 include/aidge/operator/TopK.hpp
 create mode 100644 src/operator/TopK.cpp
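
A rough usage sketch of the new operator (untested; the tensor setup and
variable names are illustrative, not part of this patch). With k carried by
the attribute, forwardDims() needs no data dependency on input#1:

    #include <memory>
    #include "aidge/data/Tensor.hpp"
    #include "aidge/operator/TopK.hpp"

    int main() {
        using namespace Aidge;
        auto x = std::make_shared<Tensor>();
        x->resize({4, 10});                  // batch of 4 rows, 10 values each
        auto node = TopK("topk1");
        auto op = std::static_pointer_cast<TopK_Op>(node->getOperator());
        op->k() = 5;                         // must be > 0 when input#1 is absent
        op->associateInput(0, x);
        op->forwardDims();                   // output#0 "values" resized to {4, 5}
        return 0;
    }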

diff --git a/include/aidge/operator/TopK.hpp b/include/aidge/operator/TopK.hpp
new file mode 100644
index 000000000..95e33dbb9
--- /dev/null
+++ b/include/aidge/operator/TopK.hpp
@@ -0,0 +1,117 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_TOPK_H_
+#define AIDGE_CORE_OPERATOR_TOPK_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+#define LIST_TOPK_ATTR(X) \
+    X(Axis, "axis", int64_t), \
+    X(Largest, "largest", bool), \
+    X(Sorted, "sorted", bool), \
+    X(K, "k", IOIndex_t)
+
+namespace Aidge {
+/**
+ * @enum TopKAttr
+ * @brief Attributes for the TopK operation.
+ *
+ * - Axis: Dimension on which to do the sort. Negative value means counting 
+ *         dimensions from the back. Accepted range is [-r, r-1] where r = rank(input).
+ * - Largest: Whether to return the top-K largest or smallest elements.
+ * - Sorted: Whether to return the elements in sorted order.
+ * - K: Positive value corresponding to the number of top elements to retrieve.
+ */
+enum class TopKAttr {
+    GENERATE_LIST_ATTR_ENUM(LIST_TOPK_ATTR)
+};
+} // namespace Aidge
+
+namespace {
+template <>
+struct EnumStrings<Aidge::TopKAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::TopKAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_TOPK_ATTR)
+};
+}
+
+namespace Aidge {
+
+class TopK_Op : public OperatorTensor,
+    public Registrable<TopK_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const TopK_Op&)>> {
+private:
+    using Attributes_ =
+        StaticAttributes<TopKAttr,
+                         GENERATE_LIST_ATTR_TYPE(LIST_TOPK_ATTR)>;
+    template <TopKAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    static const std::string Type;
+
+    TopK_Op(int64_t axis = -1,
+        bool largest = true,
+        bool sorted = true,
+        IOIndex_t k = 0);
+
+    /**
+     * @brief Copy-constructor.
+     * @param op TopK_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
+     */
+    TopK_Op(const TopK_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::TopK_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<TopK_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline int64_t& axis() { return mAttributes->template getAttr<TopKAttr::Axis>(); }
+    inline bool& largest() { return mAttributes->template getAttr<TopKAttr::Largest>(); }
+    inline bool& sorted() { return mAttributes->template getAttr<TopKAttr::Sorted>(); }
+    inline IOIndex_t& k() { return mAttributes->template getAttr<TopKAttr::K>(); }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"x", "k"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"values", "indices"};
+    }
+};
+
+std::shared_ptr<Node> TopK(const std::string& name = "");
+
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_TOPK_H_ */
diff --git a/src/operator/TopK.cpp b/src/operator/TopK.cpp
new file mode 100644
index 000000000..6f942a639
--- /dev/null
+++ b/src/operator/TopK.cpp
@@ -0,0 +1,102 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>    // std::size_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/TopK.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+const std::string Aidge::TopK_Op::Type = "TopK";
+
+TopK_Op::TopK_Op(
+    int64_t axis,
+    bool largest,
+    bool sorted,
+    IOIndex_t k)
+    : OperatorTensor(Type,
+                     {InputCategory::Data,
+                      InputCategory::OptionalData},
+                     2),
+      mAttributes(std::make_shared<Attributes_>(
+          attr<TopKAttr::Axis>(axis),
+          attr<TopKAttr::Largest>(largest),
+          attr<TopKAttr::Sorted>(sorted),
+          attr<TopKAttr::K>(k))) {}
+
+TopK_Op::TopK_Op(const TopK_Op& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(TopK_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+bool Aidge::TopK_Op::forwardDims(bool allowDataDependency) {
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute K
+        if (getInput(1)) {
+            if (k() > 0) {
+                Log::notice("TopK_Op: ignoring non-empty K attribute because input#1 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("TopK_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
+
+            std::shared_ptr<Tensor> fallback;
+            const auto& kTensor = getInput(1)->refCastFrom(fallback, NativeType_v<int64_t>, "cpu");
+
+            if (!kTensor.empty()) {
+                k() = kTensor.get<int64_t>(0);
+            }
+        }
+
+        AIDGE_ASSERT(k() > 0, "TopK_Op: k must be > 0; set the k attribute or provide input#1");
+
+        auto outDims = getInput(0)->dims();
+        const auto kAxis = (axis() >= 0) ? axis() : axis() + static_cast<std::int8_t>(outDims.size());
+        outDims[kAxis] = k();
+        mOutputs[0]->resize(outDims);
+        return true;
+    }
+
+    return false;
+}
+
+void Aidge::TopK_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(TopK_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::TopK_Op::getAvailableBackends() const {
+    return Registrar<TopK_Op>::getKeys();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Node> TopK(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<TopK_Op>(), name);
+}
+
+} // namespace Aidge
\ No newline at end of file
-- 
GitLab


From 570068b476fa0e7ee5b36f083b4f8584c9aba9a3 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 27 Mar 2025 19:02:12 +0100
Subject: [PATCH 2/8] Added binding for Fold and Unfold (fixed case issue with
 Expand)

---
 python_binding/operator/pybind_Expand.cpp |   4 +-
 python_binding/operator/pybind_Fold.cpp   | 107 ++++++++++++++++++++++
 python_binding/operator/pybind_Unfold.cpp |  97 ++++++++++++++++++++
 python_binding/pybind_core.cpp            |   4 +
 src/graph/Node.cpp                        |  10 +-
 5 files changed, 215 insertions(+), 7 deletions(-)
 create mode 100644 python_binding/operator/pybind_Fold.cpp
 create mode 100644 python_binding/operator/pybind_Unfold.cpp
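
For reference, a sketch of the C++ call the new Fold2D binding forwards to
(untested; assumes the existing Fold<DIM> factory signature mirrored by the
lambda below, with stride and dilation defaulting to 1):

    #include <array>
    #include <memory>
    #include "aidge/operator/Fold.hpp"

    int main() {
        using namespace Aidge;
        // C++ equivalent of the Python call Fold2D([16, 16], [3, 3], "fold1"):
        std::shared_ptr<Node> fold = Fold<2>(
            std::array<DimSize_t, 2>{16, 16},   // output_dims
            std::array<DimSize_t, 2>{3, 3},     // kernel_dims
            "fold1");
        return 0;
    }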

diff --git a/python_binding/operator/pybind_Expand.cpp b/python_binding/operator/pybind_Expand.cpp
index ce8a32329..c20e47e84 100644
--- a/python_binding/operator/pybind_Expand.cpp
+++ b/python_binding/operator/pybind_Expand.cpp
@@ -50,11 +50,11 @@ void init_Expand(py::module &m) {
 
     declare_registrable<Expand_Op>(m, pyClassName);
 
-    m.def("expand",
+    m.def("Expand",
           &Expand,
           py::arg("name") = "",
           R"mydelimiter(
-    Initialize a node containing an expand operator.
+    Initialize a node containing an Expand operator.
     This operator broadcasts the values given via input#0 to the shape given by input#1's values.
     If one of the inputs has fewer dimensions than the other, dimensions of size 1 are prepended on the left.
 
diff --git a/python_binding/operator/pybind_Fold.cpp b/python_binding/operator/pybind_Fold.cpp
new file mode 100644
index 000000000..747abc161
--- /dev/null
+++ b/python_binding/operator/pybind_Fold.cpp
@@ -0,0 +1,107 @@
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+#include <array>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Fold.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp" // declare_registrable
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <DimIdx_t DIM> 
+void declare_FoldOp(py::module &m) {
+  const std::string pyClassName("Fold" + std::to_string(DIM) + "DOp");
+  py::class_<Fold_Op<DIM>, std::shared_ptr<Fold_Op<DIM>>, OperatorTensor>(
+    m, pyClassName.c_str(),
+    py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize a Fold operator.
+
+    :param output_dims : The dimensions of the output.
+    :type output_dims : List[int]
+    :param kernel_dims : The dimensions of the fold kernel (filter size).
+    :type kernel_dims : List[int]
+    :param stride_dims : The stride size for the fold.
+    :type stride_dims : List[int]
+    :param dilation_dims : The dilation size for the fold.
+    :type dilation_dims : List[int]
+    )mydelimiter")
+        .def(py::init([](const std::vector<DimSize_t>& output_dims,
+                         const std::vector<DimSize_t>& kernel_dims,
+                         const std::vector<DimSize_t> &stride_dims,
+                         const std::vector<DimSize_t> &dilation_dims) {
+            AIDGE_ASSERT(output_dims.size() == DIM, "output_dims size [{}] does not match DIM [{}]", output_dims.size(), DIM);
+            AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+            AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+            AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+            return new Fold_Op<DIM>(to_array<DIM>(output_dims.begin()), to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        }), py::arg("output_dims"),
+            py::arg("kernel_dims"),
+            py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
+        .def_static("get_inputs_name", &Fold_Op<DIM>::getInputsName)
+        .def_static("get_outputs_name", &Fold_Op<DIM>::getOutputsName)
+
+        .def_static("attributes_name", []() {
+            std::vector<std::string> result;
+            auto attributes = Fold_Op<DIM>::attributesName();
+            for (std::size_t i = 0; i < std::size(EnumStrings<FoldAttr>::data); ++i) {
+                result.emplace_back(attributes[i]);
+            }
+            return result;
+        })
+        .def_readonly_static("Type", &Fold_Op<DIM>::Type)
+        ;
+
+  declare_registrable<Fold_Op<DIM>>(m, pyClassName);
+
+  m.def(("Fold" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims,
+                                                         const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::string& name,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims) {
+        AIDGE_ASSERT(output_dims.size() == DIM, "output_dims size [{}] does not match DIM [{}]", output_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+        return Fold<DIM>(to_array<DIM>(output_dims.begin()), to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+    }, 
+    py::arg("output_dims"),
+    py::arg("kernel_dims"),
+    py::arg("name") = "",
+    py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+    py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+    R"mydelimiter(
+    Initialize a node containing a Fold operator.
+
+    :param output_dims : The dimensions of the output.
+    :type output_dims : List[int]
+    :param kernel_dims : The dimensions of the fold kernel (filter size).
+    :type kernel_dims : List[int]
+    :param name : The name of the operator (optional).
+    :type name : str
+    :param stride_dims : The stride size for the fold (default is 1 along each dimension).
+    :type stride_dims : List[int]
+    :param dilation_dims : The dilation size for the fold (default is 1 along each dimension).
+    :type dilation_dims : List[int]
+    :return : A new Fold operator node.
+    :rtype : :py:class:`FoldOp`
+    )mydelimiter");
+}
+
+
+void init_Fold(py::module &m) {
+//  declare_FoldOp<1>(m);
+  declare_FoldOp<2>(m);
+//   declare_FoldOp<3>(m);
+}
+
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Unfold.cpp b/python_binding/operator/pybind_Unfold.cpp
new file mode 100644
index 000000000..86f8f26d6
--- /dev/null
+++ b/python_binding/operator/pybind_Unfold.cpp
@@ -0,0 +1,97 @@
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+#include <array>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Unfold.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp" // declare_registrable
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <DimIdx_t DIM> 
+void declare_UnfoldOp(py::module &m) {
+  const std::string pyClassName("Unfold" + std::to_string(DIM) + "DOp");
+  py::class_<Unfold_Op<DIM>, std::shared_ptr<Unfold_Op<DIM>>, OperatorTensor>(
+    m, pyClassName.c_str(),
+    py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize an Unfold operator.
+
+    :param kernel_dims : The dimensions of the unfold kernel (filter size).
+    :type kernel_dims : List[int]
+    :param stride_dims : The stride size for the unfold.
+    :type stride_dims : List[int]
+    :param dilation_dims : The dilation size for the unfold.
+    :type dilation_dims : List[int]
+    )mydelimiter")
+        .def(py::init([](const std::vector<DimSize_t>& kernel_dims,
+                         const std::vector<DimSize_t> &stride_dims,
+                         const std::vector<DimSize_t> &dilation_dims) {
+            AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+            AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+            AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+            return new Unfold_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        }), py::arg("kernel_dims"),
+            py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
+        .def_static("get_inputs_name", &Unfold_Op<DIM>::getInputsName)
+        .def_static("get_outputs_name", &Unfold_Op<DIM>::getOutputsName)
+
+        .def_static("attributes_name", []() {
+            std::vector<std::string> result;
+            auto attributes = Unfold_Op<DIM>::attributesName();
+            for (std::size_t i = 0; i < std::size(EnumStrings<UnfoldAttr>::data); ++i) {
+                result.emplace_back(attributes[i]);
+            }
+            return result;
+        })
+        .def_readonly_static("Type", &Unfold_Op<DIM>::Type)
+        ;
+
+  declare_registrable<Unfold_Op<DIM>>(m, pyClassName);
+
+  m.def(("Unfold" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::string& name,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims) {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+        return Unfold<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+    }, 
+    py::arg("kernel_dims"),
+    py::arg("name") = "",
+    py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+    py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+    R"mydelimiter(
+    Initialize a node containing an Unfold operator.
+
+    :param kernel_dims : The dimensions of the unfold kernel (filter size).
+    :type kernel_dims : List[int]
+    :param name : The name of the operator (optional).
+    :type name : str
+    :param stride_dims : The stride size for the unfold (default is 1 along each dimension).
+    :type stride_dims : List[int]
+    :param dilation_dims : The dilation size for the unfold (default is 1 along each dimension).
+    :type dilation_dims : List[int]
+    :return : A new Unfold operator node.
+    :rtype : :py:class:`UnfoldOp`
+    )mydelimiter");
+}
+
+
+void init_Unfold(py::module &m) {
+//  declare_UnfoldOp<1>(m);
+  declare_UnfoldOp<2>(m);
+//   declare_UnfoldOp<3>(m);
+}
+
+} // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 7fef82847..de045a4c9 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -61,6 +61,7 @@ void init_Erf(py::module&);
 void init_Expand(py::module&);
 void init_FC(py::module&);
 void init_Flatten(py::module&);
+void init_Fold(py::module&);
 void init_Gather(py::module&);
 void init_GenericOperator(py::module&);
 void init_GlobalAveragePooling(py::module&);
@@ -98,6 +99,7 @@ void init_Stack(py::module&);
 void init_Sub(py::module&);
 void init_Tanh(py::module&);
 void init_Transpose(py::module&);
+void init_Unfold(py::module&);
 void init_Unsqueeze(py::module&);
 void init_WeightInterleaving(py::module&);
 
@@ -169,6 +171,7 @@ void init_Aidge(py::module& m) {
     init_Expand(m);
     init_FC(m);
     init_Flatten(m);
+    init_Fold(m);
     init_Gather(m);
     init_GenericOperator(m);
     init_GlobalAveragePooling(m);
@@ -205,6 +208,7 @@ void init_Aidge(py::module& m) {
     init_Sub(m);
     init_Tanh(m);
     init_Transpose(m);
+    init_Unfold(m);
     init_Unsqueeze(m);
     init_WeightInterleaving(m);
 
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 33c336a84..8a3975ece 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -349,11 +349,11 @@ void Aidge::Node::addChildOp(const std::shared_ptr<Node>& otherNode, const IOInd
     AIDGE_ASSERT(outId < nbOutputs(),
         "Output index (#{}) of the node {} (of type {}) is out of bound (it has {} outputs), when trying to add the child node {} (of type {})",
         outId, name(), type(), nbOutputs(), otherNode->name(), otherNode->type());
-    if (otherNode.use_count() == 1) {
-        Log::debug("Node::addChild(): the node {} (of type {}) only holds a weak reference to the added child node {} (of type {})."
-            "If the child node goes out of scope, it will be destructed, leading to a dangling connection."
-            "To avoid this message, consider adding the child node to a GraphView first.", name(), type(), otherNode->name(), otherNode->type());
-    }
+    // if (otherNode.use_count() == 1) {
+    //     Log::debug("Node::addChild(): the node {} (of type {}) only holds a weak reference to the added child node {} (of type {})."
+    //         "If the child node goes out of scope, it will be destructed, leading to a dangling connection."
+    //         "To avoid this message, consider adding the child node to a GraphView first.", name(), type(), otherNode->name(), otherNode->type());
+    // }
     if (otherNode->input(otherInId).second != gk_IODefaultIndex) {
         Log::notice("the {}-th Parent of the child node {} (of type {}) already existed", otherInId, otherNode->name(), otherNode->type());
     }
-- 
GitLab


From 6a0808ed82a539e388bbb9fe2797d790218c7dbd Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 27 Mar 2025 22:32:40 +0100
Subject: [PATCH 3/8] Added const qualifier for attributes

---
 include/aidge/operator/TopK.hpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
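
Note why const accessors can still hand out mutable references: const applies
to the operator object, not to the Attributes instance that mAttributes points
to. A minimal standalone repro of the pattern:

    #include <memory>

    struct Holder { int v = 0; };

    struct Op {
        std::shared_ptr<Holder> h = std::make_shared<Holder>();
        int& v() const { return h->v; }  // OK: the shared_ptr itself is untouched
    };

    int main() {
        const Op op;
        op.v() = 42;  // mutates through a const object, like axis() or k() here
        return 0;
    }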

diff --git a/include/aidge/operator/TopK.hpp b/include/aidge/operator/TopK.hpp
index 95e33dbb9..67d40ace4 100644
--- a/include/aidge/operator/TopK.hpp
+++ b/include/aidge/operator/TopK.hpp
@@ -97,10 +97,10 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline int64_t& axis() { return mAttributes->template getAttr<TopKAttr::Axis>(); }
-    inline bool& largest() { return mAttributes->template getAttr<TopKAttr::Largest>(); }
-    inline bool& sorted() { return mAttributes->template getAttr<TopKAttr::Sorted>(); }
-    inline IOIndex_t& k() { return mAttributes->template getAttr<TopKAttr::K>(); }
+    inline int64_t& axis() const { return mAttributes->template getAttr<TopKAttr::Axis>(); }
+    inline bool& largest() const { return mAttributes->template getAttr<TopKAttr::Largest>(); }
+    inline bool& sorted() const { return mAttributes->template getAttr<TopKAttr::Sorted>(); }
+    inline IOIndex_t& k() const { return mAttributes->template getAttr<TopKAttr::K>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"x", "k"};
-- 
GitLab


From 32b1a74402c9e4967478cbb82a100aab95b0c230 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Fri, 28 Mar 2025 11:36:10 +0100
Subject: [PATCH 4/8] Added binding for TopK

---
 include/aidge/operator/TopK.hpp         |  8 +++++
 python_binding/operator/pybind_TopK.cpp | 40 ++++++++++++++++++++++++++
 python_binding/pybind_core.cpp          |  2 ++
 src/graph/Node.cpp                      |  2 ++
 4 files changed, 52 insertions(+)
 create mode 100644 python_binding/operator/pybind_TopK.cpp
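
Given the LIST_TOPK_ATTR entries from patch 1, the accessor added below should
expose the attribute names in declaration order (a sketch, assuming
GENERATE_LIST_ATTR_STR emits the quoted strings verbatim):

    #include <cstddef>
    #include <iostream>
    #include "aidge/operator/TopK.hpp"

    int main() {
        const char* const* names = Aidge::TopK_Op::attributesName();
        for (std::size_t i = 0; i < 4; ++i) {
            std::cout << names[i] << '\n';   // axis, largest, sorted, k
        }
        return 0;
    }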

diff --git a/include/aidge/operator/TopK.hpp b/include/aidge/operator/TopK.hpp
index 67d40ace4..1b9a27851 100644
--- a/include/aidge/operator/TopK.hpp
+++ b/include/aidge/operator/TopK.hpp
@@ -108,6 +108,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"values", "indices"};
     }
+
+    /**
+     * @brief Retrieves the names of the attributes for the operator.
+     * @return A pointer to the array of attribute names.
+     */
+    static constexpr const char* const* attributesName() {
+        return EnumStrings<Aidge::TopKAttr>::data;
+    }
 };
 
 std::shared_ptr<Node> TopK(const std::string& name = "");
diff --git a/python_binding/operator/pybind_TopK.cpp b/python_binding/operator/pybind_TopK.cpp
new file mode 100644
index 000000000..314a3283b
--- /dev/null
+++ b/python_binding/operator/pybind_TopK.cpp
@@ -0,0 +1,40 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/TopK.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_TopK(py::module& m) {
+    py::class_<TopK_Op, std::shared_ptr<TopK_Op>, OperatorTensor>(m, "TopKOp", py::multiple_inheritance())
+    .def(py::init<int64_t, bool, bool, IOIndex_t>(), py::arg("axis") = -1, py::arg("largest") = true, py::arg("sorted") = true, py::arg("k") = 0)
+    .def_static("get_inputs_name", &TopK_Op::getInputsName)
+    .def_static("get_outputs_name", &TopK_Op::getOutputsName)
+    .def_static("attributes_name", []() {
+        std::vector<std::string> result;
+        auto attributes = TopK_Op::attributesName();
+        for (std::size_t i = 0; i < std::size(EnumStrings<TopKAttr>::data); ++i) {
+            result.emplace_back(attributes[i]);
+        }
+        return result;
+    })
+    .def_readonly_static("Type", &TopK_Op::Type);
+
+    m.def("TopK", &TopK, py::arg("name") = "");
+}
+
+}  // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index de045a4c9..61a0a271c 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -98,6 +98,7 @@ void init_Squeeze(py::module&);
 void init_Stack(py::module&);
 void init_Sub(py::module&);
 void init_Tanh(py::module&);
+void init_TopK(py::module&);
 void init_Transpose(py::module&);
 void init_Unfold(py::module&);
 void init_Unsqueeze(py::module&);
@@ -207,6 +208,7 @@ void init_Aidge(py::module& m) {
     init_Stack(m);
     init_Sub(m);
     init_Tanh(m);
+    init_TopK(m);
     init_Transpose(m);
     init_Unfold(m);
     init_Unsqueeze(m);
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 8a3975ece..74e0cab37 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -233,6 +233,8 @@ std::string Aidge::Node::outputName(Aidge::IOIndex_t outID) const {
 }
 
 std::string Aidge::Node::outputName(Aidge::IOIndex_t outID, std::string newName) {
+    AIDGE_ASSERT(outID < mIdInChildren.size(), "Output index out of bound.");
+
     this->mOutputNames[outID] = newName;
     for (std::size_t i = 0; i < mIdInChildren[outID].size(); ++i) {
         if (std::shared_ptr<Node> child = mChildren[outID][i].lock()) {
-- 
GitLab


From dbf80ab9989cdf298b53f94b05ab2c6ec5bf349e Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Fri, 28 Mar 2025 12:09:30 +0100
Subject: [PATCH 5/8] Fixed 2nd output handling

---
 include/aidge/operator/TopK.hpp | 1 +
 src/operator/TopK.cpp           | 8 ++++++++
 2 files changed, 9 insertions(+)
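
The new setDataType() override keeps the "indices" output out of the usual
type propagation; a sketch of the intended behavior at this point in the
series (untested):

    #include <cassert>
    #include "aidge/operator/TopK.hpp"

    int main() {
        Aidge::TopK_Op op;                           // all arguments defaulted
        op.setDataType(Aidge::DataType::Float64);
        // Only output#0 ("values") follows the requested type:
        assert(op.getOutput(0)->dataType() == Aidge::DataType::Float64);
        // output#1 ("indices") is left alone; forwardDims() pins it to Int64.
        return 0;
    }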

diff --git a/include/aidge/operator/TopK.hpp b/include/aidge/operator/TopK.hpp
index 1b9a27851..e1aa193bb 100644
--- a/include/aidge/operator/TopK.hpp
+++ b/include/aidge/operator/TopK.hpp
@@ -94,6 +94,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    void setDataType(const DataType& dataType) const override final;
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
diff --git a/src/operator/TopK.cpp b/src/operator/TopK.cpp
index 6f942a639..52bb37554 100644
--- a/src/operator/TopK.cpp
+++ b/src/operator/TopK.cpp
@@ -78,6 +78,8 @@ bool Aidge::TopK_Op::forwardDims(bool allowDataDependency) {
         const auto kAxis = (axis() >= 0) ? axis() : axis() + static_cast<std::int8_t>(outDims.size());
         outDims[kAxis] = k();
         mOutputs[0]->resize(outDims);
+        mOutputs[1]->resize(outDims);
+        mOutputs[1]->setDataType(DataType::Int64);
         return true;
     }
 
@@ -87,6 +89,12 @@ bool Aidge::TopK_Op::forwardDims(bool allowDataDependency) {
 void Aidge::TopK_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(TopK_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+    mOutputs[1]->setBackend(name, device);
+}
+
+void Aidge::TopK_Op::setDataType(const DataType& dataType) const {
+    mOutputs[0]->setDataType(dataType);
+    // mOutputs[1] data type is fixed (Int64)
 }
 
 std::set<std::string> Aidge::TopK_Op::getAvailableBackends() const {
-- 
GitLab


From 5b2f5a506945bdfa5d34b6ba04aecaed6f951045 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Fri, 28 Mar 2025 15:35:26 +0100
Subject: [PATCH 6/8] Fixed issues

---
 python_binding/data/pybind_Tensor.cpp | 7 +++++--
 src/operator/Flatten.cpp              | 3 ++-
 2 files changed, 7 insertions(+), 3 deletions(-)
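
Two remarks. The Float16 buffer case reuses the float32 format descriptor,
presumably because pybind11 provides no built-in descriptor for half floats,
so buffer consumers will read the data as 32-bit floats. The Flatten fix
normalizes negative axes the same way TopK does; the arithmetic, worked out
for a {2, 3, 4} input with axis = -1 (previously inDims.begin() + (-1) was
out of range):

    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <vector>

    int main() {
        const std::vector<std::size_t> inDims{2, 3, 4};
        const std::int64_t axis = -1;
        const auto kAxis = (axis >= 0)
            ? axis
            : axis + static_cast<std::int64_t>(inDims.size());  // -1 + 3 = 2
        const auto firstDim = std::accumulate(
            inDims.begin(), inDims.begin() + kAxis, 1ULL,
            std::multiplies<std::size_t>());                    // 2 * 3 = 6
        // Flatten output dims: {firstDim, size / firstDim} = {6, 4}
        return firstDim == 6 ? 0 : 1;
    }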

diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 2171d4897..ff11c9c6c 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -642,7 +642,10 @@ void init_Tensor(py::module& m){
                 break;
             case DataType::Float32:
                 dataFormatDescriptor = py::format_descriptor<float>::format();
-                break;;
+                break;
+            case DataType::Float16:
+                dataFormatDescriptor = py::format_descriptor<float>::format();
+                break;
             case DataType::Int4:
                 dataFormatDescriptor = py::format_descriptor<std::int8_t>::format();
                 break;
@@ -710,7 +713,7 @@ void init_Tensor(py::module& m){
                 dataFormatDescriptor = py::format_descriptor<std::uint64_t>::format();
                 break;
             default:
-                throw py::value_error("Unsupported data format");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type: {}", b.dataType());
         }
 
         return py::buffer_info(
diff --git a/src/operator/Flatten.cpp b/src/operator/Flatten.cpp
index e02c7abe6..4714feb11 100644
--- a/src/operator/Flatten.cpp
+++ b/src/operator/Flatten.cpp
@@ -53,7 +53,8 @@ std::shared_ptr<Aidge::Operator> Aidge::Flatten_Op::clone() const {
 bool Aidge::Flatten_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         const auto inDims(getInput(0)->dims());
-        const auto firstDim = std::accumulate(inDims.begin(), inDims.begin() + axis(), 1ULL, std::multiplies<DimSize_t>());
+        const auto kAxis = (axis() >= 0) ? axis() : axis() + static_cast<std::int8_t>(inDims.size());
+        const auto firstDim = std::accumulate(inDims.begin(), inDims.begin() + kAxis, 1ULL, std::multiplies<DimSize_t>());
         mOutputs[0]->resize({firstDim, getInput(0)->size() / firstDim});
         return true;
     }
-- 
GitLab


From bc09c6acc560191e0d941bfbe9d08dc1cb518058 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Fri, 28 Mar 2025 15:50:45 +0100
Subject: [PATCH 7/8] Set output data type in constructor

---
 src/operator/TopK.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
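
Moving the assignment into the constructor makes the Int64 guarantee on
output#1 hold from construction onward, independent of whether forwardDims()
has run; a sketch (untested):

    #include <cassert>
    #include "aidge/operator/TopK.hpp"

    int main() {
        Aidge::TopK_Op op;  // all constructor arguments are defaulted
        // No forwardDims() call is needed for the indices type anymore:
        assert(op.getOutput(1)->dataType() == Aidge::DataType::Int64);
        return 0;
    }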

diff --git a/src/operator/TopK.cpp b/src/operator/TopK.cpp
index 52bb37554..660865100 100644
--- a/src/operator/TopK.cpp
+++ b/src/operator/TopK.cpp
@@ -38,7 +38,10 @@ TopK_Op::TopK_Op(
           attr<TopKAttr::Axis>(axis),
           attr<TopKAttr::Largest>(largest),
           attr<TopKAttr::Sorted>(sorted),
-          attr<TopKAttr::K>(k))) {}
+          attr<TopKAttr::K>(k)))
+{
+    mOutputs[1]->setDataType(DataType::Int64);
+}
 
 TopK_Op::TopK_Op(const TopK_Op& op)
     : OperatorTensor(op),
@@ -79,7 +82,6 @@ bool Aidge::TopK_Op::forwardDims(bool allowDataDependency) {
         outDims[kAxis] = k();
         mOutputs[0]->resize(outDims);
         mOutputs[1]->resize(outDims);
-        mOutputs[1]->setDataType(DataType::Int64);
         return true;
     }
 
-- 
GitLab


From 61ece703aff8a26f9603f5a6db8bc781296c0ffa Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Fri, 28 Mar 2025 18:08:57 +0100
Subject: [PATCH 8/8] Fix compilation issue with NVCC

---
 include/aidge/utils/DynamicAttributes.hpp | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
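
The failure was NVCC choking on std::hash instantiated from the decltype of a
range-for variable; naming the element type explicitly sidesteps the
deduction. A standalone sketch of the fixed pattern (Aidge::hash_combine is
assumed equivalent to the boost-style combine inlined below):

    #include <cstddef>
    #include <functional>
    #include <string>
    #include <type_traits>
    #include <vector>

    template <template<typename...> class Container, typename T, typename... Args>
    std::size_t hashIterable(const Container<T, Args...>& iterable) {
        // e.g. std::pair<const K, M> for std::map, plain T for std::vector
        using V = typename Container<T, Args...>::value_type;
        std::size_t seed = 0;
        for (const V& v : iterable) {
            seed ^= std::hash<std::remove_const_t<std::remove_reference_t<V>>>()(v)
                    + 0x9e3779b9 + (seed << 6) + (seed >> 2);
        }
        return seed;
    }

    int main() {
        const std::vector<std::string> attrs{"axis", "largest", "sorted", "k"};
        (void)hashIterable(attrs);  // exercise the template
        return 0;
    }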

diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 03513a5d8..7e1b465ff 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -451,10 +451,9 @@ namespace std {
     struct hash<Container<T, Args...>> {
         std::size_t operator()(const Container<T, Args...>& iterable) const {
             std::size_t seed = 0;
-            for (const auto& v : iterable) {
+            for (const typename Container<T, Args...>::value_type& v : iterable) {
                 // Recursively hash the value pointed by the iterator
-                // Use decltype(v) instead of T to make it work for std::map for example.
-                Aidge::hash_combine(seed, std::hash<std::remove_const_t<std::remove_reference_t<decltype(v)>>>()(v));
+                Aidge::hash_combine(seed, std::hash<std::remove_const_t<std::remove_reference_t<typename Container<T, Args...>::value_type>>>()(v));
             }
             return seed;
         }
-- 
GitLab