diff --git a/include/aidge/operator/TopK.hpp b/include/aidge/operator/TopK.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e1aa193bb0b7720fce0d1161d3a352f2e8109324
--- /dev/null
+++ b/include/aidge/operator/TopK.hpp
@@ -0,0 +1,127 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_TOPK_H_
+#define AIDGE_CORE_OPERATOR_TOPK_H_
+
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+#define LIST_TOPK_ATTR(X) \
+    X(Axis, "axis", int64_t), \
+    X(Largest, "largest", bool), \
+    X(Sorted, "sorted", bool), \
+    X(K, "k", IOIndex_t)
+
+namespace Aidge {
+/**
+ * @enum TopKAttr
+ * @brief Attributes for the TopK operation.
+ *
+ * - Axis: Dimension along which to sort. A negative value means counting
+ *         dimensions from the back. The accepted range is [-r, r-1] where r = rank(input).
+ * - Largest: Whether to return the top-K largest or smallest elements.
+ * - Sorted: Whether to return the elements in sorted order.
+ * - K: Positive value corresponding to the number of top elements to retrieve.
+ */
+enum class TopKAttr {
+    GENERATE_LIST_ATTR_ENUM(LIST_TOPK_ATTR)
+};
+} // namespace Aidge
+
+namespace {
+template <>
+struct EnumStrings<Aidge::TopKAttr> {
+    static const char* const data[];
+};
+const char* const EnumStrings<Aidge::TopKAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_TOPK_ATTR)
+};
+}
+
+namespace Aidge {
+
+class TopK_Op : public OperatorTensor,
+    public Registrable<TopK_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const TopK_Op&)>> {
+private:
+    using Attributes_ =
+        StaticAttributes<TopKAttr,
+                         GENERATE_LIST_ATTR_TYPE(LIST_TOPK_ATTR)>;
+    template <TopKAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    static const std::string Type;
+
+    TopK_Op(int64_t axis = -1,
+        bool largest = true,
+        bool sorted = true,
+        IOIndex_t k = 0);
+
+    /**
+     * @brief Copy-constructor.
+     * @param op TopK_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
+     */
+    TopK_Op(const TopK_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::TopK_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<TopK_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    void setDataType(const DataType& dataType) const override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline int64_t& axis() const { return mAttributes->template getAttr<TopKAttr::Axis>(); }
+    inline bool& largest() const { return mAttributes->template getAttr<TopKAttr::Largest>(); }
+    inline bool& sorted() const { return mAttributes->template getAttr<TopKAttr::Sorted>(); }
+    inline IOIndex_t& k() const { return mAttributes->template getAttr<TopKAttr::K>(); }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"x", "k"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"values", "indices"};
+    }
+
+    /**
+     * @brief Retrieves the names of the attributes for the operator.
+     * @return A pointer to the array of attribute names.
+     */
+    static constexpr const char* const* attributesName() {
+        return EnumStrings<Aidge::TopKAttr>::data;
+    }
+};
+
+std::shared_ptr<Node> TopK(const std::string& name = "");
+
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_TOPK_H_ */
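Usage note: a minimal sketch of how these attributes surface through the Python binding added later in this patch (pybind_TopK.cpp). The `aidge_core` module name is assumed; treat the snippet as illustrative rather than a tested sample.

```python
import aidge_core  # assumed import name for the aidge_core bindings

# Keep the 3 largest values along the last axis, returned in sorted order.
op = aidge_core.TopKOp(axis=-1, largest=True, sorted=True, k=3)

print(aidge_core.TopKOp.get_inputs_name())   # ['x', 'k']
print(aidge_core.TopKOp.get_outputs_name())  # ['values', 'indices']
print(aidge_core.TopKOp.attributes_name())   # ['axis', 'largest', 'sorted', 'k']
```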
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 03513a5d83b98d88ca28393b4612cefd0d9453cc..7e1b465fff5a6af91930841875168a601e81c078 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -451,10 +451,9 @@ namespace std {
     struct hash<Container<T, Args...>> {
         std::size_t operator()(const Container<T, Args...>& iterable) const {
             std::size_t seed = 0;
-            for (const auto& v : iterable) {
+            for (const typename Container<T, Args...>::value_type& v : iterable) {
                 // Recursively hash the value pointed by the iterator
-                // Use decltype(v) instead of T to make it work for std::map for example.
-                Aidge::hash_combine(seed, std::hash<std::remove_const_t<std::remove_reference_t<decltype(v)>>>()(v));
+                Aidge::hash_combine(seed, std::hash<std::remove_const_t<std::remove_reference_t<typename Container<T, Args...>::value_type>>>()(v));
             }
             return seed;
         }
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 2171d48975db8f4029abe7982bf6dfc17640dd52..ff11c9c6c8911b8f48e1a6d30333577796511b0c 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -642,7 +642,10 @@ void init_Tensor(py::module& m){
                 break;
             case DataType::Float32:
                 dataFormatDescriptor = py::format_descriptor<float>::format();
-                break;;
+                break;
+            case DataType::Float16:
+                dataFormatDescriptor = py::format_descriptor<float>::format();
+                break;
             case DataType::Int4:
                 dataFormatDescriptor = py::format_descriptor<std::int8_t>::format();
                 break;
@@ -710,7 +713,7 @@ void init_Tensor(py::module& m){
                 dataFormatDescriptor = py::format_descriptor<std::uint64_t>::format();
                 break;
             default:
-                throw py::value_error("Unsupported data format");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type: {}", b.dataType());
         }
 
         return py::buffer_info(
diff --git a/python_binding/operator/pybind_Expand.cpp b/python_binding/operator/pybind_Expand.cpp
index ce8a32329ab51ea3086c689b0156b62244f752c2..c20e47e849bbfad7331b8f5f2de82bc32bda033e 100644
--- a/python_binding/operator/pybind_Expand.cpp
+++ b/python_binding/operator/pybind_Expand.cpp
@@ -50,11 +50,11 @@ void init_Expand(py::module &m) {
 
     declare_registrable<Expand_Op>(m, pyClassName);
 
-    m.def("expand",
+    m.def("Expand",
           &Expand,
           py::arg("name") = "",
           R"mydelimiter(
-    Initialize a node containing an expand operator.
+    Initialize a node containing an Expand operator.
     This operator will broadcast values given via input#0 to a shape given via input#1's values.
     If one of the inputs has fewer dimensions than the other, 1's are prepended to its shape until both ranks match.
 
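Migration note: this renames the Python-facing factory from `expand` to `Expand`, so existing scripts need a one-line update. A sketch, assuming the usual `aidge_core` import:

```python
import aidge_core

# Before this patch: node = aidge_core.expand(name="expand0")
node = aidge_core.Expand(name="expand0")
```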
diff --git a/python_binding/operator/pybind_Fold.cpp b/python_binding/operator/pybind_Fold.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..747abc1611a1a51d9b317de365b5036436b1494a
--- /dev/null
+++ b/python_binding/operator/pybind_Fold.cpp
@@ -0,0 +1,118 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+#include <array>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Fold.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp" // declare_registrable
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <DimIdx_t DIM>
+void declare_FoldOp(py::module &m) {
+  const std::string pyClassName("Fold" + std::to_string(DIM) + "DOp");
+  py::class_<Fold_Op<DIM>, std::shared_ptr<Fold_Op<DIM>>, OperatorTensor>(
+    m, pyClassName.c_str(),
+    py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize a Fold operator.
+
+    :param output_dims : The dimensions of the output.
+    :type output_dims : List[int]
+    :param kernel_dims : The dimensions of the fold kernel (filter size).
+    :type kernel_dims : List[int]
+    :param stride_dims : The stride size for the fold.
+    :type stride_dims : List[int]
+    :param dilation_dims : The dilation size for the fold.
+    :type dilation_dims : List[int]
+    )mydelimiter")
+        .def(py::init([](const std::vector<DimSize_t>& output_dims,
+                         const std::vector<DimSize_t>& kernel_dims,
+                         const std::vector<DimSize_t> &stride_dims,
+                         const std::vector<DimSize_t> &dilation_dims) {
+            AIDGE_ASSERT(output_dims.size() == DIM, "output_dims size [{}] does not match DIM [{}]", output_dims.size(), DIM);
+            AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+            AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+            AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+            return new Fold_Op<DIM>(to_array<DIM>(output_dims.begin()), to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        }), py::arg("output_dims"),
+            py::arg("kernel_dims"),
+            py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
+        .def_static("get_inputs_name", &Fold_Op<DIM>::getInputsName)
+        .def_static("get_outputs_name", &Fold_Op<DIM>::getOutputsName)
+
+        .def_static("attributes_name", []() {
+            std::vector<std::string> result;
+            auto attributes = Fold_Op<DIM>::attributesName();
+            for (size_t i = 0; i < std::size(EnumStrings<FoldAttr>::data); ++i) {
+                result.emplace_back(attributes[i]);
+            }
+            return result;
+        })
+        .def_readonly_static("Type", &Fold_Op<DIM>::Type)
+        ;
+
+  declare_registrable<Fold_Op<DIM>>(m, pyClassName);
+
+  m.def(("Fold" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims,
+                                                         const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::string& name,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims) {
+        AIDGE_ASSERT(output_dims.size() == DIM, "output_dims size [{}] does not match DIM [{}]", output_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+        return Fold<DIM>(to_array<DIM>(output_dims.begin()), to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+    }, 
+    py::arg("output_dims"),
+    py::arg("kernel_dims"),
+    py::arg("name") = "",
+    py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+    py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+    R"mydelimiter(
+    Initialize a node containing a Fold operator.
+
+    :param output_dims : The dimensions of the output.
+    :type output_dims : List[int]
+    :param kernel_dims : The dimensions of the fold kernel (filter size).
+    :type kernel_dims : List[int]
+    :param name : The name of the operator (optional).
+    :type name : str
+    :param stride_dims : The stride size for the fold (defaults to 1 along each dimension).
+    :type stride_dims : List[int]
+    :param dilation_dims : The dilation size for the fold (defaults to 1 along each dimension).
+    :type dilation_dims : List[int]
+    :return : A new Fold operator node.
+    :rtype : :py:class:`FoldOp`
+    )mydelimiter");
+}
+
+
+void init_Fold(py::module &m) {
+//   declare_FoldOp<1>(m);
+  declare_FoldOp<2>(m);
+//   declare_FoldOp<3>(m);
+}
+
+} // namespace Aidge
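Usage note: a short sketch of the new `Fold2D` factory (module name `aidge_core` assumed). The rank checks above surface as Python exceptions:

```python
import aidge_core

# Fold 3x3 patches back into a 16x16 output map; stride_dims and
# dilation_dims default to 1 along each spatial dimension.
fold = aidge_core.Fold2D([16, 16], [3, 3], name="fold0")

# A rank mismatch trips the AIDGE_ASSERT above, e.g.:
# aidge_core.Fold2D([16], [3, 3])  # output_dims size [1] does not match DIM [2]
```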
diff --git a/python_binding/operator/pybind_TopK.cpp b/python_binding/operator/pybind_TopK.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..314a3283baf251171904c497aa93cc9da282d0d0
--- /dev/null
+++ b/python_binding/operator/pybind_TopK.cpp
@@ -0,0 +1,43 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/TopK.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_TopK(py::module& m) {
+    py::class_<TopK_Op, std::shared_ptr<TopK_Op>, OperatorTensor>(m, "TopKOp", py::multiple_inheritance())
+    .def(py::init<int64_t, bool, bool, IOIndex_t>(), py::arg("axis") = -1, py::arg("largest") = true, py::arg("sorted") = true, py::arg("k") = 0)
+    .def_static("get_inputs_name", &TopK_Op::getInputsName)
+    .def_static("get_outputs_name", &TopK_Op::getOutputsName)
+    .def_static("attributes_name", []() {
+        std::vector<std::string> result;
+        auto attributes = TopK_Op::attributesName();
+        for (size_t i = 0; i < std::size(EnumStrings<TopKAttr>::data); ++i) {
+            result.emplace_back(attributes[i]);
+        }
+        return result;
+    })
+    .def_readonly_static("Type", &TopK_Op::Type);
+
+    m.def("TopK", &TopK, py::arg("name") = "");
+}
+
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Unfold.cpp b/python_binding/operator/pybind_Unfold.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..86f8f26d63ef1c59ad68842a6e7291fa4706e8e1
--- /dev/null
+++ b/python_binding/operator/pybind_Unfold.cpp
@@ -0,0 +1,108 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+#include <array>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Unfold.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp" // declare_registrable
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <DimIdx_t DIM>
+void declare_UnfoldOp(py::module &m) {
+  const std::string pyClassName("Unfold" + std::to_string(DIM) + "DOp");
+  py::class_<Unfold_Op<DIM>, std::shared_ptr<Unfold_Op<DIM>>, OperatorTensor>(
+    m, pyClassName.c_str(),
+    py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize an Unfold operator.
+
+    :param kernel_dims : The dimensions of the unfold kernel (filter size).
+    :type kernel_dims : List[int]
+    :param stride_dims : The stride size for the unfold.
+    :type stride_dims : List[int]
+    :param dilation_dims : The dilation size for the unfold.
+    :type dilation_dims : List[int]
+    )mydelimiter")
+        .def(py::init([](const std::vector<DimSize_t>& kernel_dims,
+                         const std::vector<DimSize_t> &stride_dims,
+                         const std::vector<DimSize_t> &dilation_dims) {
+            AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+            AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+            AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+            return new Unfold_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        }), py::arg("kernel_dims"),
+            py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
+        .def_static("get_inputs_name", &Unfold_Op<DIM>::getInputsName)
+        .def_static("get_outputs_name", &Unfold_Op<DIM>::getOutputsName)
+
+        .def_static("attributes_name", []() {
+            std::vector<std::string> result;
+            auto attributes = Unfold_Op<DIM>::attributesName();
+            for (size_t i = 0; i < std::size(EnumStrings<UnfoldAttr>::data); ++i) {
+                result.emplace_back(attributes[i]);
+            }
+            return result;
+        })
+        .def_readonly_static("Type", &Unfold_Op<DIM>::Type)
+        ;
+
+  declare_registrable<Unfold_Op<DIM>>(m, pyClassName);
+
+  m.def(("Unfold" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::string& name,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims) {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+        return Unfold<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+    }, 
+    py::arg("kernel_dims"),
+    py::arg("name") = "",
+    py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+    py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+    R"mydelimiter(
+    Initialize a node containing an Unfold operator.
+
+    :param kernel_dims : The dimensions of the unfold kernel (filter size).
+    :type kernel_dims : List[int]
+    :param name : The name of the operator (optional).
+    :type name : str
+    :param stride_dims : The stride size for the unfold (defaults to 1 along each dimension).
+    :type stride_dims : List[int]
+    :param dilation_dims : The dilation size for the unfold (defaults to 1 along each dimension).
+    :type dilation_dims : List[int]
+    :return : A new Unfold operator node.
+    :rtype : :py:class:`UnfoldOp`
+    )mydelimiter");
+}
+
+
+void init_Unfold(py::module &m) {
+//   declare_UnfoldOp<1>(m);
+  declare_UnfoldOp<2>(m);
+//   declare_UnfoldOp<3>(m);
+}
+
+} // namespace Aidge
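Usage note: the matching sketch for `Unfold2D` (same assumptions as the Fold example above):

```python
import aidge_core

# Extract sliding 3x3 patches with stride 2; dilation defaults to 1.
unfold = aidge_core.Unfold2D([3, 3], name="unfold0", stride_dims=[2, 2])
```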
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 7fef82847f3a9e5252e14c1aff584b21f182e36c..61a0a271c6dd23f30065f31a711d0383395f5d9d 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -61,6 +61,7 @@ void init_Erf(py::module&);
 void init_Expand(py::module&);
 void init_FC(py::module&);
 void init_Flatten(py::module&);
+void init_Fold(py::module&);
 void init_Gather(py::module&);
 void init_GenericOperator(py::module&);
 void init_GlobalAveragePooling(py::module&);
@@ -97,7 +98,9 @@ void init_Squeeze(py::module&);
 void init_Stack(py::module&);
 void init_Sub(py::module&);
 void init_Tanh(py::module&);
+void init_TopK(py::module&);
 void init_Transpose(py::module&);
+void init_Unfold(py::module&);
 void init_Unsqueeze(py::module&);
 void init_WeightInterleaving(py::module&);
 
@@ -169,6 +172,7 @@ void init_Aidge(py::module& m) {
     init_Expand(m);
     init_FC(m);
     init_Flatten(m);
+    init_Fold(m);
     init_Gather(m);
     init_GenericOperator(m);
     init_GlobalAveragePooling(m);
@@ -204,7 +208,9 @@ void init_Aidge(py::module& m) {
     init_Stack(m);
     init_Sub(m);
     init_Tanh(m);
+    init_TopK(m);
     init_Transpose(m);
+    init_Unfold(m);
     init_Unsqueeze(m);
     init_WeightInterleaving(m);
 
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 33c336a843bcf168497cd86fea241d4ad2dec362..74e0cab37489c275512f5ba53290bdb5eac065e0 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -233,6 +233,8 @@ std::string Aidge::Node::outputName(Aidge::IOIndex_t outID) const {
 }
 
 std::string Aidge::Node::outputName(Aidge::IOIndex_t outID, std::string newName) {
+    AIDGE_ASSERT(outID < mIdInChildren.size(), "Output index (#{}) of the node {} (of type {}) is out of bound (it has {} outputs).", outID, name(), type(), nbOutputs());
+
     this->mOutputNames[outID] = newName;
     for (std::size_t i = 0; i < mIdInChildren[outID].size(); ++i) {
         if (std::shared_ptr<Node> child = mChildren[outID][i].lock()) {
@@ -349,11 +351,11 @@ void Aidge::Node::addChildOp(const std::shared_ptr<Node>& otherNode, const IOInd
     AIDGE_ASSERT(outId < nbOutputs(),
         "Output index (#{}) of the node {} (of type {}) is out of bound (it has {} outputs), when trying to add the child node {} (of type {})",
         outId, name(), type(), nbOutputs(), otherNode->name(), otherNode->type());
-    if (otherNode.use_count() == 1) {
-        Log::debug("Node::addChild(): the node {} (of type {}) only holds a weak reference to the added child node {} (of type {})."
-            "If the child node goes out of scope, it will be destructed, leading to a dangling connection."
-            "To avoid this message, consider adding the child node to a GraphView first.", name(), type(), otherNode->name(), otherNode->type());
-    }
+    // if (otherNode.use_count() == 1) {
+    //     Log::debug("Node::addChild(): the node {} (of type {}) only holds a weak reference to the added child node {} (of type {})."
+    //         "If the child node goes out of scope, it will be destructed, leading to a dangling connection."
+    //         "To avoid this message, consider adding the child node to a GraphView first.", name(), type(), otherNode->name(), otherNode->type());
+    // }
     if (otherNode->input(otherInId).second != gk_IODefaultIndex) {
         Log::notice("the {}-th Parent of the child node {} (of type {}) already existed", otherInId, otherNode->name(), otherNode->type());
     }
diff --git a/src/operator/Flatten.cpp b/src/operator/Flatten.cpp
index e02c7abe6557469763e042044609fe8b59115a45..4714feb11659a879a81984c638ad6872545c23b0 100644
--- a/src/operator/Flatten.cpp
+++ b/src/operator/Flatten.cpp
@@ -53,7 +53,8 @@ std::shared_ptr<Aidge::Operator> Aidge::Flatten_Op::clone() const {
 bool Aidge::Flatten_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         const auto inDims(getInput(0)->dims());
-        const auto firstDim = std::accumulate(inDims.begin(), inDims.begin() + axis(), 1ULL, std::multiplies<DimSize_t>());
+        const auto kAxis = (axis() >= 0) ? axis() : axis() + static_cast<std::int64_t>(inDims.size());
+        const auto firstDim = std::accumulate(inDims.begin(), inDims.begin() + kAxis, 1ULL, std::multiplies<DimSize_t>());
         mOutputs[0]->resize({firstDim, getInput(0)->size() / firstDim});
         return true;
     }
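For clarity, a pure-Python mirror of the corrected shape computation, showing why the negative-axis normalization matters (a sketch of the arithmetic only, not an Aidge API):

```python
# Mirrors Flatten_Op::forwardDims: normalize a negative axis, then collapse
# the dimensions before and after it into a 2D shape.
def flatten_dims(in_dims, axis):
    k_axis = axis if axis >= 0 else axis + len(in_dims)
    first = 1
    for d in in_dims[:k_axis]:
        first *= d
    total = 1
    for d in in_dims:
        total *= d
    return [first, total // first]

print(flatten_dims([2, 3, 4], axis=1))   # [2, 12]
print(flatten_dims([2, 3, 4], axis=-1))  # [6, 4]; previously the raw -1 was
                                         # used directly as an iterator offset
```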
diff --git a/src/operator/TopK.cpp b/src/operator/TopK.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..660865100a817b32c42328b34032541479b3aefc
--- /dev/null
+++ b/src/operator/TopK.cpp
@@ -0,0 +1,112 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>    // std::size_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/TopK.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+const std::string TopK_Op::Type = "TopK";
+
+TopK_Op::TopK_Op(
+    int64_t axis,
+    bool largest,
+    bool sorted,
+    IOIndex_t k)
+    : OperatorTensor(Type,
+                     {InputCategory::Data,
+                      InputCategory::OptionalData},
+                     2),
+      mAttributes(std::make_shared<Attributes_>(
+          attr<TopKAttr::Axis>(axis),
+          attr<TopKAttr::Largest>(largest),
+          attr<TopKAttr::Sorted>(sorted),
+          attr<TopKAttr::K>(k)))
+{
+    mOutputs[1]->setDataType(DataType::Int64);
+}
+
+TopK_Op::TopK_Op(const TopK_Op& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(TopK_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+bool TopK_Op::forwardDims(bool allowDataDependency) {
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute K
+        if (getInput(1)) {
+            if (k() > 0) {
+                Log::notice("TopK_Op: ignoring non-empty K attribute because input#1 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("TopK_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
+
+            std::shared_ptr<Tensor> fallback;
+            const auto& kTensor = getInput(1)->refCastFrom(fallback, NativeType_v<int64_t>, "cpu");
+
+            if (!kTensor.empty()) {
+                k() = kTensor.get<int64_t>(0);
+            }
+        }
+
+        AIDGE_ASSERT(k() > 0, "TopK_Op: k must be > 0; provide input#1 or set the k attribute");
+
+        auto outDims = getInput(0)->dims();
+        const auto kAxis = (axis() >= 0) ? axis() : axis() + static_cast<std::int64_t>(outDims.size());
+        outDims[kAxis] = k();
+        mOutputs[0]->resize(outDims);
+        mOutputs[1]->resize(outDims);
+        return true;
+    }
+
+    return false;
+}
+
+void TopK_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(TopK_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+    mOutputs[1]->setBackend(name, device);
+}
+
+void TopK_Op::setDataType(const DataType& dataType) const {
+    mOutputs[0]->setDataType(dataType);
+    // mOutputs[1] data type is fixed (Int64)
+}
+
+std::set<std::string> TopK_Op::getAvailableBackends() const {
+    return Registrar<TopK_Op>::getKeys();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Node> TopK(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<TopK_Op>(), name);
+}
+
+} // namespace Aidge
\ No newline at end of file
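Shape note: a pure-Python mirror of the forwardDims() contract above (a sketch of the shape arithmetic, not an Aidge API):

```python
# Mirrors TopK_Op::forwardDims: normalize a negative axis, then replace
# that dimension with k; both 'values' and 'indices' share this shape.
def topk_out_dims(in_dims, axis, k):
    assert k > 0, "k must come from input#1 or the k attribute"
    k_axis = axis if axis >= 0 else axis + len(in_dims)
    out = list(in_dims)
    out[k_axis] = k
    return out

print(topk_out_dims([8, 100], axis=-1, k=5))  # [8, 5]
```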