diff --git a/.codespellrc b/.codespellrc
index 2f4534c93c4ba60d51964f12db06fece83b9411d..9c7d8904823b688375d1de455e6e3e0ac0c7b19a 100644
--- a/.codespellrc
+++ b/.codespellrc
@@ -9,4 +9,7 @@ quiet-level = 2
 # deque : cpp data struct
 # inout : commented code variable
 # nd : commented code
-ignore-words-list = childs, dOut, inH, ro, deque, inout, stdio, nd
+# neighbours : exception to the GB-to-US English rule
+# neighbouring : exception to the GB-to-US English rule
+# endcode : documentation keyword
+ignore-words-list = childs, dOut, inH, ro, deque, inout, stdio, nd, neighbours, neighbouring, endcode
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 68e2a57b498551f6600d6b5720919d03b9bf037c..8a6684b22b4c4659353fa5b5dee2b0820c46a11f 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -14,73 +14,177 @@
 
 #include <string>
 #include <vector>
+#include <functional>
 
 #include "aidge/utils/Types.h"
+#include "aidge/utils/DynamicAttributes.hpp"
+#include "aidge/data/Data.hpp"
 #include "aidge/data/Elts.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
 
 namespace Aidge {
+class Node;
 class Operator;
 
+/**
+ * @brief ImplSpec stores the requirements for, or the specifications of, an implementation.
+ *
+ */
+struct ImplSpec {
+    struct IOSpec {
+        IOSpec(DataType type_, DataFormat format_ = DataFormat::Any, const std::vector<std::pair<int, int>>& dims_ = {}):
+            type(type_),
+            format(format_),
+            dims(dims_)
+        {}
+
+        DataType type;
+        DataFormat format;
+        std::vector<std::pair<int, int>> dims;
+    };
+
+    ImplSpec(const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const IOSpec& io, const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const IOSpec& i, const IOSpec& o, const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const std::vector<IOSpec>& i, const std::vector<IOSpec>& o, const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const Aidge::ImplSpec&);
+    ~ImplSpec() noexcept;
+
+    std::vector<IOSpec> inputs;
+    std::vector<IOSpec> outputs;
+    DynamicAttributes attrs;
+};
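+// Hedged usage sketch (illustrative, not taken from the original sources): an
+// ImplSpec for a single Float32 input and a single Float32 output, in any
+// format, could be built as:
+//   ImplSpec spec(ImplSpec::IOSpec(DataType::Float32),
+//                 ImplSpec::IOSpec(DataType::Float32));
+// The DataType::Float32 enumerator name is an assumption here.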
+
+inline bool operator==(const ImplSpec::IOSpec& lhs, const ImplSpec::IOSpec& rhs) {
+    return (lhs.type == rhs.type)
+        && (lhs.format == rhs.format)
+        && (lhs.dims == rhs.dims);
+}
+
+inline bool operator<(const ImplSpec::IOSpec& lhs, const ImplSpec::IOSpec& rhs) {
+    return (lhs.type < rhs.type)
+        || (lhs.type == rhs.type && lhs.format < rhs.format)
+        || (lhs.type == rhs.type && lhs.format == rhs.format && lhs.dims < rhs.dims);
+}
+
+inline bool operator<(const ImplSpec& lhs, const ImplSpec& rhs) {
+    return (lhs.inputs < rhs.inputs)
+        || (lhs.inputs == rhs.inputs && lhs.outputs < rhs.outputs)
+        || (lhs.inputs == rhs.inputs && lhs.outputs == rhs.outputs && lhs.attrs < rhs.attrs);
+}
+
+
+inline bool operator==(const ImplSpec& lhs, const ImplSpec& rhs) {
+    return !(lhs < rhs) && !(rhs < lhs);
+}
+
+/**
+ * @brief Impl stores the details of a specific implementation.
+ * It is associated with an ImplSpec in a registry.
+ *
+ */
+template <class FwdFunc, class BwdFunc>
+struct Impl {
+    Impl(std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso_,
+      std::function<FwdFunc> forward_,
+      std::function<BwdFunc> backward_ = nullptr):
+        prodConso(prodConso_), forward(forward_), backward(backward_) {}
+
+    std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso;
+    std::function<FwdFunc> forward;
+    std::function<BwdFunc> backward;
+};
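+// Hedged usage sketch (illustrative, not from the original sources): a backend
+// would typically instantiate Impl with its kernel signatures, e.g.:
+//   using ConvImpl = Impl<void(const ConvOp&), void(const ConvOp&)>;
+//   ConvImpl impl(&ProdConso::defaultModel, &myConvForward, &myConvBackward);
+// ConvOp, myConvForward and myConvBackward are hypothetical names, and
+// ProdConso::defaultModel is assumed to provide the default producer-consumer model.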
+
 class OperatorImpl {
 public:
     OperatorImpl(const Operator& op, const std::string& backend = "");
     virtual void forward();
     virtual void backward();
+    virtual std::shared_ptr<ProdConso> prodConso();
 
     const std::string& backend() const noexcept {
         return mBackend;
     }
-    /**
-     * @brief Minimum amount of data from a specific input required by the
-     * implementation to be run.
-     *
-     * @param inputIdx Index of the input analyzed.
-     * @return std::size_t
-     */
-    virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
-    // Amount of input data that cannot be overwritten during the execution.
-    virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
-
-    // Memory required at an output for a given input size.
-    virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
+    const Operator& getOperator() const noexcept {
+        return mOp;
+    }
 
     /**
-     * @brief Total amount of consumed data from a specific input.
+     * @brief Get the implementation specification required by the operator,
+     * according to its current configuration.
      *
-     * @param inputIdx Index of the input analyzed.
-     * @return DimSize_t
      */
-    virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
+    ImplSpec getRequiredSpec() const;
 
     /**
-     * @brief Total amount of produced data ready to be used on a specific output.
+     * @brief Get the best implementation that matches \p requiredSpecs.
+     * If no implementation matches \p requiredSpecs, \p requiredSpecs is
+     * returned.
      *
-     * @param outputIdx Index of the output analyzed.
-     * @return DimSize_t
      */
-    virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
+    ImplSpec getBestMatch(const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Update the Consumer Producer system by simulating the consumption and production of i/o
+     * @brief Get an adapted meta operator corresponding to the required
+     * specifications \p requiredSpecs from the implementation specifications
+     * \p spec.
      *
+     * @param spec Implementation specification
+     * @param requiredSpecs Required specifications
+     * @return std::shared_ptr<Node> Adapted meta op or nullptr
      */
-    virtual void updateConsummerProducer();
+    std::shared_ptr<Node> getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Reset the Consumer Producer system.
+     * @brief Get the best adapted meta operator corresponding to the required
+     * specifications \p requiredSpecs.
+     * The best adaptation is the one with the lowest overhead cost.
+     * Currently, it is the one requiring the fewest additional operators
+     * to match the available implementations.
      *
+     * @param requiredSpecs Required specifications
+     * @return std::shared_ptr<Node> Adapted meta op or nullptr
      */
-    virtual void resetConsummerProducer();
+    std::shared_ptr<Node> getBestAdaptation(const ImplSpec& requiredSpecs) const;
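+
+    // Hedged usage sketch (illustrative, not part of this header): a scheduler
+    // or backend could chain these methods as follows:
+    //   const ImplSpec required = impl->getRequiredSpec();
+    //   const ImplSpec best = impl->getBestMatch(required);
+    //   std::shared_ptr<Node> adapted = impl->getBestAdaptation(required); // nullptr if none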
 
     virtual ~OperatorImpl() = default;
 
 protected:
+    virtual std::shared_ptr<ProdConso> getProdConso() const;
+    virtual std::vector<ImplSpec> getAvailableImplSpecs() const;
+    bool checkIOSpec(const ImplSpec::IOSpec& required, const ImplSpec::IOSpec& spec) const;
+
     const Operator &mOp;
     const std::string mBackend;
-    std::vector<Elts_t> mNbConsumedData;
-    std::vector<Elts_t> mNbProducedData;
+    std::shared_ptr<ProdConso> mProdConso;
 };
 } // namespace Aidge
 
+template<>
+struct fmt::formatter<Aidge::ImplSpec::IOSpec> {
+    template<typename ParseContext>
+    inline constexpr auto parse(ParseContext& ctx) {
+        return ctx.begin();
+    }
+
+    template<typename FormatContext>
+    inline auto format(Aidge::ImplSpec::IOSpec const& ioSpec, FormatContext& ctx) const {
+        return fmt::format_to(ctx.out(), "{}, {}, {}", ioSpec.type, ioSpec.format, ioSpec.dims);
+    }
+};
+
+template<>
+struct fmt::formatter<Aidge::ImplSpec> {
+    template<typename ParseContext>
+    inline constexpr auto parse(ParseContext& ctx) {
+        return ctx.begin();
+    }
+
+    template<typename FormatContext>
+    inline auto format(Aidge::ImplSpec const& implSpec, FormatContext& ctx) const {
+        return fmt::format_to(ctx.out(), "{}, {}", implSpec.inputs, implSpec.outputs);
+    }
+};
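+
+// With the formatters above, specs can be passed directly to fmt-based logging,
+// e.g. (illustrative sketch): Log::info("required spec: {}", impl->getRequiredSpec());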
+
 #endif /* AIDGE_BACKEND_OPERATORIMPL_H_ */
diff --git a/include/aidge/data/Interpolation.hpp b/include/aidge/data/Interpolation.hpp
index 2d53ebdd0dd5141acc9a3bce8e906f42f7a557a2..d1af3e831617660356fe48d7d5665564f125c21d 100644
--- a/include/aidge/data/Interpolation.hpp
+++ b/include/aidge/data/Interpolation.hpp
@@ -56,7 +56,7 @@ class Interpolation {
      * @param[in] transformedCoords : coords in interpolated tensor
      * @param[in] inputDims: input dimensions of tensor
      * @param[in] inputDims: output dimensions of tensor
-     * @return std::vector containing coords in orginal tensor reference frame
+     * @return std::vector containing coords in original tensor reference frame
      */
     static std::vector<float> untransformCoordinates(
         const std::vector<DimSize_t> &transformedCoords,
@@ -101,7 +101,7 @@ class Interpolation {
     /*
      * @brief Interpolates values given via input in given mode.
      *
-     * @warning This function is empty and is meant to be overriden in derived
+     * @warning This function is empty and is meant to be overridden in derived
      * class in backend libraries.
      *
      * Values are contiguously arranged in a "square" shape around the point to
diff --git a/include/aidge/graph/StaticAnalysis.hpp b/include/aidge/graph/StaticAnalysis.hpp
index d92356b72b8f1408c3084f9afa6f467d2043e620..d3fe681749eeb69e4816a38f302d510f1c81381a 100644
--- a/include/aidge/graph/StaticAnalysis.hpp
+++ b/include/aidge/graph/StaticAnalysis.hpp
@@ -49,7 +49,7 @@ public:
      * accesses are not included.
      * A naive implementation is considered (more operations might be required 
      * for numerical stability in an actual implementation).
-     * Example of Operator with only arithmetic operatons: Conv.
+     * Example of Operator with only arithmetic operations: Conv.
      * 
      * @return size_t Number of arithmetic operations.
      */
@@ -62,7 +62,7 @@ public:
      * accesses are not included.
      * A naive implementation is considered (more operations might be required 
      * for numerical stability in an actual implementation).
-     * Example of Operator with only logic operatons: BitShift.
+     * Example of Operator with only logic operations: BitShift.
      * 
      * @return size_t Number of logic operations.
      */
@@ -75,7 +75,7 @@ public:
      * accesses are not included.
      * A naive implementation is considered (more operations might be required 
      * for numerical stability in an actual implementation).
-     * Example of Operator with only comparison operatons: MaxPool.
+     * Example of Operator with only comparison operations: MaxPool.
      * 
      * @return size_t Number of comparison operations.
      */
@@ -88,7 +88,7 @@ public:
      * accesses are not included.
      * A naive implementation is considered (more operations might be required 
      * for numerical stability in an actual implementation).
-     * Example of Operator with only NL operatons: Tanh.
+     * Example of Operator with only NL operations: Tanh.
      * Non-linear operations are necessarily of floating-point type.
      * 
      * @return size_t Number of non-linear (NL) operations.
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 17eca02261704e98341adca81636b594d92c2318..24476f231806bf38ae48b9e2d5ec405e072afdb2 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -26,6 +26,11 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
+class Identity_OpImpl : public OperatorImpl {
+public:
+    Identity_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
 
 /**
  * @brief Indentity_Op is an helper operator made to ease the declaration of MetaNodes.
@@ -35,7 +40,7 @@ namespace Aidge {
  *
  */
 class Identity_Op : public OperatorTensor,
-    public Registrable<Identity_Op, std::string, std::unique_ptr<OperatorImpl>(const Identity_Op&)> {
+    public Registrable<Identity_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Identity_Op&)>> {
 public:
     static const std::string Type;
 
@@ -54,29 +59,8 @@ public:
      */
     std::shared_ptr<Operator> clone() const override;
 
-    // bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
-
-    /**
-     * @brief Check if output dimensions have been computed.
-     * @note Since Identity has no output Tensor, this function checks if its
-     * only input's dimensions have been computed.
-     *
-     * @return true Input has dimensions.
-     * @return false Input has no dimensions or is a nullptr.
-     */
-    bool dimsForwarded() const override final;
-
-
-    void forward() override final;
-
-    void backward() override final { }
-
-    void setBackend(const std::string& /*name*/, DeviceIdx_t /*device*/ = 0) override final {
-        // setBackend do nothing, Identity node has no backend it just pass the same Tensor
-    }
-    void setDataType(const DataType& /*dataType*/) const override final {
-        // setDatatype do nothing, Identity node has no backend it just pass the same Tensor
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 44c2b006dff40bb07d0a9a18112d4afc56a747f8..e9988b4421b785a91ec170796be49c0c8df52142 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -207,8 +207,8 @@ public:
     inline IOIndex_t nbOutputs() const noexcept { return mNbOut; };
 
     /**
-     * @brief Set the back edge input indexes for recurring operators.
-     * Any recuring operators should specify it's back edges, otherwise
+     * @brief Set the back edge input indexes for recurring operators.
+     * Any recurring operators should specify their back edges, otherwise
      * the interpretation of the data flow graph may not be possible.
      */
     inline void setBackEdges(const std::set<IOIndex_t>& backEdges) { mBackEdges = backEdges; }
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index beeca8d72a2067ed2dfcd98cf3d9ff0cb7b6ff3a..c6341e934ea415cb23a7d4ce201351a0825e6081 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -40,7 +40,7 @@ public:
 
 enum class TransposeAttr {
   /**
-   * @brief order of the ouput dims from the input dims. If left empty,
+   * @brief order of the output dims from the input dims. If left empty,
    * the dimensions of input will be reversed.
    */
     OutputDimsOrder
diff --git a/include/aidge/scheduler/ProdConso.hpp b/include/aidge/scheduler/ProdConso.hpp
index fce8d7f6548aaeb04300291d33cc2a5e44fb6fe7..cfc83cbf91cb7eeef2a3bbb0a4c5017a2480fe9b 100644
--- a/include/aidge/scheduler/ProdConso.hpp
+++ b/include/aidge/scheduler/ProdConso.hpp
@@ -37,7 +37,7 @@ public:
      * @brief Minimum amount of data from a specific input required by the
      * implementation to be run.
      *
-     * @param inputIdx Index of the input analysed.
+     * @param inputIdx Index of the input analyzed.
      * @return std::size_t
      */
     virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
@@ -55,7 +55,7 @@ public:
     /**
      * @brief Total amount of consumed data from a specific input.
      *
-     * @param inputIdx Index of the input analysed.
+     * @param inputIdx Index of the input analyzed.
      * @return DimSize_t
      */
     virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
@@ -63,19 +63,19 @@ public:
     /**
      * @brief Total amount of produced data ready to be used on a specific output.
      *
-     * @param outputIdx Index of the output analysed.
+     * @param outputIdx Index of the output analyzed.
      * @return DimSize_t
      */
     virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
 
     /**
-     * @brief Update the Consummer Producer system by simulating the consumption and production of i/o
+     * @brief Update the Consumer Producer system by simulating the consumption and production of i/o
      *
      */
     virtual void updateConsummerProducer();
 
     /**
-     * @brief Reset the Consummer Producer system.
+     * @brief Reset the Consumer Producer system.
      *
      */
     virtual void resetConsummerProducer();
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index 20f93398df0e1453bad95be22479a37451665ee7..f73de2ea31681a60c85229102a9b2ebbce4d3c3e 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -14,6 +14,9 @@
 
 #include <string>
 #include <set>
+#include <map>
+
+#include "aidge/utils/future_std/any.hpp"
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -63,9 +66,9 @@ public:
     */
     virtual std::set<std::string> getAttrsName() const = 0;
 
-#ifdef PYBIND
-    virtual bool hasAttrPy(const std::string& name) const = 0;
+    virtual std::map<std::string, future_std::any> getAttrs() const = 0;
 
+#ifdef PYBIND
     /* Bindable get function, does not require any templating.
     *  This is thanks to py::object which allow the function to
     *  be agnostic from its return type.
@@ -84,6 +87,7 @@ public:
     virtual py::dict dict() const = 0;
 
 #endif
+
     virtual ~Attributes() {}
 };
 }
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 8f6f5c7dea1d099e8061644d5ae034309ba4185a..3ecd4da393eaac9881d008e27989a52e883ecb6a 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -18,6 +18,7 @@
 #include <typeinfo>
 #include <cassert>
 #include <string>
+#include <typeindex>
 
 #include "aidge/utils/future_std/any.hpp"
 #include "aidge/utils/Attributes.hpp"
@@ -38,6 +39,9 @@ namespace Aidge {
 ///\todo managing complex types or excluding non-trivial, non-aggregate types
 class DynamicAttributes : public Attributes {
 public:
+    DynamicAttributes() = default;
+    DynamicAttributes(const std::map<std::string, future_std::any>& attrs): mAttrs(attrs) {}
+
     /**
      * \brief Returning an Attribute identified by its name
      * \tparam T expected Attribute type
@@ -46,23 +50,23 @@ public:
      *  exist
      * \note at() throws if the Attribute does not exist, using find to test for Attribute existence
      */
-    template<class T> const T& getAttr(const std::string& name) const
+    template<class T> T getAttr(const std::string& name) const
     {
         const auto dot = name.find('.');
         if (dot == name.npos) {
+            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+
+            const auto& attr = mAttrs.at(name);
 #ifdef PYBIND
-            // If attribute does not exist in C++, it might have been created or modified in Python
-            auto it = mAttrs.find(name);
-            if (it == mAttrs.end()) {
-                auto itPy = mAttrsPy.find(name);
-                if (itPy != mAttrsPy.end()) {
-                    // Insert the attribute back in C++
-                    mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
-                }
+            if (attr.type() == typeid(py::object)) {
+                // Note: because of cast<T>(), this function cannot return a const reference!
+                return future_std::any_cast<const py::object&>(attr).cast<T>();
             }
+            else
 #endif
-
-            return future_std::any_cast<const T&>(mAttrs.at(name));
+            {
+                return future_std::any_cast<const T&>(attr);
+            }
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -72,9 +76,21 @@ public:
     }
 
     template<class T> T& getAttr(const std::string& name) {
-        // Scott Meyers' solution to avoid code duplication
-        return const_cast<T&>(
-            static_cast<const DynamicAttributes&>(*this).getAttr<T>(name));
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+
+            auto& attr = mAttrs.at(name);
+#ifdef PYBIND
+            AIDGE_ASSERT(attr.type() != typeid(py::object), "getAttr(): cannot return a reference to a Python-defined attribute.");
+#endif
+            return future_std::any_cast<T&>(attr);
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            return future_std::any_cast<DynamicAttributes&>(mAttrs.at(ns)).getAttr<T>(nsName);
+        }
     }
 
     ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
@@ -85,17 +101,10 @@ public:
     {
         const auto dot = name.find('.');
         if (dot == name.npos) {
+            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+
             const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
             AIDGE_ASSERT(res.second, "addAttr(): attribute \"{}\" already exists. Use setAttr() if this is expected.", name);
-
-#ifdef PYBIND
-            // We cannot handle Python object if the Python interpreter is not running
-            if (Py_IsInitialized()) {
-                // Keep a copy of the attribute in py::object that is updated every time
-                const auto& resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
-                AIDGE_ASSERT(resPy.second, "addAttr(): attribute \"{}\" already exists (added in Python). Use setAttr() if this is expected.", name);
-            }
-#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -113,19 +122,11 @@ public:
     {
         const auto dot = name.find('.');
         if (dot == name.npos) {
+            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+
             auto res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
             if (!res.second)
                 res.first->second = future_std::any(value);
-
-#ifdef PYBIND
-            // We cannot handle Python object if the Python interpreter is not running
-            if (Py_IsInitialized()) {
-                // Keep a copy of the attribute in py::object that is updated every time
-                auto resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
-                if (!resPy.second)
-                    resPy.first->second = std::move(py::cast(value));
-            }
-#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -139,9 +140,6 @@ public:
         const auto dot = name.find('.');
         if (dot == name.npos) {
             mAttrs.erase(name);
-#ifdef PYBIND
-            mAttrsPy.erase(name);
-#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -153,41 +151,12 @@ public:
 #ifdef PYBIND
     void addAttrPy(const std::string& name, py::object&& value)
     {
-        const auto dot = name.find('.');
-        if (dot == name.npos) {
-            auto it = mAttrs.find(name);
-            AIDGE_ASSERT(it == mAttrs.end(), "add_attr(): attribute \"{}\" already exists (added in C++). Use set_attr() if this is expected.", name);
-
-            const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
-            AIDGE_ASSERT(res.second, "add_attr(): attribute \"{}\" already exists. Use set_attr() if this is expected.", name);
-        }
-        else {
-            const auto ns = name.substr(0, dot);
-            const auto nsName = name.substr(dot + 1);
-            const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes()));
-
-            future_std::any_cast<DynamicAttributes&>(res.first->second).addAttrPy(nsName, std::move(value));
-        }
+        addAttr(name, std::move(value));
     }
 
     void setAttrPy(const std::string& name, py::object&& value) override final
     {
-        const auto dot = name.find('.');
-        if (dot == name.npos) {
-            auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
-            if (!resPy.second)
-                resPy.first->second = std::move(value);
-
-            // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
-            mAttrs.erase(name);
-        }
-        else {
-            const auto ns = name.substr(0, dot);
-            const auto nsName = name.substr(dot + 1);
-            const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes()));
-
-            future_std::any_cast<DynamicAttributes&>(res.first->second).setAttrPy(nsName, std::move(value));
-        }
+        setAttr(name, std::move(value));
     }
 
     py::dict dict() const override {
@@ -196,9 +165,16 @@ public:
             if (elt.second.type() == typeid(DynamicAttributes)) {
                 attributes[elt.first.c_str()] = future_std::any_cast<const DynamicAttributes&>(elt.second).dict();
             }
-        }
-        for (const auto& elt : mAttrsPy) {
-            attributes[elt.first.c_str()] = elt.second;
+            else {
+                // At this point, not every attribute may be known to mAnyUtils
+                const auto anyUtilsIt = mAnyUtils.find(elt.second.type());
+                if (anyUtilsIt != mAnyUtils.end()) {
+                    attributes[elt.first.c_str()] = anyUtilsIt->second->cast(elt.second);
+                }
+                else {
+                    attributes[elt.first.c_str()] = "???";
+                }
+            }
         }
         return attributes;
     }
@@ -221,12 +197,7 @@ public:
     bool hasAttr(const std::string& name) const override final {
         const auto dot = name.find('.');
         if (dot == name.npos) {
-#ifdef PYBIND
-            return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
-
-#else
             return (mAttrs.find(name) != mAttrs.cend());
-#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -241,45 +212,22 @@ public:
         }
     }
 
-#ifdef PYBIND
-    bool hasAttrPy(const std::string& name) const override final {
-        const auto dot = name.find('.');
-        if (dot == name.npos) {
-            // Attributes might have been created in Python, the second condition is necessary.
-            return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
-        }
-        else {
-            const auto ns = name.substr(0, dot);
-            const auto it = mAttrs.find(ns);
-            if (it != mAttrs.cend()) {
-                const auto nsName = name.substr(dot + 1);
-                return future_std::any_cast<const DynamicAttributes&>(it->second).hasAttrPy(nsName);
-            }
-            else {
-                return false;
-            }
-        }
-    }
-#endif
-
     std::string getAttrType(const std::string& name) const override final {
         // In order to remain consistent between C++ and Python, with or without PyBind, the name of the type is:
         // - C-style for C++ created attributes
         // - Python-style for Python created attributes
         const auto dot = name.find('.');
         if (dot == name.npos) {
+            const auto& attr = mAttrs.at(name);
 #ifdef PYBIND
-            // If attribute does not exist in C++, it might have been created in Python
-            auto it = mAttrs.find(name);
-            if (it == mAttrs.end()) {
-                auto itPy = mAttrsPy.find(name);
-                if (itPy != mAttrsPy.end()) {
-                    return std::string(Py_TYPE(itPy->second.ptr())->tp_name);
-                }
+            if (attr.type() == typeid(py::object)) {
+                return std::string(Py_TYPE(future_std::any_cast<const py::object&>(attr).ptr())->tp_name);
             }
+            else
 #endif
-
-            return mAttrs.at(name).type().name();
+            {
+                return attr.type().name();
+            }
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -292,11 +240,6 @@ public:
         std::set<std::string> attrsName;
         for(auto const& it: mAttrs)
             attrsName.insert(it.first);
-#ifdef PYBIND
-        // Attributes might have been created in Python
-        for(auto const& it: mAttrsPy)
-            attrsName.insert(it.first);
-#endif
         return attrsName;
     }
 
@@ -304,21 +247,13 @@ public:
     /**
      * @detail See https://github.com/pybind/pybind11/issues/1590 as to why a
      * generic type caster for std::any is not feasible.
-     * The strategy here is to keep a copy of each attribute in py::object that is updated every time.
+     * The strategy here is to store a cast() function for each attribute type ever used.
     */
     inline py::object getAttrPy(const std::string& name) const override final {
         const auto dot = name.find('.');
         if (dot == name.npos) {
-            auto itPy = mAttrsPy.find(name);
-            if (itPy == mAttrsPy.end()) {
-                // Attribute may be a namespace
-                auto it = mAttrs.find(name);
-                AIDGE_ASSERT(it != mAttrs.end() && it->second.type() == typeid(DynamicAttributes), "get_attr(): attribute \"{}\" not found", name);
-                return py::cast(future_std::any_cast<const DynamicAttributes&>(it->second));
-            }
-            else {
-                return itPy->second;
-            }
+            const auto& attr = mAttrs.at(name);
+            return mAnyUtils.at(attr.type())->cast(attr);
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -328,25 +263,150 @@ public:
     };
 #endif
 
-    virtual ~DynamicAttributes() {}
+    future_std::any getAny(const std::string& name) const
+    {
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            return mAttrs.at(name);
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            return future_std::any_cast<const DynamicAttributes&>(mAttrs.at(ns)).getAny(nsName);
+        }
+    }
 
-private:
+    std::map<std::string, future_std::any> getAttrs() const override {
+        return mAttrs;
+    }
+
+    virtual ~DynamicAttributes() {
 #ifdef PYBIND
-    // Stores C++ attributes (copy) and Python-only attributes
-    // Code should be compiled with -fvisibility=hidden
-    // See https://pybind11.readthedocs.io/en/stable/faq.html:
-    // “‘SomeClass’ declared with greater visibility than the type of its
-    // field ‘SomeClass::member’ [-Wattributes]”
-    // This map will only be populated if Python interpreter is running
-    std::map<std::string, py::object> mAttrsPy;
-    // Stores C++ attributes only
-    // mutable because it may be updated in getAttr() from Python
-    mutable std::map<std::string, future_std::any> mAttrs;
-#else
+        if (!Py_IsInitialized()) {
+            // Resets the internal pointer of py::object to nullptr without decreasing the object's reference count.
+            // At this point, the Python interpreter may have exited (this is the case if the DynamicAttributes being destroyed is static),
+            // in which case the py::object has already been destroyed despite its reference count being > 0.
+            // See https://github.com/pybind/pybind11/issues/1598
+            for (auto& attr : mAttrs) {
+                if (attr.second.type() == typeid(py::object)) {
+                    future_std::any_cast<py::object&>(attr.second).release();
+                }
+            }
+        }
+#endif
+    }
+
+    friend bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs);
+    friend struct std::hash<DynamicAttributes>;
+
+private:
     std::map<std::string, future_std::any> mAttrs;
+
+public:
+    struct AnyUtils_ {
+#ifdef PYBIND
+        virtual py::object cast(const future_std::any& attr) const = 0;
+#endif
+        virtual bool compare(const future_std::any&, const future_std::any&) const = 0;
+        virtual size_t hash(const future_std::any&) const = 0;
+        virtual ~AnyUtils_() = default;
+    };
+
+    template <class T>
+    struct AnyUtils : public AnyUtils_ {
+#ifdef PYBIND
+        py::object cast(const future_std::any& attr) const override final {
+            return py::cast(future_std::any_cast<const T&>(attr));
+        }
 #endif
+
+        bool compare(const future_std::any& lhs, const future_std::any& rhs) const override final {
+#ifdef PYBIND
+            if (lhs.type() == typeid(py::object) && rhs.type() != typeid(py::object)) {
+                return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
+            }
+            else if (lhs.type() != typeid(py::object) && rhs.type() == typeid(py::object)) {
+                return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
+            }
+            else
+#endif
+            {
+                return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
+            }
+        }
+
+        size_t hash(const future_std::any& attr) const override final {
+            return std::hash<T>()(future_std::any_cast<T>(attr));
+        }
+    };
+
+    // Stores typed utils functions for each attribute type ever used
+    static std::map<std::type_index, std::unique_ptr<AnyUtils_>> mAnyUtils;
 };
 
+template<> void DynamicAttributes::setAttr<future_std::any>(const std::string& name, const future_std::any& value);
+
+#ifdef PYBIND
+template <>
+struct DynamicAttributes::AnyUtils<py::object> : public DynamicAttributes::AnyUtils_ {
+    py::object cast(const future_std::any& attr) const override {
+        return future_std::any_cast<const py::object&>(attr);
+    }
+
+    bool compare(const future_std::any& lhs, const future_std::any& rhs) const override {
+        return (future_std::any_cast<py::object>(lhs) < future_std::any_cast<py::object>(rhs));
+    }
+
+    size_t hash(const future_std::any& attr) const override final {
+        // Here we are mixing Python and C++ hashes... if both are
+        // well implemented, this should not increase the collision 
+        // probability for the same number of stored hashes.
+        return py::hash(future_std::any_cast<py::object>(attr));
+    }
+};
+#endif
+
+inline bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs) {
+    return (lhs.mAttrs < rhs.mAttrs);
+}
+
+// Combine the hashes (boost-like hash combining, see boost::hash_combine())
+inline void hash_combine(std::size_t& seed, const std::size_t& value) {
+    seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2);
+}
+}
+
+namespace std {
+    // Make DynamicAttributes hashable so that it can be stored in hash-based containers.
+    // This is particularly useful in Python since set() and dict() are hash-based.
+    template <>
+    struct hash<Aidge::DynamicAttributes> {
+        size_t operator()(const Aidge::DynamicAttributes& attrs) const {
+            std::size_t seed = 0;
+            for (const auto& pair : attrs.mAttrs) {
+                Aidge::hash_combine(seed, std::hash<std::string>()(pair.first));
+                Aidge::hash_combine(seed, Aidge::DynamicAttributes::mAnyUtils.at(pair.second.type())->hash(pair.second));
+            }
+            return seed;
+        }
+    };
+
+    // General specialization of std::hash for any container that has iterators (e.g., std::vector, std::list, std::set)
+    template <template <typename...> class Container, typename T, typename... Args>
+    struct hash<Container<T, Args...>> {
+        std::size_t operator()(const Container<T, Args...>& iterable) const {
+            std::size_t seed = 0;
+            for (const auto& v : iterable) {
+                // Recursively hash the value pointed by the iterator
+                Aidge::hash_combine(seed, std::hash<T>()(v));
+            }
+            return seed;
+        }
+    };
+}
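+
+// Illustrative sketch: the specialization above makes the following valid:
+//   const std::size_t h = std::hash<Aidge::DynamicAttributes>()(attrs);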
+
+namespace future_std {
+bool operator<(const future_std::any& lhs, const future_std::any& rhs);
 }
 
 #endif /* AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_ */
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index 6914d45109099a81d46a2111ffbdbae8d0f5d7ee..d6851f1e42233f9d8af88d10da9046f73f94b8c4 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -9,7 +9,6 @@
  *
  ********************************************************************************/
 
-
 #ifndef AIDGE_LOG_H_
 #define AIDGE_LOG_H_
 
@@ -19,44 +18,36 @@
 #include <fmt/format.h>
 #include <fmt/ranges.h>
 
+#include "aidge/data/half_fmt.hpp"
+
 #include "aidge/utils/Attributes.hpp"
 
 namespace Aidge {
 /**
  * Helper to define a context anywhere, hiding the scoped variable name
  * which has no relevance.
-*/
-#define AIDGE_LOG_CONTEXT(...) const Log::Context logContext_##__LINE__(__VA_ARGS__)
+ */
+#define AIDGE_LOG_CONTEXT(...)                                                \
+    const Log::Context logContext_##__LINE__(__VA_ARGS__)
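+// Hedged usage sketch (illustrative): AIDGE_LOG_CONTEXT("Processing node {}", name);
+// pushes a formatted context string that presumably remains active for the
+// lifetime of the enclosing scope.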
 
-
-template<class U>
-static void discard_args(U parg) {
+template <class U> static void discard_args(U parg) {
     (void)parg;
 }
-template<class U, class... Us>
-static void discard_args(U parg, Us... pargs) {
+template <class U, class... Us> static void discard_args(U parg, Us... pargs) {
     (void)parg;
     discard_args(pargs...);
 }
 
 /**
  * Aidge logging class, for displaying and file logging of events.
-*/
+ */
 class Log {
-public:
-    enum Level {
-        Debug = 0,
-        Info,
-        Notice,
-        Warn,
-        Error,
-        Fatal
-    };
+  public:
+    enum Level { Debug = 0, Info, Notice, Warn, Error, Fatal };
 
     class Context {
-    public:
-        template <typename... Args>
-        Context(Args&&... args) {
+      public:
+        template <typename... Args> Context(Args &&...args) {
             Log::mContext.push_back(fmt::format(std::forward<Args>(args)...));
         }
 
@@ -68,13 +59,12 @@ public:
     /**
      * Detailed messages for debugging purposes, providing information helpful
      * for developers to trace and identify issues.
-     * Detailed insights of what is happening in an operation, not useful for the
-     * end-user. The operation is performed nominally.
+     * Detailed insights of what is happening in an operation, not useful for
+     * the end-user. The operation is performed nominally.
      * @note This level is disabled at compile time for Release, therefore
      * inducing no runtime overhead for Release.
-    */
-    template <typename... Args>
-    static void debug(Args&&... args) {
+     */
+    template <typename... Args> static void debug(Args &&...args) {
 #ifndef NDEBUG
         // only when compiled in Debug
         log(Debug, fmt::format(std::forward<Args>(args)...));
@@ -86,22 +76,19 @@ public:
     /**
      * Messages that provide a record of the normal operation, about
      * the application's state, progress, or important events.
-     * Reports normal start, end and key steps in an operation. The operation is
-     * performed nominally.
-    */
-    template <typename... Args>
-    static void info(Args&&... args) {
+     * Reports normal start, end and key steps in an operation. The operation
+     * is performed nominally.
+     */
+    template <typename... Args> static void info(Args &&...args) {
         log(Info, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
-     * Applies to normal but significant conditions that may require monitoring,
-     * like unusual or normal fallback events.
-     * Reports specific paths in an operation. The operation can still be
-     * performed normally.
-    */
-    template <typename... Args>
-    static void notice(Args&&... args) {
+     * Applies to normal but significant conditions that may require
+     * monitoring, like unusual or normal fallback events. Reports specific
+     * paths in an operation. The operation can still be performed normally.
+     */
+    template <typename... Args> static void notice(Args &&...args) {
         log(Notice, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -110,9 +97,8 @@ public:
      * not necessarily cause immediate problems.
      * Some specific steps of the operation could not be performed, but it can
      * still provide an exploitable result.
-    */
-    template <typename... Args>
-    static void warn(Args&&... args) {
+     */
+    template <typename... Args> static void warn(Args &&...args) {
         log(Warn, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -121,26 +107,24 @@ public:
      * recover from, but attention is needed to prevent further issues.
      * The operation could not be performed, but it does not prevent potential
      * further operations.
-    */
-    template <typename... Args>
-    static void error(Args&&... args) {
+     */
+    template <typename... Args> static void error(Args &&...args) {
         log(Error, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
-     * Represents a critical error or condition that leads to the termination of
-     * the application, indicating a severe and unrecoverable problem.
-     * The operation could not be performed and any further operation is
+     * Represents a critical error or condition that leads to the termination
+     * of the application, indicating a severe and unrecoverable problem. The
+     * operation could not be performed and any further operation is
      * impossible.
-    */
-    template <typename... Args>
-    static void fatal(Args&&... args) {
+     */
+    template <typename... Args> static void fatal(Args &&...args) {
         log(Fatal, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
      * Set the minimum log level displayed in the console.
-    */
+     */
     static void setConsoleLevel(Level level) {
         mConsoleLevel = level;
     }
@@ -148,14 +132,14 @@ public:
     /**
      * Set or disable colors on console.
      * Initial value should be assumed true.
-    */
+     */
     static void setConsoleColor(bool enabled) {
         mConsoleColor = enabled;
     }
 
     /**
      * Set the minimum log level saved in the log file.
-    */
+     */
     constexpr static void setFileLevel(Level level) {
         mFileLevel = level;
     }
@@ -164,8 +148,8 @@ public:
      * Set the log file name.
      * Close the current log file and open the one with the new file name.
      * If empty, stop logging into a file.
-    */
-    static void setFileName(const std::string& fileName) {
+     */
+    static void setFileName(const std::string &fileName) {
         if (fileName != mFileName) {
             mFileName = fileName;
             mFile.release();
@@ -187,8 +171,8 @@ public:
      * warnings.
      */
     struct fcloseDeleter {
-        void operator()(FILE *f) const noexcept { 
-            std::fclose(f); 
+        void operator()(FILE *f) const noexcept {
+            std::fclose(f);
         }
     };
 
@@ -203,11 +187,12 @@ private:
     static std::unique_ptr<FILE, fcloseDeleter> mFile;
     static std::vector<std::string> mContext;
 };
-}
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::Log::Level>::data[] = {"Debug", "Info", "Notice", "Warn", "Error", "Fatal"};
+const char *const EnumStrings<Aidge::Log::Level>::data[] =
+    {"Debug", "Info", "Notice", "Warn", "Error", "Fatal"};
 }
 
-#endif //AIDGE_LOG_H_
+#endif // AIDGE_LOG_H_
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 48597e2b6fa95ff3195ed2eea6b8c39dcef86771..1354281933b69bb6e038587cc27ee0397d05c6f1 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -103,7 +103,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
         std::string givenName =
             (node_ptr->name().empty())
                 ? "<em>" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + "</em>"
-                : "\"" + node_ptr->name() + "\\n<sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
+                : "\"" + node_ptr->name() + "<br/><sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
 
         std::string nodeCls = "";
         if (node_ptr->type() == "Producer") {
@@ -144,27 +144,31 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
       }
       IOIndex_t outputIdx = 0;
       for (const auto& childs : node_ptr->getOrderedChildren()) {
-        for (const auto& child : childs) {
+        // Keep only unique childs in order to avoid duplicating connections
+        const auto uniqueChilds = std::set<NodePtr>(childs.begin(), childs.end());
+        for (const auto& child : uniqueChilds) {
           if (child != nullptr) {
             IOIndex_t inputIdx = 0;
             for (auto parent : child->inputs()) {
               if (parent.first == node_ptr && parent.second == outputIdx) {
                 // Add-on to display the operator's output dimensions
                 std::string dims = "";
+                std::string dtype = "";
                 const auto op = std::dynamic_pointer_cast<OperatorTensor>(node_ptr->getOperator());
                 if (op && !op->getOutput(outputIdx)->undefined()) {
                   dims += " " + fmt::format("{}", op->getOutput(outputIdx)->dims());
+                  dtype += "\n" + fmt::format("{}", op->getOutput(outputIdx)->dataType());
                 }
 
                 if (mNodes.find(child) != mNodes.end()) {
-                  fmt::print(fp.get(), "{}_{}-->|\"{}{}&rarr;{}\"|{}_{}\n", node_ptr->type(), namePtrTable.at(node_ptr),
-                              outputIdx, dims, inputIdx, child->type(), namePtrTable.at(child));
+                  fmt::print(fp.get(), "{}_{}-->|\"{}{}{}&rarr;{}\"|{}_{}\n", node_ptr->type(), namePtrTable.at(node_ptr),
+                              outputIdx, dims, dtype, inputIdx, child->type(), namePtrTable.at(child));
                 }
                 else if (verbose) {
-                  fmt::print(fp.get(), "{}_{}-->|\"{}{}&rarr;{}\"|{}:::externalCls\n", node_ptr->type(), namePtrTable.at(node_ptr),
-                              outputIdx, dims, inputIdx, static_cast<void*>(child.get()));
+                  fmt::print(fp.get(), "{}_{}-->|\"{}{}{}&rarr;{}\"|{}:::externalCls\n", node_ptr->type(), namePtrTable.at(node_ptr),
+                              outputIdx, dims, dtype, inputIdx, static_cast<void*>(child.get()));
                 }
-                break;
+                // Do not break here because the same child can be connected to several inputs
               }
               ++inputIdx;
             }
@@ -270,7 +274,10 @@ void Aidge::GraphView::setRootNode(NodePtr node) {
 std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::inputNodes() const {
     std::set<std::shared_ptr<Aidge::Node>> nodes;
     for (const auto& node : mInputNodes) {
-        nodes.insert(node.first);
+        // Do not include dummy inputs
+        if (node.first) {
+            nodes.insert(node.first);
+        }
     }
     return nodes;
 }
@@ -278,7 +285,10 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::inputNodes() const {
 std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::outputNodes() const {
     std::set<std::shared_ptr<Aidge::Node>> nodes;
     for (const auto& node : mOutputNodes) {
-        nodes.insert(node.first);
+        // Do not include dummy outputs
+        if (node.first) {
+            nodes.insert(node.first);
+        }
     }
     return nodes;
 }
@@ -522,6 +532,9 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                 }
 
                 if (parentsForwarded && op->forwardDims(allowDataDependency)) {
+                    Log::debug("Dimensions forwarded for node {} (of type {})",
+                        nodePtr->name(), nodePtr->type());
+
                     // Recompute every time, even if it was already computed in a
                     // previous call of forwardDims(), as the graph may have changed!
                     dimsForwarded.insert(nodePtr);
@@ -532,7 +545,9 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                     }
                 }
                 else {
-                    Log::debug("Unable to forward dimensions for node {} (of type {}) yet", nodePtr->name(), nodePtr->type());
+                    if (parentsForwarded) {
+                        Log::debug("Unable to forward dimensions for node {} (of type {})", nodePtr->name(), nodePtr->type());
+                    }
                     nextList.insert(nodePtr);
                 }
             }
@@ -685,6 +700,61 @@ std::pair<std::vector<Aidge::NodePtr>, size_t> Aidge::GraphView::getRankedNodes(
   return std::make_pair(rankedNodes, orderUnicityLimit);
 }
 
+std::vector<Aidge::NodePtr> Aidge::GraphView::getOrderedNodes(bool reversed) const {
+    // We compute the order from a post-dfs walk on the reverse graph starting from
+    // ordered output nodes.
+    // Also, we walk the graph upward left to right in order
+    // to get a topological left-right order when possible.
+    // For the case where reversed is true, we walk the graph upward right to left
+    // and reverse the final order to get a post-dfs left-right order when possible.
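+    //
+    // Illustrative example: for a simple chain A -> B -> C with C as the only
+    // output node, getOrderedNodes() yields {A, B, C} while getOrderedNodes(true)
+    // yields {C, B, A}.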
+    std::vector<std::pair<NodePtr,std::pair<size_t, std::vector<NodePtr>>>> stack;
+    std::vector<NodePtr> reversePostDfs;
+    std::set<NodePtr> visited;
+    std::vector<NodePtr> outNodes(mNodes.size());
+    auto reverse_if_dfs = [reversed](auto &parents) {
+        if (reversed) std::reverse(parents.begin(), parents.end());
+    };
+    for (const auto& output : mOutputNodes) {
+        outNodes.push_back(output.first);
+    }
+    reverse_if_dfs(outNodes);
+    stack.push_back(std::make_pair(nullptr, std::make_pair(0, std::move(outNodes))));
+    while (!stack.empty()) {
+        auto node = stack.back().first;
+        auto& parentIdx = stack.back().second.first;
+        auto& parents = stack.back().second.second;
+        if (parentIdx == parents.size()) {
+            stack.pop_back();
+            if (node) {
+                reversePostDfs.push_back(node);
+            }
+        } else {
+            auto backEdgeIdx = reversed ? parents.size() - 1 - parentIdx : parentIdx;
+            auto isBackEdge = node != nullptr ? node->parentIsBackEdge(backEdgeIdx) : false;
+            auto parent = parents[parentIdx++];
+            if (parent != nullptr && inView(parent) &&
+                visited.find(parent) == visited.end()) {
+                if (isBackEdge) {
+                    stack[0].second.second.push_back(parent);
+                } else {
+                    visited.insert(parent);
+                    auto next_parents = parent->getParents();
+                    reverse_if_dfs(next_parents);
+                    stack.push_back(std::make_pair(parent, std::make_pair(0, std::move(next_parents))));
+                }
+            }
+        }
+    }
+
+    if (reversePostDfs.size() != mNodes.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+                             "Could not enumerate all nodes, set output nodes such that all graph nodes are connected.");
+    }
+
+    reverse_if_dfs(reversePostDfs);
+    return reversePostDfs;
+}
+
 std::map<Aidge::NodePtr, std::string> Aidge::GraphView::getRankedNodesName(const std::string& format, bool markNonUnicity) const {
   const auto rankedNodes = getRankedNodes();
   std::map<NodePtr, std::string> rankedNodesName;
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
index efe6296a351f69ef3a11d4e1bc04bd0b52d46a06..4ca7cc9831c091a8ea79051115decd489a4a03be 100644
--- a/src/operator/Stack.cpp
+++ b/src/operator/Stack.cpp
@@ -31,7 +31,7 @@ Elts_t StackProdConso::getRequiredMemory(
 
     const StackOp &op = dynamic_cast<const StackOp &>(mOp);
     // The produced data after one forward pass is simply the input size,
-    // we do not produce the whole output tensor everytime.
+    // we do not produce the whole output tensor every time.
     if (op.forwardStep() <= op.maxElements()) {
         return Elts_t::DataElts(op.getInput(inputIdx)->size());
     } else {