diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index 8d6f2686d9010ac4ebed80cd04f74effe763e977..21b62e92b501215fc51651b7ca7a7bcb5237b027 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -101,7 +101,7 @@ class test_operator_binding(unittest.TestCase):
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
 
         op = aidge_core.GenericOperatorOp("any_type", 1,0,1)
-        with self.assertRaises(RuntimeError):
+        with self.assertRaises(IndexError):
             op.attr.something
 
         op.attr.something = aidge_core.DynamicAttributes()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index cadd8c85ca541862cc6f298fa055713a6f65e3ed..dc0a12c76c8c72d656229ec90a81f1724f88faf7 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -65,6 +65,7 @@
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Reshape.hpp"
 #include "aidge/operator/Resize.hpp"
+#include "aidge/operator/Round.hpp"
 #include "aidge/operator/Shape.hpp"
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/operator/Slice.hpp"
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 32932fa6f598737644f74d4e2ce5da89557b5d3d..e014b041fdad94f5f17d636a2da92180de59e152 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -54,7 +54,7 @@ private:
           return sharedA < sharedB; // shared_ptr has a valid comparison operator
       }
   };
-  std::string mName; /** Name of the Node. Should be unique. */
+  std::shared_ptr<DynamicAttributes> mAttrs;
 
   std::set<std::weak_ptr<GraphView>, weakCompare> mViews; /** Set of pointers to GraphView instances including this Node instance. */
   const std::shared_ptr<Operator> mOperator; // Pointer to the associated Operator
@@ -70,6 +70,14 @@ private:
 public:
   Node() = delete;
 
+  /**
+   * @brief Construct a new Node object associated with the input Operator.
+   * @param op Operator giving the Node its number of connections.
+   * @param attrs Attributes for the Node.
+   */
+  Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs);
+  Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs);
+
   /**
    * @brief Construct a new Node object associated with the input Operator.
    * @param op Operator giving the Node its number of connections.
@@ -120,7 +128,7 @@ public:
    * @brief Name of the Node.
    * @return std::string
    */
-  inline std::string name() const noexcept { return mName; }
+  inline std::string name() const noexcept { return (mAttrs->hasAttr("name")) ? mAttrs->getAttr<std::string>("name") : ""; }
 
   /**
    * @brief Set the Node name.
@@ -164,7 +172,7 @@ public:
    * @brief Get the Operator object of the Node.
    * @return std::shared_ptr<Operator>
    */
-  inline std::shared_ptr<Operator> getOperator() const { return mOperator; }
+  inline std::shared_ptr<Operator> getOperator() const { return (*mOperator)(mAttrs); }
 
   ///////////////////////////////////////////////////////
   //        TENSOR MANAGEMENT
diff --git a/include/aidge/hook/ExecTime.hpp b/include/aidge/hook/ExecTime.hpp
deleted file mode 100644
index 0964d9575b7ad345d5e07c9f19c7e56a3b69c813..0000000000000000000000000000000000000000
--- a/include/aidge/hook/ExecTime.hpp
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * \file execTime.hpp
- * \brief execTime structure
- * \version file 1.0.0
- * \date Creation 27 June 2023
- * \date 27 June 2023
- * \par ChangeLog
- * \par
- *  v1.0.0, 27 June 2023<br>
- *  - Initial version.
- * \author mn271187, ik243221
- * \copyright
- *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
- *  rights reserved.
- */
-
-#ifndef execTime_H_
-#define execTime_H_
-
-#include "aidge/operator/Operator.hpp"
-#include "aidge/hook/Hook.hpp"
-#include <memory>
-#include <chrono>
-#include <vector>
-
-namespace Aidge {
-
-class ExecTime : public Hook {
-private:
-    std::vector<std::chrono::high_resolution_clock::time_point> registeredTimes = std::vector<std::chrono::high_resolution_clock::time_point>();
-public:
-    ExecTime(const std::shared_ptr<Operator> op) : Hook(op) {}
-    ~ExecTime() = default;
-
-    void call() override final {
-        registeredTimes.push_back(std::chrono::high_resolution_clock::now());
-    }
-
-    static std::shared_ptr<ExecTime> create(const std::shared_ptr<Operator> op)
-    {
-        return std::make_shared<ExecTime>(op);
-    }
-
-    std::vector<std::chrono::high_resolution_clock::time_point> getTimes() {
-        return  registeredTimes;
-    }
-
-    std::chrono::high_resolution_clock::time_point getTime(size_t idx) {
-        return registeredTimes[idx];
-    }
-
-};
-
-namespace {
-    static Registrar<Hook> registrarHook_ExecTime({"execution_time"}, Aidge::ExecTime::create);
-}
-}
-
-#endif /* execTime_H_ */
\ No newline at end of file
diff --git a/include/aidge/hook/Hook.hpp b/include/aidge/hook/Hook.hpp
deleted file mode 100644
index 5edf231d51f913f58351b4817e145b5f48953ddd..0000000000000000000000000000000000000000
--- a/include/aidge/hook/Hook.hpp
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * \file Hook.hpp
- * \brief Hook structure
- * \version file 1.0.0
- * \date Creation 27 June 2023
- * \date 27 June 2023
- * \par ChangeLog
- * \par
- *  v1.0.0, 27 June 2023<br>
- *  - Initial version.
- * \author mn271187, ik243221
- * \copyright
- *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
- *  rights reserved.
- */
-
-#ifndef Hook_H_
-#define Hook_H_
-
-#include "aidge/utils/Attributes.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include <memory>
-
-namespace Aidge {
-
-class Operator;
-class Hook : public Registrable<Hook, std::tuple<std::string>, std::function<std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>> {
-//class Hook : public Registrable<Hook, std::tuple<std::string>, std::function<std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>>{
-protected:
-    const std::shared_ptr<Operator> mOperator;
-
-public:
-    Hook(std::shared_ptr<Operator> op) : mOperator(op) {}
-    virtual ~Hook() = default;
-
-    virtual void call() = 0;
-
-};
-}
-
-#endif /* Hook_H_ */
diff --git a/include/aidge/hook/OutputRange.hpp b/include/aidge/hook/OutputRange.hpp
deleted file mode 100644
index 355f4aaa15a6bcd77d99ec2dad344a45f8f9edc0..0000000000000000000000000000000000000000
--- a/include/aidge/hook/OutputRange.hpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * \file execTime.hpp
- * \brief execTime structure
- * \version file 1.0.0
- * \date Creation 27 June 2023
- * \date 27 June 2023
- * \par ChangeLog
- * \par
- *  v1.0.0, 27 June 2023<br>
- *  - Initial version.
- * \author ik243221
- * \copyright
- *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
- *  rights reserved.
- */
-
-#ifndef AIDGE_CORE_HOOK_OUTPUTRANGE_H_
-#define AIDGE_CORE_HOOK_OUTPUTRANGE_H_
-
-#include "aidge/operator/Operator.hpp"
-#include "aidge/hook/Hook.hpp"
-#include <memory>
-#include <chrono>
-#include <vector>
-#include <cmath>
-namespace Aidge {
-
-class OutputRange : public Hook {
-private:
-    std::vector<float> registeredOutputs = std::vector<float>();
-public:
-    OutputRange(const std::shared_ptr<Operator> op) : Hook(op) {}
-    ~OutputRange() = default;
-
-    void call() override final {
-        //std::cout << "call() outputRange hook " << std::endl;
-        //this assumes there is only 1 output possible
-        std::shared_ptr<Tensor> tensor = mOperator->getOutput(0);
-        //tensor->print();
-        //std::cout << "call() outputRange hook : tensor printed" << std::endl;
-        float max_value = 0.;
-        float * casted_tensor = static_cast<float *>(tensor->getImpl()->rawPtr());
-        //find the absolute max value in the tensor, save it to registered outputs
-        for(std::size_t i = 0; i < tensor->size(); ++i) {
-            //std::cout << "call() outputRange hook : casted_tensor[i] = " << casted_tensor[i] << std::endl;
-            if(std::abs(casted_tensor[i]) > max_value){
-                max_value = std::abs(casted_tensor[i]);
-            }
-        }
-        //std::cout << "call() outputRange hook : max_value = " << max_value << std::endl;
-        registeredOutputs.push_back(max_value);
-    }
-
-    static std::shared_ptr<OutputRange> create(const std::shared_ptr<Operator> op)
-    {
-        return std::make_shared<OutputRange>(op);
-    }
-
-    std::vector<float> getOutputs() {
-        return  registeredOutputs;
-    }
-
-    float getOutput(size_t idx) {
-        return registeredOutputs[idx];
-    }
-
-};
-
-namespace {
-    static Registrar<Hook> registrarHook_OutputRange({"output_range"}, Aidge::OutputRange::create);
-}
-}
-
-#endif /* outputRange_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index daf50771703d6608dbbe90364aac8667aefbdd1d..f96996079b9e89f80c78b8e409830369480705a8 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -18,8 +18,9 @@
 
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
 
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 2812da066887d63133ede2d69b5804f0b8a8101e..89b2c06a52f180ffb35363cb6ab07d4242e12033 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -64,7 +64,7 @@ public:
     inline T& getAttr(const std::string& name)
     { return mAttributes -> template getAttr<T>(name); }
     template <class T>
-    inline const T& getAttr(const std::string& name) const
+    inline T getAttr(const std::string& name) const
     { return mAttributes -> template getAttr<T>(name); }
 
     ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index ccff976cbb7cf8efc59223dfd658ca2a4d03a80b..744dbd1327a83267b7840e03ba83190326ee6cdd 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -37,7 +37,7 @@ public:
     std::weak_ptr<Node> mUpperNode;
 
    public:
-    MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph);
+    MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph, const std::vector<InputCategory>& forcedInputsCategory = {});
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -113,6 +113,7 @@ public:
 
 std::shared_ptr<Node> MetaOperator(const char *type,
                                   const std::shared_ptr<GraphView>& graph,
+                                  const std::vector<InputCategory>& forcedInputsCategory = {},
                                   const std::string& name = "");
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index bc3348377525cdd2e5b2c030c8fc6b7cb8177e7b..750a808aaeb23447578501f8b27c7eba3d34234c 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -126,7 +126,7 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &
         MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, ceil_mode)
     });
 
-    return MetaOperator("PaddedMaxPooling", graph, name);
+    return MetaOperator("PaddedMaxPooling", graph, {}, name);
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 87aa4080e57d14d0d8a738afed2e976521b42048..a799153e1db5eb83964ed06dd3bc0fb06da64de8 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -28,7 +28,7 @@
 #include "aidge/data/Data.hpp"
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/hook/Hook.hpp"
+
 
 #ifdef PYBIND
 namespace py = pybind11;
@@ -50,7 +50,7 @@ enum class InputCategory {
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
     std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
-    std::map<std::string, std::shared_ptr<Hook>> mHooks;
+    std::shared_ptr<DynamicAttributes> mInheritedAttrs;
 
 private:
     std::string mType;
@@ -81,7 +81,10 @@ public:
         mImpl = nullptr;
         // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
         // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
-        // Hooks are not copied.
+    }
+    std::shared_ptr<Operator> operator()(std::shared_ptr<DynamicAttributes> attrs) {
+        mInheritedAttrs = attrs;
+        return shared_from_this();
     }
 
     virtual ~Operator() noexcept;
@@ -90,6 +93,7 @@ public:
     virtual std::shared_ptr<Operator> clone() const = 0;
 
     virtual std::shared_ptr<Attributes> attributes() const { return nullptr; };
+    virtual std::shared_ptr<DynamicAttributes> inheritedAttributes() const { return mInheritedAttrs; };
     /**
      * @brief Set the specified input with a shallow copy.
      * @param inputIdx Index of the input to set.
@@ -114,14 +118,6 @@ public:
     virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const = 0;
     virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
 
-    std::shared_ptr<Hook> getHook(const std::string& hookName) {
-        return mHooks[hookName];
-    }
-    void addHook(const std::string& hookName) {
-        mHooks.insert(std::pair<std::string, std::shared_ptr<Hook>>(hookName,Registrar<Hook>::create({hookName})(shared_from_this())));
-    }
-
-    void runHooks() const;
 
 ///////////////////////////////////////////////////////
 //        IMPLEMENTATION
diff --git a/include/aidge/operator/Round.hpp b/include/aidge/operator/Round.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..00352421d193eff543f1351b57f8db54ac742393
--- /dev/null
+++ b/include/aidge/operator/Round.hpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ROUND_H_
+#define AIDGE_CORE_OPERATOR_ROUND_H_
+
+#include <memory>
+#include <vector>
+#include <string>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Round_Op : public OperatorTensor,
+                public Registrable<Round_Op,
+                                std::string,
+                                std::function<std::shared_ptr<OperatorImpl>(const Round_Op&)>> {
+
+
+public:
+    static const std::string Type;
+
+    Round_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Round_Op(const Round_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Round_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> Round(const std::string& name = "");
+}
+
+
+#endif /* AIDGE_CORE_OPERATOR_ROUND_H_ */
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index cf71ed0b5953fa1759e04c66311d3d829a603a01..fd29bf4ce57ac94e0860172d2d1c15dc40f15ae0 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -69,8 +69,6 @@ public:
     virtual std::map<std::string, future_std::any> getAttrs() const = 0;
 
 #ifdef PYBIND
-    virtual bool hasAttrPy(const std::string& name) const = 0;
-
     /* Bindable get function, does not recquire any templating.
     *  This is thanks to py::object which allow the function to
     *  be agnostic from its return type.
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 04ed58f7e636d6a0d528f1946ead110857312576..dc066664b6b5d41336facf60d785f00774862f96 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -39,8 +39,12 @@ namespace Aidge {
 ///\todo managing complex types or excluding non-trivial, non-aggregate types
 class DynamicAttributes : public Attributes {
 public:
-    DynamicAttributes() = default;
-    DynamicAttributes(const std::map<std::string, future_std::any>& attrs): mAttrs(attrs) {}
+    DynamicAttributes() {
+        mAnyUtils.emplace(typeid(DynamicAttributes), std::unique_ptr<AnyUtils<DynamicAttributes>>(new AnyUtils<DynamicAttributes>()));
+    }
+    DynamicAttributes(const std::map<std::string, future_std::any>& attrs): mAttrs(attrs) {
+        mAnyUtils.emplace(typeid(DynamicAttributes), std::unique_ptr<AnyUtils<DynamicAttributes>>(new AnyUtils<DynamicAttributes>()));
+    }
 
     /**
      * \brief Returning an Attribute identified by its name
@@ -50,39 +54,23 @@ public:
      *  exist
      * \note at() throws if the Attribute does not exist, using find to test for Attribute existance
      */
-    template<class T> const T& getAttr(const std::string& name) const
+    template<class T> T getAttr(const std::string& name) const
     {
-        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
-            [](const future_std::any& lhs, const future_std::any& rhs) {
-#ifdef PYBIND
-                if (lhs.type() == typeid(py::object)) {
-                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
-                }
-                else if (rhs.type() == typeid(py::object)) {
-                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
-                }
-                else
-#endif
-                {
-                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
-                }
-            }));
-
         const auto dot = name.find('.');
         if (dot == name.npos) {
+            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+
+            const auto& attr = mAttrs.at(name);
 #ifdef PYBIND
-            // If attribute does not exist in C++, it might have been created or modified in Python
-            auto it = mAttrs.find(name);
-            if (it == mAttrs.end()) {
-                auto itPy = mAttrsPy.find(name);
-                if (itPy != mAttrsPy.end()) {
-                    // Insert the attribute back in C++
-                    mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
-                }
+            if (attr.type() == typeid(py::object)) {
+                // Note: because of cast<T>(), this function cannot return a const reference!
+                return future_std::any_cast<const py::object&>(attr).cast<T>();
             }
+            else
 #endif
-
-            return future_std::any_cast<const T&>(mAttrs.at(name));
+            {
+                return future_std::any_cast<const T&>(attr);
+            }
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -92,9 +80,21 @@ public:
     }
 
     template<class T> T& getAttr(const std::string& name) {
-        // Scott Meyers' solution to avoid code duplication
-        return const_cast<T&>(
-            static_cast<const DynamicAttributes&>(*this).getAttr<T>(name));
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+
+            auto& attr = mAttrs.at(name);
+#ifdef PYBIND
+            AIDGE_ASSERT(attr.type() != typeid(py::object), "getAttr(): cannot return a reference to a Python-defined attribute.");
+#endif
+            return future_std::any_cast<T&>(attr);
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            return future_std::any_cast<DynamicAttributes&>(mAttrs.at(ns)).getAttr<T>(nsName);
+        }
     }
 
     ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
@@ -103,35 +103,12 @@ public:
     ///\param value Attribute value
     template<class T> void addAttr(const std::string& name, const T& value)
     {
-        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
-            [](const future_std::any& lhs, const future_std::any& rhs) {
-#ifdef PYBIND
-                if (lhs.type() == typeid(py::object)) {
-                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
-                }
-                else if (rhs.type() == typeid(py::object)) {
-                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
-                }
-                else
-#endif
-                {
-                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
-                }
-            }));
-
         const auto dot = name.find('.');
         if (dot == name.npos) {
+            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+
             const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
             AIDGE_ASSERT(res.second, "addAttr(): attribute \"{}\" already exists. Use setAttr() if this is expected.", name);
-
-#ifdef PYBIND
-            // We cannot handle Python object if the Python interpreter is not running
-            if (Py_IsInitialized()) {
-                // Keep a copy of the attribute in py::object that is updated everytime
-                const auto& resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
-                AIDGE_ASSERT(resPy.second, "addAttr(): attribute \"{}\" already exists (added in Python). Use setAttr() if this is expected.", name);
-            }
-#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -147,37 +124,13 @@ public:
     ///\param value Attribute value
     template<class T> void setAttr(const std::string& name, const T& value)
     {
-        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
-            [](const future_std::any& lhs, const future_std::any& rhs) {
-#ifdef PYBIND
-                if (lhs.type() == typeid(py::object)) {
-                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
-                }
-                else if (rhs.type() == typeid(py::object)) {
-                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
-                }
-                else
-#endif
-                {
-                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
-                }
-            }));
-
         const auto dot = name.find('.');
         if (dot == name.npos) {
+            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+
             auto res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
             if (!res.second)
                 res.first->second = future_std::any(value);
-
-#ifdef PYBIND
-            // We cannot handle Python object if the Python interpreter is not running
-            if (Py_IsInitialized()) {
-                // Keep a copy of the attribute in py::object that is updated everytime
-                auto resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
-                if (!resPy.second)
-                    resPy.first->second = std::move(py::cast(value));
-            }
-#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -191,9 +144,6 @@ public:
         const auto dot = name.find('.');
         if (dot == name.npos) {
             mAttrs.erase(name);
-#ifdef PYBIND
-            mAttrsPy.erase(name);
-#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -205,41 +155,12 @@ public:
 #ifdef PYBIND
     void addAttrPy(const std::string& name, py::object&& value)
     {
-        const auto dot = name.find('.');
-        if (dot == name.npos) {
-            auto it = mAttrs.find(name);
-            AIDGE_ASSERT(it == mAttrs.end(), "add_attr(): attribute \"{}\" already exists (added in C++). Use set_attr() if this is expected.", name);
-
-            const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
-            AIDGE_ASSERT(res.second, "add_attr(): attribute \"{}\" already exists. Use set_attr() if this is expected.", name);
-        }
-        else {
-            const auto ns = name.substr(0, dot);
-            const auto nsName = name.substr(dot + 1);
-            const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes()));
-
-            future_std::any_cast<DynamicAttributes&>(res.first->second).addAttrPy(nsName, std::move(value));
-        }
+        addAttr(name, std::move(value));
     }
 
     void setAttrPy(const std::string& name, py::object&& value) override final
     {
-        const auto dot = name.find('.');
-        if (dot == name.npos) {
-            auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
-            if (!resPy.second)
-                resPy.first->second = std::move(value);
-
-            // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
-            mAttrs.erase(name);
-        }
-        else {
-            const auto ns = name.substr(0, dot);
-            const auto nsName = name.substr(dot + 1);
-            const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes()));
-
-            future_std::any_cast<DynamicAttributes&>(res.first->second).setAttrPy(nsName, std::move(value));
-        }
+        setAttr(name, std::move(value));
     }
 
     py::dict dict() const override {
@@ -248,9 +169,9 @@ public:
             if (elt.second.type() == typeid(DynamicAttributes)) {
                 attributes[elt.first.c_str()] = future_std::any_cast<const DynamicAttributes&>(elt.second).dict();
             }
-        }
-        for (const auto& elt : mAttrsPy) {
-            attributes[elt.first.c_str()] = elt.second;
+            else {
+                attributes[elt.first.c_str()] = mAnyUtils.at(elt.second.type())->cast(elt.second);
+            }
         }
         return attributes;
     }
@@ -273,12 +194,7 @@ public:
     bool hasAttr(const std::string& name) const override final {
         const auto dot = name.find('.');
         if (dot == name.npos) {
-#ifdef PYBIND
-            return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
-
-#else
             return (mAttrs.find(name) != mAttrs.cend());
-#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -293,45 +209,22 @@ public:
         }
     }
 
-#ifdef PYBIND
-    bool hasAttrPy(const std::string& name) const override final {
-        const auto dot = name.find('.');
-        if (dot == name.npos) {
-            // Attributes might have been created in Python, the second condition is necessary.
-            return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
-        }
-        else {
-            const auto ns = name.substr(0, dot);
-            const auto it = mAttrs.find(ns);
-            if (it != mAttrs.cend()) {
-                const auto nsName = name.substr(dot + 1);
-                return future_std::any_cast<const DynamicAttributes&>(it->second).hasAttrPy(nsName);
-            }
-            else {
-                return false;
-            }
-        }
-    }
-#endif
-
     std::string getAttrType(const std::string& name) const override final {
         // In order to remain consistent between C++ and Python, with or without PyBind, the name of the type is:
         // - C-style for C++ created attributes
         // - Python-style for Python created attributes
         const auto dot = name.find('.');
         if (dot == name.npos) {
+            const auto& attr = mAttrs.at(name);
 #ifdef PYBIND
-            // If attribute does not exist in C++, it might have been created in Python
-            auto it = mAttrs.find(name);
-            if (it == mAttrs.end()) {
-                auto itPy = mAttrsPy.find(name);
-                if (itPy != mAttrsPy.end()) {
-                    return std::string(Py_TYPE(itPy->second.ptr())->tp_name);
-                }
+            if (attr.type() == typeid(py::object)) {
+                return std::string(Py_TYPE(future_std::any_cast<const py::object&>(attr).ptr())->tp_name);
             }
+            else
 #endif
-
-            return mAttrs.at(name).type().name();
+            {
+                return attr.type().name();
+            }
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -344,11 +237,6 @@ public:
         std::set<std::string> attrsName;
         for(auto const& it: mAttrs)
             attrsName.insert(it.first);
-#ifdef PYBIND
-        // Attributes might have been created in Python
-        for(auto const& it: mAttrsPy)
-            attrsName.insert(it.first);
-#endif
         return attrsName;
     }
 
@@ -356,21 +244,13 @@ public:
     /**
      * @detail See https://github.com/pybind/pybind11/issues/1590 as to why a
      * generic type caster for std::any is not feasable.
-     * The strategy here is to keep a copy of each attribute in py::object that is updated everytime.
+     * The strategy here is to store a cast() function for each attribute type ever used.
     */
     inline py::object getAttrPy(const std::string& name) const override final {
         const auto dot = name.find('.');
         if (dot == name.npos) {
-            auto itPy = mAttrsPy.find(name);
-            if (itPy == mAttrsPy.end()) {
-                // Attribute may be a namespace
-                auto it = mAttrs.find(name);
-                AIDGE_ASSERT(it != mAttrs.end() && it->second.type() == typeid(DynamicAttributes), "get_attr(): attribute \"{}\" not found", name);
-                return py::cast(future_std::any_cast<const DynamicAttributes&>(it->second));
-            }
-            else {
-                return itPy->second;
-            }
+            const auto& attr = mAttrs.at(name);
+            return mAnyUtils.at(attr.type())->cast(attr);
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -384,24 +264,6 @@ public:
     {
         const auto dot = name.find('.');
         if (dot == name.npos) {
-#ifdef PYBIND
-            // If attribute does not exist in C++, it might have been created or modified in Python
-            auto it = mAttrs.find(name);
-            if (it == mAttrs.end()) {
-                auto itPy = mAttrsPy.find(name);
-                if (itPy != mAttrsPy.end()) {
-                    // Attribute exists in Python, but its type is not known
-                    // Return a std::any of py::object, which will be comparable
-                    mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(py::object),
-                        [](const future_std::any& lhs, const future_std::any& rhs) {
-                            return (future_std::any_cast<py::object>(lhs) < future_std::any_cast<py::object>(rhs));
-                        }));
-
-                    return future_std::any(itPy->second);
-                }
-            }
-#endif
-
             return mAttrs.at(name);
         }
         else {
@@ -415,34 +277,129 @@ public:
         return mAttrs;
     }
 
-    virtual ~DynamicAttributes() {}
+    virtual ~DynamicAttributes() {
+#ifdef PYBIND
+        if (!Py_IsInitialized()) {
+            // Resets the internal pointer of py::object to nullptr without decreasing the object's reference count.
+            // At this point, the Python interpreter may have exited (it is the case if the current DynamicAttribute being destroyed is static),
+            // in which case py:object has already being destroyed despite the reference counting being > 0.
+            // See https://github.com/pybind/pybind11/issues/1598
+            for (auto& attr : mAttrs) {
+                if (attr.second.type() == typeid(py::object)) {
+                    future_std::any_cast<py::object&>(attr.second).release();
+                }
+            }
+        }
+#endif
+    }
 
     friend bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs);
+    friend struct std::hash<DynamicAttributes>;
 
 private:
-#ifdef PYBIND
-    // Stores C++ attributes (copy) and Python-only attributes
-    // Code should be compiled with -fvisibility=hidden
-    // See https://pybind11.readthedocs.io/en/stable/faq.html:
-    // “‘SomeClass’ declared with greater visibility than the type of its
-    // field ‘SomeClass::member’ [-Wattributes]”
-    // This map will only be populated if Python interpreter is running
-    std::map<std::string, py::object> mAttrsPy;
-    // Stores C++ attributes only
-    // mutable because it may be updated in getAttr() from Python
-    mutable std::map<std::string, future_std::any> mAttrs;
-#else
     std::map<std::string, future_std::any> mAttrs;
-#endif
 
 public:
-    // Stores the comparison function for each attribute type ever used
-    static std::map<std::type_index, bool(*)(const future_std::any&, const future_std::any&)> mAnyCompare;
+    struct AnyUtils_ {
+#ifdef PYBIND
+        virtual py::object cast(const future_std::any& attr) const = 0;
+#endif
+        virtual bool compare(const future_std::any&, const future_std::any&) const = 0;
+        virtual size_t hash(const future_std::any&) const = 0;
+        virtual ~AnyUtils_() = default;
+    };
+
+    template <class T>
+    struct AnyUtils : public AnyUtils_ {
+#ifdef PYBIND
+        py::object cast(const future_std::any& attr) const override final {
+            return py::cast(future_std::any_cast<const T&>(attr));
+        }
+#endif
+
+        bool compare(const future_std::any& lhs, const future_std::any& rhs) const override final {
+#ifdef PYBIND
+            if (lhs.type() == typeid(py::object) && rhs.type() != typeid(py::object)) {
+                return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
+            }
+            else if (lhs.type() != typeid(py::object) && rhs.type() == typeid(py::object)) {
+                return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
+            }
+            else
+#endif
+            {
+                return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
+            }
+        }
+
+        size_t hash(const future_std::any& attr) const override final {
+            return std::hash<T>()(future_std::any_cast<T>(attr));
+        }
+    };
+
+    // Stores typed utils functions for each attribute type ever used
+    static std::map<std::type_index, std::unique_ptr<AnyUtils_>> mAnyUtils;
 };
 
+template<> void DynamicAttributes::setAttr<future_std::any>(const std::string& name, const future_std::any& value);
+
+#ifdef PYBIND
+template <>
+struct DynamicAttributes::AnyUtils<py::object> : public DynamicAttributes::AnyUtils_ {
+    py::object cast(const future_std::any& attr) const override {
+        return future_std::any_cast<const py::object&>(attr);
+    }
+
+    bool compare(const future_std::any& lhs, const future_std::any& rhs) const override {
+        return (future_std::any_cast<py::object>(lhs) < future_std::any_cast<py::object>(rhs));
+    }
+
+    size_t hash(const future_std::any& attr) const override final {
+        // Here we are mixing Python and C++ hashes... if both are
+        // well implemented, this should not increase the collision
+        // probability for the same number of stored hashes.
+        return py::hash(future_std::any_cast<py::object>(attr));
+    }
+};
+#endif
+
 inline bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs) {
     return (lhs.mAttrs < rhs.mAttrs);
 }
+
+// Combine the hashes (boost-like hash combining, see boost::hash_combine())
+inline void hash_combine(std::size_t& seed, const std::size_t& value) {
+    seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2);
+}
+}
+
+namespace std {
+    // Make DynamicAttributes hashable so that it can be stored in hash-based containers.
+    // This is particularly useful in Python since set() and dict() are hash-based.
+    template <>
+    struct hash<Aidge::DynamicAttributes> {
+        size_t operator()(const Aidge::DynamicAttributes& attrs) const {
+            std::size_t seed = 0;
+            for (const auto& pair : attrs.mAttrs) {
+                Aidge::hash_combine(seed, std::hash<std::string>()(pair.first));
+                Aidge::hash_combine(seed, Aidge::DynamicAttributes::mAnyUtils.at(pair.second.type())->hash(pair.second));
+            }
+            return seed;
+        }
+    };
+
+    // General specialization of std::hash for any container that has iterators (e.g., std::vector, std::list, std::set)
+    template <template <typename...> class Container, typename T, typename... Args>
+    struct hash<Container<T, Args...>> {
+        std::size_t operator()(const Container<T, Args...>& iterable) const {
+            std::size_t seed = 0;
+            for (const auto& v : iterable) {
+                // Recursively hash the value pointed by the iterator
+                Aidge::hash_combine(seed, std::hash<T>()(v));
+            }
+            return seed;
+        }
+    };
 }
 
 namespace future_std {
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 414381891ce52046ee7c2df5b82a17e1314773cd..636863e292eeb677055dea379441ce422a6c90d8 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -199,18 +199,6 @@ public:
         return false;
     }
 
-#ifdef PYBIND
-        bool hasAttrPy(const std::string& name) const override final {
-        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
-                return true;
-            }
-        }
-
-        return false;
-    }
-#endif
-
     // Runtime type access with name
     std::string getAttrType(const std::string& name) const override final {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index d021a79c5ff4e337bebf424465458ddabf056a56..afd682f3e546b408b231a14e55a7ba5432fef430 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -195,14 +195,17 @@ void init_MetaOperatorDefs(py::module &m) {
   declare_LSTMOp(m);
 
   py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperator_Op", py::multiple_inheritance())
-  .def(py::init<const char *, const std::shared_ptr<GraphView>&>(),
+  .def(py::init<const char *, const std::shared_ptr<GraphView>&, const std::vector<InputCategory>&>(),
           py::arg("type"),
-          py::arg("graph"))
-  .def("get_micro_graph", &MetaOperator_Op::getMicroGraph);
+          py::arg("graph"),
+          py::arg("forced_inputs_category") = std::vector<InputCategory>())
+  .def("get_micro_graph", &MetaOperator_Op::getMicroGraph)
+  .def("set_upper_node", &MetaOperator_Op::setUpperNode);
 
   m.def("meta_operator", &MetaOperator,
     py::arg("type"),
     py::arg("graph"),
+    py::arg("forced_inputs_category") = std::vector<InputCategory>(),
     py::arg("name") = ""
   );
 
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index 6ffbdd007b9f929ccac18de12f2319dcd68b1eda..e22f88687eff6856ce57fab6621781ffc86873b4 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -60,8 +60,6 @@ void init_Operator(py::module& m){
     .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>())
     .def("type", &Operator::type)
     .def("get_impl", &Operator::getImpl)
-    .def("get_hook", &Operator::getHook)
-    .def("add_hook", &Operator::addHook)
     .def_property_readonly("attr", &Operator::attributes)
     .def("set_back_edges", &Operator::setBackEdges, py::arg("input_indexes"))
     .def("is_back_edge", &Operator::isBackEdge, py::arg("input_index"))
diff --git a/python_binding/operator/pybind_Round.cpp b/python_binding/operator/pybind_Round.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e9ed0e473eaa820537590633a89ca47382d36672
--- /dev/null
+++ b/python_binding/operator/pybind_Round.cpp
@@ -0,0 +1,36 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Round.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Round(py::module& m) {
+    py::class_<Round_Op, std::shared_ptr<Round_Op>, OperatorTensor>(m, "RoundOp", py::multiple_inheritance())
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Round_Op::getInputsName)
+    .def_static("get_outputs_name", &Round_Op::getOutputsName)
+    .def_readonly_static("Type", &Round_Op::Type);
+    declare_registrable<Round_Op>(m, "RoundOp");
+    m.def("Round", &Round, py::arg("name") = "", R"mydelimiter(
+    RoundOp is a tensor operator that rounds the values of a tensor element-wise.
+        This class rounds each value to the nearest integer. In the case of halves, 
+        the rule is to round them to the nearest even integer.
+        :param X: input tensor.
+        :type X: tensor of type float, double, float16, or bfloat16.
+        :param Y: output tensor with the same shape and type as the input tensor.
+    )mydelimiter");
+}
+}  // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index d7729ff5f573e884fcd15d75a1eda4fd174a6d91..f52db7c16d1111ac9dda747a5be15cc9d8576cdd 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -62,6 +62,7 @@ void init_ReduceMean(py::module&);
 void init_ReduceSum(py::module&);
 void init_Reshape(py::module&);
 void init_Resize(py::module&);
+void init_Round(py::module&);
 void init_Scaling(py::module&);
 void init_Shape(py::module&);
 void init_Sigmoid(py::module&);
@@ -145,6 +146,7 @@ void init_Aidge(py::module& m) {
     init_ReduceSum(m);
     init_Reshape(m);
     init_Resize(m);
+    init_Round(m);
     init_Scaling(m);
     init_Shape(m);
     init_Sigmoid(m);
diff --git a/python_binding/utils/pybind_Attributes.cpp b/python_binding/utils/pybind_Attributes.cpp
index bc0ccb3f4053e37c186acd919fcadae9d5d19a40..691691a86861b7a5ac731a842d8902d3520d4a23 100644
--- a/python_binding/utils/pybind_Attributes.cpp
+++ b/python_binding/utils/pybind_Attributes.cpp
@@ -30,13 +30,13 @@ DynamicAttributes test_DynamicAttributes_binding() {
     return attrs;
 }
 
-double test_DynamicAttributes_binding_check(DynamicAttributes& attrs) {
+double test_DynamicAttributes_binding_check(const DynamicAttributes& attrs) {
     return attrs.getAttr<double>("d");
 }
 
 void init_Attributes(py::module& m){
     py::class_<Attributes, std::shared_ptr<Attributes>>(m, "Attributes")
-    .def("has_attr", &Attributes::hasAttrPy, py::arg("name"))
+    .def("has_attr", &Attributes::hasAttr, py::arg("name"))
     .def("get_attr", &Attributes::getAttrPy, py::arg("name"))
     .def("__getattr__", &Attributes::getAttrPy, py::arg("name"))
     .def("set_attr", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index 0fa2cfdadb3af350a5668444c0a330e023818a41..e2215e704e32367a7ca273b067398bc19fc3fc01 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -81,6 +81,13 @@ Aidge::ImplSpec Aidge::OperatorImpl::getRequiredSpec() const {
     else {
         requiredSpec.attrs.setAttr("type", mOp.type());
     }
+
+    const auto& inhAttrs = mOp.inheritedAttributes();
+    if (inhAttrs) {
+        if (inhAttrs->hasAttr("impl")) {
+            requiredSpec.attrs.setAttr("impl", inhAttrs->getAny("impl"));
+        }
+    }
     return requiredSpec;
 }
 
@@ -120,9 +127,9 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
             std::string qualifier;
             const auto qualifierPos = std::find_if(attrName.begin(), attrName.end(),
                 [](char c) { return c == ':'; });
-            if (qualifierPos != attrName.begin()) {
+            if (qualifierPos != attrName.end()) {
                 name = attrName.substr(0, qualifierPos - attrName.begin());
-                qualifier = attrName.substr(qualifierPos - attrName.begin());
+                qualifier = attrName.substr(qualifierPos - attrName.begin() + 1);
             }
 
             const bool mandatory = (qualifier == "!");
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index f7b6b97cdf2e23080e17b3a162b72a327a893ca4..b2c03e794888a0909ada5db208fc07ad266d4ae2 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -144,7 +144,9 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
       }
       IOIndex_t outputIdx = 0;
       for (const auto& childs : node_ptr->getOrderedChildren()) {
-        for (const auto& child : childs) {
+        // Keep only unique childs in order to avoid duplicating connections
+        const auto uniqueChilds = std::set<NodePtr>(childs.begin(), childs.end());
+        for (const auto& child : uniqueChilds) {
           if (child != nullptr) {
             IOIndex_t inputIdx = 0;
             for (auto parent : child->inputs()) {
@@ -164,7 +166,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
                   fmt::print(fp.get(), "{}_{}-->|\"{}{}&rarr;{}\"|{}:::externalCls\n", node_ptr->type(), namePtrTable.at(node_ptr),
                               outputIdx, dims, inputIdx, static_cast<void*>(child.get()));
                 }
-                break;
+                // Do not break here because the same child can be connected to several inputs
               }
               ++inputIdx;
             }
@@ -270,7 +272,10 @@ void Aidge::GraphView::setRootNode(NodePtr node) {
 std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::inputNodes() const {
     std::set<std::shared_ptr<Aidge::Node>> nodes;
     for (const auto& node : mInputNodes) {
-        nodes.insert(node.first);
+        // Do not include dummy inputs
+        if (node.first) {
+            nodes.insert(node.first);
+        }
     }
     return nodes;
 }
@@ -278,7 +283,10 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::inputNodes() const {
 std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::outputNodes() const {
     std::set<std::shared_ptr<Aidge::Node>> nodes;
     for (const auto& node : mOutputNodes) {
-        nodes.insert(node.first);
+        // Do not include dummy outputs
+        if (node.first) {
+            nodes.insert(node.first);
+        }
     }
     return nodes;
 }
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index b2ceb903d51dbb880979cd2191825a6310f9e5ff..c19eab12ae34418386b1481702f64e4a82e9f771 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -19,8 +19,8 @@
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
-    : mName(name),
+Aidge::Node::Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs)
+    : mAttrs(attrs),
       mOperator(op),
       mParents(std::vector<std::shared_ptr<Node>>(static_cast<std::size_t>(op->nbInputs()),
                                                   nullptr)),
@@ -38,6 +38,18 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
     }
 }
 
+Aidge::Node::Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs)
+    : Node(op, std::make_shared<DynamicAttributes>(attrs)) {}
+
+Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
+    : Node(op, DynamicAttributes())
+{
+    // ctor
+    if (!name.empty()) {
+        mAttrs->setAttr<std::string>("name", name);
+    }
+}
+
 ///////////////////////////////////////////////////////
 //        FUNCTIONAL DESCRIPTION
 ///////////////////////////////////////////////////////
@@ -70,7 +82,7 @@ Aidge::Connector Aidge::Node::operator()(const std::vector<Connector>& ctors) {
 
 void Aidge::Node::setName(const std::string& name) {
     for (auto graphView : views()) graphView->updateNodeName(shared_from_this(), name);
-    mName = name;
+    mAttrs->setAttr<std::string>("name", name);
 }
 
 std::string Aidge::Node::createUniqueName(std::string baseName)
@@ -399,18 +411,18 @@ void Aidge::Node::resetConnections(bool includeLearnableParam) {
 ///////////////////////////////////////////////////////
 
 Aidge::NodePtr Aidge::Node::cloneSharedOperators() const {
-    return std::make_shared<Node>(mOperator, mName);
+    return std::make_shared<Node>(mOperator, mAttrs);
 }
 
 Aidge::NodePtr Aidge::Node::cloneSharedProducers() const {
     std::shared_ptr<Operator> op =
             (mOperator->type() == Producer_Op::Type) ? mOperator : mOperator->clone();
 
-    return std::make_shared<Node>(op, mName);
+    return std::make_shared<Node>(op, mAttrs);
 }
 
 Aidge::NodePtr Aidge::Node::clone() const {
-    return std::make_shared<Node>(mOperator->clone(), mName);
+    return std::make_shared<Node>(mOperator->clone(), mAttrs);
 }
 
 std::set<Aidge::NodePtr> Aidge::Node::getNodeDelta(int delta, std::set<Aidge::NodePtr> nodeSee) {
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 1a71737479f0c98cddcd4d1437012bfb16d2dc85..ab6bde74fb73011f7b49e6958d8cfa8320d0bc1b 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -20,17 +20,22 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
 
-Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph)
-    : OperatorTensor(type, [graph]() {
+Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph, const std::vector<InputCategory>& forcedInputsCategory)
+    : OperatorTensor(type, [graph, forcedInputsCategory]() {
+        IOIndex_t inputIdx = 0;
         std::vector<InputCategory> inputsCategory;
         for (const auto& in : graph->getOrderedInputs()) {
-            if (in.first) {
+            if (inputIdx < forcedInputsCategory.size()) {
+                inputsCategory.push_back(forcedInputsCategory[inputIdx]);
+            }
+            else if (in.first) {
                 inputsCategory.push_back(in.first->getOperator()->inputCategory(in.second));
             }
             else {
                 // Dummy input, default to OptionalData
                 inputsCategory.push_back(InputCategory::OptionalData);
             }
+            ++inputIdx;
         }
         return inputsCategory;
     }(), graph->getOrderedOutputs().size()),
@@ -54,6 +59,7 @@ void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std:
     AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx);
 
     const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
+    AIDGE_ASSERT(inputOp.first, "associateInput(): inputIdx ({}) is a dummy input for this MetaOperator, cannot associate data!", inputIdx);
     inputOp.first->getOperator()->associateInput(inputOp.second, data);
 
     // Associate inputs for custom implementation
@@ -64,6 +70,7 @@ void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, const std
     AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
 
     const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
+    AIDGE_ASSERT(inputOp.first, "setInput(): inputIdx ({}) is a dummy input for this MetaOperator, cannot associate data!", inputIdx);
     inputOp.first->getOperator()->setInput(inputOp.second, data);
 
     // Associate inputs for custom implementation
@@ -243,9 +250,10 @@ void Aidge::MetaOperator_Op::forward() {
 
 std::shared_ptr<Aidge::Node> Aidge::MetaOperator(const char *type,
                                   const std::shared_ptr<Aidge::GraphView>& graph,
+                                  const std::vector<InputCategory>& forcedInputsCategory,
                                   const std::string& name)
 {
-    auto op = std::make_shared<MetaOperator_Op>(type, graph);
+    auto op = std::make_shared<MetaOperator_Op>(type, graph, forcedInputsCategory);
     auto node = std::make_shared<Node>(op, name);
     op->setUpperNode(node);
     return node;
diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp
index 910e7c67aad0068679ca2d240b23312add3e42d7..9620f040472aed984afb99018cde5476ec5f60d3 100644
--- a/src/operator/MetaOperatorDefs/LSTM.cpp
+++ b/src/operator/MetaOperatorDefs/LSTM.cpp
@@ -115,7 +115,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
         {hiddenState, 1}, {cellState, 1}});
     microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
 
-    auto metaOp = MetaOperator("LSTM", microGraph, name);
+    auto metaOp = MetaOperator("LSTM", microGraph, {}, name);
     addProducer(metaOp, 1, {hiddenChannel, inChannel}, "wi");
     addProducer(metaOp, 2, {hiddenChannel, inChannel}, "wo");
     addProducer(metaOp, 3, {hiddenChannel, inChannel}, "wf");
diff --git a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
index ef319ef38ad18de9eaed0a1d4a92c3877ee7cf8e..c35d964d0cdd224e9d00eadf6e158bc87b4c776f 100644
--- a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
@@ -41,7 +41,7 @@ std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_
         AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
     });
 
-    return MetaOperator("PaddedAvgPooling", graph, name);
+    return MetaOperator("PaddedAvgPooling", graph, {}, name);
 }
 
 template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
diff --git a/src/operator/MetaOperatorDefs/PaddedConv.cpp b/src/operator/MetaOperatorDefs/PaddedConv.cpp
index 31b1c675e9d577002350ea11dd0b42601a91ef76..49373341a3a7cd1dd764dbfcb385a1817079e8b0 100644
--- a/src/operator/MetaOperatorDefs/PaddedConv.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedConv.cpp
@@ -43,7 +43,7 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConv(Aidge::DimSize_t in_channels,
         Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
         std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "")
     });
-    auto metaOpNode = MetaOperator("PaddedConv", graph, name);
+    auto metaOpNode = MetaOperator("PaddedConv", graph, {}, name);
     addProducer(metaOpNode, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
     if (!no_bias) {
         addProducer(metaOpNode, 2, {out_channels}, "b");
diff --git a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
index 1c073b78a61763b46e330089cccfcc4bced352a4..12d980b4073c115443fe0ed8db38f978aa98dcad 100644
--- a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
@@ -40,7 +40,7 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(const Aidge::DimSize_t n
         Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
         std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv_depth_wise" : "")
     });
-    auto metaOpNode = MetaOperator("PaddedConvDepthWise", graph, name);
+    auto metaOpNode = MetaOperator("PaddedConvDepthWise", graph, {}, name);
     addProducer(metaOpNode, 1, append(nb_channels, append(Aidge::DimSize_t(1), kernel_dims)), "w");
     if (!no_bias) {
         addProducer(metaOpNode, 2, {nb_channels}, "b");
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index f15a7dc3899a7bc864e8e76ff0946fb70584bf05..bd09e9d1297ec612b08634f59bfe33f0802ef3fd 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -65,20 +65,14 @@ void Aidge::Operator::resetConsummerProducer(){
     mImpl->prodConso()->resetConsummerProducer();
 }
 
-void Aidge::Operator::runHooks() const {
-    for (auto& hook : mHooks) {
-        hook.second->call();
-    }
-}
 void Aidge::Operator::forward() {
     AIDGE_ASSERT(mImpl != nullptr, "forward(): an implementation is required for {}!", type());
     mImpl->forward();
-    runHooks();
 }
 
 void Aidge::Operator::backward() {
     AIDGE_ASSERT(mImpl != nullptr, "backward(): an implementation is required for {}!", type());
-    mImpl->backward(); 
+    mImpl->backward();
 }
 
 void Aidge::Operator::setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends) {
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index fdba4ac2e22d857a31779df2e5ff789c3eb92f5c..3d48b88ab400596d68cbfa34502e795766ff94f0 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -92,8 +92,6 @@ void Aidge::Producer_Op::forward() {
     if (!backend().empty()) {
         mImpl->forward();
     }
-
-    runHooks();
 }
 
 void Aidge::Producer_Op::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const {
diff --git a/src/operator/Round.cpp b/src/operator/Round.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ba4eff9d1e1cf06cc5a4bbda54010aec8c2f2f63
--- /dev/null
+++ b/src/operator/Round.cpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Round.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Round_Op::Type = "Round";
+
+Aidge::Round_Op::Round_Op(const Aidge::Round_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Round_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+
+std::shared_ptr<Aidge::Operator> Aidge::Round_Op::clone() const {
+    return std::make_shared<Round_Op>(*this);
+}
+
+void Aidge::Round_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    mImpl = Registrar<Round_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Round_Op::getAvailableBackends() const {
+    return Registrar<Round_Op>::getKeys();
+}
+
+std::shared_ptr<Aidge::Node> Aidge::Round(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Round_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 1613450508ea84a230f36ba6526a1322c6a70559..958b2543208dfdce3eee4e1ba7a22cc8bd0be74b 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -63,22 +63,15 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S
     std::vector<std::shared_ptr<StaticSchedulingElement>> schedule;
 
 
-    // 1) Initialize consumers list:
-    // 1.1) List of the GraphView's input nodes
-    std::set<std::shared_ptr<Node>> consumers = mGraphView->inputNodes();
-
-    // 1.2) List of nodes inside the GraphView connected to an inner Producer
+    // 1) Initialize consumers list: start from the output nodes and
+    // find the required prior producers/consumers at step 2).
+    // Beware that generateBaseScheduling() can be called multiple times
+    // with some node having already produced some data. In this case,
+    // we should always consume available data first. This is ensured
+    // by setting the consumers list to the output nodes and then recursively
+    // finding the dependencies.
+    std::set<std::shared_ptr<Node>> consumers = mGraphView->outputNodes();
     std::set<std::shared_ptr<Node>> producers;
-    for (const std::shared_ptr<Node>& nodePtr : mGraphView->getNodes()) {
-        if (nodePtr->type() == Producer_Op::Type) {
-            for (const auto& child : nodePtr->getChildren()) {
-                // Do not schedule childs outside current graph!
-                if (mGraphView->inView(child)) {
-                    consumers.insert(child);
-                }
-            }
-        }
-    }
 
     do {
         // 2) From the current consumers list, check if any prior consumer node
diff --git a/src/utils/DynamicAttributes.cpp b/src/utils/DynamicAttributes.cpp
index 909d3bb2f5fda977ac497a19e1a1088eb52cfc88..3bbf400382964ef7a4ab8f80aacc66409d872afa 100644
--- a/src/utils/DynamicAttributes.cpp
+++ b/src/utils/DynamicAttributes.cpp
@@ -11,18 +11,36 @@
 
 #include "aidge/utils/DynamicAttributes.hpp"
 
-std::map<std::type_index, bool(*)(const future_std::any&, const future_std::any&)> Aidge::DynamicAttributes::mAnyCompare;
+std::map<std::type_index, std::unique_ptr<Aidge::DynamicAttributes::AnyUtils_>> Aidge::DynamicAttributes::mAnyUtils;
+
+template<> void Aidge::DynamicAttributes::setAttr<future_std::any>(const std::string& name, const future_std::any& value)
+{
+    const auto dot = name.find('.');
+    if (dot == name.npos) {
+        AIDGE_ASSERT(mAnyUtils.find(value.type()) != mAnyUtils.end(), "DynamicAttributes::setAttr(): cannot set value to std::any of never seen type.");
+
+        auto res = mAttrs.emplace(std::make_pair(name, value));
+        if (!res.second)
+            res.first->second = value;
+    }
+    else {
+        const auto ns = name.substr(0, dot);
+        const auto nsName = name.substr(dot + 1);
+        auto res = mAttrs.emplace(std::make_pair(ns, future_std::any(DynamicAttributes())));
+        future_std::any_cast<DynamicAttributes&>(res.first->second).setAttr<future_std::any>(nsName, value);
+    }
+}
 
 bool future_std::operator<(const future_std::any& lhs, const future_std::any& rhs) {
     if (lhs.type() == rhs.type()) {
-        return Aidge::DynamicAttributes::mAnyCompare.at(lhs.type())(lhs, rhs);
+        return Aidge::DynamicAttributes::mAnyUtils.at(lhs.type())->compare(lhs, rhs);
     }
 #ifdef PYBIND
     else if (lhs.type() == typeid(py::object)) {
-        return Aidge::DynamicAttributes::mAnyCompare.at(rhs.type())(lhs, rhs);
+        return Aidge::DynamicAttributes::mAnyUtils.at(rhs.type())->compare(lhs, rhs);
     }
     else if (rhs.type() == typeid(py::object)) {
-        return Aidge::DynamicAttributes::mAnyCompare.at(lhs.type())(lhs, rhs);
+        return Aidge::DynamicAttributes::mAnyUtils.at(lhs.type())->compare(lhs, rhs);
     }
 #endif
     else {