diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 327f4f7c3d43b5194f23cfaed8674ee0b47bd6a2..f2e4722aa6b02d6f1d5ffa13cecb9578dd8cf034 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -75,6 +75,10 @@ public:
     inline void addAttr(const std::string& name, const T& value) const
     { mAttributes -> template addAttr<T>(name, value); }
 
+    inline void setAttrs(const std::map<std::string, future_std::any>& attrs) {
+        *mAttributes = attrs;
+    }
+
     // Helper functions that can be used with setForwardDims():
     static const ComputeDimsFunc Identity;
     static const ComputeDimsFunc InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs);
@@ -84,9 +88,9 @@ public:
 };
 
 /**
- * @brief Fictive custom operator not associated with any implementation.
+ * @brief Generic operator not associated with any implementation.
  * Allows to import unknown operators and simulate new ones.
- * @param type Type of the fictive operator.
+ * @param type Type of the generic operator.
  * @param inputCategory List inputs with their category
  * @param nbOut Number of output data.
  * @param name (optional) name of the Operator.
@@ -96,9 +100,9 @@ std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector
                                              const std::string& name = "");
 
 /**
- * @brief Fictive custom operator not associated with any implementation.
+ * @brief Generic operator not associated with any implementation.
  * Allows to import unknown operators and simulate new ones.
- * @param type Type of the fictive operator.
+ * @param type Type of the generic operator.
  * @param nbData Number of input data.
  * @param nbParam Number of parameters.
  * @param nbOut Number of output data.
@@ -107,6 +111,18 @@ std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector
  */
 std::shared_ptr<Node> GenericOperator(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
                                              const std::string& name = "");
+
+/**
+ * @brief Generic operator not associated with any implementation.
+ * Create a generic operator from another existing operator.
+ * @param type Type of the generic operator.
+ * @param op Original operator from which one wants to derive a generic operator.
+ * @param name (optional) name of the Operator.
+ * @return std::shared_ptr<Node> Node associated with the Generic Operator.
+ */
+std::shared_ptr<Aidge::Node> GenericOperator(const std::string& type,
+                                            std::shared_ptr<OperatorTensor> op,
+                                            const std::string& name = "");
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index e9988b4421b785a91ec170796be49c0c8df52142..95698b751a9f0f4c0cc8e716eb5140ee74e21a3f 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -196,6 +196,10 @@ public:
         return mOperatorType;
     }
 
+    inline std::vector<InputCategory> inputCategory() const {
+        return mInputsCategory;
+    }
+
     inline InputCategory inputCategory(IOIndex_t idx) const {
         // AIDGE_ASSERT(idx < mInputsCategory.size(), "Input #{} out of range (number of inputs is {})", idx, mInputsCategory.size());
         return mInputsCategory.at(idx);
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index 86c722b158657633d4509c1181b1f18201d0d514..0fb405bfe5e74f159fbd5504cc199e3b29842254 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -180,6 +180,19 @@ size_t convToMatMul(std::shared_ptr<GraphView> graph);
  */
 void adaptToBackend(std::shared_ptr<GraphView> graph);
 
+// /**
+//  * @brief The node passed contains an operator whose input of index 1 is supposed to be weights of type Int4, Int3, Int2, binary.
+//  *        This recipe only operates memory transformations on the weight tensor.
+//  *        First, permutes the dimensions to match the dataformat NHWC
+//  *        Second, compact the last dimension (Channel dimension) into int8_t
+//  * 
+//  * @param node Node 
+//  */
+// void applyWeightInterleaving(std::shared_ptr<Node> node);
+
+
+void toGenericOp(std::shared_ptr<Node> node);
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_RECIPES_H_ */
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 3ecd4da393eaac9881d008e27989a52e883ecb6a..0fc350f1a10227e417f3b09baf2c7bebeb84d875 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -34,6 +34,23 @@ namespace py = pybind11;
 
 
 namespace Aidge {
+#if __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
+#define AIDGE_DYNATTR_HAVE_CPP17
+#endif
+
+#if defined(AIDGE_DYNATTR_HAVE_CPP17) || defined(__cpp_lib_void_t)
+using std::void_t;
+#else
+template <typename...>
+using void_t = void;
+#endif
+
+// Detection idiom to check if a type T has a less-than operator
+template <typename T, typename = void>
+struct has_less_than_operator : std::false_type {};
+
+template <typename T>
+struct has_less_than_operator<T, void_t<decltype(std::declval<T>() < std::declval<T>())>> : std::true_type {};
 
 ///\todo store also a fix-sized code that indicates the type
 ///\todo managing complex types or excluding non-trivial, non-aggregate types
@@ -41,6 +58,10 @@ class DynamicAttributes : public Attributes {
 public:
     DynamicAttributes() = default;
     DynamicAttributes(const std::map<std::string, future_std::any>& attrs): mAttrs(attrs) {}
+    DynamicAttributes& operator=(const std::map<std::string, future_std::any>& attrs) {
+        mAttrs = attrs;
+        return *this;
+    }
 
     /**
      * \brief Returning an Attribute identified by its name
@@ -340,6 +361,14 @@ public:
         }
     };
 
+    template<typename T>
+    static inline typename std::enable_if<!has_less_than_operator<T>::value, void>::type makeTypeConditionallyAvailable() {}
+
+    template<typename T>
+    static inline typename std::enable_if<has_less_than_operator<T>::value, void>::type makeTypeConditionallyAvailable() {
+        mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+    }
+
     // Stores typed utils functions for each attribute type ever used
     static std::map<std::type_index, std::unique_ptr<AnyUtils_>> mAnyUtils;
 };
@@ -403,6 +432,30 @@ namespace std {
             return seed;
         }
     };
+
+    // Special case for std::array
+    template <typename T, std::size_t N>
+    struct hash<std::array<T, N>> {
+        std::size_t operator()(const std::array<T, N>& iterable) const {
+            std::size_t seed = 0;
+            for (const auto& v : iterable) {
+                // Combine the hash of each array element into the seed
+                Aidge::hash_combine(seed, std::hash<T>()(v));
+            }
+            return seed;
+        }
+    };
+
+    // Specialization of std::hash for std::pair<T1, T2>
+    template <typename T1, typename T2>
+    struct hash<std::pair<T1, T2>> {
+        std::size_t operator()(const std::pair<T1, T2>& p) const {
+            std::size_t seed = 0;
+            Aidge::hash_combine(seed, std::hash<T1>()(p.first));
+            Aidge::hash_combine(seed, std::hash<T2>()(p.second));
+            return seed;
+        }
+    };
 }
 
 namespace future_std {
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 439d2c638731b40bec0696a73b62b99e3bfddd41..9c18d3cefe466f68edde70536bca4d493f9f9b18 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -24,6 +24,7 @@
 #endif
 
 #include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
@@ -322,7 +323,11 @@ private:
     inline typename std::enable_if<I == sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& /*t*/, std::map<std::string, future_std::any>& /*attrs*/) const {}
 
     template<std::size_t I = 0, typename... Tp>
-    inline typename std::enable_if<I < sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& t, std::map<std::string, future_std::any>& attrs) const {
+    inline typename std::enable_if<I < sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& t, std::map<std::string, future_std::any>& attrs) const {   
+        // Ensure that the type will be known to DynamicAttributes
+        using ElementType = typename std::tuple_element<I,std::tuple<Tp...>>::type;     
+        DynamicAttributes::makeTypeConditionallyAvailable<ElementType>();
+
         attrs.insert(std::make_pair(EnumStrings<ATTRS_ENUM>::data[I], future_std::any(std::get<I>(t))));
         appendAttr<I + 1, Tp...>(t, attrs);
     }
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index f125291fafb89ec7ae81678a37e2bde2222a1054..f5ab29c679b7cbb06e5bd86876b63117fd8ce56d 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -39,6 +39,30 @@ void init_GenericOperator(py::module& m) {
         .def("set_forward_dims", &GenericOperator_Op::setForwardDims, py::arg("computation_function"));
 
     // &GenericOperator
+    m.def("GenericOperator",
+        []( const std::string& type,
+            const std::vector<Aidge::InputCategory>& inputCategory,
+            IOIndex_t nbOut,
+            const std::string& name,
+            const py::kwargs kwargs){
+            std::shared_ptr<Node> genericNode = GenericOperator(
+                type,
+                inputCategory,
+                nbOut,
+                name
+            );
+            if (kwargs){
+                std::shared_ptr<GenericOperator_Op> gop = std::static_pointer_cast<GenericOperator_Op>(genericNode->getOperator());
+                std::shared_ptr<DynamicAttributes> attr = std::dynamic_pointer_cast<DynamicAttributes>(gop->attributes());
+                for (auto item : kwargs) {
+                    std::string key = py::cast<std::string>(item.first);
+                    py::object value = py::reinterpret_borrow<py::object>(item.second);
+                    attr->setAttrPy(key, std::move(value));
+                }
+            }
+            return genericNode;
+        }, py::arg("type"), py::arg("input_category"), py::arg("nb_out"), py::arg("name") = "");
+
     m.def("GenericOperator",
         []( const std::string& type,
             IOIndex_t nbData,
@@ -65,6 +89,8 @@ void init_GenericOperator(py::module& m) {
             return genericNode;
         }, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), py::arg("name") = "");
 
+    m.def("GenericOperator", py::overload_cast<const std::string&, std::shared_ptr<OperatorTensor>, const std::string&>(&GenericOperator), py::arg("type"), py::arg("op"), py::arg("name") = "");
+
     declare_registrable<GenericOperator_Op>(m, "GenericOperatorOp");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index 7fa9e5825983eb0c82d2b1f84b77557e656a7d78..ded3b54088e6d1ed473ed614e23fc08cd89a0346 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -45,7 +45,14 @@ void init_Operator(py::module& m){
     .def("get_raw_input", &Operator::getRawInput, py::arg("inputIdx"))
     .def("nb_inputs", &Operator::nbInputs)
     .def("nb_outputs", &Operator::nbOutputs)
-    .def("input_category", &Operator::inputCategory, py::arg("idx"),
+    .def("input_category", static_cast<std::vector<InputCategory>(Operator::*)() const>(&Operator::inputCategory),
+    R"mydelimiter(
+    Category of the inputs (Data or Param, optional or not).
+    Data inputs exclude inputs expecting parameters (weights or bias).
+
+    :rtype: list(InputCategory)
+    )mydelimiter")
+    .def("input_category", static_cast<InputCategory(Operator::*)(IOIndex_t) const>(&Operator::inputCategory), py::arg("idx"),
     R"mydelimiter(
     Category of a specific input (Data or Param, optional or not).
     Data inputs exclude inputs expecting parameters (weights or bias).
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index 77f20b9d655c6d9f6e95b23c4884bd1bc4f9ffd6..f656af70dfa05678875afd4b4748f358437852a8 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -144,6 +144,13 @@ void init_Recipes(py::module &m)
     :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
+
+  m.def("to_generic_op", toGenericOp, py::arg("node"), R"mydelimiter(
+    Transform to a Generic Operator.
+
+    :param node: Node which Operator will turn into a Generic Operator
+    :type node: :py:class:`aidge_core.Node`
+    )mydelimiter");
 }
 
 } // namespace Aidge
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index c5bca92406e518df593fcc6c3a40525a4ba81dfa..1e28cf289960dee280457cd6ea119fcc9477cf9f 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -22,7 +22,8 @@
 Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
                                             const std::vector<Aidge::InputCategory>& inputsCategory,
                                             Aidge::IOIndex_t nbOut)
-    : OperatorTensor(type, inputsCategory, nbOut)
+    : OperatorTensor(type, inputsCategory, nbOut),
+        mAttributes(std::make_shared<DynamicAttributes>())
 {
     mImpl = std::make_shared<OperatorImpl>(*this);
 }
@@ -73,7 +74,8 @@ bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
         }
 
         const auto& outputsDims = mForwardDims(inputsDims);
-        AIDGE_ASSERT((outputsDims.size() == nbOutputs()), "The provided ComputeDimsFunc function returns the wrong number of outputs");
+        AIDGE_ASSERT(!outputsDims.empty(), "The provided ComputeDimsFunc cannot compute the output dims (an empty vector was returned)");
+        AIDGE_ASSERT(outputsDims.size() == nbOutputs(), "The provided ComputeDimsFunc function returned the wrong number of outputs: {}, but {} are expected", outputsDims.size(), nbOutputs());
         for (std::size_t i = 0; i < nbOutputs(); ++i) {
             mOutputs[i]->resize(outputsDims[i]);
         }
@@ -117,3 +119,40 @@ std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
                                                 const std::string& name) {
     return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
 }
+
+std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
+                                            std::shared_ptr<OperatorTensor> op,
+                                            const std::string& name)
+{
+    // Create a generic op with the same inputs/outputs
+    auto genericOp = std::make_shared<GenericOperator_Op>(type, op->inputCategory(), op->nbOutputs());
+
+    // Copy attributes
+    genericOp->setAttrs(op->attributes()->getAttrs());
+
+    // Set a default forward dims if possible
+    if (op->dimsForwarded()) {
+        auto opInputDims = std::vector<std::vector<DimSize_t>>(op->nbInputs());
+        for (size_t i = 0; i < op->nbInputs(); ++i) {
+            opInputDims[i] = op->getInput(i)->dims();
+        }
+
+        auto opOutputDims = std::vector<std::vector<DimSize_t>>(op->nbOutputs());
+        for (size_t o = 0; o < op->nbOutputs(); ++o) {
+            opOutputDims[o] = op->getOutput(o)->dims();
+        }
+
+        genericOp->setForwardDims([opInputDims, opOutputDims](const std::vector<std::vector<std::size_t>>& inputsDims) {
+            // Check input dims
+            for (size_t i = 0; i < opInputDims.size(); ++i) {
+                if (inputsDims[i] != opInputDims[i]) {
+                    // No matching => unable to compute output dims!
+                    return std::vector<std::vector<std::size_t>>();
+                }
+            }
+            return opOutputDims;
+        });
+    }
+
+    return std::make_shared<Node>(genericOp, name);
+}
diff --git a/src/recipes/ToGenericOp.cpp b/src/recipes/ToGenericOp.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5f151d8904a21ca49c96dbe089fdc96cd77e7501
--- /dev/null
+++ b/src/recipes/ToGenericOp.cpp
@@ -0,0 +1,23 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+void Aidge::toGenericOp(std::shared_ptr<Node> node) {
+    auto newGenOp = {GenericOperator(node->type(),  std::dynamic_pointer_cast<Aidge::OperatorTensor>(node->getOperator()), node->name())};
+    auto OldOp = {node};
+    GraphView::replace(OldOp, newGenOp);
+}
diff --git a/unit_tests/recipes/Test_ToGenericOp.cpp b/unit_tests/recipes/Test_ToGenericOp.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cb75fdb1072dee476c88c1f6d502a792b2e6abd9
--- /dev/null
+++ b/unit_tests/recipes/Test_ToGenericOp.cpp
@@ -0,0 +1,94 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <memory>
+#include <set>
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+namespace Aidge {
+
+TEST_CASE("[graph/convert] toGenericOp", "[toGenericOp][recipies]") {
+    // Create a convolution operator
+    std::shared_ptr<GraphView> g =
+                Sequential({
+                    Conv(1, 3, {3, 3}, "conv1"),
+                    ReLU(),
+                    Conv(3, 4, {1, 1}, "conv2"),
+                    ReLU(),
+                    Conv(4, 3, {1, 1}, "conv3"),
+                    ReLU(),
+                    FC(2028, 256, false, "fc1"),
+                    ReLU(),
+                    FC(256, 10, false, "fc2")});
+    
+    // NCHW - MNIST DATA like
+    g->forwardDims({{5, 1, 28, 28}});
+
+    SECTION("Test Operator to Generic Operator") {
+        auto convOp = g->getNode("conv2");
+
+        // Convert to GenericOperator
+        toGenericOp(convOp);
+
+        auto newGenOp = g->getNode("conv2");
+
+        // Ensure the conversion
+        REQUIRE(newGenOp->type() == "Conv2D");
+
+        const auto convOpAttr = convOp->getOperator()->attributes()->getAttrs();
+        const auto newGenOpAttr = (newGenOp->getOperator()->attributes()->getAttrs());
+        REQUIRE((!(newGenOpAttr < convOpAttr) && !(convOpAttr < newGenOpAttr)));
+    }
+
+    SECTION("Test MetaOperator to Generic Operator") {
+
+        const auto nbFused = fuseToMetaOps(g, "Conv2D->ReLU->FC", "ConvReLUFC");
+
+        REQUIRE(nbFused == 1);
+
+        std::shared_ptr<Node> metaOpNode;
+
+        const auto nodes = g->getNodes(); // g nodes gets modified in the loop!
+        for (const auto& nodePtr : nodes) 
+        {
+            if (nodePtr->type() == "ConvReLUFC") 
+            {
+                nodePtr->setName("ConvReLUFC_0");
+                metaOpNode = nodePtr;
+                // Convert to GenericOperator
+                toGenericOp(nodePtr);
+            }
+        }
+
+        REQUIRE(metaOpNode);
+        REQUIRE(!metaOpNode->getOperator()->isAtomic());
+        auto newGenOp = g->getNode("ConvReLUFC_0");
+
+        // Ensure the conversion
+        REQUIRE(newGenOp->type() == "ConvReLUFC");
+        REQUIRE(std::dynamic_pointer_cast<GenericOperator_Op>(newGenOp->getOperator()));
+
+        const auto metaOpAttr = *std::static_pointer_cast<DynamicAttributes>(metaOpNode->getOperator()->attributes());
+        const auto newGenOpAttr = *std::static_pointer_cast<DynamicAttributes>(newGenOp->getOperator()->attributes());
+        REQUIRE((!(newGenOpAttr < metaOpAttr) && !(metaOpAttr < newGenOpAttr)));
+
+    }
+
+}
+
+} // namespace Aidge