From dda8ab68879c174ea8251fe33fcdaaf90ec13030 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 27 Feb 2025 09:39:07 +0000
Subject: [PATCH 01/12] First draft for OperatorTensorWithImpl

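Factor the per-operator boilerplate (copy constructor, clone(),
setBackend(), getAvailableBackends() and the Registrable inheritance)
into a CRTP base class, OperatorTensorWithImpl<T>, tried out on
Add_Op first. The OPERATOR_TENSOR_NAMINGS macro generates Type() and
the input/output name getters; ARRAY_LITERAL exists to pass a braced
initializer list containing commas as a single macro argument.

Sketch of the intended usage (illustrative only; MyOp is a
hypothetical operator, not part of this patch):

    class MyOp_Op : public OperatorTensorWithImpl<MyOp_Op> {
    public:
        OPERATOR_TENSOR_NAMINGS("MyOp", ARRAY_LITERAL({"data_input"}), {"data_output"});

        // clone(), setBackend() and getAvailableBackends() are
        // inherited from OperatorTensorWithImpl<MyOp_Op>.
        MyOp_Op() : OperatorTensorWithImpl(Type(), {InputCategory::Data}, 1) {}
    };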
---
 include/aidge/operator/Add.hpp            | 35 +++++++++--------
 include/aidge/operator/OperatorTensor.hpp | 47 ++++++++++++++++++++++-
 src/operator/Add.cpp                      | 12 +++---
 3 files changed, 71 insertions(+), 23 deletions(-)

diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index fcd154b6e..491dc234d 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -45,13 +45,18 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Add_Op : public OperatorTensor,
-    public Registrable<Add_Op,
-                       std::string,
-                       std::function<std::shared_ptr<OperatorImpl>(const Add_Op&)>>
+
+class Add_Op : public OperatorTensorWithImpl<Add_Op>
+
+// class Add_Op : public OperatorTensor,
+//     public Registrable<Add_Op,
+//                        std::string,
+//                        std::function<std::shared_ptr<OperatorImpl>(const Add_Op&)>>
 {
 public:
-    static const std::string Type;
+    OPERATOR_TENSOR_NAMINGS("Add", ARRAY_LITERAL({"data_input_0", "data_input_n"}), {"data_output"});
+
+    //static const std::string Type;
 
     Add_Op();
 
@@ -61,13 +66,13 @@ public:
      * @details Copies the operator attributes and its output tensor(s), but not
      * its input tensors. The new operator has no associated input.
      */
-    Add_Op(const Add_Op& op);
+    //Add_Op(const Add_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Add_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    //std::shared_ptr<Operator> clone() const override;
 
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
@@ -81,15 +86,15 @@ public:
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
+    //void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    //std::set<std::string> getAvailableBackends() const override;
 
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input_0", "data_input_n"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
+    // static const std::vector<std::string> getInputsName() {
+    //     return {"data_input_0", "data_input_n"};
+    // }
+    // static const std::vector<std::string> getOutputsName() {
+    //     return {"data_output"};
+    // }
 };
 
 std::shared_ptr<Node> Add(const std::string& name = "");
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index a515ecb5b..99670fc94 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -16,14 +16,14 @@
 #include <string>
 #include <vector>
 
+#include "aidge/data/Tensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
 
-class Tensor;
-
 /**
  * @class OperatorTensor
  * @brief Base class for all operators that work with tensor inputs and outputs.
@@ -205,6 +205,49 @@ protected:
     bool inputsAssociated(bool checkNonEmpty = true) const;
 };
 
+
+template <class T>
+class OperatorTensorWithImpl : public OperatorTensor,
+                               public Registrable<T, std::string, std::function<std::shared_ptr<OperatorImpl>(const T&)>>
+{
+public:
+    OperatorTensorWithImpl(const std::string& type, const std::vector<InputCategory>& inputsCategory,
+        const IOIndex_t nbOut): OperatorTensor(type, inputsCategory, nbOut) {}
+    
+    OperatorTensorWithImpl(const T& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(T, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy constructor.
+     * @return A shared pointer to the cloned T object.
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<T>(*static_cast<const T*>(this));
+    }
+
+    virtual void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
+        SET_IMPL_MACRO(T, *static_cast<T*>(this), name);
+        mOutputs[0]->setBackend(name, device);
+    }
+
+    std::set<std::string> getAvailableBackends() const override {
+        return Registrar<T>::getKeys();
+    }
+};
+
+#define ARRAY_LITERAL(...) __VA_ARGS__ 
+#define OPERATOR_TENSOR_NAMINGS(TYPE, INPUTS, OUTPUTS) \
+static const std::string Type() { return TYPE; } \
+static const std::vector<std::string> getInputsName() { return INPUTS; } \
+static const std::vector<std::string> getOutputsName() { return OUTPUTS; }
+
 }  // namespace Aidge
 
 #endif  // AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index f6fd0cd9f..0b3425329 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -20,14 +20,14 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 
-const std::string Aidge::Add_Op::Type = "Add";
+//const std::string Aidge::Add_Op::Type = "Add";
 
 Aidge::Add_Op::Add_Op()
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1)
+    : OperatorTensorWithImpl(Type(), {InputCategory::Data, InputCategory::Data}, 1)
 {
     // ctor
 }
-
+/*
 Aidge::Add_Op::Add_Op(const Add_Op& op)
     : OperatorTensor(op)
 {
@@ -41,7 +41,7 @@ Aidge::Add_Op::Add_Op(const Add_Op& op)
 std::shared_ptr<Aidge::Operator> Aidge::Add_Op::clone() const {
     return std::make_shared<Add_Op>(*this);
 }
-
+*/
 bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
@@ -77,7 +77,7 @@ bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
 
     return false;
 }
-
+/*
 void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(Add_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
@@ -86,7 +86,7 @@ void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
 std::set<std::string> Aidge::Add_Op::getAvailableBackends() const {
     return Registrar<Add_Op>::getKeys();
 }
-
+*/
 ////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Add(const std::string& name) {
-- 
GitLab


From f035ce9650a2a14faa241903a01b730ff7adfd6f Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 27 Feb 2025 11:03:40 +0000
Subject: [PATCH 02/12] Made OperatorTensorWithImpl generic over a default implementation

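OperatorTensorWithImpl gains an optional second template parameter,
DEF_IMPL: an operator that ships a backend-agnostic default
implementation (such as Reshape with Reshape_OpImpl) now falls back
to it whenever no backend-specific implementation is registered,
both in the copy constructor and in setBackend(). The macros are
renamed to OPERATOR_DESC / PROTECT_ARRAY_LITERAL and Reshape is
ported as the first user of the default-implementation path.

Condensed shape of the fallback logic, for illustration (equivalent
to the two-branch version in the template below):

    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
        if (!std::is_same<DEF_IMPL, OperatorImpl>::value
                && !Registrar<T>::exists({name})) {
            // No backend-specific implementation registered:
            // fall back to the default one.
            mImpl = std::make_shared<DEF_IMPL>(*static_cast<T*>(this));
        } else {
            SET_IMPL_MACRO(T, *static_cast<T*>(this), name);
        }
        mOutputs[0]->setBackend(name, device);
    }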
---
 include/aidge/operator/Add.hpp            | 43 +-----------------
 include/aidge/operator/OperatorTensor.hpp | 29 +++++++++---
 include/aidge/operator/Reshape.hpp        | 55 ++---------------------
 src/operator/Add.cpp                      | 25 -----------
 src/operator/Reshape.cpp                  | 34 +-------------
 5 files changed, 27 insertions(+), 159 deletions(-)

diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 491dc234d..1deebceaf 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -47,54 +47,13 @@ namespace Aidge {
  */
 
 class Add_Op : public OperatorTensorWithImpl<Add_Op>
-
-// class Add_Op : public OperatorTensor,
-//     public Registrable<Add_Op,
-//                        std::string,
-//                        std::function<std::shared_ptr<OperatorImpl>(const Add_Op&)>>
 {
 public:
-    OPERATOR_TENSOR_NAMINGS("Add", ARRAY_LITERAL({"data_input_0", "data_input_n"}), {"data_output"});
-
-    //static const std::string Type;
+    OPERATOR_DESC("Add", PROTECT_ARRAY_LITERAL({"data_input_0", "data_input_n"}), {"data_output"});
 
     Add_Op();
 
-    /**
-     * @brief Copy-constructor.
-     * @param op Add_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    //Add_Op(const Add_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Add_Op
-     */
-    //std::shared_ptr<Operator> clone() const override;
-
-    // Data operator[](const char* inputName) override final {
-    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
-    //         (strcmp(inputName, "weight") ? mInputs[1] :
-    //         (strcmp(inputName, "bias") ? mInputs[2] :
-    //         nullptr));
-    //     assert((in!=nullptr) && "No such parameter");
-    //     return *in;
-    // }
-
-
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    //void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    //std::set<std::string> getAvailableBackends() const override;
-
-    // static const std::vector<std::string> getInputsName() {
-    //     return {"data_input_0", "data_input_n"};
-    // }
-    // static const std::vector<std::string> getOutputsName() {
-    //     return {"data_output"};
-    // }
 };
 
 std::shared_ptr<Node> Add(const std::string& name = "");
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 99670fc94..0615eb47d 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -206,7 +206,7 @@ protected:
 };
 
 
-template <class T>
+template <class T, class DEF_IMPL = OperatorImpl>
 class OperatorTensorWithImpl : public OperatorTensor,
                                public Registrable<T, std::string, std::function<std::shared_ptr<OperatorImpl>(const T&)>>
 {
@@ -217,9 +217,13 @@ public:
     OperatorTensorWithImpl(const T& op)
         : OperatorTensor(op)
     {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(T, *this, op.backend());
-        } else {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(T, *static_cast<T*>(this), op.backend());
+        }
+        else if (!std::is_same<DEF_IMPL, OperatorImpl>::value) {
+            mImpl = std::make_shared<DEF_IMPL>(*static_cast<T*>(this));
+        }
+        else {
             mImpl = nullptr;
         }
     }
@@ -233,7 +237,18 @@ public:
     }
 
     virtual void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(T, *static_cast<T*>(this), name);
+        if (!std::is_same<DEF_IMPL, OperatorImpl>::value) {
+            if (Registrar<T>::exists({name})){
+                SET_IMPL_MACRO(T, *static_cast<T*>(this), name);
+            }
+            else {
+                mImpl = std::make_shared<DEF_IMPL>(*static_cast<T*>(this));
+            }
+        }
+        else {
+            SET_IMPL_MACRO(T, *static_cast<T*>(this), name);
+        }
+
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -242,8 +257,8 @@ public:
     }
 };
 
-#define ARRAY_LITERAL(...) __VA_ARGS__ 
-#define OPERATOR_TENSOR_NAMINGS(TYPE, INPUTS, OUTPUTS) \
+#define PROTECT_ARRAY_LITERAL(...) __VA_ARGS__ 
+#define OPERATOR_DESC(TYPE, INPUTS, OUTPUTS) \
 static const std::string Type() { return TYPE; } \
 static const std::vector<std::string> getInputsName() { return INPUTS; } \
 static const std::vector<std::string> getOutputsName() { return OUTPUTS; }
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index e69c42d4d..146ef4260 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -84,21 +84,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Reshape_Op : public OperatorTensor,
-                   public Registrable<Reshape_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Reshape_Op&)>> {
-
-public:
-    /**
-     * @brief Static type string for the Reshape operator.
-     */
-    static const std::string Type;
-
+class Reshape_Op : public OperatorTensorWithImpl<Reshape_Op, Reshape_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<ReshapeAttr, std::vector<std::int64_t>, bool>;
     template <ReshapeAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    OPERATOR_DESC("Reshape", {"data_input"}, {"data_output"});
+
     /**
      * @brief Deleted default constructor.
      */
@@ -111,20 +105,6 @@ public:
      */
     Reshape_Op(const std::vector<std::int64_t>& shape = {}, bool allowzero = false);
 
-    /**
-     * @brief Copy-constructor.
-     * @param[in] op Reshape_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Reshape_Op(const Reshape_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Check whether the dimensions have been forwarded successfully.
      * @return True if dimensions were successfully forwarded.
@@ -138,19 +118,6 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Set the backend for the Reshape operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the available backends for the Reshape operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -169,22 +136,6 @@ public:
      */
     inline bool& allowZero() const { return mAttributes->template getAttr<ReshapeAttr::AllowZero>(); }
 
-    /**
-     * @brief Get the input tensor names for the Reshape operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names for the Reshape operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
 	/**
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 0b3425329..6f29e9e24 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -20,28 +20,12 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 
-//const std::string Aidge::Add_Op::Type = "Add";
-
 Aidge::Add_Op::Add_Op()
     : OperatorTensorWithImpl(Type(), {InputCategory::Data, InputCategory::Data}, 1)
 {
     // ctor
 }
-/*
-Aidge::Add_Op::Add_Op(const Add_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Add_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
 
-std::shared_ptr<Aidge::Operator> Aidge::Add_Op::clone() const {
-    return std::make_shared<Add_Op>(*this);
-}
-*/
 bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
@@ -77,16 +61,7 @@ bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
 
     return false;
 }
-/*
-void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(Add_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
 
-std::set<std::string> Aidge::Add_Op::getAvailableBackends() const {
-    return Registrar<Add_Op>::getKeys();
-}
-*/
 ////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Add(const std::string& name) {
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 8b42cb514..17a32e6e7 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -39,10 +39,8 @@ void Aidge::Reshape_OpImpl::backward() {
 
 //////////////////////////////////////////////////
 
-const std::string Aidge::Reshape_Op::Type = "Reshape";
-
 Aidge::Reshape_Op::Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    : OperatorTensorWithImpl(Type(), {InputCategory::Data, InputCategory::OptionalData}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<ReshapeAttr::Shape>(shape),
         attr<ReshapeAttr::AllowZero>(allowzero)))
@@ -50,22 +48,6 @@ Aidge::Reshape_Op::Reshape_Op(const std::vector<std::int64_t>& shape, bool allow
     mImpl = std::make_shared<Reshape_OpImpl>(*this);
 }
 
-Aidge::Reshape_Op::Reshape_Op(const Aidge::Reshape_Op& op)
-    : OperatorTensor(op),
-        mAttributes(op.mAttributes)
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Reshape_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Reshape_Op::clone() const {
-    return std::make_shared<Reshape_Op>(*this);
-}
-
 bool Aidge::Reshape_Op::dimsForwarded() const {
     if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
@@ -135,20 +117,6 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
     return false;
 }
 
-void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Reshape_Op>::exists({name})){
-        SET_IMPL_MACRO(Reshape_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Reshape_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Reshape_Op::getAvailableBackends() const {
-    return Registrar<Reshape_Op>::getKeys();
-}
-
 //////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Reshape(const std::vector<std::int64_t>& shape,
-- 
GitLab


From 9b59db4afd081059dc098ff9a62824debd29daf1 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 27 Feb 2025 13:23:59 +0000
Subject: [PATCH 03/12] Adapted Conv to OperatorTensorWithImpl

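Port the first class template, Conv_Op<DIM>. Since the base class is
now a dependent type, its members (getInput, mOutputs, ...) have to
be re-exposed with using-declarations so that unqualified lookup in
the template's own code still finds them. Conv's static Type string
becomes the Type() function generated by OPERATOR_DESC, so the call
sites in FuseBatchNorm and LabelGraph switch from Type to Type();
the helper macro PROTECT_ARRAY_LITERAL is renamed SINGLE_ARG. The
base setBackend() also takes over propagating the backend to Param /
OptionalParam inputs (weight and bias), which Conv previously did by
hand.

Minimal sketch of the dependent-base pattern (illustrative only):

    template <DimIdx_t DIM>
    class Conv_Op : public OperatorTensorWithImpl<Conv_Op<DIM>> {
    public:
        // Without these, two-phase name lookup would not consider
        // the dependent base class when resolving unqualified names.
        using OperatorTensorWithImpl<Conv_Op<DIM>>::getInput;
        using OperatorTensorWithImpl<Conv_Op<DIM>>::mOutputs;
    };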
---
 include/aidge/operator/Add.hpp            |  2 +-
 include/aidge/operator/Conv.hpp           | 47 ++++++-----------------
 include/aidge/operator/OperatorTensor.hpp | 13 ++++++-
 src/operator/Conv.cpp                     | 38 +-----------------
 src/recipes/FuseBatchNorm.cpp             |  4 +-
 src/recipes/LabelGraph.cpp                |  2 +-
 6 files changed, 29 insertions(+), 77 deletions(-)

diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 1deebceaf..36bc162d5 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -49,7 +49,7 @@ namespace Aidge {
 class Add_Op : public OperatorTensorWithImpl<Add_Op>
 {
 public:
-    OPERATOR_DESC("Add", PROTECT_ARRAY_LITERAL({"data_input_0", "data_input_n"}), {"data_output"});
+    OPERATOR_DESC("Add", SINGLE_ARG({"data_input_0", "data_input_n"}), {"data_output"});
 
     Add_Op();
 
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 283d0136e..1ffd9662f 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -73,12 +73,8 @@ enum class ConvAttr {
  * @see Registrable
  */
 template <DimIdx_t DIM>
-class Conv_Op : public OperatorTensor,
-                public Registrable<Conv_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>> {
-
+class Conv_Op : public OperatorTensorWithImpl<Conv_Op<DIM>> {
 public:
-    static const std::string Type;
-
     // Use the external enum so that Aidge::Conv_Op<DIM>::Attr is valid.
     using Attr = ConvAttr;
 
@@ -89,6 +85,15 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    OPERATOR_DESC("Conv" + std::to_string(DIM) + "D", SINGLE_ARG({"data_input", "weight", "bias"}), {"data_output"});
+
+    using OperatorTensorWithImpl<Conv_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<Conv_Op<DIM>>::getOutput;
+    using OperatorTensorWithImpl<Conv_Op<DIM>>::OperatorTensorWithImpl;
+    using OperatorTensorWithImpl<Conv_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<Conv_Op<DIM>>::mOutputs;
+    using OperatorTensorWithImpl<Conv_Op<DIM>>::dimsForwarded;
+
     Conv_Op() = delete;
 
     /**
@@ -100,7 +105,7 @@ public:
     constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                       const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
+        : OperatorTensorWithImpl<Conv_Op<DIM>>(Type(), {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<Attr::StrideDims>(strideDims),
             attr<Attr::DilationDims>(dilationDims),
@@ -114,15 +119,6 @@ public:
      */
     Conv_Op(const Conv_Op<DIM>& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned Conv_Op object.
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Conv_Op<DIM>>(*this);
-    }
-
-
     /**
      * @brief Compute forward dimensions for the operator.
      * @param allowDataDependency Flag to allow data dependency in dimension calculation.
@@ -142,19 +138,6 @@ public:
                           const std::vector<DimSize_t>& outputDims,
                           const IOIndex_t outputIdx = 0) const override;
 
-    /**
-     * @brief Set the backend for the operator.
-     * @param name The name of the backend.
-     * @param device The device index (default is 0).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the list of available backends for the operator.
-     * @return A set of available backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the number of input channels.
      * @return The number of input channels.
@@ -193,14 +176,6 @@ public:
      */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<Attr::KernelDims>(); }
 
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input", "weight", "bias"};
-    }
-
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
-
 	/**
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 0615eb47d..63f6d9659 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -250,6 +250,17 @@ public:
         }
 
         mOutputs[0]->setBackend(name, device);
+
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            if (inputCategory(i) == InputCategory::Param || inputCategory(i) == InputCategory::OptionalParam) {
+                if (getInput(i)) {
+                    getInput(i)->setBackend(name, device);
+                }
+                else if (inputCategory(i) != InputCategory::OptionalParam) {
+                    Log::notice("{}_Op::setBackend(): could not set backend for {} input, because input is not connected", type(), getInputsName()[i]);
+                }
+            }
+        }
     }
 
     std::set<std::string> getAvailableBackends() const override {
@@ -257,7 +268,7 @@ public:
     }
 };
 
-#define PROTECT_ARRAY_LITERAL(...) __VA_ARGS__ 
+#define SINGLE_ARG(...) __VA_ARGS__ 
 #define OPERATOR_DESC(TYPE, INPUTS, OUTPUTS) \
 static const std::string Type() { return TYPE; } \
 static const std::vector<std::string> getInputsName() { return INPUTS; } \
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index d69aad616..1f3aba1a4 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -24,20 +24,10 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Conv_Op<DIM>::Type = "Conv" + std::to_string(DIM) + "D";
-
 template <Aidge::DimIdx_t DIM>
 Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
-    : OperatorTensor(op),
-      mAttributes(op.mAttributes)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
+    : OperatorTensorWithImpl<Conv_Op<DIM>>(op),
+      mAttributes(op.mAttributes) {}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -155,25 +145,6 @@ Aidge::Conv_Op<DIM>::computeReceptiveField(
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-
-    // By default, automatically set backend for weight and bias inputs
-    if (getInput(1)) {
-        getInput(1)->setBackend(name, device);
-    }
-    else {
-        Log::notice("Conv_Op::setBackend(): could not set backend for weight input, because input is not connected");
-    }
-
-    if (getInput(2)) {
-        // Bias is optional
-        getInput(2)->setBackend(name, device);
-    }
-}
-
 template <Aidge::DimIdx_t DIM>
 Aidge::DimSize_t Aidge::Conv_Op<DIM>::inChannels() const {
     if (!getInput(1)) {
@@ -196,11 +167,6 @@ Aidge::DimSize_t Aidge::Conv_Op<DIM>::outChannels() const {
     return getInput(1)->template dims<DIM+2>()[0];
 }
 
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::Conv_Op<DIM>::getAvailableBackends() const {
-    return Registrar<Conv_Op<DIM>>::getKeys();
-}
-
 template class Aidge::Conv_Op<1>;
 template class Aidge::Conv_Op<2>;
 
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 0fd9d7b44..bbca75cf0 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -42,7 +42,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
         convNode = outputNodes[0].first;
     }
 
-    AIDGE_ASSERT(((convNode->type() == Conv_Op<2>::Type) || (convNode->type() == ConvDepthWise_Op<2>::Type)), "Wrong type");
+    AIDGE_ASSERT(((convNode->type() == Conv_Op<2>::Type()) || (convNode->type() == ConvDepthWise_Op<2>::Type)), "Wrong type");
     AIDGE_ASSERT(batchnormNode->type() == BatchNorm_Op<2>::Type, "Wrong type for batchnorm node.");
 
     // TODO: Find a way to remove the template
@@ -55,7 +55,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
     std::array<DimSize_t, 2> kernelDims = {1,1};
     AIDGE_ASSERT(convNode->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
     std::shared_ptr<OperatorTensor> convOp = std::static_pointer_cast<OperatorTensor>(convNode->getOperator());
-    if (convNode->type() == Conv_Op<2>::Type) {
+    if (convNode->type() == Conv_Op<2>::Type()) {
         const std::shared_ptr<Conv_Op<2>> convOpPtr =
             std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
         convNbOutChannels = convOpPtr->outChannels();
diff --git a/src/recipes/LabelGraph.cpp b/src/recipes/LabelGraph.cpp
index 75bcd36bf..790364bc9 100644
--- a/src/recipes/LabelGraph.cpp
+++ b/src/recipes/LabelGraph.cpp
@@ -19,7 +19,7 @@
 
 Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     // Conv => MaxPooling
-    if (node->type() == Conv_Op<2>::Type) {
+    if (node->type() == Conv_Op<2>::Type()) {
         auto op = std::dynamic_pointer_cast<Conv_Op<2>>(node->getOperator());
 
         auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
-- 
GitLab


From 2b3c5f1260e828eba296b8c16e0ac15e1f7f5340 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Fri, 28 Feb 2025 12:52:04 +0000
Subject: [PATCH 04/12] Removed macro

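Drop the OPERATOR_DESC / SINGLE_ARG macros again: each operator goes
back to declaring a plain static const std::string Type; member
(defined in its .cpp) together with explicit getInputsName() /
getOutputsName(), and the Type() call sites are reverted to Type.
The OperatorTensorWithImpl base class itself is kept as-is.

Resulting declaration style, taken from Add_Op for illustration:

    class Add_Op : public OperatorTensorWithImpl<Add_Op> {
    public:
        static const std::string Type;  // "Add", defined in Add.cpp

        static const std::vector<std::string> getInputsName() {
            return {"data_input_0", "data_input_n"};
        }
    };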
---
 include/aidge/operator/Add.hpp            |  9 ++++++++-
 include/aidge/operator/Conv.hpp           | 14 +++++++++++---
 include/aidge/operator/OperatorTensor.hpp |  6 ------
 include/aidge/operator/Reshape.hpp        | 21 ++++++++++++++++++++-
 src/operator/Add.cpp                      |  4 +++-
 src/operator/Conv.cpp                     |  3 +++
 src/operator/Reshape.cpp                  |  4 +++-
 src/recipes/FuseBatchNorm.cpp             |  4 ++--
 src/recipes/LabelGraph.cpp                |  2 +-
 9 files changed, 51 insertions(+), 16 deletions(-)

diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 36bc162d5..b2d4f4f15 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -49,11 +49,18 @@ namespace Aidge {
 class Add_Op : public OperatorTensorWithImpl<Add_Op>
 {
 public:
-    OPERATOR_DESC("Add", SINGLE_ARG({"data_input_0", "data_input_n"}), {"data_output"});
+    static const std::string Type;
 
     Add_Op();
 
     bool forwardDims(bool allowDataDependency = false) override final;
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input_0", "data_input_n"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
 };
 
 std::shared_ptr<Node> Add(const std::string& name = "");
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 1ffd9662f..05263f6c8 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -75,6 +75,8 @@ enum class ConvAttr {
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensorWithImpl<Conv_Op<DIM>> {
 public:
+    static const std::string Type;
+
     // Use the external enum so that Aidge::Conv_Op<DIM>::Attr is valid.
     using Attr = ConvAttr;
 
@@ -85,8 +87,6 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    OPERATOR_DESC("Conv" + std::to_string(DIM) + "D", SINGLE_ARG({"data_input", "weight", "bias"}), {"data_output"});
-
     using OperatorTensorWithImpl<Conv_Op<DIM>>::getInput;
     using OperatorTensorWithImpl<Conv_Op<DIM>>::getOutput;
     using OperatorTensorWithImpl<Conv_Op<DIM>>::OperatorTensorWithImpl;
@@ -105,7 +105,7 @@ public:
     constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                       const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensorWithImpl<Conv_Op<DIM>>(Type(), {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
+        : OperatorTensorWithImpl<Conv_Op<DIM>>(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<Attr::StrideDims>(strideDims),
             attr<Attr::DilationDims>(dilationDims),
@@ -176,6 +176,14 @@ public:
      */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<Attr::KernelDims>(); }
 
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "weight", "bias"};
+    }
+
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+
 	/**
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 63f6d9659..ce29a73f9 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -268,12 +268,6 @@ public:
     }
 };
 
-#define SINGLE_ARG(...) __VA_ARGS__ 
-#define OPERATOR_DESC(TYPE, INPUTS, OUTPUTS) \
-static const std::string Type() { return TYPE; } \
-static const std::vector<std::string> getInputsName() { return INPUTS; } \
-static const std::vector<std::string> getOutputsName() { return OUTPUTS; }
-
 }  // namespace Aidge
 
 #endif  // AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 74316ca4f..6cad57c37 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -96,7 +96,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    OPERATOR_DESC("Reshape", {"data_input"}, {"data_output"});
+    /**
+     * @brief Static type string for the Reshape operator.
+     */
+    static const std::string Type;
 
     /**
      * @brief Deleted default constructor.
@@ -141,6 +144,22 @@ public:
      */
     inline bool& allowZero() const { return mAttributes->template getAttr<ReshapeAttr::AllowZero>(); }
 
+    /**
+     * @brief Get the input tensor names for the Reshape operator.
+     * @return A vector of input tensor names.
+     */
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+
+    /**
+     * @brief Get the output tensor names for the Reshape operator.
+     * @return A vector of output tensor names.
+     */
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+
 	/**
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 6f29e9e24..3b6221447 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -20,8 +20,10 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 
+const std::string Aidge::Add_Op::Type = "Add";
+
 Aidge::Add_Op::Add_Op()
-    : OperatorTensorWithImpl(Type(), {InputCategory::Data, InputCategory::Data}, 1)
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1)
 {
     // ctor
 }
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 1f3aba1a4..4c87bf50d 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -24,6 +24,9 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::Conv_Op<DIM>::Type = "Conv" + std::to_string(DIM) + "D";
+
 template <Aidge::DimIdx_t DIM>
 Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
     : OperatorTensorWithImpl<Conv_Op<DIM>>(op),
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 17a32e6e7..1ac57d669 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -39,8 +39,10 @@ void Aidge::Reshape_OpImpl::backward() {
 
 //////////////////////////////////////////////////
 
+const std::string Aidge::Reshape_Op::Type = "Reshape";
+
 Aidge::Reshape_Op::Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
-    : OperatorTensorWithImpl(Type(), {InputCategory::Data, InputCategory::OptionalData}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<ReshapeAttr::Shape>(shape),
         attr<ReshapeAttr::AllowZero>(allowzero)))
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index bbca75cf0..0fd9d7b44 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -42,7 +42,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
         convNode = outputNodes[0].first;
     }
 
-    AIDGE_ASSERT(((convNode->type() == Conv_Op<2>::Type()) || (convNode->type() == ConvDepthWise_Op<2>::Type)), "Wrong type");
+    AIDGE_ASSERT(((convNode->type() == Conv_Op<2>::Type) || (convNode->type() == ConvDepthWise_Op<2>::Type)), "Wrong type");
     AIDGE_ASSERT(batchnormNode->type() == BatchNorm_Op<2>::Type, "Wrong type for batchnorm node.");
 
     // TODO: Find a way to remove the template
@@ -55,7 +55,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
     std::array<DimSize_t, 2> kernelDims = {1,1};
     AIDGE_ASSERT(convNode->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
     std::shared_ptr<OperatorTensor> convOp = std::static_pointer_cast<OperatorTensor>(convNode->getOperator());
-    if (convNode->type() == Conv_Op<2>::Type()) {
+    if (convNode->type() == Conv_Op<2>::Type) {
         const std::shared_ptr<Conv_Op<2>> convOpPtr =
             std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
         convNbOutChannels = convOpPtr->outChannels();
diff --git a/src/recipes/LabelGraph.cpp b/src/recipes/LabelGraph.cpp
index 790364bc9..75bcd36bf 100644
--- a/src/recipes/LabelGraph.cpp
+++ b/src/recipes/LabelGraph.cpp
@@ -19,7 +19,7 @@
 
 Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     // Conv => MaxPooling
-    if (node->type() == Conv_Op<2>::Type()) {
+    if (node->type() == Conv_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<Conv_Op<2>>(node->getOperator());
 
         auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
-- 
GitLab


From 53df1692b61abfeb12a10a99f2637e40207d4f6b Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Sat, 1 Mar 2025 18:20:04 +0000
Subject: [PATCH 05/12] More code factorization

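Replace the static std::string Type and the name-getter functions
with in-class constexpr character arrays (Type, InputsName and
OutputsName), so no out-of-line definition is needed and several
source files shrink (Abs.cpp disappears entirely). Because constexpr
character pointers cannot be built by string concatenation at
compile time, the Conv type name comes from a small
Conv_Op_Type<DIM> trait. The Python bindings rebuild a
std::vector<std::string> from each array.

Note that in-class static constexpr data members are implicitly
inline only since C++17; assuming that standard, the binding pattern
looks as follows (mirrors pybind_Abs.cpp):

    .def_static("get_inputs_name", []() {
        return std::vector<std::string>(std::begin(Abs_Op::InputsName),
                                        std::end(Abs_Op::InputsName));
    })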
---
 include/aidge/operator/Abs.hpp            | 45 +++------------------
 include/aidge/operator/Add.hpp            | 14 ++-----
 include/aidge/operator/And.hpp            | 43 +++-----------------
 include/aidge/operator/ArgMax.hpp         | 49 +++--------------------
 include/aidge/operator/Conv.hpp           | 24 +++++------
 python_binding/operator/pybind_Abs.cpp    |  8 +++-
 python_binding/operator/pybind_Add.cpp    |  8 +++-
 python_binding/operator/pybind_And.cpp    |  9 ++++-
 python_binding/operator/pybind_ArgMax.cpp |  9 ++++-
 python_binding/operator/pybind_Conv.cpp   | 10 +++--
 src/operator/Abs.cpp                      | 29 --------------
 src/operator/Add.cpp                      |  2 -
 src/operator/And.cpp                      | 11 -----
 src/operator/ArgMax.cpp                   | 25 +-----------
 src/operator/Conv.cpp                     |  3 --
 15 files changed, 64 insertions(+), 225 deletions(-)
 delete mode 100644 src/operator/Abs.cpp

diff --git a/include/aidge/operator/Abs.hpp b/include/aidge/operator/Abs.hpp
index d8dc62752..e662bf415 100644
--- a/include/aidge/operator/Abs.hpp
+++ b/include/aidge/operator/Abs.hpp
@@ -36,48 +36,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Abs_Op : public OperatorTensor,
-    public Registrable<Abs_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const Abs_Op&)>> {
+class Abs_Op : public OperatorTensorWithImpl<Abs_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Abs";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    Abs_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Abs_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Abs_Op(const Abs_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Abs_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Abs_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Abs_Op>(*this);
-    }
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
+    Abs_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 };
 
 inline std::shared_ptr<Node> Abs(const std::string& name = "") {
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index b2d4f4f15..ed64b97f5 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -46,21 +46,15 @@ namespace Aidge {
  * @see Registrable
  */
 
-class Add_Op : public OperatorTensorWithImpl<Add_Op>
-{
+class Add_Op : public OperatorTensorWithImpl<Add_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Add";
+    static constexpr const char* const InputsName[] = {"data_input_0", "data_input_n"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Add_Op();
 
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input_0", "data_input_n"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Add(const std::string& name = "");
diff --git a/include/aidge/operator/And.hpp b/include/aidge/operator/And.hpp
index 32b6684a0..20cd3a8fb 100644
--- a/include/aidge/operator/And.hpp
+++ b/include/aidge/operator/And.hpp
@@ -46,48 +46,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class And_Op : public OperatorTensor,
-    public Registrable<And_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const And_Op&)>> {
+class And_Op : public OperatorTensorWithImpl<And_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Add";
+    static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    And_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op And_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    And_Op(const And_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(And_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::And_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<And_Op>(*this);
-    }
+    And_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input_1", "data_input_2"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 inline std::shared_ptr<Node> And(const std::string& name = "") {
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index 5057310d3..dd43f2d11 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -82,13 +82,7 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ArgMax_Op : public OperatorTensor,
-                public Registrable<ArgMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ArgMax_Op &)>> {
-
-public:
-    /// The type of the operator as a string.
-    static const std::string Type;
-
+class ArgMax_Op : public OperatorTensorWithImpl<ArgMax_Op> {
 private:
     using Attributes_ = StaticAttributes<ArgMaxAttr,
                             GENERATE_LIST_ATTR_TYPE(LIST_ARGMAX_ATTR)
@@ -99,6 +93,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Add";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     ArgMax_Op() = delete;
 
     /**
@@ -108,7 +106,7 @@ public:
      * @param[in] select_last_index Whether to select the last occurrence of the maximum value (`true`) or the first (`false`).
      */
     ArgMax_Op(std::int32_t axis = 0, bool keep_dims = true, bool select_last_index = false)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
+        : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ArgMaxAttr::Axis>(axis),
             attr<ArgMaxAttr::KeepDims>(keep_dims),
@@ -125,12 +123,6 @@ public:
      */
     ArgMax_Op(const ArgMax_Op& op);
 
-    /**
-     * @brief Creates a copy of the current ArgMax operator.
-     * @return A shared pointer to the new ArgMax operator instance.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Performs dimension inference for the ArgMax operation.
      * @param[in] allowDataDependency Whether data dependency is allowed during dimension inference.
@@ -138,19 +130,6 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Sets the backend for the operator.
-     * @param name The name of the backend.
-     * @param device The device index on which the backend operates (default is 0).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Retrieves a list of available backends for the ArgMax operator.
-     * @return A set of strings representing the available backends.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Gets the attribute storage for the ArgMax operator.
      * @return A shared pointer to the attribute storage.
@@ -175,22 +154,6 @@ public:
      */
     inline bool& selectLastIndex() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::SelectLastIndex>(); }
 
-    /**
-     * @brief Returns the names of the input tensors for the ArgMax operator.
-     * @return A vector of strings containing the input names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Returns the names of the output tensors for the ArgMax operator.
-     * @return A vector of strings containing the output names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
 	/**
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 05263f6c8..fb4992d3d 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -40,9 +40,10 @@ namespace Aidge {
  * - DilationDims: The dilation dimensions.
  * - KernelDims: The kernel dimensions.
  */
-enum class ConvAttr {
-    GENERATE_LIST_ATTR_ENUM(LIST_CONV_ATTR)
-};
+template <DimIdx_t DIM> struct Conv_Op_Type {};
+template <> struct Conv_Op_Type<1> { static constexpr const char* const value = "Conv1D"; };
+template <> struct Conv_Op_Type<2> { static constexpr const char* const value = "Conv2D"; };
+template <> struct Conv_Op_Type<3> { static constexpr const char* const value = "Conv3D"; };
 
 /**
  * @class Conv_Op
@@ -74,13 +75,10 @@ enum class ConvAttr {
  */
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensorWithImpl<Conv_Op<DIM>> {
-public:
-    static const std::string Type;
-
+private:
     // Use the external enum so that Aidge::Conv_Op<DIM>::Attr is valid.
     using Attr = ConvAttr;
 
-private:
     using Attributes_ = StaticAttributes<Attr, GENERATE_LIST_ATTR_TYPE(LIST_CONV_ATTR)>;
     template <Attr e>
     using attr = typename Attributes_::template attr<e>;
@@ -94,6 +92,10 @@ public:
     using OperatorTensorWithImpl<Conv_Op<DIM>>::mOutputs;
     using OperatorTensorWithImpl<Conv_Op<DIM>>::dimsForwarded;
 
+    static constexpr const char* const Type = Conv_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input", "weight", "bias"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     Conv_Op() = delete;
 
     /**
@@ -176,14 +178,6 @@ public:
      */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<Attr::KernelDims>(); }
 
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input", "weight", "bias"};
-    }
-
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
-
 	/**
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
diff --git a/python_binding/operator/pybind_Abs.cpp b/python_binding/operator/pybind_Abs.cpp
index 8df1bfd13..0c49061b7 100644
--- a/python_binding/operator/pybind_Abs.cpp
+++ b/python_binding/operator/pybind_Abs.cpp
@@ -22,8 +22,12 @@ namespace Aidge {
 void init_Abs(py::module& m) {
     py::class_<Abs_Op, std::shared_ptr<Abs_Op>, OperatorTensor>(m, "AbsOp", py::multiple_inheritance())
     .def(py::init<>())
-    .def_static("get_inputs_name", &Abs_Op::getInputsName)
-    .def_static("get_outputs_name", &Abs_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Abs_Op::InputsName), std::end(Abs_Op::InputsName));
+    })
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Abs_Op::OutputsName), std::end(Abs_Op::OutputsName));
+    })
     .def_readonly_static("Type", &Abs_Op::Type);
     declare_registrable<Abs_Op>(m, "AbsOp");
 
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 814500070..79512b73c 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -34,8 +34,12 @@ void declare_Add(py::module &m) {
     :type name : str
     )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Add_Op::getInputsName)
-    .def_static("get_outputs_name", &Add_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Add_Op::InputsName), std::end(Add_Op::InputsName));
+    })
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Add_Op::OutputsName), std::end(Add_Op::OutputsName));
+    })
     .def_readonly_static("Type", &Add_Op::Type);
 
   declare_registrable<Add_Op>(m, "AddOp");
diff --git a/python_binding/operator/pybind_And.cpp b/python_binding/operator/pybind_And.cpp
index bd3366ef8..9d2d410fa 100644
--- a/python_binding/operator/pybind_And.cpp
+++ b/python_binding/operator/pybind_And.cpp
@@ -31,8 +31,13 @@ void init_And(py::module& m) {
         :type name : str
         )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &And_Op::getInputsName)
-    .def_static("get_outputs_name", &And_Op::getOutputsName);
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(And_Op::InputsName), std::end(And_Op::InputsName));
+    })
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(And_Op::OutputsName), std::end(And_Op::OutputsName));
+    })
+    .def_readonly_static("Type", &And_Op::Type);
 
     declare_registrable<And_Op>(m, "AndOp");
 
diff --git a/python_binding/operator/pybind_ArgMax.cpp b/python_binding/operator/pybind_ArgMax.cpp
index 75f325749..4fddae3e5 100644
--- a/python_binding/operator/pybind_ArgMax.cpp
+++ b/python_binding/operator/pybind_ArgMax.cpp
@@ -41,8 +41,13 @@ void init_ArgMax(py::module &m) {
 			:type select_last_index: bool
 		)mydelimiter")
     .def(py::init<std::int32_t, bool, bool>(), py::arg("axis"), py::arg("keep_dims"), py::arg("select_last_index"))
-    .def_static("get_inputs_name", &ArgMax_Op::getInputsName)
-    .def_static("get_outputs_name", &ArgMax_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(ArgMax_Op::InputsName), std::end(ArgMax_Op::InputsName));
+    })
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(ArgMax_Op::OutputsName), std::end(ArgMax_Op::OutputsName));
+    })
+    .def_readonly_static("Type", &ArgMax_Op::Type)
 	.def_static("attributes_name", []() {
 		std::vector<std::string> result;
 		auto attributes = ArgMax_Op::attributesName();
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index e65a74c0c..544125a48 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -41,8 +41,13 @@ void declare_ConvOp(py::module &m) {
         }), py::arg("kernel_dims"),
             py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
             py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
-        .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Conv_Op<DIM>::InputsName), std::end(Conv_Op<DIM>::InputsName));
+        })
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Conv_Op<DIM>::OutputsName), std::end(Conv_Op<DIM>::OutputsName));
+        })
+        .def_readonly_static("Type", &Conv_Op<DIM>::Type)
 
 		.def_static("attributes_name", []() {
 			std::vector<std::string> result;
@@ -54,7 +59,6 @@ void declare_ConvOp(py::module &m) {
 		})
         .def("in_channels", &Conv_Op<DIM>::inChannels)
         .def("out_channels", &Conv_Op<DIM>::outChannels)
-        .def_readonly_static("Type", &Conv_Op<DIM>::Type)
         ;
 
   declare_registrable<Conv_Op<DIM>>(m, pyClassName);
diff --git a/src/operator/Abs.cpp b/src/operator/Abs.cpp
deleted file mode 100644
index 1dd7836ad..000000000
--- a/src/operator/Abs.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include "aidge/operator/Abs.hpp"
-
-#include <string>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/utils/Types.h"
-
-const std::string Aidge::Abs_Op::Type = "Abs";
-
-void Aidge::Abs_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Abs_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Abs_Op::getAvailableBackends() const {
-    return Registrar<Abs_Op>::getKeys();
-}
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 3b6221447..ea6f73b85 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -20,8 +20,6 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 
-const std::string Aidge::Add_Op::Type = "Add";
-
 Aidge::Add_Op::Add_Op()
     : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1)
 {
diff --git a/src/operator/And.cpp b/src/operator/And.cpp
index aebd5a717..b7121513f 100644
--- a/src/operator/And.cpp
+++ b/src/operator/And.cpp
@@ -21,8 +21,6 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::And_Op::Type = "And";
-
 bool Aidge::And_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
@@ -51,12 +49,3 @@ bool Aidge::And_Op::forwardDims(bool /*allowDataDependency*/) {
 
     return false;
 }
-
-void Aidge::And_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(And_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::And_Op::getAvailableBackends() const {
-    return Registrar<And_Op>::getKeys();
-}
diff --git a/src/operator/ArgMax.cpp b/src/operator/ArgMax.cpp
index 531c41596..35dc281c3 100644
--- a/src/operator/ArgMax.cpp
+++ b/src/operator/ArgMax.cpp
@@ -22,22 +22,10 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ArgMax_Op::Type = "ArgMax";
-
 Aidge::ArgMax_Op::ArgMax_Op(const Aidge::ArgMax_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
       mAttributes(op.mAttributes)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(ArgMax_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::ArgMax_Op::clone() const {
-    return std::make_shared<ArgMax_Op>(*this);
-}
+{}
 
 bool Aidge::ArgMax_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -61,15 +49,6 @@ bool Aidge::ArgMax_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::ArgMax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(ArgMax_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ArgMax_Op::getAvailableBackends() const {
-    return Registrar<ArgMax_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ArgMax(std::int32_t axis,
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 4c87bf50d..1f3aba1a4 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -24,9 +24,6 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Conv_Op<DIM>::Type = "Conv" + std::to_string(DIM) + "D";
-
 template <Aidge::DimIdx_t DIM>
 Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
     : OperatorTensorWithImpl<Conv_Op<DIM>>(op),
-- 
GitLab


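Before the next patch, it helps to spell out the pattern the series is converging on. Each operator drops its hand-written clone(), setBackend() and getAvailableBackends() and inherits them from the OperatorTensorWithImpl<Derived> CRTP base. The exact definition of that base is not shown in these hunks; the sketch below is a toy reconstruction inferred from the deleted boilerplate, with invented names, not the verbatim Aidge template:

    // Toy, self-contained CRTP sketch (assumed shape; not Aidge code).
    // The base knows the concrete operator type OP, so one generic clone()
    // can replace the per-operator copy/clone code deleted in the hunks above.
    #include <iostream>
    #include <memory>

    struct OperatorBase {
        virtual ~OperatorBase() = default;
        virtual std::unique_ptr<OperatorBase> clone() const = 0;
    };

    template <class OP>
    struct WithImpl : OperatorBase {
        std::unique_ptr<OperatorBase> clone() const override {
            // Cast to the concrete type and invoke its copy-constructor.
            return std::make_unique<OP>(static_cast<const OP&>(*this));
        }
    };

    struct Toy_Op : WithImpl<Toy_Op> {
        static constexpr const char* const Type = "Toy";
    };

    int main() {
        Toy_Op op;
        auto copy = op.clone();        // cloned as Toy_Op via the CRTP base
        std::cout << Toy_Op::Type << '\n';
    }

The same move turns the per-operator getInputsName()/getOutputsName() functions into constexpr InputsName/OutputsName arrays, which is why the Python bindings above rebuild the std::vector<std::string> with std::begin()/std::end() over those arrays.
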
From 4b6743120a8e4e7edf8bda4a2e30a1f8fa1844b0 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Sun, 2 Mar 2025 09:40:56 +0000
Subject: [PATCH 06/12] Migrate Atan and AvgPooling to OperatorTensorWithImpl

---
 include/aidge/operator/Atan.hpp               | 33 ++--------
 include/aidge/operator/AvgPooling.hpp         | 64 ++++++-------------
 include/aidge/operator/Conv.hpp               |  4 ++
 python_binding/operator/pybind_Atan.cpp       |  9 ++-
 python_binding/operator/pybind_AvgPooling.cpp | 12 ++--
 src/operator/Atan.cpp                         | 28 +-------
 src/operator/AvgPooling.cpp                   | 31 +--------
 7 files changed, 45 insertions(+), 136 deletions(-)

diff --git a/include/aidge/operator/Atan.hpp b/include/aidge/operator/Atan.hpp
index 6f81ab0a8..ae16c8b81 100644
--- a/include/aidge/operator/Atan.hpp
+++ b/include/aidge/operator/Atan.hpp
@@ -37,39 +37,14 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Atan_Op : public OperatorTensor,
-    public Registrable<Atan_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const Atan_Op&)>>
+class Atan_Op : public OperatorTensorWithImpl<Atan_Op>
 {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Atan";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Atan_Op();
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Atan_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Atan_Op(const Atan_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Atan_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Atan(const std::string& name = "");
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 505a06398..66e7314a5 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -63,6 +63,12 @@ constexpr const char* const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
 }
 
 namespace Aidge {
+template <DimIdx_t DIM> struct AvgPooling_Op_Type {};
+template <> struct AvgPooling_Op_Type<1> { static constexpr const char* const value = "AvgPooling1D"; };
+template <> struct AvgPooling_Op_Type<2> { static constexpr const char* const value = "AvgPooling2D"; };
+template <> struct AvgPooling_Op_Type<3> { static constexpr const char* const value = "AvgPooling3D"; };
+template <> struct AvgPooling_Op_Type<4> { static constexpr const char* const value = "AvgPooling4D"; };
+
 /**
  * @brief Class representing an Average Pooling operation.
  *
@@ -93,15 +99,7 @@ namespace Aidge {
  */
 
 template <DimIdx_t DIM>
-class AvgPooling_Op : public OperatorTensor,
-                public Registrable<AvgPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>> {
-
-public:
-    /**
-     * @brief Type identifier for the AvgPooling operation.
-     */
-    static const std::string Type;
-
+class AvgPooling_Op : public OperatorTensorWithImpl<AvgPooling_Op<DIM>> {
 private:
     /**
      * @brief Static attributes representing kernel and stride dimensions.
@@ -118,6 +116,17 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    using OperatorTensorWithImpl<AvgPooling_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<AvgPooling_Op<DIM>>::getOutput;
+    using OperatorTensorWithImpl<AvgPooling_Op<DIM>>::OperatorTensorWithImpl;
+    using OperatorTensorWithImpl<AvgPooling_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<AvgPooling_Op<DIM>>::mOutputs;
+    using OperatorTensorWithImpl<AvgPooling_Op<DIM>>::dimsForwarded;
+
+    static constexpr const char* const Type = AvgPooling_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Default constructor is deleted.
      */
@@ -135,7 +144,7 @@ public:
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
                             const std::array<DimSize_t, DIM> &dilations = create_array<DimSize_t, DIM>(1),
                             bool ceil_mode = false)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
+        : OperatorTensorWithImpl<AvgPooling_Op<DIM>>(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
                         attr<AvgPoolingAttr::StrideDims>(stride_dims),
                         attr<AvgPoolingAttr::KernelDims>(kernel_dims),
@@ -151,12 +160,6 @@ public:
      */
     AvgPooling_Op(const AvgPooling_Op<DIM>& op);
 
-    /**
-     * @brief Clones the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override final;
-
     /**
      * @brief Calculates the output dimensions based on the input dimensions and operator attributes.
      * @param[in] allowDataDependency If true, considers data-dependent operations. Defaults to false.
@@ -176,19 +179,6 @@ public:
                           const std::vector<DimSize_t>& outputDims,
                           const IOIndex_t outputIdx = 0) const override final;
 
-    /**
-     * @brief Sets the backend for the operation.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index. Defaults to 0.
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Retrieves the available backends for the operation.
-     * @return A set of strings representing the available backends.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Accessor for the operation attributes.
      * @return Shared pointer to the attributes.
@@ -219,22 +209,6 @@ public:
      */
     inline bool& ceilMode() const { return mAttributes->template getAttr<AvgPoolingAttr::CeilMode>(); }
 
-    /**
-     * @brief Retrieves the names of the input tensors.
-     * @return A vector of strings representing the input tensors names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Retrieves the names of the output tensors.
-     * @return A vector of strings representing the output tensors names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
 	/**
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index fb4992d3d..faf6ee31f 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -40,6 +40,10 @@ namespace Aidge {
  * - DilationDims: The dilation dimensions.
  * - KernelDims: The kernel dimensions.
  */
+enum class ConvAttr {
+    GENERATE_LIST_ATTR_ENUM(LIST_CONV_ATTR)
+};
+
 template <DimIdx_t DIM> struct Conv_Op_Type {};
 template <> struct Conv_Op_Type<1> { static constexpr const char* const value = "Conv1D"; };
 template <> struct Conv_Op_Type<2> { static constexpr const char* const value = "Conv2D"; };
diff --git a/python_binding/operator/pybind_Atan.cpp b/python_binding/operator/pybind_Atan.cpp
index 6f2e00333..3f05fc148 100644
--- a/python_binding/operator/pybind_Atan.cpp
+++ b/python_binding/operator/pybind_Atan.cpp
@@ -27,8 +27,13 @@ void init_Atan(py::module& m) {
         :type type : :py:class:`str`
         )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Atan_Op::getInputsName)
-    .def_static("get_outputs_name", &Atan_Op::getOutputsName);
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Atan_Op::InputsName), std::end(Atan_Op::InputsName));
+    })
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Atan_Op::OutputsName), std::end(Atan_Op::OutputsName));
+    })
+    .def_readonly_static("Type", &Atan_Op::Type);
 
     declare_registrable<Atan_Op>(m, "AtanOp");
 
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 6130fc271..8654de586 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -60,8 +60,13 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
             py::arg("stride_dims") = create_array<DimSize_t, DIM>(1),
             py::arg("dilations") = create_array<DimSize_t, DIM>(1),
             py::arg("ceil_mode") = false)
-    .def_static("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
-    .def_static("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(AvgPooling_Op<DIM>::InputsName), std::end(AvgPooling_Op<DIM>::InputsName));
+    })
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(AvgPooling_Op<DIM>::OutputsName), std::end(AvgPooling_Op<DIM>::OutputsName));
+    })
+    .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type)
 
 		.def_static("attributes_name", []() {
 			std::vector<std::string> result;
@@ -70,8 +75,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
 				result.emplace_back(attributes[i]);
 			}
 			return result;
-		})
-    .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type);
+		});
 
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
 
diff --git a/src/operator/Atan.cpp b/src/operator/Atan.cpp
index c0a494ee6..a9e92fb9f 100644
--- a/src/operator/Atan.cpp
+++ b/src/operator/Atan.cpp
@@ -18,33 +18,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Atan_Op::Type = "Atan";
-
-Aidge::Atan_Op::Atan_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-Aidge::Atan_Op::Atan_Op(const Aidge::Atan_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Atan_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Atan_Op::clone() const {
-    return std::make_shared<Atan_Op>(*this);
-}
-
-
-void Aidge::Atan_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Atan_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Atan_Op::getAvailableBackends() const {
-    return Registrar<Atan_Op>::getKeys();
-}
+Aidge::Atan_Op::Atan_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
 ///////////////////////////////////////////////////
 
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 79341687c..42075567e 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -23,26 +23,11 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling" + std::to_string(DIM) + "D";
-
-
 template <Aidge::DimIdx_t DIM>
 Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl<AvgPooling_Op<DIM>>(op),
       mAttributes(op.mAttributes)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::AvgPooling_Op<DIM>::clone() const {
-    return std::make_shared<AvgPooling_Op<DIM>>(*this);
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -118,18 +103,6 @@ Aidge::AvgPooling_Op<DIM>::computeReceptiveField(const std::vector<Aidge::DimSiz
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
 }
 
-
-template <Aidge::DimIdx_t DIM>
-void Aidge::AvgPooling_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::AvgPooling_Op<DIM>::getAvailableBackends() const {
-    return Registrar<AvgPooling_Op<DIM>>::getKeys();
-}
-
 template class Aidge::AvgPooling_Op<1>;
 template class Aidge::AvgPooling_Op<2>;
 template class Aidge::AvgPooling_Op<3>;
-- 
GitLab


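One C++ subtlety in the templated operators above (AvgPooling_Op here, and BatchNorm_Op in the next patch): because OperatorTensorWithImpl<AvgPooling_Op<DIM>> is a dependent base class, its members are invisible to unqualified lookup inside the derived template, hence the block of using-declarations (getInput, mOutputs, etc.). Likewise, Type can no longer be built at run time from std::to_string(DIM) once it becomes a constexpr const char*, which is what the AvgPooling_Op_Type<DIM> trait specializations are for. A self-contained illustration of the lookup rule (toy code, not Aidge):

    // Inside a class template, unqualified lookup does not search dependent
    // bases (two-phase name lookup); a using-declaration re-exposes the name.
    template <int DIM>
    struct Base {
        int getInput() const { return DIM; }
    };

    template <int DIM>
    struct Pool : Base<DIM> {
        using Base<DIM>::getInput;   // without this, the call below would
                                     // have to be written this->getInput()
        int doubled() const { return 2 * getInput(); }
    };

    int main() { return Pool<3>{}.doubled(); }  // exit code 6
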
From e8702a0838ab10e8811d764111a81c43380dae73 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Sun, 2 Mar 2025 10:52:41 +0000
Subject: [PATCH 07/12] Removed attributesName(), which is not useful for the
 C++ API

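The attribute names stay reachable in C++ through the EnumStrings<...>::data
arrays, so nothing is lost by dropping the accessor; the Python bindings now
build the list straight from the array bounds, using the pattern applied
throughout this patch (shown here with ArgMaxAttr):

    .def_static("attributes_name", []() {
        return std::vector<std::string>(std::begin(EnumStrings<ArgMaxAttr>::data),
                                        std::end(EnumStrings<ArgMaxAttr>::data));
    })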
---
 include/aidge/operator/ArgMax.hpp             |  8 ---
 include/aidge/operator/Atan.hpp               |  3 +-
 include/aidge/operator/AvgPooling.hpp         |  8 ---
 include/aidge/operator/BatchNorm.hpp          | 51 +++++---------
 include/aidge/operator/BitShift.hpp           | 70 +++----------------
 include/aidge/operator/Cast.hpp               |  8 ---
 include/aidge/operator/Clip.hpp               |  8 ---
 include/aidge/operator/Concat.hpp             |  8 ---
 include/aidge/operator/ConstantOfShape.hpp    |  8 ---
 include/aidge/operator/Conv.hpp               | 11 ---
 include/aidge/operator/ConvDepthWise.hpp      |  8 ---
 include/aidge/operator/DepthToSpace.hpp       |  8 ---
 include/aidge/operator/Flatten.hpp            |  8 ---
 include/aidge/operator/Fold.hpp               |  8 ---
 include/aidge/operator/Gather.hpp             | 10 ---
 include/aidge/operator/GridSample.hpp         |  8 ---
 include/aidge/operator/Heaviside.hpp          | 10 ---
 include/aidge/operator/LRN.hpp                | 10 ---
 include/aidge/operator/LeakyReLU.hpp          | 10 ---
 include/aidge/operator/MaxPooling.hpp         |  8 ---
 include/aidge/operator/Memorize.hpp           | 10 ---
 include/aidge/operator/Pad.hpp                |  8 ---
 include/aidge/operator/Pop.hpp                |  8 ---
 include/aidge/operator/ReduceMean.hpp         | 10 ---
 include/aidge/operator/ReduceSum.hpp          | 10 ---
 include/aidge/operator/Reshape.hpp            |  8 ---
 include/aidge/operator/Resize.hpp             |  8 ---
 include/aidge/operator/Scaling.hpp            |  8 ---
 include/aidge/operator/Shape.hpp              |  8 ---
 include/aidge/operator/Slice.hpp              |  8 ---
 include/aidge/operator/Softmax.hpp            |  8 ---
 include/aidge/operator/Split.hpp              |  8 ---
 include/aidge/operator/Squeeze.hpp            |  8 ---
 include/aidge/operator/Stack.hpp              |  8 ---
 include/aidge/operator/Transpose.hpp          | 10 ---
 include/aidge/operator/Unfold.hpp             |  8 ---
 include/aidge/operator/Unsqueeze.hpp          |  8 ---
 python_binding/operator/pybind_ArgMax.cpp     |  7 +-
 python_binding/operator/pybind_AvgPooling.cpp |  7 +-
 python_binding/operator/pybind_BatchNorm.cpp  | 19 +++--
 python_binding/operator/pybind_BitShift.cpp   | 16 ++---
 python_binding/operator/pybind_Cast.cpp       |  7 +-
 python_binding/operator/pybind_Clip.cpp       |  7 +-
 python_binding/operator/pybind_Concat.cpp     |  7 +-
 .../operator/pybind_ConstantOfShape.cpp       |  7 +-
 python_binding/operator/pybind_Conv.cpp       |  7 +-
 .../operator/pybind_ConvDepthWise.cpp         |  7 +-
 .../operator/pybind_DepthToSpace.cpp          |  7 +-
 python_binding/operator/pybind_Gather.cpp     |  7 +-
 python_binding/operator/pybind_GridSample.cpp |  7 +-
 python_binding/operator/pybind_Heaviside.cpp  |  7 +-
 python_binding/operator/pybind_LRN.cpp        |  7 +-
 python_binding/operator/pybind_LeakyReLU.cpp  |  7 +-
 python_binding/operator/pybind_MaxPooling.cpp |  7 +-
 python_binding/operator/pybind_Memorize.cpp   |  7 +-
 python_binding/operator/pybind_Pad.cpp        |  7 +-
 python_binding/operator/pybind_Pop.cpp        |  7 +-
 python_binding/operator/pybind_ReduceMean.cpp |  7 +-
 python_binding/operator/pybind_ReduceSum.cpp  |  7 +-
 python_binding/operator/pybind_Reshape.cpp    |  7 +-
 python_binding/operator/pybind_Resize.cpp     |  7 +-
 python_binding/operator/pybind_Scaling.cpp    |  7 +-
 python_binding/operator/pybind_Shape.cpp      |  7 +-
 python_binding/operator/pybind_Slice.cpp      |  7 +-
 python_binding/operator/pybind_Softmax.cpp    |  7 +-
 python_binding/operator/pybind_Split.cpp      |  7 +-
 python_binding/operator/pybind_Squeeze.cpp    |  7 +-
 python_binding/operator/pybind_Stack.cpp      |  7 +-
 python_binding/operator/pybind_Transpose.cpp  |  7 +-
 python_binding/operator/pybind_Unsqueeze.cpp  |  7 +-
 src/operator/BatchNorm.cpp                    | 58 +--------------
 src/operator/BitShift.cpp                     | 12 ----
 72 files changed, 78 insertions(+), 659 deletions(-)

diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index dd43f2d11..e0c938032 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -153,14 +153,6 @@ public:
      * @return A reference to the selectLastIndex attribute.
      */
     inline bool& selectLastIndex() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::SelectLastIndex>(); }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ArgMaxAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Atan.hpp b/include/aidge/operator/Atan.hpp
index ae16c8b81..a0aed085a 100644
--- a/include/aidge/operator/Atan.hpp
+++ b/include/aidge/operator/Atan.hpp
@@ -37,8 +37,7 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Atan_Op : public OperatorTensorWithImpl<Atan_Op>
-{
+class Atan_Op : public OperatorTensorWithImpl<Atan_Op> {
 public:
     static constexpr const char* const Type = "Atan";
     static constexpr const char* const InputsName[] = {"data_input"};
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 66e7314a5..033231f29 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -208,14 +208,6 @@ public:
      * @return Boolean value indicating whether ceil mode is enabled.
      */
     inline bool& ceilMode() const { return mAttributes->template getAttr<AvgPoolingAttr::CeilMode>(); }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::AvgPoolingAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 81a679502..fffd68b29 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -56,6 +56,10 @@ constexpr const char* const EnumStrings<Aidge::BatchNormAttr>::data[] = {
 }
 
 namespace Aidge {
+template <DimIdx_t DIM> struct BatchNorm_Op_Type {};
+template <> struct BatchNorm_Op_Type<2> { static constexpr const char* const value = "BatchNorm2D"; };
+template <> struct BatchNorm_Op_Type<3> { static constexpr const char* const value = "BatchNorm3D"; };
+template <> struct BatchNorm_Op_Type<4> { static constexpr const char* const value = "BatchNorm4D"; };
 /**
  * @class BatchNorm_Op
  * @brief Implements the Batch Normalization (BN) operation, a technique used to normalize the inputs of a layer.
@@ -76,12 +80,7 @@ namespace Aidge {
  * @see Registrable
  */
 template <DimIdx_t DIM>
-class BatchNorm_Op : public OperatorTensor,
-                public Registrable<BatchNorm_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>> {
-
-public:
-    static const std::string Type;
-
+class BatchNorm_Op : public OperatorTensorWithImpl<BatchNorm_Op<DIM>> {
 private:
     using Attributes_ = StaticAttributes<BatchNormAttr,
                             GENERATE_LIST_ATTR_TYPE(LIST_BATCHNORM_ATTR)
@@ -92,6 +91,19 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::getOutput;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::nbInputs;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::inputCategory;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::OperatorTensorWithImpl;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::mOutputs;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::dimsForwarded;
+
+    static constexpr const char* const Type = BatchNorm_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input", "scale", "shift", "mean", "variance"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     BatchNorm_Op() = delete;
 
     /**
@@ -101,7 +113,7 @@ public:
      * @param[in] trainingMode Flag indicating whether to use current or moving average statistics.
      */
     constexpr BatchNorm_Op(float epsilon, float momentum, bool trainingMode)
-        : OperatorTensor(Type,
+        : OperatorTensorWithImpl<BatchNorm_Op<DIM>>(Type,
                             {InputCategory::Data,
                              InputCategory::Param,
                              InputCategory::Param,
@@ -120,17 +132,8 @@ public:
      */
     BatchNorm_Op(const BatchNorm_Op<DIM>& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::BatchNorm_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -150,22 +153,6 @@ public:
      * @brief Get whether the operator is in training mode.
      */
     inline bool& trainingMode() const { return mAttributes->template getAttr<BatchNormAttr::TrainingMode>(); }
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "scale", "shift", "mean", "variance"};
-    }
-
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::BatchNormAttr>::data;
-	}
 };
 
 extern template class Aidge::BatchNorm_Op<2>;
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index c54d6a99f..619bffcb8 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -57,9 +57,7 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class BitShift_Op : public OperatorTensor,
-    public Registrable<BitShift_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const BitShift_Op&)>> {
-
+class BitShift_Op : public OperatorTensorWithImpl<BitShift_Op> {
 public:
     /**
      * @enum BitShiftDirection
@@ -67,11 +65,6 @@ public:
      */
     enum BitShiftDirection { left, right };
 
-    /**
-     * @brief Type identifier for the operator.
-     */
-    static const std::string Type;
-
 private:
     using Attributes_ = StaticAttributes<BitShiftAttr,
                             GENERATE_LIST_ATTR_TYPE(LIST_BITSHIFT_ATTR)
@@ -83,12 +76,16 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "BitShift";
+    static constexpr const char* const InputsName[] = {"InputTensor", "ShiftAmount"};
+    static constexpr const char* const OutputsName[] = {"OutputTensor"};
+
     /**
      * @brief Constructor to initialize the `BitShift_Op` with a shift direction.
      * @param[in] direction The direction of the bitwise shift (left or right).
      */
     BitShift_Op(BitShiftDirection direction)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1),
+        : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
               attr<BitShiftAttr::BitShiftdirection>(direction))) {}
 
@@ -97,39 +94,12 @@ public:
      * @param[in] op Operator instance to copy.
      */
     BitShift_Op(const BitShift_Op& op)
-        : OperatorTensor(op),
+        : OperatorTensorWithImpl(op),
           mAttributes(op.mAttributes)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(BitShift_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<BitShift_Op>(*this);
-    }
+    {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Set the backend to be used for this operator.
-     * @param[in] name Backend name.
-     * @param[in] device Device index (default: 0).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the set of available backends for this operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -143,30 +113,6 @@ public:
     inline BitShiftDirection& direction() const noexcept {
         return mAttributes->template getAttr<BitShiftAttr::BitShiftdirection>();
     }
-
-    /**
-     * @brief Get the names of the input tensors.
-     * @return A vector containing the input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return { "InputTensor", "ShiftAmount" };
-    }
-
-    /**
-     * @brief Get the names of the output tensors.
-     * @return A vector containing the output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return { "OutputTensor" };
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::BitShiftAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index f003e30c3..6a4332aa8 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -152,14 +152,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::CastAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
index 4d5d2a93c..193e20b9c 100644
--- a/include/aidge/operator/Clip.hpp
+++ b/include/aidge/operator/Clip.hpp
@@ -168,14 +168,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return { "data_output" };
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ClipAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 3e5efb5f9..e17ecdd67 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -188,14 +188,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return { "data_output" };
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ConcatAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
index 886df95a8..ec5ab389e 100644
--- a/include/aidge/operator/ConstantOfShape.hpp
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -129,14 +129,6 @@ public:
     static const std::vector<std::string> getOutputsName() noexcept {
         return {"constant_of_shape"};
     }
-
-    /**
-     * @brief Retrieves the names of the attributes for the operator.
-     * @return A vector containing the attributes name.
-     */
-    static constexpr const char* const* attributesName() noexcept {
-        return EnumStrings<Aidge::ConstantOfShapeAttr>::data;
-    }
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index faf6ee31f..923c14d38 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -181,12 +181,6 @@ public:
      * @return The kernel dimensions as a reference.
      */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<Attr::KernelDims>(); }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -242,11 +236,6 @@ constexpr const char* const EnumStrings<Aidge::ConvAttr>::data[] = {
 };
 }
 
-template <Aidge::DimIdx_t DIM>
-constexpr const char* const* Aidge::Conv_Op<DIM>::attributesName() {
-    return EnumStrings<Aidge::Conv_Op<DIM>::Attr>::data;
-}
-
 extern template class Aidge::Conv_Op<1>;
 extern template class Aidge::Conv_Op<2>;
 
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 341b6f766..7324551ec 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -211,14 +211,6 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ConvDepthWiseAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
index 7bf6ffdf3..cf7f5e890 100644
--- a/include/aidge/operator/DepthToSpace.hpp
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -184,14 +184,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::DepthToSpaceAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Flatten.hpp b/include/aidge/operator/Flatten.hpp
index 0ccc54eb7..6d617ad1a 100644
--- a/include/aidge/operator/Flatten.hpp
+++ b/include/aidge/operator/Flatten.hpp
@@ -171,14 +171,6 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::FlattenAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index 9b71057fb..6b5f37cdf 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -202,14 +202,6 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::FoldAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 964e1b45d..e480aa282 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -184,12 +184,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -221,10 +215,6 @@ constexpr const char* const EnumStrings<Aidge::Gather_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::Gather_Op::attributesName() {
-    return EnumStrings<Aidge::Gather_Op::Attr>::data;
-}
-
 #undef LIST_GATHER_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index 2388cd0c1..53061ea0a 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -194,14 +194,6 @@ public:
 	static const std::vector<std::string> getOutputsName() {
 		return {"data_output"};
 	}
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::GridSampleAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Heaviside.hpp b/include/aidge/operator/Heaviside.hpp
index 49f905903..b8aea020f 100644
--- a/include/aidge/operator/Heaviside.hpp
+++ b/include/aidge/operator/Heaviside.hpp
@@ -114,12 +114,6 @@ public:
         return {"output"};
     }
 
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -161,10 +155,6 @@ constexpr const char* const EnumStrings<Aidge::Heaviside_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::Heaviside_Op::attributesName() {
-    return EnumStrings<Aidge::Heaviside_Op::Attr>::data;
-}
-
 #undef LIST_HEAVISIDE_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_HEAVISIDE_H_ */
diff --git a/include/aidge/operator/LRN.hpp b/include/aidge/operator/LRN.hpp
index b1cbc143d..f4c87d9c8 100644
--- a/include/aidge/operator/LRN.hpp
+++ b/include/aidge/operator/LRN.hpp
@@ -170,12 +170,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -199,10 +193,6 @@ constexpr const char* const EnumStrings<Aidge::LRN_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::LRN_Op::attributesName() {
-    return EnumStrings<Aidge::LRN_Op::Attr>::data;
-}
-
 #undef LIST_LRN_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_LRN_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 867f324d3..0f9a1e087 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -121,12 +121,6 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -149,10 +143,6 @@ constexpr const char* const EnumStrings<Aidge::LeakyReLU_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::LeakyReLU_Op::attributesName() {
-    return EnumStrings<Attr>::data;
-}
-
 #undef LIST_LEAKYRELU_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_LEAKYRELU_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 011042621..de3b2a58c 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -204,14 +204,6 @@ public:
      * @return A vector of output tensors names.
      */
     static const std::vector<std::string> getOutputsName(){ return {"data_output"}; }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::MaxPoolingAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index e1eea4a28..23638305d 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -253,12 +253,6 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output", "data_output_rec"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -280,10 +274,6 @@ constexpr const char* const EnumStrings<Aidge::Memorize_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::Memorize_Op::attributesName() {
-    return EnumStrings<Aidge::Memorize_Op::Attr>::data;
-}
-
 #undef LIST_MEMORIZE_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_MEMORIZE_H_ */
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 491a8a369..6641d0319 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -243,14 +243,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::PadAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 9790f05e9..43ebd846a 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -231,14 +231,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::PopAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index cdb139f96..59a91835b 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -157,12 +157,6 @@ public:
         return {"data_output"};
     }
 
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
-
     virtual ~ReduceMean_Op() noexcept;
 };
 
@@ -194,10 +188,6 @@ constexpr const char* const EnumStrings<Aidge::ReduceMean_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::ReduceMean_Op::attributesName(){
-    return EnumStrings<Aidge::ReduceMean_Op::Attr>::data;
-}
-
 #undef LIST_REDUCEMEAN_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index 73f59c25d..1f9d19eb8 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -161,12 +161,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -201,10 +195,6 @@ constexpr const char* const EnumStrings<Aidge::ReduceSum_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::ReduceSum_Op::attributesName() {
-    return EnumStrings<Aidge::ReduceSum_Op::Attr>::data;
-}
-
 #undef LIST_REDUCESUM_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCESUM_H_ */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 6cad57c37..55dc777b0 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -159,14 +159,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ReshapeAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 32ddbe488..a548da32f 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -206,14 +206,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ResizeAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index c5264fe55..b073943e2 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -138,14 +138,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ScalingAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 290d95eef..310650169 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -178,14 +178,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ShapeAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index b425fe752..2f33c01ba 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -191,14 +191,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::SliceAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index b0c6a2eda..92ac5e080 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -150,14 +150,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::SoftmaxAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 038879f05..f76132740 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -188,14 +188,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output_0", "data_output_n"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::SplitAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 987f1e6af..2be3b0eab 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -162,14 +162,6 @@ public:
   static const std::vector<std::string> getOutputsName() {
     return {"squeezed"};
   }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::SqueezeAttr>::data;
-	}
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
index 843413756..f030777f8 100644
--- a/include/aidge/operator/Stack.hpp
+++ b/include/aidge/operator/Stack.hpp
@@ -257,14 +257,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::StackAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 25d8d92f6..387189622 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -170,12 +170,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -200,10 +194,6 @@ constexpr const char* const EnumStrings<Aidge::Transpose_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::Transpose_Op::attributesName() {
-    return EnumStrings<Aidge::Transpose_Op::Attr>::data;
-}
-
 #undef LIST_TRANSPOSE_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_TRANSPOSE_H_ */
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index fe85f9d5e..53a14de35 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -204,14 +204,6 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::UnfoldAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index 5975ff057..19dbd94d7 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -159,14 +159,6 @@ public:
   static const std::vector<std::string> getOutputsName() {
     return {"unsqueezed"};
   }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::UnsqueezeAttr>::data;
-	}
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
diff --git a/python_binding/operator/pybind_ArgMax.cpp b/python_binding/operator/pybind_ArgMax.cpp
index 4fddae3e5..6e1b7e315 100644
--- a/python_binding/operator/pybind_ArgMax.cpp
+++ b/python_binding/operator/pybind_ArgMax.cpp
@@ -49,12 +49,7 @@ void init_ArgMax(py::module &m) {
     })
     .def_readonly_static("Type", &ArgMax_Op::Type)
 	.def_static("attributes_name", []() {
-		std::vector<std::string> result;
-		auto attributes = ArgMax_Op::attributesName();
-		for (size_t i = 0; i < size(EnumStrings<ArgMaxAttr>::data); ++i) {
-			result.emplace_back(attributes[i]);
-		}
-		return result;
+		return std::vector<std::string>(std::begin(EnumStrings<ArgMaxAttr>::data), std::end(EnumStrings<ArgMaxAttr>::data));
 	})
     ;
   declare_registrable<ArgMax_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 8654de586..9b025d819 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -69,12 +69,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
     .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = AvgPooling_Op<DIM>::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<AvgPoolingAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+      return std::vector<std::string>(std::begin(EnumStrings<AvgPoolingAttr>::data), std::end(EnumStrings<AvgPoolingAttr>::data));
 		});
 
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 4bcb94c4a..f869d8e2e 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -40,18 +40,17 @@ void declare_BatchNormOp(py::module& m) {
             py::arg("epsilon"),
             py::arg("momentum"),
             py::arg("training_mode"))
-        .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(BatchNorm_Op<DIM>::InputsName), std::end(BatchNorm_Op<DIM>::InputsName));
+        })
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(BatchNorm_Op<DIM>::OutputsName), std::end(BatchNorm_Op<DIM>::OutputsName));
+        })
+        .def_readonly_static("Type", &BatchNorm_Op<DIM>::Type)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = BatchNorm_Op<DIM>::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<BatchNormAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
-		})
-        .def_readonly_static("Type", &BatchNorm_Op<DIM>::Type);
+            return std::vector<std::string>(std::begin(EnumStrings<BatchNormAttr>::data), std::end(EnumStrings<BatchNormAttr>::data));
+		});
 
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_BitShift.cpp b/python_binding/operator/pybind_BitShift.cpp
index f2f4b223d..246280460 100644
--- a/python_binding/operator/pybind_BitShift.cpp
+++ b/python_binding/operator/pybind_BitShift.cpp
@@ -34,15 +34,15 @@ void init_BitShift(py::module &m) {
     )mydelimiter")
         .def(py::init<BitShift_Op::BitShiftDirection>(), py::arg("direction"))
         .def("direction", &BitShift_Op::direction, "Get the direction of the bit shift (left or right).")
-        .def_static("get_inputs_name", &BitShift_Op::getInputsName, "Get the names of the input tensors.")
-        .def_static("get_outputs_name", &BitShift_Op::getOutputsName, "Get the names of the output tensors.")
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(BitShift_Op::InputsName), std::end(BitShift_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(BitShift_Op::OutputsName), std::end(BitShift_Op::OutputsName));
+        }, "Get the names of the output tensors.")
+        .def_readonly_static("Type", &BitShift_Op::Type)
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = BitShift_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<BitShiftAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<BitShiftAttr>::data), std::end(EnumStrings<BitShiftAttr>::data));
 		});
 
     // Enum binding under BitShiftOp class
diff --git a/python_binding/operator/pybind_Cast.cpp b/python_binding/operator/pybind_Cast.cpp
index 1e0ad7f9b..24420e96a 100644
--- a/python_binding/operator/pybind_Cast.cpp
+++ b/python_binding/operator/pybind_Cast.cpp
@@ -34,12 +34,7 @@ void init_Cast(py::module &m) {
         .def_static("get_inputs_name", &Cast_Op::getInputsName, "Get the names of the input tensors.")
         .def_static("get_outputs_name", &Cast_Op::getOutputsName, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Cast_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<CastAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<CastAttr>::data), std::end(EnumStrings<CastAttr>::data));
 		});
 
     // Binding for the Cast function
diff --git a/python_binding/operator/pybind_Clip.cpp b/python_binding/operator/pybind_Clip.cpp
index a22a002d4..6cbb6d93e 100644
--- a/python_binding/operator/pybind_Clip.cpp
+++ b/python_binding/operator/pybind_Clip.cpp
@@ -35,12 +35,7 @@ void init_Clip(py::module& m) {
     .def_static("get_outputs_name", &Clip_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Clip_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ClipAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+			return std::vector<std::string>(std::begin(EnumStrings<ClipAttr>::data), std::end(EnumStrings<ClipAttr>::data));
 		})
     .def("min", &Clip_Op::min, py::return_value_policy::reference_internal)
     .def("max", &Clip_Op::max, py::return_value_policy::reference_internal);
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 236f16922..0fd09f8a1 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -36,12 +36,7 @@ void init_Concat(py::module& m) {
         .def_static("get_outputs_name", &Concat_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Concat_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ConcatAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<ConcatAttr>::data), std::end(EnumStrings<ConcatAttr>::data));
 		})
         .def_readonly_static("Type", &Concat_Op::Type);
 
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
index b185f2f80..08c3e549c 100644
--- a/python_binding/operator/pybind_ConstantOfShape.cpp
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -34,12 +34,7 @@ void init_ConstantOfShape(py::module &m) {
       .def_static("get_inputs_name", &ConstantOfShape_Op::getInputsName)
       .def_static("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
       .def_static("attributes_name", []() {
-        std::vector<std::string> result;
-        auto attributes = ConstantOfShape_Op::attributesName();
-        for (size_t i = 0; i < size(EnumStrings<ConstantOfShapeAttr>::data); ++i) {
-          result.emplace_back(attributes[i]);
-        }
-        return result;
+        return std::vector<std::string>(std::begin(EnumStrings<ConstantOfShapeAttr>::data), std::end(EnumStrings<ConstantOfShapeAttr>::data));
       })
       .def("value", &ConstantOfShape_Op::value);
 
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 544125a48..20e7b411d 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -50,12 +50,7 @@ void declare_ConvOp(py::module &m) {
         .def_readonly_static("Type", &Conv_Op<DIM>::Type)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Conv_Op<DIM>::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ConvAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<ConvAttr>::data), std::end(EnumStrings<ConvAttr>::data));
 		})
         .def("in_channels", &Conv_Op<DIM>::inChannels)
         .def("out_channels", &Conv_Op<DIM>::outChannels)
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 7ddbefd3d..ce2cdba01 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -58,12 +58,7 @@ void declare_ConvDepthWiseOp(py::module &m) {
   .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = ConvDepthWise_Op<DIM>::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ConvDepthWiseAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-				return result;
+			return std::vector<std::string>(std::begin(EnumStrings<ConvDepthWiseAttr>::data), std::end(EnumStrings<ConvDepthWiseAttr>::data));
 		})
   .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels)
   .def_readonly_static("Type", &ConvDepthWise_Op<DIM>::Type);
diff --git a/python_binding/operator/pybind_DepthToSpace.cpp b/python_binding/operator/pybind_DepthToSpace.cpp
index d33386711..469a3f264 100644
--- a/python_binding/operator/pybind_DepthToSpace.cpp
+++ b/python_binding/operator/pybind_DepthToSpace.cpp
@@ -39,12 +39,7 @@ void declare_DepthToSpace(py::module &m) {
     .def_static("get_outputs_name", &DepthToSpace_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = DepthToSpace_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<DepthToSpaceAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+			return std::vector<std::string>(std::begin(EnumStrings<DepthToSpaceAttr>::data), std::end(EnumStrings<DepthToSpaceAttr>::data));
 		})
     .def_readonly_static("Type", &DepthToSpace_Op::Type)
     .def("__repr__", [](DepthToSpace_Op& b) {
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 1c1f027dc..0a7358cc5 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -46,12 +46,7 @@ void init_Gather(py::module& m) {
         .def_static("get_outputs_name", &Gather_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Gather_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<Gather_Op::Attr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<Gather_Op::Attr>::data), std::end(EnumStrings<Gather_Op::Attr>::data));
 		})
         .def_readonly_static("Type", &Gather_Op::Type);
 
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
index f4f0335fd..41250beda 100644
--- a/python_binding/operator/pybind_GridSample.cpp
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -67,12 +67,7 @@ void declare_GridSampleOp(py::module &m) {
         .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = GridSample_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<GridSampleAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<GridSampleAttr>::data), std::end(EnumStrings<GridSampleAttr>::data));
 		})
         .def_readonly_static("Type", &GridSample_Op::Type)
         ;
diff --git a/python_binding/operator/pybind_Heaviside.cpp b/python_binding/operator/pybind_Heaviside.cpp
index 078b766a0..55a0ae1c8 100644
--- a/python_binding/operator/pybind_Heaviside.cpp
+++ b/python_binding/operator/pybind_Heaviside.cpp
@@ -39,12 +39,7 @@ void init_Heaviside(py::module &m) {
         .def_static("get_outputs_name", &Heaviside_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Heaviside_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<Heaviside_Op::Attr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<Heaviside_Op::Attr>::data), std::end(EnumStrings<Heaviside_Op::Attr>::data));
 		})
         .def_readonly_static("Type", &Heaviside_Op::Type);
 
diff --git a/python_binding/operator/pybind_LRN.cpp b/python_binding/operator/pybind_LRN.cpp
index f802152ba..6aad786d7 100644
--- a/python_binding/operator/pybind_LRN.cpp
+++ b/python_binding/operator/pybind_LRN.cpp
@@ -32,12 +32,7 @@ void init_LRN(py::module& m) {
         .def_static("get_outputs_name", &LRN_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = LRN_Op::attributesName();
-			for (size_t i = 0; attributes[i] != nullptr; ++i) {
-				result.emplace_back(attributes[i]);
-			}
-				return result;
+            return std::vector<std::string>(std::begin(EnumStrings<LRN_Op::Attr>::data), std::end(EnumStrings<LRN_Op::Attr>::data));
 		})
         .def_readonly_static("Type", &LRN_Op::Type);
 
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 8bc120c8a..46b906244 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -32,12 +32,7 @@ void init_LeakyReLU(py::module& m) {
         .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = LeakyReLU_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<LeakyReLU_Op::Attr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<LeakyReLU_Op::Attr>::data), std::end(EnumStrings<LeakyReLU_Op::Attr>::data));
 		})
         .def_readonly_static("Type", &LeakyReLU_Op::Type);
 
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 953e56ebe..811543ebc 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -54,12 +54,7 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
   .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
 
   .def_static("attributes_name", []() {
-    std::vector<std::string> result;
-    auto attributes = MaxPooling_Op<DIM>::attributesName();
-    for (size_t i = 0; i < size(EnumStrings<MaxPoolingAttr>::data); ++i) {
-      result.emplace_back(attributes[i]);
-    }
-    return result;
+    return std::vector<std::string>(std::begin(EnumStrings<MaxPoolingAttr>::data), std::end(EnumStrings<MaxPoolingAttr>::data));
   })
   .def_readonly_static("Type", &MaxPooling_Op<DIM>::Type);
   
diff --git a/python_binding/operator/pybind_Memorize.cpp b/python_binding/operator/pybind_Memorize.cpp
index ed87f68c7..647b9b094 100644
--- a/python_binding/operator/pybind_Memorize.cpp
+++ b/python_binding/operator/pybind_Memorize.cpp
@@ -25,12 +25,7 @@ void init_Memorize(py::module& m) {
         .def_static("get_inputs_name", &Memorize_Op::getInputsName)
         .def_static("get_outputs_name", &Memorize_Op::getOutputsName)
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Memorize_Op::attributesName();
-			for (size_t i = 0;i < size(EnumStrings<Memorize_Op::Attr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+			return std::vector<std::string>(std::begin(EnumStrings<Memorize_Op::Attr>::data), std::end(EnumStrings<Memorize_Op::Attr>::data));
 		});
 
     declare_registrable<Memorize_Op>(m, "MemorizeOp");
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 7b37bb206..4a42a9539 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -51,12 +51,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
     .def_static("get_inputs_name", &Pad_Op<DIM>::getInputsName)
     .def_static("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Pad_Op<DIM>::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<PadAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+			return std::vector<std::string>(std::begin(EnumStrings<PadAttr>::data), std::end(EnumStrings<PadAttr>::data));
 		})
     .def_readonly_static("Type", &Pad_Op<DIM>::Type);
 
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index 20606d24d..1be6d753e 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -25,12 +25,7 @@ void init_Pop(py::module& m) {
     .def_static("get_outputs_name", &Pop_Op::getOutputsName)
 
 	.def_static("attributes_name", []() {
-		std::vector<std::string> result;
-		auto attributes = Pop_Op::attributesName();
-		for (size_t i = 0; i < size(EnumStrings<PopAttr>::data); ++i) {
-			result.emplace_back(attributes[i]);
-		}
-		return result;
+		return std::vector<std::string>(std::begin(EnumStrings<PopAttr>::data), std::end(EnumStrings<PopAttr>::data));
 	})
     .def_readonly_static("Type", &Pop_Op::Type);
 
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index a97c3795b..ccbed6347 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -43,12 +43,7 @@ void declare_ReduceMeanOp(py::module &m) {
     .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
 	.def_static("attributes_name", []() {
-		std::vector<std::string> result;
-		auto attributes = ReduceMean_Op::attributesName();
-		for (size_t i = 0; i < size(EnumStrings<ReduceMean_Op::Attr>::data); ++i) {
-			result.emplace_back(attributes[i]);
-		}
-		return result;
+		return std::vector<std::string>(std::begin(EnumStrings<ReduceMean_Op::Attr>::data), std::end(EnumStrings<ReduceMean_Op::Attr>::data));
 	})
     .def_readonly_static("Type", &ReduceMean_Op::Type)
     ;
diff --git a/python_binding/operator/pybind_ReduceSum.cpp b/python_binding/operator/pybind_ReduceSum.cpp
index 7517c62d2..4d9d56c74 100644
--- a/python_binding/operator/pybind_ReduceSum.cpp
+++ b/python_binding/operator/pybind_ReduceSum.cpp
@@ -44,12 +44,7 @@ void init_ReduceSum(py::module &m) {
     .def_static("get_outputs_name", &ReduceSum_Op::getOutputsName)
 
 	.def_static("attributes_name", []() {
-		std::vector<std::string> result;
-		auto attributes = ReduceSum_Op::attributesName();
-		for (size_t i = 0; i < size(EnumStrings<ReduceSum_Op::Attr>::data); ++i) {
-			result.emplace_back(attributes[i]);
-		}
-		return result;
+		return std::vector<std::string>(std::begin(EnumStrings<ReduceSum_Op::Attr>::data), std::end(EnumStrings<ReduceSum_Op::Attr>::data));
 	})
     ;
   declare_registrable<ReduceSum_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index d263796ce..6f3fce2d2 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -37,12 +37,7 @@ void init_Reshape(py::module& m) {
     .def_static("get_outputs_name", &Reshape_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Reshape_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ReshapeAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+			return std::vector<std::string>(std::begin(EnumStrings<ReshapeAttr>::data), std::end(EnumStrings<ReshapeAttr>::data));
 		})
     .def_readonly_static("Type", &Reshape_Op::Type);
 
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
index 10a60e1f9..137366ad3 100644
--- a/python_binding/operator/pybind_Resize.cpp
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -29,12 +29,7 @@ void init_Resize(py::module &m) {
         .def_static("get_inputs_name", &Resize_Op::getInputsName)
         .def_static("get_outputs_name", &Resize_Op::getOutputsName)
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Resize_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ResizeAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-		    return result;
+            return std::vector<std::string>(std::begin(EnumStrings<ResizeAttr>::data), std::end(EnumStrings<ResizeAttr>::data));
 		})
         .def_readonly_static("Type", &Resize_Op::Type);
 
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
index ba975bb06..f0b5d13e1 100644
--- a/python_binding/operator/pybind_Scaling.cpp
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -43,12 +43,7 @@ void init_Scaling(py::module& m) {
         .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Scaling_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ScalingAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<ScalingAttr>::data), std::end(EnumStrings<ScalingAttr>::data));
 		})
         .def_readonly_static("Type", &Scaling_Op::Type);
 
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
index 3c8974bf0..dced85174 100644
--- a/python_binding/operator/pybind_Shape.cpp
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -36,12 +36,7 @@ void init_Shape(py::module& m) {
         .def_static("get_outputs_name", &Shape_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Shape_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ShapeAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<ShapeAttr>::data), std::end(EnumStrings<ShapeAttr>::data));
 		})
         .def_readonly_static("Type", &Shape_Op::Type);
 
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index 1cfd63f65..7c4449605 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -47,12 +47,7 @@ void init_Slice(py::module& m) {
     .def_static("get_outputs_name", &Slice_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Slice_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<SliceAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+			return std::vector<std::string>(std::begin(EnumStrings<SliceAttr>::data), std::end(EnumStrings<SliceAttr>::data));
 		})
     .def_readonly_static("Type", &Slice_Op::Type);
 
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 7a4a687fd..7fbd3851a 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -32,12 +32,7 @@ void init_Softmax(py::module& m) {
         .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Softmax_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<SoftmaxAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<SoftmaxAttr>::data), std::end(EnumStrings<SoftmaxAttr>::data));
 		})
         .def_readonly_static("Type", &Softmax_Op::Type);
     declare_registrable<Softmax_Op>(m, "SoftmaxOp");
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
index 052fa277e..643c6cca5 100644
--- a/python_binding/operator/pybind_Split.cpp
+++ b/python_binding/operator/pybind_Split.cpp
@@ -38,12 +38,7 @@ void init_Split(py::module& m) {
     .def_static("get_outputs_name", &Split_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Split_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<SplitAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+			return std::vector<std::string>(std::begin(EnumStrings<SplitAttr>::data), std::end(EnumStrings<SplitAttr>::data));
 		})
     .def_readonly_static("Type", &Split_Op::Type);
 
diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
index 7808c78da..22779cd12 100644
--- a/python_binding/operator/pybind_Squeeze.cpp
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -36,12 +36,7 @@ void init_Squeeze(py::module &m) {
     .def_static("get_outputs_name", &Squeeze_Op::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Squeeze_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<SqueezeAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+			return std::vector<std::string>(std::begin(EnumStrings<SqueezeAttr>::data), std::end(EnumStrings<SqueezeAttr>::data));
 		})
     .def("axes", &Squeeze_Op::axes);
 
diff --git a/python_binding/operator/pybind_Stack.cpp b/python_binding/operator/pybind_Stack.cpp
index 026167446..ca7720dee 100644
--- a/python_binding/operator/pybind_Stack.cpp
+++ b/python_binding/operator/pybind_Stack.cpp
@@ -28,12 +28,7 @@ void init_Stack(py::module &m) {
         .def_static("get_outputs_name", &StackOp::getOutputsName)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = StackOp::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<StackAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<StackAttr>::data), std::end(EnumStrings<StackAttr>::data));
 		})
         .def_readonly_static("Type", &StackOp::s_type);
 
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index 75bedca30..05245038c 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -39,12 +39,7 @@ void declare_Transpose(py::module &m) {
     .def_static("get_inputs_name", &Transpose_Op::getInputsName)
     .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
 	.def_static("attributes_name", []() {
-		std::vector<std::string> result;
-		auto attributes = Transpose_Op::attributesName();
-		for (size_t i = 0; i < size(EnumStrings<Transpose_Op::Attr>::data); ++i) {
-			result.emplace_back(attributes[i]);
-		}
-		return result;
+		return std::vector<std::string>(std::begin(EnumStrings<Transpose_Op::Attr>::data), std::end(EnumStrings<Transpose_Op::Attr>::data));
 	})
     .def_readonly_static("Type", &Transpose_Op::Type);
   declare_registrable<Transpose_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index 1ef94202c..09d65f728 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -31,12 +31,7 @@ void init_Unsqueeze(py::module &m) {
       .def_static("get_inputs_name", &Unsqueeze_Op::getInputsName)
       .def_static("get_outputs_name", &Unsqueeze_Op::getOutputsName)
         .def_static("attributes_name", []() {
-            std::vector<std::string> result;
-            auto attributes = Unsqueeze_Op::attributesName();
-            for (size_t i = 0; i < size(EnumStrings<UnsqueezeAttr>::data); ++i) {
-                result.emplace_back(attributes[i]);
-            }
-            return result;
+            return std::vector<std::string>(std::begin(EnumStrings<UnsqueezeAttr>::data), std::end(EnumStrings<UnsqueezeAttr>::data));
         })
       .def_readonly_static("Type", &Unsqueeze_Op::Type)
       ;
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index 24a49e56c..8df7e3816 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -23,25 +23,11 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm" + std::to_string(DIM) + "D";
-
 template <Aidge::DimIdx_t DIM>
 Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl<BatchNorm_Op<DIM>>(op),
       mAttributes(op.mAttributes)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::BatchNorm_Op<DIM>::clone() const {
-    return std::make_shared<BatchNorm_Op<DIM>>(*this);
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -60,46 +46,6 @@ bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::BatchNorm_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-
-    // By default, automatically set backend for scale, shift, mean and variance
-    if (getInput(1)) {
-        getInput(1)->setBackend(name, device);
-    }
-    else {
-        Log::notice("BatchNorm_Op::setBackend(): could not set backend for scale input, because input is not connected");
-    }
-
-    if (getInput(2)) {
-        getInput(2)->setBackend(name, device);
-    }
-    else {
-        Log::notice("BatchNorm_Op::setBackend(): could not set backend for shift input, because input is not connected");
-    }
-
-    if (getInput(3)) {
-        getInput(3)->setBackend(name, device);
-    }
-    else {
-        Log::notice("BatchNorm_Op::setBackend(): could not set backend for variance input, because input is not connected");
-    }
-
-    if (getInput(4)) {
-        getInput(4)->setBackend(name, device);
-    }
-    else {
-        Log::notice("BatchNorm_Op::setBackend(): could not set backend for mean input, because input is not connected");
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::BatchNorm_Op<DIM>::getAvailableBackends() const {
-    return Registrar<BatchNorm_Op<DIM>>::getKeys();
-}
-
 template class Aidge::BatchNorm_Op<2>;
 template class Aidge::BatchNorm_Op<3>;
 template class Aidge::BatchNorm_Op<4>;
diff --git a/src/operator/BitShift.cpp b/src/operator/BitShift.cpp
index 7595590f7..5486a9e11 100644
--- a/src/operator/BitShift.cpp
+++ b/src/operator/BitShift.cpp
@@ -21,8 +21,6 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::BitShift_Op::Type = "BitShift";
-
 bool Aidge::BitShift_Op::forwardDims(bool /*allowDataDependency*/) {
     if (!inputsAssociated()) {
     return false;
@@ -52,13 +50,3 @@ bool Aidge::BitShift_Op::forwardDims(bool /*allowDataDependency*/) {
     mOutputs[0]->resize(outDims);
     return true;
 }
-
-
-void Aidge::BitShift_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(BitShift_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::BitShift_Op::getAvailableBackends() const {
-    return Registrar<BitShift_Op>::getKeys();
-}
-- 
GitLab


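A note on the recurring change in the bindings above: every hand-rolled attributes_name loop (including the one in pybind_LRN.cpp, which iterated up to a nullptr sentinel instead of using the array size) is replaced by a single vector constructed from the EnumStrings array, and the getInputsName()/getOutputsName() statics are replaced, operator by operator, by lambdas over the new constexpr InputsName/OutputsName members (BatchNorm and BitShift above; Cast, Clip, Concat and ConstantOfShape in the next patch). A minimal, self-contained sketch of the idiom, with a hypothetical Example_Op standing in for the real operators:

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>  // enables std::vector -> Python list conversion

#include <iterator>  // std::begin, std::end
#include <string>
#include <vector>

namespace py = pybind11;

// Hypothetical operator; the real classes get these members from the
// static constexpr InputsName/OutputsName pattern introduced in this series.
// Compile as C++17, or add out-of-class definitions (see PATCH 09 below).
struct Example_Op {
    static constexpr const char* const InputsName[] = {"data_input_0", "data_input_n"};
    static constexpr const char* const OutputsName[] = {"data_output"};
};

PYBIND11_MODULE(example, m) {
    py::class_<Example_Op>(m, "ExampleOp")
        .def_static("get_inputs_name", []() {
            // std::begin/std::end work directly on raw arrays, so no index
            // loop or sentinel check is needed.
            return std::vector<std::string>(std::begin(Example_Op::InputsName),
                                            std::end(Example_Op::InputsName));
        })
        .def_static("get_outputs_name", []() {
            return std::vector<std::string>(std::begin(Example_Op::OutputsName),
                                            std::end(Example_Op::OutputsName));
        });
}

Note that binding the arrays to the reference parameters of std::begin/std::end ODR-uses them, which under C++14 requires out-of-class definitions; that is exactly what PATCH 09 below adds.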
From 6453a70623ac5ab0ceea1a9fddaff513f46d7342 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Sun, 2 Mar 2025 15:24:26 +0000
Subject: [PATCH 08/12] Migrate Cast, Clip, Concat and ConstantOfShape to OperatorTensorWithImpl

---
 include/aidge/operator/Cast.hpp               | 54 +++---------------
 include/aidge/operator/Clip.hpp               | 57 +++----------------
 include/aidge/operator/Concat.hpp             | 49 ++--------------
 include/aidge/operator/ConstantOfShape.hpp    | 42 +++-----------
 python_binding/operator/pybind_Cast.cpp       |  9 ++-
 python_binding/operator/pybind_Clip.cpp       |  9 ++-
 python_binding/operator/pybind_Concat.cpp     | 13 +++--
 .../operator/pybind_ConstantOfShape.cpp       |  9 ++-
 src/operator/Cast.cpp                         | 20 +------
 src/operator/Clip.cpp                         | 10 +---
 src/operator/Concat.cpp                       | 36 ++----------
 src/operator/ConstantOfShape.cpp              |  9 +--
 12 files changed, 64 insertions(+), 253 deletions(-)

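All four operator headers in this patch follow the same migration: drop the explicit OperatorTensor + Registrable base pair, the hand-written clone(), setBackend() and getAvailableBackends(), and derive from OperatorTensorWithImpl instead, with the operator itself as the CRTP parameter and, optionally, a default implementation type (as Cast_Op does with Cast_OpImpl). The base class body is not shown in this series; as a rough sketch under that caveat, ignoring the optional default-implementation parameter, it plausibly looks like this:

template <class Op>
class OperatorTensorWithImpl
    : public OperatorTensor,
      public Registrable<Op, std::string,
                         std::function<std::shared_ptr<OperatorImpl>(const Op&)>> {
public:
    using OperatorTensor::OperatorTensor;  // inherit the (type, categories, nbOut) ctor

    // One generic clone() instead of a hand-written override per operator.
    std::shared_ptr<Operator> clone() const override {
        return std::make_shared<Op>(static_cast<const Op&>(*this));
    }

    // Common part of setBackend(): look up the registered implementation
    // and propagate the backend to the output tensor.
    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
        SET_IMPL_MACRO(Op, *this, name);
        mOutputs[0]->setBackend(name, device);
    }

    std::set<std::string> getAvailableBackends() const override {
        return Registrar<Op>::getKeys();
    }
};

Operators with extra backend-dependent state can still override setBackend() and delegate to the base, as ConstantOfShape_Op does below.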
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 6a4332aa8..dd9f9c2e6 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -65,15 +65,7 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Cast_Op : public OperatorTensor,
-    public Registrable<Cast_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Cast_Op&)>> {
-
-public:
-    /**
-     * @brief Type string identifying this operator.
-     */
-    static const std::string Type;
-
+class Cast_Op : public OperatorTensorWithImpl<Cast_Op, Cast_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<CastAttr,
         GENERATE_LIST_ATTR_TYPE(LIST_CAST_ATTR)
@@ -85,6 +77,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Cast";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Deleted default constructor.
      */
@@ -101,29 +97,9 @@ public:
      * @param op Operator to copy.
      */
     Cast_Op(const Cast_Op& op)
-        : OperatorTensor(op),
+        : OperatorTensorWithImpl(op),
           mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Cast_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Cast_OpImpl>(*this);
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Cast_Op>(*this);
-    }
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    std::set<std::string> getAvailableBackends() const override;
+    {}
 
     /**
      * @brief Access the attributes of the operator.
@@ -136,22 +112,6 @@ public:
      * @return Reference to the target data type.
      */
     inline DataType& targetType() const { return mAttributes->template getAttr<CastAttr::TargetType>(); }
-
-    /**
-     * @brief Get the input tensor names for the Cast operator.
-     * @return A vector containing the input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names for the Cast operator.
-     * @return A vector containing the output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
index 193e20b9c..7fc8ccb95 100644
--- a/include/aidge/operator/Clip.hpp
+++ b/include/aidge/operator/Clip.hpp
@@ -68,15 +68,7 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Clip_Op : public OperatorTensor,
-    public Registrable<Clip_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Clip_Op&)>> {
-
-public:
-    /**
-     * @brief Type string identifying this operator.
-     */
-    static const std::string Type;
-
+class Clip_Op : public OperatorTensorWithImpl<Clip_Op> {
 private:
     using Attributes_ = StaticAttributes<ClipAttr,
         GENERATE_LIST_ATTR_TYPE(LIST_CLIP_ATTR)
@@ -88,6 +80,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Clip";
+    static constexpr const char* const InputsName[] = {"data_input", "min_empty_tensor", "max_empty_tensor"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Deleted default constructor.
      */
@@ -99,7 +95,7 @@ public:
      * @param[in] max Maximum value for clipping.
      */
     Clip_Op(float min, float max)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
+        : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
           mAttributes(std::make_shared<Attributes_>(attr<ClipAttr::Min>(min), attr<ClipAttr::Max>(max))) {}
 
     /**
@@ -107,32 +103,13 @@ public:
      * @param op Clip_Op instance to copy.
      */
     Clip_Op(const Clip_Op& op)
-        : OperatorTensor(op),
+        : OperatorTensorWithImpl(op),
           mAttributes(op.mAttributes)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Clip_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Clip_Op>(*this);
-    }
+    {}
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Setter to specify the backend to use.
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
     /**
      * @brief Access the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -150,24 +127,6 @@ public:
      * @return Reference to the maximum value.
      */
     inline float& max() const noexcept { return mAttributes->getAttr<ClipAttr::Max>(); }
-
-    std::set<std::string> getAvailableBackends() const override;
-
-    /**
-     * @brief Get the input tensor names for the Clip operator.
-     * @return A vector containing the input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return { "data_input", "min_empty_tensor", "max_empty_tensor" };
-    }
-
-    /**
-     * @brief Get the output tensor names for the Clip operator.
-     * @return A vector containing the output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return { "data_output" };
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index e17ecdd67..96442d5a1 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -98,15 +98,7 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Concat_Op : public OperatorTensor,
-    public Registrable<Concat_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Concat_Op&)>> {
-
-public:
-    /**
-     * @brief Type identifier for the Concat operator.
-     */
-    static const std::string Type;
-
+class Concat_Op : public OperatorTensorWithImpl<Concat_Op, Concat_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<ConcatAttr, GENERATE_LIST_ATTR_TYPE(LIST_CONCAT_ATTR)>;
 
@@ -116,6 +108,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Concat";
+    static constexpr const char* const InputsName[] = {"data_input_0", "data_input_n"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Default constructor is deleted to enforce explicit initialization.
      */
@@ -135,12 +131,6 @@ public:
      */
     Concat_Op(const Concat_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Forward the dimensions of the operator's inputs and outputs.
      * @param[in] allowDataDependency Allow data dependency during dimension propagation.
@@ -148,19 +138,6 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Set the backend for the operator.
-     * @param[in] name Backend name.
-     * @param[in] device Device index (default: 0).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the set of available backends for the operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the Concat operator.
      * @return A shared pointer to the attributes.
@@ -172,22 +149,6 @@ public:
      * @return A reference to the axis attribute.
      */
     inline std::int32_t& axis() const { return mAttributes->template getAttr<ConcatAttr::Axis>(); }
-
-    /**
-     * @brief Get the names of the input tensors.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return { "data_input_0", "data_input_n" };
-    }
-
-    /**
-     * @brief Get the names of the output tensors.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return { "data_output" };
-    }
 };
 
 /**
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
index ec5ab389e..769f51a1a 100644
--- a/include/aidge/operator/ConstantOfShape.hpp
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -55,16 +55,7 @@ namespace Aidge {
  * @brief This operator's purpose is to generate a tensor of shape given via
  * input and filled with a given value set via attribute.
  */
-class ConstantOfShape_Op
-    : public OperatorTensor,
-      public Registrable<ConstantOfShape_Op, std::string,
-                         std::function<std::shared_ptr<OperatorImpl>(
-                             const ConstantOfShape_Op &)>> {
-
-public:
-  // name of the type of the operation
-  static const std::string Type;
-
+class ConstantOfShape_Op : public OperatorTensorWithImpl<ConstantOfShape_Op> {
 private:
   using Attributes_ = StaticAttributes<ConstantOfShapeAttr, GENERATE_LIST_ATTR_TYPE(LIST_CONSTANTOFSHAPE_ATTR)>;
   template <ConstantOfShapeAttr e>
@@ -72,13 +63,17 @@ private:
   const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "ConstantOfShape";
+    static constexpr const char* const InputsName[] = {"input"};
+    static constexpr const char* const OutputsName[] = {"constant_of_shape"};
+
   /**
   * @brief Constructor for ConstantOfShape_Op.
   * @param[in] value A scalar tensor holding the value that will
   * fill the output tensor.
    */
   ConstantOfShape_Op(const Tensor &value = Tensor(0.f))
-      : OperatorTensor(Type, {InputCategory::Data}, 1),
+      : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
             attr<ConstantOfShapeAttr::Value>(value))) {}
 
@@ -89,21 +84,8 @@ public:
    * @param op Operator to copy.
    */
   ConstantOfShape_Op(const ConstantOfShape_Op &op)
-      : OperatorTensor(op), mAttributes(op.mAttributes) {
-    if (op.mImpl) {
-      SET_IMPL_MACRO(ConstantOfShape_Op, *this, op.backend());
-    } else {
-      mImpl = nullptr;
-    }
-  }
-
-  /**
-   * @brief Clone the operator using its copy-constructor.
-   * @see Operator::MatMul_Op
-   */
-  std::shared_ptr<Operator> clone() const override final {
-    return std::make_shared<ConstantOfShape_Op>(*this);
-  }
+      : OperatorTensorWithImpl(op),
+        mAttributes(op.mAttributes) {}
 
   /**
    * @brief Compute dimensions for the output Tensor
@@ -114,20 +96,12 @@ public:
 
   void setBackend(const std::string &name,
                   DeviceIdx_t device = 0) override final;
-  std::set<std::string> getAvailableBackends() const override;
 
   inline std::shared_ptr<Attributes> attributes() const override {
     return mAttributes;
   }
   inline Tensor &value() const noexcept {
     return mAttributes->template getAttr<ConstantOfShapeAttr::Value>();
   }
-
-    static const std::vector<std::string> getInputsName() noexcept {
-        return {"input"};
-    }
-    static const std::vector<std::string> getOutputsName() noexcept {
-        return {"constant_of_shape"};
-    }
 };
 
diff --git a/python_binding/operator/pybind_Cast.cpp b/python_binding/operator/pybind_Cast.cpp
index 24420e96a..2aca399f9 100644
--- a/python_binding/operator/pybind_Cast.cpp
+++ b/python_binding/operator/pybind_Cast.cpp
@@ -31,8 +31,13 @@ void init_Cast(py::module &m) {
     )mydelimiter")
         .def(py::init<DataType>(), py::arg("target_type"))
         .def("target_type", &Cast_Op::targetType, "Get the targeted type, output tensor data type")
-        .def_static("get_inputs_name", &Cast_Op::getInputsName, "Get the names of the input tensors.")
-        .def_static("get_outputs_name", &Cast_Op::getOutputsName, "Get the names of the output tensors.")
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Cast_Op::InputsName), std::end(Cast_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Cast_Op::OutputsName), std::end(Cast_Op::OutputsName));
+        }, "Get the names of the output tensors.")
+        .def_readonly_static("Type", &Cast_Op::Type)
 		.def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<CastAttr>::data), std::end(EnumStrings<CastAttr>::data));
 		});
diff --git a/python_binding/operator/pybind_Clip.cpp b/python_binding/operator/pybind_Clip.cpp
index 6cbb6d93e..c5b6cc22a 100644
--- a/python_binding/operator/pybind_Clip.cpp
+++ b/python_binding/operator/pybind_Clip.cpp
@@ -31,8 +31,13 @@ void init_Clip(py::module& m) {
         :type max : :py:class:`float`
         )mydelimiter")
     .def(py::init<float, float>(), py::arg("min") = std::numeric_limits<float>::lowest(), py::arg("max") = std::numeric_limits<float>::max())
-    .def_static("get_inputs_name", &Clip_Op::getInputsName)
-    .def_static("get_outputs_name", &Clip_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Clip_Op::InputsName), std::end(Clip_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Clip_Op::OutputsName), std::end(Clip_Op::OutputsName));
+    }, "Get the names of the output tensors.")
+    .def_readonly_static("Type", &Clip_Op::Type)
 
 		.def_static("attributes_name", []() {
 			return std::vector<std::string>(std::begin(EnumStrings<ClipAttr>::data), std::end(EnumStrings<ClipAttr>::data));
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 0fd09f8a1..112cedb10 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -32,13 +32,16 @@ void init_Concat(py::module& m) {
         .def(py::init<const IOIndex_t, const int>(),
              py::arg("nb_inputs"),
              py::arg("axis") = 0)
-        .def_static("get_inputs_name", &Concat_Op::getInputsName)
-        .def_static("get_outputs_name", &Concat_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Concat_Op::InputsName), std::end(Concat_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Concat_Op::OutputsName), std::end(Concat_Op::OutputsName));
+        }, "Get the names of the output tensors.")
+        .def_readonly_static("Type", &Concat_Op::Type)
 		.def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<ConcatAttr>::data), std::end(EnumStrings<ConcatAttr>::data));
-		})
-        .def_readonly_static("Type", &Concat_Op::Type);
+		});
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
 
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
index 08c3e549c..ffd2b85a3 100644
--- a/python_binding/operator/pybind_ConstantOfShape.cpp
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -31,8 +31,13 @@ void init_ConstantOfShape(py::module &m) {
                      that will fill the output tensor.
       :type value : :py:class:`Tensor`
       )mydelimiter")
-      .def_static("get_inputs_name", &ConstantOfShape_Op::getInputsName)
-      .def_static("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
+      .def_static("get_inputs_name", []() {
+          return std::vector<std::string>(std::begin(ConstantOfShape_Op::InputsName), std::end(ConstantOfShape_Op::InputsName));
+      }, "Get the names of the input tensors.")
+      .def_static("get_outputs_name", []() {
+          return std::vector<std::string>(std::begin(ConstantOfShape_Op::OutputsName), std::end(ConstantOfShape_Op::OutputsName));
+      }, "Get the names of the output tensors.")
+      .def_readonly_static("Type", &ConstantOfShape_Op::Type)
       .def_static("attributes_name", []() {
         return std::vector<std::string>(std::begin(EnumStrings<ConstantOfShapeAttr>::data), std::end(EnumStrings<ConstantOfShapeAttr>::data));
       })
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index 54eef17b6..8a45ebd6f 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -25,32 +25,14 @@ void Aidge::Cast_OpImpl::forward() {
     op.getOutput(0)->copyCast(*(op.getInput(0)));
 }
 
-const std::string Aidge::Cast_Op::Type = "Cast";
-
 Aidge::Cast_Op::Cast_Op(const DataType targetType)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
       mAttributes(std::make_shared<Attributes_>(
         attr<CastAttr::TargetType>(targetType)))
 {
-    mImpl = std::make_shared<Cast_OpImpl>(*this);
     mOutputs[0]->setDataType(targetType);
 }
 
-
-void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Cast_Op>::exists({name})) {
-        SET_IMPL_MACRO(Cast_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Cast_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Cast_Op::getAvailableBackends() const {
-    return Registrar<Cast_Op>::getKeys();
-}
-
 std::shared_ptr<Aidge::Node> Aidge::Cast(const Aidge::DataType targetType, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
 }
\ No newline at end of file
diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp
index 62787ebcf..7eacc9c75 100644
--- a/src/operator/Clip.cpp
+++ b/src/operator/Clip.cpp
@@ -18,8 +18,6 @@
 #include "aidge/utils/Types.h"
 #include "aidge/operator/Clip.hpp"
 
-const std::string Aidge::Clip_Op::Type = "Clip";
-
 bool Aidge::Clip_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined())
         || (getInput(2) && !getInput(2)->undefined()))
@@ -80,13 +78,7 @@ bool Aidge::Clip_Op::forwardDims(bool allowDataDependency)
     mOutputs[0] -> resize(getInput(0)->dims());
     return true;
 }
-void Aidge::Clip_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Clip_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-std::set<std::string> Aidge::Clip_Op::getAvailableBackends() const {
-    return Registrar<Clip_Op>::getKeys();
-}
+
 std::shared_ptr<Aidge::Node> Aidge::Clip(const std::string &name,float min,float max)
 {
     return std::make_shared<Node>(std::make_shared<Clip_Op>(min, max), name);
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 27b9d1cf1..3800f7eb6 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -18,34 +18,20 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Concat_Op::Type = "Concat";
-
 Aidge::Concat_Op::Concat_Op(const Aidge::IOIndex_t nbIn, const std::int32_t axis)
-    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
+    : OperatorTensorWithImpl(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<ConcatAttr::Axis>(axis)))
 {
     if (nbIn == 0) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
     }
-    mImpl = std::make_shared<Concat_OpImpl>(*this);
 }
 
 Aidge::Concat_Op::Concat_Op(const Aidge::Concat_Op& op)
-    : OperatorTensor(op),
-        mAttributes(op.mAttributes)
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Concat_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Concat_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Concat_Op::clone() const {
-    return std::make_shared<Concat_Op>(*this);
-}
+    : OperatorTensorWithImpl(op),
+      mAttributes(op.mAttributes)
+{}
 
 void Aidge::Concat_OpImpl::forward() {
     const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
@@ -126,20 +112,6 @@ bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
     return true;
 }
 
-void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    if (Registrar<Concat_Op>::exists({name})) {
-        SET_IMPL_MACRO(Concat_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Concat_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Concat_Op::getAvailableBackends() const {
-    return Registrar<Concat_Op>::getKeys();
-}
-
 /////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Concat(const Aidge::IOIndex_t nbIn, const std::int32_t axis, const std::string& name) {
diff --git a/src/operator/ConstantOfShape.cpp b/src/operator/ConstantOfShape.cpp
index 7fe9dc130..81944a93b 100644
--- a/src/operator/ConstantOfShape.cpp
+++ b/src/operator/ConstantOfShape.cpp
@@ -26,8 +26,6 @@
 
 namespace Aidge {
 
-const std::string ConstantOfShape_Op::Type = "ConstantOfShape";
-
 bool ConstantOfShape_Op::forwardDims(bool allowDataDependency) {
   if (!inputsAssociated()) {
     return false;
@@ -59,14 +57,9 @@ bool ConstantOfShape_Op::forwardDims(bool allowDataDependency) {
 
 void ConstantOfShape_Op::setBackend(const std::string &name,
                                        Aidge::DeviceIdx_t device) {
-  SET_IMPL_MACRO(ConstantOfShape_Op, *this, name);
-  mOutputs[0]->setBackend(name, device);
+  OperatorTensorWithImpl::setBackend(name, device);
   value().setBackend(name,device);
 }
 
-std::set<std::string> Aidge::ConstantOfShape_Op::getAvailableBackends() const {
-  return Registrar<ConstantOfShape_Op>::getKeys();
-}
-
 } // namespace Aidge
 
-- 
GitLab


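The one non-mechanical change in the patch above is ConstantOfShape_Op::setBackend(): since the base now owns the common work, the override shrinks to a delegation plus the operator-specific part (its Value attribute is itself a Tensor and must follow the backend). The shape of the idiom, for any operator with backend-resident state:

// Sketch only: Example_Op and its weight() accessor are illustrative
// stand-ins for an operator whose attributes include a backend-resident
// Tensor (as ConstantOfShape_Op's Value is).
void Example_Op::setBackend(const std::string& name, DeviceIdx_t device) {
    OperatorTensorWithImpl::setBackend(name, device);  // impl lookup + output tensor
    weight().setBackend(name, device);                 // operator-specific state
}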
From 51fb7287dc90066361da0ea1bd3674ac26bd5550 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Mon, 10 Mar 2025 11:13:07 +0100
Subject: [PATCH 09/12] Fixed linkage issue

---
 src/operator/Abs.cpp             | 25 +++++++++++++++++++++++++
 src/operator/Add.cpp             |  4 ++++
 src/operator/And.cpp             |  4 ++++
 src/operator/ArgMax.cpp          |  4 ++++
 src/operator/Atan.cpp            |  4 ++++
 src/operator/AvgPooling.cpp      |  4 ++++
 src/operator/BatchNorm.cpp       |  4 ++++
 src/operator/BitShift.cpp        |  4 ++++
 src/operator/Cast.cpp            |  4 ++++
 src/operator/Clip.cpp            |  4 ++++
 src/operator/Concat.cpp          |  4 ++++
 src/operator/ConstantOfShape.cpp |  4 ++++
 src/operator/Conv.cpp            |  4 ++++
 13 files changed, 73 insertions(+)
 create mode 100644 src/operator/Abs.cpp

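The linkage issue fixed here is a pre-C++17 one: inside a class, a static constexpr data member is only a declaration in C++11/14. Any ODR-use of it — taking its address, as pybind11's def_readonly_static does, or binding an array to the reference parameters of std::begin/std::end — then requires a namespace-scope definition, or the build fails at link time with undefined-symbol errors. C++17 makes such members implicitly inline, so the definitions added below would be redundant there (and out-of-class definitions of constexpr members are deprecated in C++17). A minimal reproduction, with illustrative names:

#include <iterator>
#include <string>
#include <vector>

struct Example_Op {
    // Declarations only in C++14; implicitly inline definitions in C++17.
    static constexpr const char* const Type = "Example";
    static constexpr const char* const InputsName[] = {"data_input"};
};

// Required in C++14 once the members are ODR-used; without these two lines
// the linker reports undefined references to Example_Op::Type and
// Example_Op::InputsName.
constexpr const char* const Example_Op::Type;
constexpr const char* const Example_Op::InputsName[];

const char* const* type_ptr = &Example_Op::Type;  // ODR-use: address taken

std::vector<std::string> inputs() {
    // ODR-use: the array binds to std::begin/std::end reference parameters.
    return std::vector<std::string>(std::begin(Example_Op::InputsName),
                                    std::end(Example_Op::InputsName));
}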
diff --git a/src/operator/Abs.cpp b/src/operator/Abs.cpp
new file mode 100644
index 000000000..112c44782
--- /dev/null
+++ b/src/operator/Abs.cpp
@@ -0,0 +1,25 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Abs.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+
+constexpr const char* const Aidge::Abs_Op::Type;
+constexpr const char* const Aidge::Abs_Op::InputsName[];
+constexpr const char* const Aidge::Abs_Op::OutputsName[];
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index ea6f73b85..461c6f3dc 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -20,6 +20,10 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 
+constexpr const char* const Aidge::Add_Op::Type;
+constexpr const char* const Aidge::Add_Op::InputsName[];
+constexpr const char* const Aidge::Add_Op::OutputsName[];
+
 Aidge::Add_Op::Add_Op()
     : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1)
 {
diff --git a/src/operator/And.cpp b/src/operator/And.cpp
index b7121513f..cb3976e6c 100644
--- a/src/operator/And.cpp
+++ b/src/operator/And.cpp
@@ -21,6 +21,10 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
+constexpr const char* const Aidge::And_Op::Type;
+constexpr const char* const Aidge::And_Op::InputsName[];
+constexpr const char* const Aidge::And_Op::OutputsName[];
+
 bool Aidge::And_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
diff --git a/src/operator/ArgMax.cpp b/src/operator/ArgMax.cpp
index 35dc281c3..1b0b86575 100644
--- a/src/operator/ArgMax.cpp
+++ b/src/operator/ArgMax.cpp
@@ -22,6 +22,10 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
+constexpr const char* const Aidge::ArgMax_Op::Type;
+constexpr const char* const Aidge::ArgMax_Op::InputsName[];
+constexpr const char* const Aidge::ArgMax_Op::OutputsName[];
+
 Aidge::ArgMax_Op::ArgMax_Op(const Aidge::ArgMax_Op& op)
     : OperatorTensorWithImpl(op),
       mAttributes(op.mAttributes)
diff --git a/src/operator/Atan.cpp b/src/operator/Atan.cpp
index a9e92fb9f..0467745ba 100644
--- a/src/operator/Atan.cpp
+++ b/src/operator/Atan.cpp
@@ -18,6 +18,10 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
+constexpr const char* const Aidge::Atan_Op::Type;
+constexpr const char* const Aidge::Atan_Op::InputsName[];
+constexpr const char* const Aidge::Atan_Op::OutputsName[];
+
 Aidge::Atan_Op::Atan_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
 ///////////////////////////////////////////////////
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 42075567e..c4b9b07c2 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -23,6 +23,10 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::AvgPooling_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::AvgPooling_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::AvgPooling_Op<DIM>::OutputsName[];
+
 template <Aidge::DimIdx_t DIM>
 Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
     : OperatorTensorWithImpl<AvgPooling_Op<DIM>>(op),
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index 8df7e3816..aecdd602e 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -23,6 +23,10 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::BatchNorm_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::BatchNorm_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::BatchNorm_Op<DIM>::OutputsName[];
+
 template <Aidge::DimIdx_t DIM>
 Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
     : OperatorTensorWithImpl<BatchNorm_Op<DIM>>(op),
diff --git a/src/operator/BitShift.cpp b/src/operator/BitShift.cpp
index 5486a9e11..e86c266fd 100644
--- a/src/operator/BitShift.cpp
+++ b/src/operator/BitShift.cpp
@@ -21,6 +21,10 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
+constexpr const char* const Aidge::BitShift_Op::Type;
+constexpr const char* const Aidge::BitShift_Op::InputsName[];
+constexpr const char* const Aidge::BitShift_Op::OutputsName[];
+
 bool Aidge::BitShift_Op::forwardDims(bool /*allowDataDependency*/) {
     if (!inputsAssociated()) {
     return false;
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index 8a45ebd6f..e8755132a 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -25,6 +25,10 @@ void Aidge::Cast_OpImpl::forward() {
     op.getOutput(0)->copyCast(*(op.getInput(0)));
 }
 
+constexpr const char* const Aidge::Cast_Op::Type;
+constexpr const char* const Aidge::Cast_Op::InputsName[];
+constexpr const char* const Aidge::Cast_Op::OutputsName[];
+
 Aidge::Cast_Op::Cast_Op(const DataType targetType)
     : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
       mAttributes(std::make_shared<Attributes_>(
diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp
index 7eacc9c75..638c77f73 100644
--- a/src/operator/Clip.cpp
+++ b/src/operator/Clip.cpp
@@ -18,6 +18,10 @@
 #include "aidge/utils/Types.h"
 #include "aidge/operator/Clip.hpp"
 
+constexpr const char* const Aidge::Clip_Op::Type;
+constexpr const char* const Aidge::Clip_Op::InputsName[];
+constexpr const char* const Aidge::Clip_Op::OutputsName[];
+
 bool Aidge::Clip_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined())
         || (getInput(2) && !getInput(2)->undefined()))
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 3800f7eb6..9dfbf558f 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -18,6 +18,10 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
+constexpr const char* const Aidge::Concat_Op::Type;
+constexpr const char* const Aidge::Concat_Op::InputsName[];
+constexpr const char* const Aidge::Concat_Op::OutputsName[];
+
 Aidge::Concat_Op::Concat_Op(const Aidge::IOIndex_t nbIn, const std::int32_t axis)
     : OperatorTensorWithImpl(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
         mAttributes(std::make_shared<Attributes_>(
diff --git a/src/operator/ConstantOfShape.cpp b/src/operator/ConstantOfShape.cpp
index 81944a93b..60e564d81 100644
--- a/src/operator/ConstantOfShape.cpp
+++ b/src/operator/ConstantOfShape.cpp
@@ -26,6 +26,10 @@
 
 namespace Aidge {
 
+constexpr const char* const ConstantOfShape_Op::Type;
+constexpr const char* const ConstantOfShape_Op::InputsName[];
+constexpr const char* const ConstantOfShape_Op::OutputsName[];
+
 bool ConstantOfShape_Op::forwardDims(bool allowDataDependency) {
   if (!inputsAssociated()) {
     return false;
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 1f3aba1a4..1a2c5d481 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -24,6 +24,10 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Conv_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Conv_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Conv_Op<DIM>::OutputsName[];
+
 template <Aidge::DimIdx_t DIM>
 Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
     : OperatorTensorWithImpl<Conv_Op<DIM>>(op),
-- 
GitLab


From 53907aacc8c9c3d5f2bb911cb37f0004610b9505 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Wed, 30 Apr 2025 09:35:58 +0200
Subject: [PATCH 10/12] Minor fixes following merge

---
 include/aidge/operator/BitShift.hpp |  2 +-
 include/aidge/operator/Reshape.hpp  |  2 +-
 include/aidge/operator/Squeeze.hpp  | 13 +++++++------
 src/operator/Abs.cpp                |  5 +----
 4 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index 81b7136e6..0f1df6948 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -84,7 +84,7 @@ public:
      * @brief Constructor to initialize the `BitShift_Op` with a shift direction.
      * @param[in] direction The direction of the bitwise shift (left or right).
      */
-    BitShift_Op(BitShiftDirection direction, bool rounding);
+    BitShift_Op(BitShiftDirection direction, bool rounding = false);
 
     /**
      * @brief Copy-constructor. Copies operator attributes and output tensors but not input tensors.
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 66982b849..60e49f7bb 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -93,7 +93,7 @@ public:
 
     /**
      * @brief Copy-constructor.
-     * @param op Reshape_Op to copy.
+     * @param[in] op Reshape_Op to copy.
      * @details Copies the operator attributes and its output tensor(s), but not
      * its input tensors. The new operator has no associated input.
      */
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index b76bcd676..89b2dfde4 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -127,12 +127,13 @@ public:
         return mAttributes->template getAttr<SqueezeAttr::Axes>();
     }
 
-  static const std::vector<std::string> getInputsName() {
-    return {"data_input", "axes_to_squeeze"};
-  }
-  static const std::vector<std::string> getOutputsName() {
-    return {"squeezed"};
-  }
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input", "axes_to_squeeze"};
+    }
+
+    static const std::vector<std::string> getOutputsName() {
+        return {"squeezed"};
+    }
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
diff --git a/src/operator/Abs.cpp b/src/operator/Abs.cpp
index 75e18db62..a5d9a7d30 100644
--- a/src/operator/Abs.cpp
+++ b/src/operator/Abs.cpp
@@ -24,9 +24,6 @@ constexpr const char* const Aidge::Abs_Op::Type;
 constexpr const char* const Aidge::Abs_Op::InputsName[];
 constexpr const char* const Aidge::Abs_Op::OutputsName[];
 
-namespace Aidge {
-std::shared_ptr<Node> Abs(const std::string& name) {
+std::shared_ptr<Aidge::Node> Aidge::Abs(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Abs_Op>(), name);
 }
-
-} // namespace Aidge
-- 
GitLab


From 605c978c4cab544ced6408d24734dfde2e51bb2d Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Wed, 30 Apr 2025 14:26:53 +0200
Subject: [PATCH 11/12] Work without Python binding

---
 include/aidge/aidge.hpp                       |   1 -
 .../generic/operator/TransposeImpl.hpp        |  45 +++++
 include/aidge/operator/And.hpp                |   2 +-
 include/aidge/operator/ArgMax.hpp             |   2 +-
 include/aidge/operator/ConvDepthWise.hpp      |  61 ++-----
 include/aidge/operator/ConvTranspose.hpp      |  48 +++---
 include/aidge/operator/CryptoHash.hpp         |  27 +--
 include/aidge/operator/DepthToSpace.hpp       |  50 +-----
 include/aidge/operator/Div.hpp                |  44 +----
 include/aidge/operator/Dropout.hpp            |  37 +---
 include/aidge/operator/Equal.hpp              |  42 +----
 include/aidge/operator/Erf.hpp                |  36 +---
 include/aidge/operator/Expand.hpp             |  30 +---
 include/aidge/operator/FC.hpp                 |  70 +-------
 include/aidge/operator/Flatten.hpp            |  50 +-----
 include/aidge/operator/Fold.hpp               |  54 ++----
 include/aidge/operator/Gather.hpp             |  46 +----
 .../aidge/operator/GlobalAveragePooling.hpp   |  36 +---
 include/aidge/operator/GridSample.hpp         |  43 +----
 include/aidge/operator/Heaviside.hpp          |  42 +----
 include/aidge/operator/ILayerNorm.hpp         |  53 +-----
 include/aidge/operator/Identity.hpp           |  31 +---
 include/aidge/operator/LRN.hpp                |  48 +-----
 include/aidge/operator/LeakyReLU.hpp          |  34 +---
 include/aidge/operator/Ln.hpp                 |  37 +---
 include/aidge/operator/MatMul.hpp             |  36 +---
 include/aidge/operator/MaxPooling.hpp         |  53 ++----
 include/aidge/operator/Memorize.hpp           |  43 +----
 include/aidge/operator/Mod.hpp                |  27 +--
 include/aidge/operator/Move.hpp               |  11 +-
 include/aidge/operator/Mul.hpp                |  33 +---
 include/aidge/operator/Operator.hpp           |   6 -
 include/aidge/operator/OperatorTensor.hpp     |  15 +-
 include/aidge/operator/Pad.hpp                |  61 ++-----
 include/aidge/operator/Pop.hpp                |  46 +----
 include/aidge/operator/Pow.hpp                |  44 +----
 include/aidge/operator/Producer.hpp           |  17 +-
 include/aidge/operator/ReLU.hpp               |  38 +----
 include/aidge/operator/ReduceMean.hpp         |  29 +---
 include/aidge/operator/ReduceSum.hpp          |  39 +----
 include/aidge/operator/Reshape.hpp            |  23 +--
 include/aidge/operator/Resize.hpp             |  46 +----
 include/aidge/operator/Round.hpp              |  35 +---
 include/aidge/operator/Scaling.hpp            | 160 ------------------
 include/aidge/operator/Select.hpp             |  35 +---
 include/aidge/operator/Shape.hpp              |  52 +-----
 include/aidge/operator/ShiftGELU.hpp          |  39 +----
 include/aidge/operator/ShiftMax.hpp           |  32 +---
 include/aidge/operator/Sigmoid.hpp            |  34 +---
 include/aidge/operator/Slice.hpp              |  29 +---
 include/aidge/operator/Softmax.hpp            |  51 +-----
 include/aidge/operator/Split.hpp              |  49 +-----
 include/aidge/operator/Sqrt.hpp               |  35 +---
 include/aidge/operator/Squeeze.hpp            |  35 +---
 include/aidge/operator/Stack.hpp              |  44 +----
 include/aidge/operator/Sub.hpp                |  35 +---
 include/aidge/operator/Tanh.hpp               |  36 +---
 include/aidge/operator/TopK.hpp               |  32 +---
 include/aidge/operator/Transpose.hpp          |  72 +-------
 include/aidge/operator/Unfold.hpp             |  56 ++----
 include/aidge/operator/Unsqueeze.hpp          |  33 +---
 include/aidge/operator/WeightInterleaving.hpp |  37 +---
 python_binding/operator/pybind_Scaling.cpp    |  76 ---------
 python_binding/pybind_core.cpp                |   2 -
 .../generic/operator/TransposeImpl.cpp        |  27 +++
 src/operator/Abs.cpp                          |   2 +
 src/operator/Atan.cpp                         |   2 +-
 src/operator/AvgPooling.cpp                   |   3 +-
 src/operator/BatchNorm.cpp                    |   2 +
 src/operator/Cast.cpp                         |   8 +-
 src/operator/Concat.cpp                       |   2 +-
 src/operator/ConstantOfShape.cpp              |  22 ++-
 src/operator/Conv.cpp                         |   2 +-
 src/operator/ConvDepthWise.cpp                |  41 +----
 src/operator/ConvTranspose.cpp                |  43 +----
 src/operator/CryptoHash.cpp                   |  31 +---
 src/operator/DepthToSpace.cpp                 |  39 +----
 src/operator/Div.cpp                          |  18 +-
 src/operator/Dropout.cpp                      |  32 +---
 src/operator/Equal.cpp                        |  13 +-
 src/operator/Erf.cpp                          |  31 +---
 src/operator/Expand.cpp                       |  26 +--
 src/operator/FC.cpp                           |  30 +---
 src/operator/Flatten.cpp                      |  44 +----
 src/operator/Fold.cpp                         |  34 +---
 src/operator/Gather.cpp                       |  43 +----
 src/operator/GenericOperator.cpp              |   2 +-
 src/operator/GlobalAveragePooling.cpp         |  29 +---
 src/operator/GridSample.cpp                   |  40 +----
 src/operator/Heaviside.cpp                    |  29 +---
 src/operator/ILayerNorm.cpp                   |  16 +-
 src/operator/Identity.cpp                     |  26 +--
 src/operator/LRN.cpp                          |  33 +---
 src/operator/LeakyReLU.cpp                    |  26 +--
 src/operator/Ln.cpp                           |  31 +---
 src/operator/MatMul.cpp                       |  31 +---
 src/operator/MaxPooling.cpp                   |  35 +---
 src/operator/Memorize.cpp                     |  35 +---
 src/operator/Mod.cpp                          |  32 +---
 src/operator/Move.cpp                         |   6 +-
 src/operator/Mul.cpp                          |  31 +---
 src/operator/Pad.cpp                          |  21 +--
 src/operator/Pop.cpp                          |  42 +----
 src/operator/Pow.cpp                          |  17 +-
 src/operator/Producer.cpp                     |   6 +-
 src/operator/ReLU.cpp                         |  29 +---
 src/operator/ReduceMean.cpp                   |  33 +---
 src/operator/ReduceSum.cpp                    |  13 +-
 src/operator/Reshape.cpp                      |  10 +-
 src/operator/Resize.cpp                       |  21 +--
 src/operator/Round.cpp                        |  28 +--
 src/operator/Scaling.cpp                      |  70 --------
 src/operator/Select.cpp                       |  37 +---
 src/operator/Shape.cpp                        |  42 +----
 src/operator/ShiftGELU.cpp                    |  33 +---
 src/operator/ShiftMax.cpp                     |  37 +---
 src/operator/Sigmoid.cpp                      |  32 +---
 src/operator/Slice.cpp                        |  38 +----
 src/operator/Softmax.cpp                      |  33 +---
 src/operator/Split.cpp                        |  44 +----
 src/operator/Sqrt.cpp                         |  30 +---
 src/operator/Squeeze.cpp                      |  31 +---
 src/operator/Stack.cpp                        |  37 +---
 src/operator/Sub.cpp                          |  29 +---
 src/operator/Tanh.cpp                         |  31 +---
 src/operator/TopK.cpp                         |  27 +--
 src/operator/Transpose.cpp                    |  50 +-----
 src/operator/Unfold.cpp                       |  48 +-----
 src/operator/Unsqueeze.cpp                    |  35 +---
 src/operator/WeightInterleaving.cpp           |  49 ++----
 src/recipes/HorizontalTiling.cpp              |   9 +-
 131 files changed, 772 insertions(+), 3632 deletions(-)
 create mode 100644 include/aidge/backend/generic/operator/TransposeImpl.hpp
 delete mode 100644 include/aidge/operator/Scaling.hpp
 delete mode 100644 python_binding/operator/pybind_Scaling.cpp
 create mode 100644 src/backend/generic/operator/TransposeImpl.cpp
 delete mode 100644 src/operator/Scaling.cpp

diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 509aa1e81..744fdd758 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -73,7 +73,6 @@
 #include "aidge/operator/Shape.hpp"
 #include "aidge/operator/ShiftMax.hpp"
 #include "aidge/operator/ShiftGELU.hpp"
-#include "aidge/operator/Scaling.hpp"
 #include "aidge/operator/Slice.hpp"
 #include "aidge/operator/Softmax.hpp"
 #include "aidge/operator/Split.hpp"
diff --git a/include/aidge/backend/generic/operator/TransposeImpl.hpp b/include/aidge/backend/generic/operator/TransposeImpl.hpp
new file mode 100644
index 000000000..e9bc8d3d0
--- /dev/null
+++ b/include/aidge/backend/generic/operator/TransposeImpl.hpp
@@ -0,0 +1,45 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_TRANSPOSEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_TRANSPOSEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+/**
+ * @brief Implementation of the Transpose operator.
+ * @note Since this operator implementation is agnostic to the backend, it is
+ * located here instead of in aidge_backend.
+ */
+class TransposeImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for TransposeImpl.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
+    TransposeImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend)
+    {}
+
+    /**
+     * @brief Perform the forward operation for the transpose.
+     */
+    void forward() override;
+};
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_TRANSPOSEIMPL_H_
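
The matching `src/backend/generic/operator/TransposeImpl.cpp` created by this patch (listed in the diffstat, not reproduced here) supplies the `forward()` definition. Following the one-call pattern visible in `Cast_OpImpl::forward()` earlier in the series, a hedged sketch of what it plausibly contains; `copyTranspose()` and `outputDimsOrder()` are assumptions about the Aidge API:

    // Sketch only; copyTranspose() and outputDimsOrder() are assumed,
    // mirroring the copyCast() one-liner used by Cast_OpImpl::forward().
    #include "aidge/backend/generic/operator/TransposeImpl.hpp"
    #include "aidge/operator/Transpose.hpp"

    void Aidge::TransposeImpl::forward() {
        const Transpose_Op& op = static_cast<const Transpose_Op&>(mOp);
        op.getOutput(0)->copyTranspose(*op.getInput(0), op.outputDimsOrder());
    }
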
diff --git a/include/aidge/operator/And.hpp b/include/aidge/operator/And.hpp
index 9a5d23977..2b203ee70 100644
--- a/include/aidge/operator/And.hpp
+++ b/include/aidge/operator/And.hpp
@@ -48,7 +48,7 @@ namespace Aidge {
  */
 class And_Op : public OperatorTensorWithImpl<And_Op> {
 public:
-    static constexpr const char* const Type = "Add";
+    static constexpr const char* const Type = "And";
     static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
     static constexpr const char* const OutputsName[] = {"data_output"};
 
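The one-line base `OperatorTensorWithImpl<And_Op>` is the CRTP idiom: each operator passes its own type as the template argument, so a single base can provide what every class previously spelled out by hand and what the hunks in this series delete everywhere else, namely the `Registrable` machinery, `clone()`, `setBackend()` and `getAvailableBackends()`. A hedged sketch of the idea; the actual base lives in `OperatorTensor.hpp` and may differ:

    // Sketch only, reusing the pre-existing OperatorTensor, Registrable and
    // OperatorImpl types; not the actual Aidge definition.
    template <class Op>
    class OperatorTensorWithImpl
        : public OperatorTensor,
          public Registrable<Op, std::string,
                             std::function<std::shared_ptr<OperatorImpl>(const Op&)>> {
    public:
        using OperatorTensor::OperatorTensor;

        // CRTP: the base knows the concrete type, so one clone() fits all.
        std::shared_ptr<Operator> clone() const override {
            return std::make_shared<Op>(static_cast<const Op&>(*this));
        }
    };
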
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index 6cb3ce057..5802806c9 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -93,7 +93,7 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    static constexpr const char* const Type = "Add";
+    static constexpr const char* const Type = "ArgMax";
     static constexpr const char* const InputsName[] = {"data_input"};
     static constexpr const char* const OutputsName[] = {"data_output"};
 
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 7324551ec..f2dc3a9aa 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -45,6 +45,10 @@ namespace Aidge {
 enum class ConvDepthWiseAttr {
     GENERATE_LIST_ATTR_ENUM(LIST_CONVDEPTHWISE_ATTR)
 };
+
+template <DimIdx_t DIM> struct ConvDepthWise_Op_Type {};
+template <> struct ConvDepthWise_Op_Type<1> { static constexpr const char* const value = "ConvDepthWise1D"; };
+template <> struct ConvDepthWise_Op_Type<2> { static constexpr const char* const value = "ConvDepthWise2D"; };
 }  // namespace Aidge
 
 namespace {
@@ -78,12 +82,7 @@ namespace Aidge {
  * @see Registrable
  */
 template <DimIdx_t DIM>
-class ConvDepthWise_Op : public OperatorTensor,
-                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>> {
-
-public:
-    static const std::string Type;
-
+class ConvDepthWise_Op : public OperatorTensorWithImpl<ConvDepthWise_Op<DIM>> {
 private:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
                             GENERATE_LIST_ATTR_TYPE(LIST_CONVDEPTHWISE_ATTR)
@@ -95,6 +94,17 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    using OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>::getOutput;
+    using OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>::OperatorTensorWithImpl;
+    using OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>::mOutputs;
+    using OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>::dimsForwarded;
+
+    static constexpr const char* const Type = ConvDepthWise_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input", "weight", "bias"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     ConvDepthWise_Op() = delete;
 
     /**
@@ -106,7 +116,7 @@ public:
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernelDims,
                                const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
+        : OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ConvDepthWiseAttr::StrideDims>(strideDims),
             attr<ConvDepthWiseAttr::DilationDims>(dilationDims),
@@ -120,14 +130,6 @@ public:
      */
     ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned ConvDepthWise_Op object.
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
-    }
-
     /**
      * @brief Compute forward dimensions for the operator.
      * @param[in] allowDataDependency Flag to allow data dependency in dimension calculation.
@@ -147,19 +149,6 @@ public:
                           const std::vector<DimSize_t>& outputDims,
                           const IOIndex_t outputIdx = 0) const override;
 
-    /**
-     * @brief Set the backend for the operator.
-     * @param[in] name The name of the backend.
-     * @param[in] device The device index (default is 0).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the list of available backends for the operator.
-     * @return A set of available backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the number of input channels.
      * @return The number of input channels.
@@ -195,22 +184,6 @@ public:
      * @return The kernel dimensions as a reference.
      */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>(); }
-
-    /**
-     * @brief Get the names of the inputs.
-     * @return A vector containing the input names.
-     */
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input", "weight", "bias"};
-    }
-
-    /**
-     * @brief Get the names of the outputs.
-     * @return A vector containing the output names.
-     */
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 /**
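
Because `Type` is now a compile-time string literal rather than a `static const std::string` defined in the .cpp, the dimension-dependent names can no longer be assembled at runtime; the `ConvDepthWise_Op_Type<DIM>` trait above maps each supported `DIM` to its literal, and leaving the primary template empty turns any unsupported dimension into a compile error. The same pattern recurs for `ConvTranspose_Op_Type` and `Fold_Op_Type` below. A standalone illustration with made-up names:

    #include <cstdio>

    // Primary template left empty: unsupported dimensions fail to compile.
    template <unsigned DIM> struct OpName {};
    template <> struct OpName<1> { static constexpr const char* const value = "Op1D"; };
    template <> struct OpName<2> { static constexpr const char* const value = "Op2D"; };

    template <unsigned DIM>
    struct MyOp {
        static constexpr const char* const Type = OpName<DIM>::value;
    };

    int main() {
        std::printf("%s %s\n", MyOp<1>::Type, MyOp<2>::Type);  // prints "Op1D Op2D"
        // MyOp<3>::Type would not compile: OpName<3> has no 'value' member.
    }
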
diff --git a/include/aidge/operator/ConvTranspose.hpp b/include/aidge/operator/ConvTranspose.hpp
index e573a1a02..ce791b1b1 100644
--- a/include/aidge/operator/ConvTranspose.hpp
+++ b/include/aidge/operator/ConvTranspose.hpp
@@ -30,17 +30,13 @@
 namespace Aidge {
 enum class ConvTransposeAttr { StrideDims, DilationDims, KernelDims };
 
-template <DimIdx_t DIM>
-class ConvTranspose_Op
-    : public OperatorTensor,
-      public Registrable<ConvTranspose_Op<DIM>,
-                         std::string,
-                         std::function<std::shared_ptr<OperatorImpl>(
-                             const ConvTranspose_Op<DIM> &)>> {
-
-  public:
-    static const std::string Type;
+template <DimIdx_t DIM> struct ConvTranspose_Op_Type {};
+template <> struct ConvTranspose_Op_Type<1> { static constexpr const char* const value = "ConvTranspose1D"; };
+template <> struct ConvTranspose_Op_Type<2> { static constexpr const char* const value = "ConvTranspose2D"; };
+template <> struct ConvTranspose_Op_Type<3> { static constexpr const char* const value = "ConvTranspose3D"; };
 
+template <DimIdx_t DIM>
+class ConvTranspose_Op : public OperatorTensorWithImpl<ConvTranspose_Op<DIM>> {
   private:
     using Attributes_ = StaticAttributes<ConvTransposeAttr,
                                          std::array<DimSize_t, DIM>,
@@ -51,6 +47,18 @@ class ConvTranspose_Op
     const std::shared_ptr<Attributes_> mAttributes;
 
   public:
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::getOutput;
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::OperatorTensorWithImpl;
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::mOutputs;
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::dimsForwarded;
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::type;
+
+    static constexpr const char* const Type = ConvTranspose_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input", "weight", "bias"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     ConvTranspose_Op() = delete;
 
     constexpr explicit ConvTranspose_Op(
@@ -59,7 +67,7 @@ class ConvTranspose_Op
             create_array<DimSize_t, DIM>(1),
         const std::array<DimSize_t, DIM> &dilationDims =
             create_array<DimSize_t, DIM>(1))
-    : OperatorTensor(Type,
+    : OperatorTensorWithImpl<ConvTranspose_Op<DIM>>(Type,
                      {InputCategory::Data,
                       InputCategory::Param,
                       InputCategory::OptionalParam},
@@ -77,14 +85,6 @@ class ConvTranspose_Op
      */
     ConvTranspose_Op(const ConvTranspose_Op<DIM> &op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Conv_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ConvTranspose_Op<DIM>>(*this);
-    }
-
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
@@ -92,9 +92,6 @@ class ConvTranspose_Op
                           const std::vector<DimSize_t> &outputDims,
                           const IOIndex_t outputIdx = 0) const override;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
     DimSize_t inChannels() const {
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(
@@ -130,13 +127,6 @@ class ConvTranspose_Op
     inline std::array<DimSize_t, DIM> &kernelDims() const {
         return mAttributes->template getAttr<ConvTransposeAttr::KernelDims>();
     }
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "weight", "bias"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
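
The block of `using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::getInput;` declarations above is not cosmetic. Inside a class template, unqualified names inherited from a base that depends on a template parameter are invisible to ordinary lookup, so members such as `getInput` or `mOutputs` must be re-exposed with `using` (or written as `this->getInput(...)`) for the existing method bodies to keep compiling unchanged. A minimal repro, independent of Aidge:

    template <typename T>
    struct Base {
        int helper() { return 42; }
    };

    template <typename T>
    struct Derived : Base<T> {
        using Base<T>::helper;        // re-expose the dependent-base member

        int f() { return helper(); }  // OK with the using-declaration; without
                                      // it, 'helper' is not found during the
                                      // first lookup phase (this->helper()
                                      // would also work)
    };

    int main() { return Derived<int>{}.f() == 42 ? 0 : 1; }
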
diff --git a/include/aidge/operator/CryptoHash.hpp b/include/aidge/operator/CryptoHash.hpp
index 266adecd3..a95d8451a 100644
--- a/include/aidge/operator/CryptoHash.hpp
+++ b/include/aidge/operator/CryptoHash.hpp
@@ -46,18 +46,17 @@ enum class CryptoHashFunction {
  * @see OperatorTensor
  * @see Registrable
  */
-class CryptoHash_Op : public OperatorTensor,
-    public Registrable<CryptoHash_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const CryptoHash_Op&)>> {
-
-public:
-    static const std::string Type;
-
+class CryptoHash_Op : public OperatorTensorWithImpl<CryptoHash_Op> {
 private:
     using Attributes_ = StaticAttributes<CryptoHashAttr, CryptoHashFunction>;
     template <CryptoHashAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "CryptoHash";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     CryptoHash_Op();
 
     /**
@@ -68,17 +67,8 @@ public:
      */
     CryptoHash_Op(const CryptoHash_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::CryptoHash_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -90,13 +80,6 @@ public:
      * @return Reference to the `crypto_hash_function` attribute.
      */
     inline CryptoHashFunction& cryptoHashFunction() const noexcept { return mAttributes->getAttr<CryptoHashAttr::CryptoHashFunction>(); }
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> CryptoHash(const std::string& name = "");
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
index 0374feaa3..6dd0590b9 100644
--- a/include/aidge/operator/DepthToSpace.hpp
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -21,6 +21,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/DepthToSpaceImpl.hpp"
 
 
 #define LIST_DEPTHTOSPACE_ATTR(X)               \
@@ -69,16 +70,8 @@ namespace Aidge{
  * @see OperatorTensor
  * @see Registrable
  */
-class DepthToSpace_Op : public OperatorTensor,
-                public Registrable<DepthToSpace_Op,
-                    std::string,
-                    std::function<std::shared_ptr<OperatorImpl>(const DepthToSpace_Op &)>> {
+class DepthToSpace_Op : public OperatorTensorWithImpl<DepthToSpace_Op, DepthToSpace_OpImpl> {
 public:
-    /**
-     * @brief The type identifier for the DepthToSpace operator.
-     */
-    static const std::string Type;
-
     /**
      * @enum Mode
      * @brief Defines the modes for depth-to-space transformation.
@@ -92,6 +85,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "DepthToSpace";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     DepthToSpace_Op() = delete;
 
     /**
@@ -109,27 +106,8 @@ public:
      */
     DepthToSpace_Op(const DepthToSpace_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::DepthToSpace_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    /**
-     * @brief Set the backend for this operator.
-     * @param name Backend name.
-     * @param device Device index for the backend.
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the available backends for this operator.
-     * @return A set of strings representing available backends.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return Shared pointer to the attributes.
@@ -147,22 +125,6 @@ public:
      * @return Depth-to-space mode.
      */
     inline Mode& mode() const { return mAttributes->template getAttr<DepthToSpaceAttr::Mode>(); }
-
-    /**
-     * @brief Get the input tensor names.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
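
Unlike the operators above, `DepthToSpace_Op` passes a second argument, `DepthToSpace_OpImpl`, to the base and gains an include of the generic impl header, which suggests the optional second template parameter of `OperatorTensorWithImpl` names a default, backend-agnostic implementation (cf. `Flatten_Op`/`Flatten_OpImpl` and `Gather_Op`/`Gather_OpImpl` further down). A standalone sketch of that dispatch pattern, entirely assumed rather than taken from Aidge:

    #include <memory>

    struct Impl { virtual ~Impl() = default; };

    // Optional second parameter naming a generic fallback implementation.
    template <class Op, class DefaultImpl = void>
    struct WithImpl {
        static std::shared_ptr<Impl> makeDefaultImpl() {
            return std::make_shared<DefaultImpl>();   // generic impl available
        }
    };

    template <class Op>                               // no fallback provided:
    struct WithImpl<Op, void> {                       // a backend impl is required
        static std::shared_ptr<Impl> makeDefaultImpl() { return nullptr; }
    };

    struct MyOp {};
    struct MyGenericImpl : Impl {};

    int main() {
        auto a = WithImpl<MyOp, MyGenericImpl>::makeDefaultImpl();  // non-null
        auto b = WithImpl<MyOp>::makeDefaultImpl();                 // null
        return (a && !b) ? 0 : 1;
    }
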
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 5dec98814..e8dc95caf 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -46,49 +46,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Div_Op : public OperatorTensor,
-    public Registrable<Div_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Div_Op&)>> {
-
+class Div_Op : public OperatorTensorWithImpl<Div_Op> {
 public:
-    static const std::string Type;
-
-    Div_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Div_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Div_Op(const Div_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Div_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    static constexpr const char* const Type = "Div";
+    static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Div_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Div_Op>(*this);
-    }
+    Div_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input_1", "data_input_2"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Div(const std::string& name = "");
diff --git a/include/aidge/operator/Dropout.hpp b/include/aidge/operator/Dropout.hpp
index 4d7465db2..d15569c86 100644
--- a/include/aidge/operator/Dropout.hpp
+++ b/include/aidge/operator/Dropout.hpp
@@ -31,26 +31,21 @@ enum class DropoutAttr {
 };
 
-// Define the Dropout_Op class, inheriting from OperatorTensor and Registrable
+// Define the Dropout_Op class, inheriting from OperatorTensorWithImpl
-class Dropout_Op : public OperatorTensor,
-                  public Registrable<Dropout_Op,
-                                     std::string,
-                                     std::function<std::shared_ptr<OperatorImpl>(const Dropout_Op&)>> {
-public:
-    static const std::string Type;
-
+class Dropout_Op : public OperatorTensorWithImpl<Dropout_Op> {
 private:
     using Attributes_ = StaticAttributes<DropoutAttr,  GENERATE_LIST_ATTR_TYPE(LIST_DROPOUT_ATTR)>;
     template <DropoutAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Dropout";
+    static constexpr const char* const InputsName[] = {"data_input", "probability"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     Dropout_Op(float probability = 0.5f);
 
     Dropout_Op(const Dropout_Op& op);
 
-
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Propagates dimensions through the Dropout operation.
      * This function updates the output Tensors' dimensions based on the input Tensors.
@@ -66,28 +61,10 @@ public:
      */
     bool forwardDims(bool allowDataDependency = true) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    std::set<std::string> getAvailableBackends() const override;
-
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
     inline float& probability() const noexcept { return mAttributes -> getAttr<DropoutAttr::Probability>(); }
 
-    // Input/Output names for the operator
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input", "probability"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
-
-    /**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
-
 private:
     void checkProbability() const;
 };
@@ -107,8 +84,4 @@ constexpr const char* const EnumStrings<Aidge::DropoutAttr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::Dropout_Op::attributesName() {
-    return EnumStrings<Aidge::DropoutAttr>::data;
-}
-
 #endif /* AIDGE_CORE_OPERATOR_DROPOUT_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Equal.hpp b/include/aidge/operator/Equal.hpp
index 12bc9af78..655598d36 100644
--- a/include/aidge/operator/Equal.hpp
+++ b/include/aidge/operator/Equal.hpp
@@ -27,51 +27,19 @@ namespace Aidge {
 /**
  * @brief Tensor element-wise logical equal operation.
  */
-class Equal_Op : public OperatorTensor,
-    public Registrable<Equal_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Equal_Op&)>> {
+class Equal_Op : public OperatorTensorWithImpl<Equal_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Equal";
+    static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @brief Compute element-wise Equal operation on two given inputs.
      * @details supports broadcasting of both operands.
      */
-    Equal_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
-     * but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
-     */
-    Equal_Op(const Equal_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Equal_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Equal_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Equal_Op>(*this);
-    }
+    Equal_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input_1", "data_input_2"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 inline std::shared_ptr<Node> Equal(const std::string& name = "") {
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index c17e8075f..438add606 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -37,39 +37,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Erf_Op : public OperatorTensor,
-    public Registrable<Erf_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const Erf_Op&)>>
-{
+class Erf_Op : public OperatorTensorWithImpl<Erf_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Erf";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    Erf_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Erf_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Erf_Op(const Erf_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Erf_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
+    Erf_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 };
 
 std::shared_ptr<Node> Erf(const std::string& name = "");
diff --git a/include/aidge/operator/Expand.hpp b/include/aidge/operator/Expand.hpp
index 95ca72a27..ed963c283 100644
--- a/include/aidge/operator/Expand.hpp
+++ b/include/aidge/operator/Expand.hpp
@@ -43,14 +43,11 @@ namespace Aidge {
  * @see https://onnx.ai/onnx/repo-docs/Broadcasting.html for detailed ONNX
  * broadcasting rules
  */
-class Expand_Op
-    : public OperatorTensor,
-      public Registrable<
-          Expand_Op,
-          std::string,
-          std::function<std::shared_ptr<OperatorImpl>(const Expand_Op &)>> {
-  public:
-    static const std::string Type;
+class Expand_Op : public OperatorTensorWithImpl<Expand_Op> {
+public:
+    static constexpr const char* const Type = "Expand";
+    static constexpr const char* const InputsName[] = {"data", "shape"};
+    static constexpr const char* const OutputsName[] = {"output"};
 
     /**
      * @brief Operator that broadcasts an input tensor to a larger provided
@@ -72,26 +69,11 @@ class Expand_Op
      * broadcasting rules
      */
     Expand_Op()
-        : OperatorTensor(Type,
+        : OperatorTensorWithImpl(Type,
                          {InputCategory::Data, InputCategory::Data},
                          1) {}
 
-    Expand_Op(const Expand_Op &op);
-
-    std::shared_ptr<Operator> clone() const override;
-
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string &name,
-                    DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data", "shape"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index e513c3059..8b83c0bd6 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -50,15 +50,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class FC_Op : public OperatorTensor,
-              public Registrable<FC_Op,
-                                 std::string,
-                                 std::function<std::shared_ptr<OperatorImpl>(const FC_Op &)>> {
+class FC_Op : public OperatorTensorWithImpl<FC_Op> {
 public:
-    /**
-     * @brief Static type identifier for the FC operator.
-     */
-    static const std::string Type;
+    static constexpr const char* const Type = "FC";
+    static constexpr const char* const InputsName[] = {"data_input", "weight", "bias"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @brief Default constructor for the FC operator.
@@ -66,33 +62,9 @@ public:
      * Initializes the operator with a type identifier and input categories.
      */
     FC_Op()
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1)
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1)
     {}
 
-    /**
-     * @brief Copy constructor.
-     *
-     * Copies the attributes and output tensor(s) of the operator, but does not
-     * copy input tensors. The new operator instance has no associated inputs.
-     *
-     * @param op The `FC_Op` instance to copy.
-     */
-    FC_Op(const FC_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(FC_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clones the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override final;
-
     /**
      * @brief Associates an input tensor with the operator.
      *
@@ -121,22 +93,6 @@ public:
      */
     bool forwardDType() override final;
 
-    /**
-     * @brief Sets the backend for the operator.
-     *
-     * Configures the backend used for computation.
-     *
-     * @param[in] name Name of the backend.
-     * @param[in] device Index of the target device (default is 0).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Retrieves the available backends for the operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Gets the number of input channels for the FC operator.
      *
@@ -166,22 +122,6 @@ public:
         }
         return getInput(1)->template dims<2>()[0];
     }
-
-    /**
-     * @brief Retrieves the input tensor names for the FC operator.
-     * @return A vector of input tensor names: `{"data_input", "weight", "bias"}`.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "weight", "bias"};
-    }
-
-    /**
-     * @brief Retrieves the output tensor names for the FC operator.
-     * @return A vector of output tensor names: `{"data_output"}`.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Flatten.hpp b/include/aidge/operator/Flatten.hpp
index 9ce80c9a6..abcd35ba1 100644
--- a/include/aidge/operator/Flatten.hpp
+++ b/include/aidge/operator/Flatten.hpp
@@ -21,6 +21,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/FlattenImpl.hpp"
 
 
 #define LIST_FLATTEN_ATTR(X)  \
@@ -64,21 +65,17 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Flatten_Op : public OperatorTensor,
-                   public Registrable<Flatten_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Flatten_Op&)>> {
-
-public:
-    /**
-     * @brief The type identifier for the Flatten operator.
-     */
-    static const std::string Type;
-
+class Flatten_Op : public OperatorTensorWithImpl<Flatten_Op, Flatten_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<FlattenAttr, GENERATE_LIST_ATTR_TYPE(LIST_FLATTEN_ATTR)>;
     template <FlattenAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Flatten";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Deleted default constructor.
      */
@@ -96,12 +93,6 @@ public:
      */
     Flatten_Op(const Flatten_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute the forward dimensions.
      * @param[in] allowDataDependency Whether to allow data dependency in computation.
@@ -109,19 +100,6 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Set the backend for the operator.
-     * @param[in] name The name of the backend.
-     * @param[in] device Optional. The device index.
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the set of available backends for the operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -133,22 +111,6 @@ public:
      * @return A reference to the axis attribute.
      */
     inline std::int64_t& axis() const { return mAttributes->template getAttr<FlattenAttr::Axis>(); }
-
-    /**
-     * @brief Get the names of the input tensors.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the names of the output tensors.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index 6b5f37cdf..c5bdf27b4 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -48,6 +48,9 @@ namespace Aidge {
 enum class FoldAttr {
     GENERATE_LIST_ATTR_ENUM(LIST_FOLD_ATTR)
 };
+
+template <DimIdx_t DIM> struct Fold_Op_Type {};
+template <> struct Fold_Op_Type<2> { static constexpr const char* const value = "Fold2D"; };
 }  // namespace Aidge
 
 namespace {
@@ -91,12 +94,7 @@ namespace Aidge {
  */
 
 template <DimIdx_t DIM>
-class Fold_Op : public OperatorTensor,
-                public Registrable<Fold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)>> {
-
-public:
-    static const std::string Type;
-
+class Fold_Op : public OperatorTensorWithImpl<Fold_Op<DIM>> {
 private:
     using Attributes_ = StaticAttributes<FoldAttr, GENERATE_LIST_ATTR_TYPE(LIST_FOLD_ATTR)>;
 
@@ -104,6 +102,14 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    using OperatorTensorWithImpl<Fold_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<Fold_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<Fold_Op<DIM>>::mOutputs;
+
+    static constexpr const char* const Type = Fold_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     Fold_Op() = delete;
 
     /**
@@ -117,7 +123,7 @@ public:
                       const std::array<DimSize_t, DIM>& kernelDims,
                       const std::array<DimSize_t, DIM>& strideDims = create_array<DimSize_t,DIM>(1),
                       const std::array<DimSize_t, DIM>& dilationDims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
+        : OperatorTensorWithImpl<Fold_Op<DIM>>(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<FoldAttr::OutputDims>(outputDims),
             attr<FoldAttr::StrideDims>(strideDims),
@@ -132,11 +138,6 @@ public:
      */
     Fold_Op(const Fold_Op<DIM>& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute forward dimensions for the operator.
      * @param allowDataDependency Flag to allow data dependency in dimension calculation.
@@ -144,19 +145,6 @@ public:
      */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    /**
-     * @brief Set the backend for the operator.
-     * @param name Name of the backend.
-     * @param device Index of the device.
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the available backends for this operator.
-     * @return A set of available backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return Shared pointer to the attributes.
@@ -186,22 +174,6 @@ public:
      * @return Kernel dimensions.
      */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<FoldAttr::KernelDims>(); }
-
-    /**
-     * @brief Get the input names for the Fold operation.
-     * @return List of input names.
-     */
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output names for the Fold operation.
-     * @return List of output names.
-     */
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 7aaae0423..3a887aeb0 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/GatherImpl.hpp"
 
 
 #define LIST_GATHER_ATTR(X)  \
@@ -44,13 +45,8 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Gather_Op : public OperatorTensor,
-                  public Registrable<Gather_Op,
-                                     std::string,
-                                     std::function<std::shared_ptr<OperatorImpl>(const Gather_Op&)>> {
+class Gather_Op : public OperatorTensorWithImpl<Gather_Op, Gather_OpImpl> {
 public:
-    static const std::string Type;
-
     /**
      * @enum Attr
      * @brief Attributes for the Gather operation.
@@ -70,6 +66,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Gather";
+    static constexpr const char* const InputsName[] = {"data_input", "indices"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Default constructor is deleted.
      */
@@ -93,11 +93,6 @@ public:
      */
     Gather_Op(const Gather_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Check if dimensions have been forwarded.
      * @return True if dimensions have been forwarded, false otherwise.
@@ -111,19 +106,6 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Set the backend for the operator.
-     * @param name The name of the backend.
-     * @param device Optional. The device index.
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the available backends for the operator.
-     * @return A set of available backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -147,22 +129,6 @@ public:
      * @return The gathered shape attribute.
      */
     inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes->getAttr<Attr::GatheredShape>(); }
-
-    /**
-     * @brief Get the input tensor names.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "indices"};
-    }
-
-    /**
-     * @brief Get the output tensor names.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
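
Across these headers the same substitution recurs: the per-operator getInputsName()/getOutputsName() getters are replaced by static constexpr name arrays (InputsName, OutputsName) declared next to Type. A minimal sketch of how generic code could consume the new members follows; printIONames is a hypothetical helper, not part of this patch, and it assumes C++17 inline semantics for in-class static constexpr arrays (under C++14 the arrays would need out-of-line definitions to be iterated):

    #include <iostream>

    // Hypothetical helper (not in this patch): walks the static constexpr
    // name arrays that replace the old getInputsName()/getOutputsName().
    template <typename Op>
    void printIONames() {
        std::cout << Op::Type << "\n  inputs: ";
        for (const char* n : Op::InputsName) std::cout << n << ' ';
        std::cout << "\n  outputs: ";
        for (const char* n : Op::OutputsName) std::cout << n << ' ';
        std::cout << '\n';
    }

    // Usage sketch: printIONames<Aidge::Gather_Op>();
    // -> Gather / inputs: data_input indices / outputs: data_output

Being constexpr, the arrays are also usable in constant expressions (e.g. a static_assert on an operator's arity), where the old getters had to build a std::vector at run time.
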
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 0cfc16cca..fd215e53c 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -45,41 +45,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class GlobalAveragePooling_Op
-    : public OperatorTensor,
-      public Registrable<GlobalAveragePooling_Op, std::string,
-                         std::function<std::shared_ptr<OperatorImpl>(
-                             const GlobalAveragePooling_Op &)>> {
+class GlobalAveragePooling_Op : public OperatorTensorWithImpl<GlobalAveragePooling_Op> {
 public:
-	static const std::string Type;
+    static constexpr const char* const Type = "GlobalAveragePooling";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-	GlobalAveragePooling_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-	/**
-	 * @brief Copy-constructor.
-	 * @param op GlobalAveragePooling_Op to copy.
-	 * @details Copies the operator attributes and its output tensor(s), but not
-	 * its input tensors. The new operator has no associated input.
-	 */
-	GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op);
-
-	/**
-	 * @brief Clone the operator using its copy-constructor.
-	 * @see Operator::GlobalAveragePooling_Op
-	 */
-	std::shared_ptr<Operator> clone() const override;
+	GlobalAveragePooling_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
 	bool forwardDims(bool allowDataDependency = false) override final;
-
-	void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-	std::set<std::string> getAvailableBackends() const override;
-
-	static const std::vector<std::string> getInputsName() {
-	return {"data_input"};
-	}
-	static const std::vector<std::string> getOutputsName() {
-	return {"data_output"};
-	}
 };
 
 std::shared_ptr<Node> GlobalAveragePooling(const std::string &name = "");
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index 95d231535..1d6a83223 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -83,11 +83,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class GridSample_Op : public OperatorTensor,
-	public Registrable<GridSample_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const GridSample_Op&)>> {
-
+class GridSample_Op : public OperatorTensorWithImpl<GridSample_Op> {
 public:
-	static const std::string Type;
+    static constexpr const char* const Type = "GridSample";
+    static constexpr const char* const InputsName[] = {"data_input", "grid_field"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
 	/**
 	 * @enum Mode
@@ -129,12 +129,6 @@ public:
 	 */
 	~GridSample_Op() noexcept;
 
-	/**
-	 * @brief Clone the operator using its copy-constructor.
-	 * @return A shared pointer to the cloned operator.
-	 */
-	std::shared_ptr<Operator> clone() const override;
-
 	/**
 	 * @brief Determines whether dimensions can be forwarded.
 	 * @param allowDataDependencies Allow data-dependent dimensions.
@@ -142,19 +136,6 @@ public:
 	 */
 	bool forwardDims(bool /*allowDataDependencies*/ = false) override final;
 
-	/**
-	 * @brief Sets the backend for execution.
-	 * @param name Backend name.
-	 * @param device Device index.
-	 */
-	void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-	/**
-	 * @brief Retrieves the available backends.
-	 * @return A set of available backend names.
-	 */
-    std::set<std::string> getAvailableBackends() const override;
-
 	/**
 	 * @brief Retrieves the operator's attributes.
 	 * @return Shared pointer to the attributes.
@@ -178,22 +159,6 @@ public:
 	 * @return True if corners are aligned.
 	 */
 	inline bool alignCorners() const { return mAttributes->template getAttr<GridSampleAttr::AlignCorners>(); }
-
-	/**
-	 * @brief Retrieves the input names for GridSample.
-	 * @return Vector of input tensor names.
-	 */
-	static const std::vector<std::string> getInputsName() {
-		return {"data_input", "grid_field"};
-	}
-
-	/**
-	 * @brief Retrieves the output names for GridSample.
-	 * @return Vector of output tensor names.
-	 */
-	static const std::vector<std::string> getOutputsName() {
-		return {"data_output"};
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Heaviside.hpp b/include/aidge/operator/Heaviside.hpp
index b8aea020f..d72f73aa6 100644
--- a/include/aidge/operator/Heaviside.hpp
+++ b/include/aidge/operator/Heaviside.hpp
@@ -43,11 +43,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Heaviside_Op
-    : public OperatorTensor,
-      public Registrable<Heaviside_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Heaviside_Op &)>> {
+class Heaviside_Op : public OperatorTensorWithImpl<Heaviside_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Heaviside";
+    static constexpr const char* const InputsName[] = {"data_input", "data_values"};
+    static constexpr const char* const OutputsName[] = {"output"};
 
     /**
      * @enum Attr
@@ -80,40 +80,6 @@ public:
      */
     Heaviside_Op(const Heaviside_Op &op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Set the backend for this operator.
-     * @param name The backend name.
-     * @param device The device index (default is 0).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the set of available backends.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
-    /**
-     * @brief Get the input names required by this operator.
-     * @return A vector containing the input names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "data_values"};
-    }
-
-    /**
-     * @brief Get the output names generated by this operator.
-     * @return A vector containing the output names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"output"};
-    }
-
     /**
      * @brief Get the attributes of the operator.
      */
diff --git a/include/aidge/operator/ILayerNorm.hpp b/include/aidge/operator/ILayerNorm.hpp
index dc90b7622..eda2c3df5 100644
--- a/include/aidge/operator/ILayerNorm.hpp
+++ b/include/aidge/operator/ILayerNorm.hpp
@@ -39,43 +39,19 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ILayerNorm_Op : public OperatorTensor,
-    public Registrable<ILayerNorm_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ILayerNorm_Op&)>> {
-
+class ILayerNorm_Op : public OperatorTensorWithImpl<ILayerNorm_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "ILayerNorm";
+    static constexpr const char* const InputsName[] = {"data_input", "weight", "bias"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @brief Default constructor.
      */
     ILayerNorm_Op()
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::Param}, 1)
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Param, InputCategory::Param}, 1)
     {}
 
-    /**
-     * @brief Copy-constructor.
-     * @param[in] op ILayerNorm_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not its input tensors. 
-     * The new operator has no associated input.
-     */
-    ILayerNorm_Op(const ILayerNorm_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ILayerNorm_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ILayerNorm_Op>(*this);
-    }
-
     /**
      * @brief Associates an input tensor with the operator.
      * @param inputIdx The index of the input.
@@ -89,25 +65,6 @@ public:
      * @return True if propagation is successful, false otherwise.
      */
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    /**
-     * @brief Gets the names of the input tensors.
-     * @return A vector containing the names of input tensors.
-     */
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input", "weight", "bias"};
-    }
-
-    /**
-     * @brief Gets the names of the output tensors.
-     * @return A vector containing the names of output tensors.
-     */
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index a0200db6f..0f79b253f 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -24,6 +24,7 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/backend/generic/operator/IdentityImpl.hpp"
 
 namespace Aidge {
 
@@ -34,35 +35,13 @@ namespace Aidge {
  * Note: we still need to update this class to remove the use of Impl.
  *
  */
-class Identity_Op : public OperatorTensor,
-    public Registrable<Identity_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Identity_Op&)>> {
+class Identity_Op : public OperatorTensorWithImpl<Identity_Op, Identity_OpImpl> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Identity";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Identity_Op();
-
-    /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
-     * but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
-     */
-    Identity_Op(const Identity_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Identity_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Identity(const std::string& name = "");
diff --git a/include/aidge/operator/LRN.hpp b/include/aidge/operator/LRN.hpp
index f4c87d9c8..1ad63caf3 100644
--- a/include/aidge/operator/LRN.hpp
+++ b/include/aidge/operator/LRN.hpp
@@ -57,16 +57,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class LRN_Op : public OperatorTensor,
-                public Registrable<LRN_Op,
-                                   std::string,
-                                   std::function<std::shared_ptr<OperatorImpl>(const LRN_Op&)>> {
-
+class LRN_Op : public OperatorTensorWithImpl<LRN_Op> {
 public:
-    /**
-     * @brief Static type string for the LRN operator.
-     */
-    static const std::string Type;
+    static constexpr const char* const Type = "LRN";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @enum Attr
@@ -106,25 +101,6 @@ public:
      */
     LRN_Op(const LRN_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Set the backend for the LRN operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the available backends for the LRN operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -154,22 +130,6 @@ public:
      * @return Reference to the `size` attribute.
      */
     inline std::int32_t& size() const noexcept { return mAttributes->getAttr<Attr::Size>(); }
-
-    /**
-     * @brief Get the input tensor names for the LRN operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names for the LRN operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 0f9a1e087..4a267b8a5 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -41,11 +41,11 @@ namespace Aidge{
  * @see OperatorTensor
  * @see Registrable
  */
-class LeakyReLU_Op : public OperatorTensor,
-    public Registrable<LeakyReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>> {
-
+class LeakyReLU_Op : public OperatorTensorWithImpl<LeakyReLU_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "LeakyReLU";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @enum LeakyReLUAttr
@@ -74,7 +74,7 @@ public:
      * @param[in] negativeSlope The slope for negative input values.
      */
     LeakyReLU_Op(float negativeSlope)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
+        : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
           mAttributes(
             std::make_shared<Attributes_>(
                 attr<Attr::NegativeSlope>(negativeSlope)))
@@ -88,14 +88,6 @@ public:
      */
     LeakyReLU_Op(const LeakyReLU_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -105,22 +97,6 @@ public:
      * @brief Get the negative slope value.
      */
     inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<Attr::NegativeSlope>(); }
-
-    /**
-     * @brief Get the names of the input tensors.
-     * @return A vector containing the names of input tensors.
-     */
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the names of the output tensors.
-     * @return A vector containing the names of output tensors.
-     */
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Ln.hpp
index 4a78db439..d03c0ba83 100755
--- a/include/aidge/operator/Ln.hpp
+++ b/include/aidge/operator/Ln.hpp
@@ -37,40 +37,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Ln_Op : public OperatorTensor,
-    public Registrable<Ln_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::unique_ptr<OperatorImpl>(const Ln_Op&)>>
-{
+class Ln_Op : public OperatorTensorWithImpl<Ln_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Ln";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    Ln_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Ln_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Ln_Op(const Ln_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Ln_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
+    Ln_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 };
 
 std::shared_ptr<Node> Ln(const std::string& name = "");
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 0313815ee..66a063bdb 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -51,28 +51,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class MatMul_Op : public OperatorTensor,
-              public Registrable<MatMul_Op,
-                                 std::string,
-                                 std::function<std::shared_ptr<OperatorImpl>(const MatMul_Op &)>> {
+class MatMul_Op : public OperatorTensorWithImpl<MatMul_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "MatMul";
+    static constexpr const char* const InputsName[] = {"data_input1", "data_input2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    MatMul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op MatMul_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    MatMul_Op(const MatMul_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::MatMul_Op
-     */
-    std::shared_ptr<Operator> clone() const override final;
+    MatMul_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Compute dimensions for the output Tensor following the same rules as
@@ -86,17 +71,6 @@ public:
      * dimensions (D) -> (D,1). The appended 1 is removed after computation.
      */
     bool forwardDims(bool allowDataDependency = false) override final;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input1", "data_input2"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> MatMul(const std::string& name = "");
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index de3b2a58c..968d92bef 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -56,6 +56,11 @@ namespace Aidge {
 enum class MaxPoolingAttr {
     GENERATE_LIST_ATTR_ENUM(LIST_MAXPOOLING_ATTR)
 };
+
+template <DimIdx_t DIM> struct MaxPooling_Op_Type {};
+template <> struct MaxPooling_Op_Type<1> { static constexpr const char* const value = "MaxPooling1D"; };
+template <> struct MaxPooling_Op_Type<2> { static constexpr const char* const value = "MaxPooling2D"; };
+template <> struct MaxPooling_Op_Type<3> { static constexpr const char* const value = "MaxPooling3D"; };
 } // namespace Aidge
 
 namespace {
@@ -98,14 +103,7 @@ namespace Aidge {
  */
 
 template <DimIdx_t DIM>
-class MaxPooling_Op : public OperatorTensor,
-                public Registrable<MaxPooling_Op<DIM>,
-                                   std::string,
-                                   std::function<std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>>
-{
-public:
-    static const std::string Type; ///< Static identifier for this operator type.
-
+class MaxPooling_Op : public OperatorTensorWithImpl<MaxPooling_Op<DIM>> {
 private:
     using Attributes_ = StaticAttributes<MaxPoolingAttr,
                                 GENERATE_LIST_ATTR_TYPE(LIST_MAXPOOLING_ATTR)
@@ -115,6 +113,14 @@ private:
     const std::shared_ptr<Attributes_> mAttributes; ///< Shared pointer to operator attributes.
 
 public:
+    using OperatorTensorWithImpl<MaxPooling_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<MaxPooling_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<MaxPooling_Op<DIM>>::mOutputs;
+
+    static constexpr const char* const Type = MaxPooling_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     MaxPooling_Op() = delete; ///< Deleted default constructor.
 
     /**
@@ -137,12 +143,6 @@ public:
      */
     MaxPooling_Op(const MaxPooling_Op<DIM>& op);
 
-    /**
-     * @brief Clones the operator using the copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Computes output tensor dimensions based on input dimensions and operator attributes.
      * @param[in] allowDataDependency If true, dimensions may depend on input data; otherwise, strictly attribute-based.
@@ -150,19 +150,6 @@ public:
      */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    /**
-     * @brief Sets the backend implementation for this operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index where the backend will run (default: 0).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Retrieves the list of available backend implementations for this operator.
-     * @return A set of available backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Accessor for operator attributes.
      * @return A shared pointer to the attributes object.
@@ -192,18 +179,6 @@ public:
      * @return Boolean value indicating whether ceil mode is enabled.
      */
     inline bool& ceilMode() const { return mAttributes->template getAttr<MaxPoolingAttr::CeilMode>(); }
-
-    /**
-     * @brief Retrieves the names of the input tensors.
-     * @return A vector of input tensors names.
-     */
-    static const std::vector<std::string> getInputsName(){ return {"data_input"}; }
-
-    /**
-     * @brief Retrieves the names of the output tensors.
-     * @return A vector of output tensors names.
-     */
-    static const std::vector<std::string> getOutputsName(){ return {"data_output"}; }
 };
 
 /**
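
For the dimension-templated operators, the type string differs per instantiation ("MaxPooling1D", "MaxPooling2D", ...), so it cannot be a single literal: the patch maps DIM to the string through a trait specialized per supported dimension. A self-contained sketch of the same idiom, using stand-in names:

    #include <cstdint>

    using DimIdx_t = std::uint8_t;  // assumption: stand-in for Aidge::DimIdx_t

    // Trait specialized per supported dimension; requesting ::value on an
    // unsupported DIM fails at compile time rather than at run time.
    template <DimIdx_t DIM> struct PoolOpType {};
    template <> struct PoolOpType<1> { static constexpr const char* const value = "MaxPooling1D"; };
    template <> struct PoolOpType<2> { static constexpr const char* const value = "MaxPooling2D"; };

    template <DimIdx_t DIM>
    struct PoolOp {
        static constexpr const char* const Type = PoolOpType<DIM>::value;
    };

    static_assert(PoolOp<2>::Type == PoolOpType<2>::value, "resolved at compile time");
    int main() { return 0; }
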
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index faba98077..b2937e5ca 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -22,6 +22,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/MemorizeImpl.hpp"
 
 
 #define LIST_MEMORIZE_ATTR(X)                        \
@@ -47,10 +48,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Memorize_Op : public OperatorTensor,
-    public Registrable<Memorize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Memorize_Op&)>> {
+class Memorize_Op : public OperatorTensorWithImpl<Memorize_Op, Memorize_OpImpl> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Memorize";
+    static constexpr const char* const InputsName[] = {"data_input", "data_input_init"};
+    static constexpr const char* const OutputsName[] = {"data_output", "data_output_rec"};
 
     /**
      * @enum Attr
@@ -86,25 +88,6 @@ public:
      */
     Memorize_Op(const Memorize_Op& op);
 
-    /**
-     * @brief Clone the operator by creating a copy of it.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Assign a specific backend and device for computation.
-     * @param name Name of the backend.
-     * @param device The device index (default is 0).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the list of available backends compatible with this operator.
-     * @return A set of strings representing backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Perform dimension inference for the operator, optionally allowing
      * data dependency during the process.
@@ -146,22 +129,6 @@ public:
      * @return A reference to the end step value.
      */
     inline std::uint32_t& endStep() const { return mAttributes->template getAttr<Attr::EndStep>(); }
-
-    /**
-     * @brief Retrieve the names of the operator's input tensors.
-     * @return A vector of strings representing input tensor names.
-     */
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input", "data_input_init"};
-    }
-
-    /**
-     * @brief Retrieve the names of the operator's output tensors.
-     * @return A vector of strings representing output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output", "data_output_rec"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Mod.hpp b/include/aidge/operator/Mod.hpp
index 56a9381e0..c005bf80c 100644
--- a/include/aidge/operator/Mod.hpp
+++ b/include/aidge/operator/Mod.hpp
@@ -56,18 +56,17 @@ enum class ModAttr {
  * @see OperatorTensor
  * @see Registrable
  */
-class Mod_Op : public OperatorTensor,
-    public Registrable<Mod_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Mod_Op&)>> {
-
-public:
-    static const std::string Type;
-
+class Mod_Op : public OperatorTensorWithImpl<Mod_Op> {
 private:
     using Attributes_ = StaticAttributes<ModAttr, bool>;
     template <ModAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Mod";
+    static constexpr const char* const InputsName[] = {"dividend", "divisor"};
+    static constexpr const char* const OutputsName[] = {"remainder"};
+
     Mod_Op();
 
     /**
@@ -78,17 +77,8 @@ public:
      */
     Mod_Op(const Mod_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Mod_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -100,13 +90,6 @@ public:
      * @return Reference to the `fmod` attribute.
      */
     inline bool& fmod() const noexcept { return mAttributes->getAttr<ModAttr::Fmod>(); }
-
-    static const std::vector<std::string> getInputsName(){
-        return {"dividend", "divisor"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"remainder"};
-    }
 };
 
 std::shared_ptr<Node> Mod(const std::string& name = "");
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index b516ef549..c02daecbc 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -38,7 +38,9 @@ namespace Aidge {
 class Move_Op : public OperatorTensor,
     public Registrable<Move_Op, std::tuple<std::string, std::string>, std::function<std::unique_ptr<OperatorImpl>(const Move_Op&)>> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Move";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Move_Op();
 
@@ -58,13 +60,6 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
     std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Move(const std::string& name = "");
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 913fa05b4..067dbebd0 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -46,38 +46,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Mul_Op : public OperatorTensor,
-    public Registrable<Mul_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Mul_Op&)>> {
+class Mul_Op : public OperatorTensorWithImpl<Mul_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Mul";
+    static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    Mul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Mul_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Mul_Op(const Mul_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Mul_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
+    Mul_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input_1", "data_input_2"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Mul(const std::string& name = "");
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index bbd6b49fa..8f2930e14 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -366,12 +366,6 @@ public:
      */
     inline bool isBackEdge(IOIndex_t inputIdx) const { return mBackEdges.find(inputIdx) != mBackEdges.end(); }
 
-    /** @brief Returns an empty vector of input names. */
-    static const std::vector<std::string> getInputsName() { return {}; }
-
-    /** @brief Returns an empty vector of output names. */
-    static const std::vector<std::string> getOutputsName() { return {}; }
-
 #ifdef PYBIND
     /**
      * @brief Returns a string representation of the operator (for Python bindings).
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 92ce71cc8..217acf986 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -232,8 +232,13 @@ class OperatorTensorWithImpl : public OperatorTensor,
 {
 public:
     OperatorTensorWithImpl(const std::string& type, const std::vector<InputCategory>& inputsCategory,
-        const IOIndex_t nbOut): OperatorTensor(type, inputsCategory, nbOut) {}
-    
+        const IOIndex_t nbOut): OperatorTensor(type, inputsCategory, nbOut)
+    {
+        if (!std::is_same<DEF_IMPL, OperatorImpl>::value) {
+            mImpl = std::make_shared<DEF_IMPL>(*static_cast<T*>(this));
+        }
+    }
+
     OperatorTensorWithImpl(const T& op)
         : OperatorTensor(op)
     {
@@ -269,7 +274,9 @@ public:
             SET_IMPL_MACRO(T, *static_cast<T*>(this), name);
         }
 
-        mOutputs[0]->setBackend(name, device);
+        for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
+            mOutputs[i]->setBackend(name, device);
+        }
 
         for (IOIndex_t i = 0; i < nbInputs(); ++i) {
             if (inputCategory(i) == InputCategory::Param || inputCategory(i) == InputCategory::OptionalParam) {
@@ -277,7 +284,7 @@ public:
                     getInput(i)->setBackend(name, device);
                 }
                 else if (inputCategory(i) != InputCategory::OptionalParam) {
-                    Log::notice("{}_Op::setBackend(): could not set backend for {} input, because input is not connected", type(), getInputsName()[i]);
+                    Log::notice("{}_Op::setBackend(): could not set backend for input #{}, because input is not connected", type(), i);
                 }
             }
         }
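
These two hunks carry the core mechanics of the refactoring: the constructor now instantiates a default implementation whenever a dedicated DEF_IMPL is supplied, and setBackend() propagates the backend to every output instead of only mOutputs[0]. The following self-contained toy shows the CRTP shape involved; every type here is a heavily simplified stand-in for the corresponding Aidge class, not the real API:

    #include <iostream>
    #include <memory>
    #include <type_traits>

    struct Impl { virtual ~Impl() = default; };    // stand-in for OperatorImpl

    struct Op {                                    // stand-in for OperatorTensor
        virtual ~Op() = default;
        virtual std::shared_ptr<Op> clone() const = 0;
        std::shared_ptr<Impl> mImpl;
    };

    template <class T, class DEF_IMPL = Impl>
    struct OpWithImpl : Op {                       // stand-in for OperatorTensorWithImpl
        OpWithImpl() {
            // Mirrors the patched constructor: create a default implementation
            // only when a dedicated one is supplied as DEF_IMPL.
            if (!std::is_same<DEF_IMPL, Impl>::value)
                mImpl = std::make_shared<DEF_IMPL>();
        }
        std::shared_ptr<Op> clone() const override {
            // One generic clone() via the derived copy-constructor, replacing
            // the per-operator clone() bodies deleted throughout this patch.
            return std::make_shared<T>(*static_cast<const T*>(this));
        }
    };

    struct ReluLike : OpWithImpl<ReluLike> {};            // backend-registered impl only
    struct PopImpl  : Impl {};
    struct PopLike  : OpWithImpl<PopLike, PopImpl> {};    // generic default impl

    int main() {
        PopLike p;
        std::cout << std::boolalpha
                  << (p.mImpl != nullptr) << '\n'           // true: PopImpl created
                  << (ReluLike{}.mImpl != nullptr) << '\n'; // false: awaits setBackend()
        auto c = p.clone();                                 // clones as PopLike
        (void)c;
    }

This also makes explicit why the per-operator copy-constructors survive while clone(), setBackend() and getAvailableBackends() disappear: the base only needs T to be copy-constructible.
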
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 4fdde713b..c628c3fbe 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -54,6 +54,11 @@ enum class PadBorderType {
     Wrap,     ///< Values wrap around the tensor dimensions.
     Zero      ///< All out-of-bound values are set to 0.
 };
+
+template <DimIdx_t DIM> struct Pad_Op_Type {};
+template <> struct Pad_Op_Type<1> { static constexpr const char* const value = "Pad1D"; };
+template <> struct Pad_Op_Type<2> { static constexpr const char* const value = "Pad2D"; };
+template <> struct Pad_Op_Type<3> { static constexpr const char* const value = "Pad3D"; };
 } // namespace Aidge
 
 namespace {
@@ -127,14 +132,7 @@ namespace Aidge {
  * batch and channel consistency, or aligning tensor dimensions in machine learning workflows.
  */
 template <DimIdx_t DIM>
-class Pad_Op : public OperatorTensor,
-               public Registrable<Pad_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pad_Op<DIM>&)>> {
-public:
-    /**
-     * @brief Static string indicating the type of the operator.
-     */
-    static const std::string Type;
-
+class Pad_Op : public OperatorTensorWithImpl<Pad_Op<DIM>> {
 private:
     using Attributes_ = StaticAttributes<PadAttr, GENERATE_LIST_ATTR_TYPE(LIST_PAD_ATTR)>;
     template <PadAttr e>
@@ -143,6 +141,14 @@ private:
     const std::shared_ptr<Attributes_> mAttributes; ///< Holds operator attributes.
 
 public:
+    using OperatorTensorWithImpl<Pad_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<Pad_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<Pad_Op<DIM>>::mOutputs;
+
+    static constexpr const char* const Type = Pad_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Deleted default constructor.
      */
@@ -157,7 +163,7 @@ public:
     constexpr Pad_Op(const std::array<DimSize_t, 2 * DIM>& beginEndTuples,
                      PadBorderType borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
+        : OperatorTensorWithImpl<Pad_Op<DIM>>(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
               attr<PadAttr::BeginEndBorders>(beginEndTuples),
               attr<PadAttr::BorderType>(borderType),
@@ -169,15 +175,9 @@ public:
      * @details Copies operator attributes and its output tensors, but not its input tensors. The new operator has no associated input.
      */
     Pad_Op(const Pad_Op& op)
-        : OperatorTensor(op),
+        : OperatorTensorWithImpl<Pad_Op<DIM>>(op),
           mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {}
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute output dimensions during the forward pass.
      * @param[in] allowDataDependency Flag indicating whether to allow data-dependent dimensions.
@@ -185,19 +185,6 @@ public:
      */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    /**
-     * @brief Set the backend for the Pad operator.
-     * @param name Name of the backend.
-     * @param device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the available backends for the Pad operator.
-     * @return A set of available backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -227,22 +214,6 @@ public:
     inline double& borderValue() const noexcept {
         return mAttributes->template getAttr<PadAttr::BorderValue>();
     }
-
-    /**
-     * @brief Get the input tensor names.
-     * @return Vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names.
-     * @return Vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
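
Pad_Op (like MaxPooling_Op above) must re-export inherited names with using declarations because its base class now depends on the template parameter DIM: under two-phase lookup, unqualified names are never searched in a dependent base. A minimal self-contained illustration of the rule the using lines work around:

    // Names from a dependent base are invisible to unqualified lookup.
    template <typename T>
    struct Base {
        int helper() const { return 1; }
    };

    template <typename T>
    struct Derived : Base<T> {
        using Base<T>::helper;                  // without this, the call below fails:
        int call() const { return helper(); }   // error otherwise: 'helper' not declared
        // Alternatives: this->helper() or Base<T>::helper()
    };

    int main() { return Derived<int>{}.call() - 1; }  // exits with 0
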
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 83afcf685..05510062e 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -22,6 +22,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/PopImpl.hpp"
 
 
 #define LIST_POP_ATTR(X)  \
@@ -65,17 +66,17 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Pop_Op : public OperatorTensor,
-    public Registrable<Pop_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Pop_Op&)>> {
-public:
-    static const std::string Type;
-
+class Pop_Op : public OperatorTensorWithImpl<Pop_Op, Pop_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<PopAttr, GENERATE_LIST_ATTR_TYPE(LIST_POP_ATTR)>;
     template <PopAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Pop";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Default constructor for the `Pop` operator.
      */
@@ -88,25 +89,6 @@ public:
      */
     Pop_Op(const Pop_Op& op);
 
-    /**
-     * @brief Clone the operator by creating a copy of it.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Assign a specific backend and device for computation.
-     * @param name Name of the backend.
-     * @param device The device index (default is 0).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the list of available backends compatible with this operator.
-     * @return A set of strings representing backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Perform dimension inference for the operator, optionally allowing
      * data dependency during the process.
@@ -145,22 +127,6 @@ public:
     inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<PopAttr::ForwardStep>(); }
 
     inline std::uint32_t& backwardStep() const { return mAttributes->template getAttr<PopAttr::BackwardStep>(); }
-
-    /**
-     * @brief Retrieve the names of the operator's input tensors.
-     * @return A vector of strings representing input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Retrieve the names of the operator's output tensors.
-     * @return A vector of strings representing output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index 5d0afc79d..18b1f41b6 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -46,49 +46,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Pow_Op : public OperatorTensor,
-    public Registrable<Pow_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pow_Op&)>> {
+class Pow_Op : public OperatorTensorWithImpl<Pow_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Pow";
+    static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    Pow_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Pow_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Pow_Op(const Pow_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Pow_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Pow_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Pow_Op>(*this);
-    }
+    Pow_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input_1", "data_input_2"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Pow(const std::string& name = "");
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index ae88c0c71..a61186acc 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -81,7 +81,8 @@ class Producer_Op
                          std::function<std::shared_ptr<OperatorImpl>( const Producer_Op& )>>
 {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Producer";
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
 private:
     using Attributes_ = StaticAttributes<ProducerAttr,
@@ -216,20 +217,6 @@ public:
      */
     inline bool dimsForwarded() const noexcept override final { return true; }
 
-    /**
-     * @brief Retrieves the names of the inputs for the operator.
-     *
-     * @return An empty vector, as `Producer_Op` takes no inputs.
-     */
-    static const std::vector<std::string> getInputsName() { return {}; }
-
-    /**
-     * @brief Retrieves the names of the outputs for the operator.
-     *
-     * @return A vector containing the output name "data_output".
-     */
-    static const std::vector<std::string> getOutputsName() { return {"data_output"}; }
-
     /**
      * @brief Sets the output tensor for the operator.
      *
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index a9a84a3ee..1e2c24d05 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -37,41 +37,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ReLU_Op :
-    public OperatorTensor,
-    public Registrable<ReLU_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>>
-{
+class ReLU_Op : public OperatorTensorWithImpl<ReLU_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "ReLU";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    ReLU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op ReLU_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    ReLU_Op(const ReLU_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::ReLU_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
+    ReLU_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 };
 
 std::shared_ptr<Node> ReLU(const std::string& name = "");
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 59a91835b..ef1909d21 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -58,11 +58,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ReduceMean_Op : public OperatorTensor,
-                public Registrable<ReduceMean_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>> {
-
+class ReduceMean_Op : public OperatorTensorWithImpl<ReduceMean_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "ReduceMean";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @enum Attr
@@ -109,11 +109,6 @@ public:
      */
     ReduceMean_Op(const ReduceMean_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute the output dimensions during the forward pass.
      * @param[in] allowDataDependency Whether to allow data-dependent dimensions.
@@ -121,14 +116,6 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Set the backend for the Reshape operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -149,14 +136,6 @@ public:
      */
     inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<Attr::NoopWithEmptyAxes>(); }
 
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
     virtual ~ReduceMean_Op() noexcept;
 };
 
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index cceb9147b..183caf654 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -57,11 +57,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ReduceSum_Op : public OperatorTensor,
-                public Registrable<ReduceSum_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceSum_Op &)>> {
-
+class ReduceSum_Op : public OperatorTensorWithImpl<ReduceSum_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "ReduceSum";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @enum Attr
@@ -97,7 +97,7 @@ public:
      * and if false, we reduce on all axes
      */
     ReduceSum_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
+        : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<Attr::Axes>(axes),
             attr<Attr::KeepDims>(keep_dims),
@@ -111,29 +111,12 @@ public:
      * its input tensors. The new operator has no associated input.
      */
     ReduceSum_Op(const ReduceSum_Op& op)
-        : OperatorTensor(op),
+        : OperatorTensorWithImpl(op),
           mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ReduceSum_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::ReduceSum_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ReduceSum_Op>(*this);
-    }
+    {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -153,14 +136,6 @@ public:
      * @brief Get the behavior when axes are empty.
      */
     inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<Attr::NoopWithEmptyAxes>(); }
-
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 60e49f7bb..54c1e92d8 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -74,10 +74,9 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    /**
-     * @brief Static type string for the Reshape operator.
-     */
-    static const std::string Type;
+    static constexpr const char* const Type = "Reshape";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @brief Deleted default constructor.
@@ -136,22 +135,6 @@ public:
      * @return Reference to the AllowZero attribute.
      */
     inline bool& allowZero() const { return mAttributes->template getAttr<ReshapeAttr::AllowZero>(); }
-
-    /**
-     * @brief Get the input tensor names for the Reshape operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names for the Reshape operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 27bbddecb..921b86d46 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -99,13 +99,7 @@ namespace Aidge {
  * @param InterpolationMode type of interpolation (currently only supports cubic
  * interpolation)
  */
-class Resize_Op
-    : public OperatorTensor,
-      public Registrable<
-          Resize_Op,
-          std::string,
-          std::function<std::shared_ptr<OperatorImpl>(const Resize_Op &)>> {
-
+class Resize_Op : public OperatorTensorWithImpl<Resize_Op> {
 private:
     using Attributes_ =
         StaticAttributes<ResizeAttr,
@@ -115,7 +109,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Resize";
+    static constexpr const char* const InputsName[] = {"data_input", "roi ", "scales", "sizes"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Creates a Resize operator.
      * This node can take 4 different inputs; more details in the class
@@ -138,7 +135,7 @@ public:
         Interpolation::Mode interpol_mode = Interpolation::Mode::RoundPreferFloor,
         float cubic_coef_a = -.75f,
         PadBorderType paddingMode = PadBorderType::Edge)
-        : OperatorTensor(Type,
+        : OperatorTensorWithImpl(Type,
                          {InputCategory::Data,
                           InputCategory::OptionalData,
                           InputCategory::OptionalData,
@@ -157,31 +154,12 @@ public:
      * @param op Operator to copy.
      */
     Resize_Op(const Resize_Op &op)
-        : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Resize_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Resize_Op
-     */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<Resize_Op>(*this);
-    }
+        : OperatorTensorWithImpl(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+    {}
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string &name,
-                    DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override {
-        return Registrar<Resize_Op>::getKeys();
-    }
-
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
     inline Interpolation::CoordinateTransformation
@@ -198,14 +176,6 @@ public:
     inline PadBorderType paddingMode() const {
         return mAttributes->template getAttr<ResizeAttr::PaddingMode>();
     }
-
-    static const std::vector<std::string> getInputsName() {
-        //  roi, scales, sizes, even if considered as const parameters/input
-        return {"data_input", "roi ", "scales", "sizes"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Round.hpp b/include/aidge/operator/Round.hpp
index 3a5bb0859..acfc9f2a8 100644
--- a/include/aidge/operator/Round.hpp
+++ b/include/aidge/operator/Round.hpp
@@ -36,38 +36,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Round_Op : public OperatorTensor,
-                public Registrable<Round_Op,  // <Op, backend, implementation creation function>
-                                std::string,
-                                std::function<std::shared_ptr<OperatorImpl>(const Round_Op&)>>
-{
+class Round_Op : public OperatorTensorWithImpl<Round_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Round";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    Round_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Round_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Round_Op(const Round_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Round_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
+    Round_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 };
 
 std::shared_ptr<Node> Round(const std::string& name = "");
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
deleted file mode 100644
index b073943e2..000000000
--- a/include/aidge/operator/Scaling.hpp
+++ /dev/null
@@ -1,160 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CORE_OPERATOR_SCALING_H_
-#define AIDGE_CORE_OPERATOR_SCALING_H_
-
-#include <cstddef>  // std::size_t
-#include <vector>
-#include <memory>
-
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/graph/Node.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
-#include "aidge/utils/Types.h"
-
-// Caution: This operator is now deprecated and should no longer be used.
-// It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
-
-#define LIST_SCALING_ATTR(X) \
-    X(ScalingFactor, "scaling_factor", float), \
-    X(QuantizedNbBits, "quantized_nb_bits", std::size_t), \
-    X(IsOutputUnsigned, "is_output_unsigned", bool)
-
-namespace Aidge {
-/**
- * @enum ScalingAttr
- * @brief Attributes for the Scaling operation.
- *
- * - ScalingFactor: Floating-point scaling factor applied to the input tensor.
- * - QuantizedNbBits: Specifies the bit-width used for quantization.
- * - IsOutputUnsigned: Indicates whether the quantized output values are unsigned.
- */
-enum class ScalingAttr {
-    GENERATE_LIST_ATTR_ENUM(LIST_SCALING_ATTR)
-};
-} // namespace Aidge
-
-namespace {
-template <>
-struct EnumStrings<Aidge::ScalingAttr> {
-    static const char* const data[];
-};
-constexpr const char* const EnumStrings<Aidge::ScalingAttr>::data[] = {
-    GENERATE_LIST_ATTR_STR(LIST_SCALING_ATTR)
-};
-}
-
-namespace Aidge {
-/**
- * @brief Description of a scaling operation to scale and quantize input tensors.
- *
- * The `Scaling_Op` class applies a scaling factor to the input tensor, quantizes
- * the scaled values to a specified bit-width, and outputs either signed or unsigned integers
- * based on the configuration.
- *
- * The input and output Tensors have the same dimensions.
- *
- * ### Deprecation Notice
- * This operator is deprecated and has been replaced by the `Quantizer` MetaOperator.
- * It is retained for backward compatibility and should not be used in new implementations.
- *
- * @see OperatorTensor
- * @see Registrable
- */
-class Scaling_Op
-    : public OperatorTensor,
-      public Registrable<Scaling_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Scaling_Op&)>> {
-
-public:
-    static const std::string Type;
-
-private:
-    using Attributes_ = StaticAttributes<ScalingAttr, GENERATE_LIST_ATTR_TYPE(LIST_SCALING_ATTR)>;
-    template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
-    const std::shared_ptr<Attributes_> mAttributes;
-
-public:
-    Scaling_Op() = delete;
-
-    /**
-     * @brief Constructor for the Scaling operator.
-     * @param[in] scalingFactor Scaling factor to be applied to the input tensor.
-     * @param[in] nbBits Number of bits for quantization.
-     * @param[in] isOutputUnsigned Flag indicating whether the output should be unsigned.
-     */
-    Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned);
-
-    /**
-     * @brief Copy-constructor.
-     * @param[in] op Scaling_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not its input tensors.
-     * The new operator has no associated input.
-     */
-    Scaling_Op(const Scaling_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    /**
-     * @brief Get the attributes of the operator.
-     */
-    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-
-    /**
-     * @brief Get the scaling factor.
-     */
-    inline float& scalingFactor() const noexcept { return mAttributes -> getAttr<ScalingAttr::ScalingFactor>(); }
-
-    /**
-     * @brief Get the number of quantization bits.
-     */
-    inline std::size_t& quantizedNbBits() const noexcept { return mAttributes -> getAttr<ScalingAttr::QuantizedNbBits>(); }
-
-    /**
-     * @brief Check if the output is unsigned.
-     */
-    inline bool& isOutputUnsigned() const noexcept { return mAttributes -> getAttr<ScalingAttr::IsOutputUnsigned>(); }
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-};
-
-/**
- * @brief Apply a scaling and quantization operation on a tensor.
- *
- * @param[in] scalingFactor Scaling factor to apply to the input tensor.
- * @param[in] quantizedNbBits Number of bits for quantization.
- * @param[in] isOutputUnsigned Whether the quantized output should be unsigned.
- * @param[in] name Name of the Operator.
- * @return std::shared_ptr<Node> Node containing the Operator.
- */
-std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
-                                     std::size_t quantizedNbBits = 8,
-                                     bool isOutputUnsigned = true,
-                                     const std::string& name = "");
-} // namespace Aidge
-
-#undef LIST_SCALING_ATTR
-
-#endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
diff --git a/include/aidge/operator/Select.hpp b/include/aidge/operator/Select.hpp
index cd0a56bb9..a877c34cd 100644
--- a/include/aidge/operator/Select.hpp
+++ b/include/aidge/operator/Select.hpp
@@ -21,6 +21,7 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/backend/generic/operator/SelectImpl.hpp"
 
 namespace Aidge {
 
@@ -29,41 +30,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Select_Op : public OperatorTensor,
-    public Registrable<Select_Op,
-                       std::string,
-                       std::function<std::shared_ptr<OperatorImpl>(const Select_Op&)>>
-{
+class Select_Op : public OperatorTensorWithImpl<Select_Op, Select_OpImpl> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Select";
+    static constexpr const char* const InputsName[] = {"select", "data_input_0", "data_input_n"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Select_Op(const Aidge::IOIndex_t nbIn);
 
-    /**
-     * @brief Copy-constructor.
-     * @param op Select_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Select_Op(const Select_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Select_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName() {
-        return {"select", "data_input_0", "data_input_n"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Select(const IOIndex_t nbIn, const std::string& name = "");
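A hedged usage sketch for the refactored Select; the runtime role of the "select" input is inferred from the input names only, not verified here:

    // Assumed semantics: "select" holds an index choosing which of the nbIn
    // data inputs is forwarded to "data_output".
    std::shared_ptr<Aidge::Node> sel = Aidge::Select(/*nbIn=*/3, "my_select");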
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index e58493818..b16c2b63d 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/ShapeImpl.hpp"
 
 
 #define LIST_SHAPE_ATTR(X) \
@@ -63,17 +64,7 @@ namespace Aidge {
  * @example Input: Tensor with shape `[4, 5, 6, 7]`, `start=1`, `end=3` -> Output: `[5, 6]`
  * @example Input: Tensor with shape `[4, 5, 6]`, `start=0`, `end=-1` (default) -> Output: `[4, 5, 6]`
  */
-class Shape_Op : public OperatorTensor,
-                public Registrable<Shape_Op,
-                                   std::string,
-                                   std::function<std::shared_ptr<OperatorImpl>(const Shape_Op&)>> {
-
-public:
-    /**
-     * @brief Static type string for the Shape operator.
-     */
-    static const std::string Type;
-
+class Shape_Op : public OperatorTensorWithImpl<Shape_Op, Shape_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<ShapeAttr, GENERATE_LIST_ATTR_TYPE(LIST_SHAPE_ATTR)>;
     template <ShapeAttr e> using attr = typename Attributes_::template attr<e>;
@@ -82,6 +73,10 @@ private:
     using outDType = cpptype_t<DataType::Int64>;
 
 public:
+    static constexpr const char* const Type = "Shape";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Constructor for the Shape operator.
      * @param[in] start Start index for slicing dimensions.
@@ -97,12 +92,6 @@ public:
      */
     Shape_Op(const Shape_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute the output dimensions during the forward pass.
      * @param allowDataDependency Whether to allow data-dependent dimensions.
@@ -116,21 +105,8 @@ public:
      */
     bool forwardDType() override final;
 
-    /**
-     * @brief Set the backend for the Shape operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
     void setDataType(const Aidge::DataType &datatype) const override;
 
-    /**
-     * @brief Get the available backends for the Shape operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -148,22 +124,6 @@ public:
      * @return Reference to the end index attribute.
      */
     inline std::int64_t& end() const noexcept { return mAttributes->getAttr<ShapeAttr::End>(); }
-
-    /**
-     * @brief Get the input tensor names for the Shape operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names for the Shape operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
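Tying the Start/End attributes to the @example lines kept in the header, a hedged usage sketch (the factory signature Shape(start, end, name) is assumed here):

    // On an input of shape [4, 5, 6, 7], start=1 and end=3 produce the
    // Int64 tensor {5, 6}, per the header's own @example.
    auto shapeNode = Aidge::Shape(/*start=*/1, /*end=*/3, "shape_op");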
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
index 2375f845f..428ed7e14 100644
--- a/include/aidge/operator/ShiftGELU.hpp
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -50,44 +50,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ShiftGELU_Op : public OperatorTensor,
-    public Registrable<ShiftGELU_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)>>
-{
+class ShiftGELU_Op : public OperatorTensorWithImpl<ShiftGELU_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "ShiftGELU";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     ShiftGELU_Op();
-
-    /**
-     * @brief Copy-constructor.
-     * @param op ShiftGELU_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    ShiftGELU_Op(const ShiftGELU_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::ShiftGELU_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Set the backend for the Reshape operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> ShiftGELU(const std::string& name = "");
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
index fc76e3005..81be1b15b 100644
--- a/include/aidge/operator/ShiftMax.hpp
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -51,37 +51,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ShiftMax_Op : public OperatorTensor,
-    public Registrable<ShiftMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftMax_Op&)>> {
+class ShiftMax_Op : public OperatorTensorWithImpl<ShiftMax_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "ShiftMax";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     ShiftMax_Op();
-
-    /**
-     * @brief Copy-constructor.
-     * @param op ShiftMax_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    ShiftMax_Op(const ShiftMax_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::ShiftMax_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> ShiftMax(const std::string& name = "");
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index 0208a7f60..b9bbcbe4d 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -36,39 +36,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Sigmoid_Op : public OperatorTensor,
-    public Registrable<Sigmoid_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const Sigmoid_Op&)>>
-{
+class Sigmoid_Op : public OperatorTensorWithImpl<Sigmoid_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Sigmoid";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Sigmoid_Op();
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Sigmoid_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Sigmoid_Op(const Sigmoid_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Sigmoid_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Sigmoid(const std::string& name = "");
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 77baec5f8..e10843d09 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -22,6 +22,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/SliceImpl.hpp"
 
 
 #define LIST_SLICE_ATTR(X) \
@@ -89,12 +90,7 @@ namespace Aidge{
  * @see OperatorTensor
  * @see Registrable
  */
-class Slice_Op : public OperatorTensor,
-                public Registrable<Slice_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Slice_Op &)>> {
-
-public:
-    static const std::string Type;
-
+class Slice_Op : public OperatorTensorWithImpl<Slice_Op, Slice_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<SliceAttr, GENERATE_LIST_ATTR_TYPE(LIST_SLICE_ATTR)>;
     template <SliceAttr e>
@@ -102,6 +98,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Slice";
+    static constexpr const char* const InputsName[] = {"data_input", "starts", "ends", "axes", "steps"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     Slice_Op() = delete;
 
     /**
@@ -124,19 +124,10 @@ public:
      */
     Slice_Op(const Slice_Op &op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool dimsForwarded() const override final;
 
     bool forwardDims(bool allowDataDependency = true) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -161,14 +152,6 @@ public:
      * @brief Get the steps for the slice operation.
      */
     inline std::vector<std::int64_t>& steps() const noexcept { return mAttributes -> getAttr<SliceAttr::Steps>(); }
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "starts", "ends", "axes", "steps"};
-    }
-
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
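The starts/ends/axes/steps input names match the ONNX-style Slice signature; a small worked example under that assumption:

    // Input of shape [5]: {10, 11, 12, 13, 14}
    // starts = {1}, ends = {4}, axes = {0}, steps = {1}
    // -> output {11, 12, 13}: index 1 (inclusive) up to 4 (exclusive), step 1.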
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 92ac5e080..97b64c528 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -68,23 +68,17 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Softmax_Op : public OperatorTensor,
-                   public Registrable<Softmax_Op,
-                                      std::string,
-                                      std::function<std::shared_ptr<OperatorImpl>(const Softmax_Op&)>> {
-
-public:
-    /**
-     * @brief Static type string for the Softmax operator.
-     */
-    static const std::string Type;
-
+class Softmax_Op : public OperatorTensorWithImpl<Softmax_Op> {
 private:
     using Attributes_ = StaticAttributes<SoftmaxAttr, GENERATE_LIST_ATTR_TYPE(LIST_SOFTMAX_ATTR)>;
     template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Softmax";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Deleted default constructor.
      */
@@ -104,25 +98,6 @@ public:
      */
     Softmax_Op(const Softmax_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Set the backend for the Softmax operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the available backends for the Softmax operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -134,22 +109,6 @@ public:
      * @return Reference to the axis attribute.
      */
     inline std::int32_t& axis() const noexcept { return mAttributes->getAttr<SoftmaxAttr::Axis>(); }
-
-    /**
-     * @brief Get the input names for the Softmax operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output names for the Softmax operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index d6d44c2cc..b09e77f7b 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -22,6 +22,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/SplitImpl.hpp"
 
 
 #define LIST_SPLIT_ATTR(X) \
@@ -82,21 +83,17 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Split_Op : public OperatorTensor,
-                 public Registrable<Split_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Split_Op &)>> {
-
-public:
-    /**
-     * @brief Static type string for the Split operator.
-     */
-    static const std::string Type;
-
+class Split_Op : public OperatorTensorWithImpl<Split_Op, Split_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<SplitAttr,GENERATE_LIST_ATTR_TYPE(LIST_SPLIT_ATTR)>;
     template <SplitAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Split";
+    static constexpr const char* const InputsName[] = {"data_input", "split"};
+    static constexpr const char* const OutputsName[] = {"data_output_0", "data_output_n"};
+
     Split_Op() = delete;
 
     /**
@@ -115,27 +112,9 @@ public:
      */
     Split_Op(const Split_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Set the backend for the Split operator.
-     * @param[in] name Backend name.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the available backends for the Split operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -150,22 +129,6 @@ public:
      * @brief Get the sizes of each split.
      */
     inline std::vector<DimSize_t>& split() const { return mAttributes->template getAttr<SplitAttr::Split>(); }
-
-    /**
-     * @brief Get the input names for the Split operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "split"};
-    }
-
-    /**
-     * @brief Get the output names for the Split operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output_0", "data_output_n"};
-    }
 };
 
 /**
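Assuming ONNX-style semantics for the Axis and Split attributes, a worked example of how the outputs get their shapes:

    // Input of shape [6, 4], axis = 0, split = {2, 4}
    // -> "data_output_0" has shape [2, 4], "data_output_1" has shape [4, 4].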
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 8b9eb1b78..caf534b05 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -35,38 +35,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Sqrt_Op : public OperatorTensor,
-                public Registrable<Sqrt_Op,  // <Op, backend, implementation creation function>
-                                std::string,
-                                std::function<std::shared_ptr<OperatorImpl>(const Sqrt_Op&)>> {
+class Sqrt_Op : public OperatorTensorWithImpl<Sqrt_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Sqrt";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    Sqrt_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Sqrt_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Sqrt_Op(const Sqrt_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Sqrt_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
+    Sqrt_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 };
 
 std::shared_ptr<Node> Sqrt(const std::string& name = "");
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 89b2dfde4..89da2f77f 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -24,6 +24,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/SqueezeImpl.hpp"
 
 
 #define LIST_SQUEEZE_ATTR(X) \
@@ -68,21 +69,17 @@ namespace Aidge {
  * @example Calling squeeze() with no argument will result in the removal of
  * every 1-sized dimension in the tensor.
  */
-class Squeeze_Op
-    : public OperatorTensor,
-      public Registrable<Squeeze_Op, std::string,
-                         std::function<std::shared_ptr<OperatorImpl>(const Squeeze_Op &)>> {
-
-public:
-  static const std::string
-      Type; // name of the type of the operation (Here "Squeeze")
-
+class Squeeze_Op : public OperatorTensorWithImpl<Squeeze_Op, Squeeze_OpImpl> {
 private:
   using Attributes_ = StaticAttributes<SqueezeAttr, std::vector<std::int8_t>>;
   template <SqueezeAttr e> using attr = typename Attributes_::template attr<e>;
   const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Squeeze";
+    static constexpr const char* const InputsName[] = {"data_input", "axes_to_squeeze"};
+    static constexpr const char* const OutputsName[] = {"squeezed"};
+
   /**
    * @brief constructor for Squeeze op
    * @param[in] axes around which perform the operation
@@ -97,24 +94,12 @@ public:
    */
   Squeeze_Op(const Squeeze_Op &op);
 
-  /**
-   * @brief Clone the operator using its copy-constructor.
-   * @see Operator::MatMul_Op
-   */
-  std::shared_ptr<Operator> clone() const override final {
-    return std::make_shared<Squeeze_Op>(*this);
-  }
-
   /**
    * @brief Compute dimensions for the output Tensor
    */
   bool forwardDims(bool allowDataDependency = false) override final;
   bool dimsForwarded() const override final;
 
-  void setBackend(const std::string &name,
-                  DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
   inline std::shared_ptr<Attributes> attributes() const override {
     return mAttributes;
   }
@@ -126,14 +111,6 @@ public:
     inline std::vector<std::int8_t> &axes() const noexcept {
         return mAttributes->template getAttr<SqueezeAttr::Axes>();
     }
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "axes_to_squeeze"};
-    }
-
-    static const std::vector<std::string> getOutputsName() {
-        return {"squeezed"};
-    }
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
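A worked example of the Axes attribute, consistent with the @example kept in the header (a no-argument squeeze removes every 1-sized dimension):

    // Input of shape [1, 3, 1, 4]:
    //   axes = {0}   -> "squeezed" has shape [3, 1, 4]
    //   axes omitted -> every 1-sized dimension removed, shape [3, 4]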
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
index 67d8cd6f0..42c26690d 100644
--- a/include/aidge/operator/Stack.hpp
+++ b/include/aidge/operator/Stack.hpp
@@ -21,6 +21,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/StackImpl.hpp"
 
 namespace Aidge {
 
@@ -69,8 +70,7 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class StackOp : public OperatorTensor,
-    public Registrable<StackOp, std::string, std::function<std::unique_ptr<OperatorImpl>(const StackOp&)>> {
+class StackOp : public OperatorTensorWithImpl<StackOp, StackOpImpl> {
 private:
     using Attributes_ = StaticAttributes<StackAttr,
             GENERATE_LIST_ATTR_TYPE(LIST_STACK_ATTR)
@@ -79,7 +79,9 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Stack";
+    static constexpr const char* const InputsName[] = {"data_input", "max_elements"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @brief Constructs a new Stack Operator.
@@ -94,25 +96,6 @@ public:
      */
     StackOp(const StackOp& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Assign a specific backend and device for computation.
-     * @param name Name of the backend.
-     * @param device The device index (default is 0).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the list of available backends compatible with this operator.
-     * @return A set of strings representing backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Check if dimensions have been forwarded successfully.
      * @return True if dimensions are forwarded.
@@ -168,23 +151,6 @@ public:
     inline std::uint32_t& backwardStep() const {
         return mAttributes->template getAttr<StackAttr::BackwardStep>();
     }
-
-
-    /**
-     * @brief Retrieve the names of the operator's input tensors.
-     * @return A vector of strings representing input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "max_elements"};
-    }
-
-    /**
-     * @brief Retrieve the names of the operator's output tensors.
-     * @return A vector of strings representing output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 73cce3f55..685fde9e8 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -46,40 +46,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Sub_Op : public OperatorTensor,
-    public Registrable<Sub_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Sub_Op&)>> {
+class Sub_Op : public OperatorTensorWithImpl<Sub_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Sub";
+    static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-public:
-    Sub_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Sub_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Sub_Op(const Sub_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Sub_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
+    Sub_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input_1", "data_input_2"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Sub(const std::string& name = "");
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index 71b1511d9..9df5b2a7c 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -37,41 +37,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Tanh_Op : 
-    public OperatorTensor,
-    public Registrable<Tanh_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::unique_ptr<OperatorImpl>(const Tanh_Op&)>>
-{
+class Tanh_Op : public OperatorTensorWithImpl<Tanh_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Tanh";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Tanh_Op();
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Tanh_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Tanh_Op(const Tanh_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Tanh_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Tanh(const std::string& name = "");
diff --git a/include/aidge/operator/TopK.hpp b/include/aidge/operator/TopK.hpp
index e1aa193bb..073ecc198 100644
--- a/include/aidge/operator/TopK.hpp
+++ b/include/aidge/operator/TopK.hpp
@@ -57,8 +57,7 @@ constexpr const char* const EnumStrings<Aidge::TopKAttr>::data[] = {
 
 namespace Aidge {
 
-class TopK_Op : public OperatorTensor,
-    public Registrable<TopK_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const TopK_Op&)>> {
+class TopK_Op : public OperatorTensorWithImpl<TopK_Op> {
 private:
     using Attributes_ =
         StaticAttributes<TopKAttr,
@@ -68,7 +67,9 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "TopK";
+    static constexpr const char* const InputsName[] = {"x", "k"};
+    static constexpr const char* const OutputsName[] = {"values", "indices"};
 
     TopK_Op(int64_t axis = -1,
         bool largest = true,
@@ -83,40 +84,15 @@ public:
      */
     TopK_Op(const TopK_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::TopK_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<TopK_Op>(*this);
-    }
-
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
     void setDataType(const DataType& dataType) const override final;
-    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline int64_t& axis() const { return mAttributes->template getAttr<TopKAttr::Axis>(); }
     inline bool& largest() const { return mAttributes->template getAttr<TopKAttr::Largest>(); }
     inline bool& sorted() const { return mAttributes->template getAttr<TopKAttr::Sorted>(); }
     inline IOIndex_t& k() const { return mAttributes->template getAttr<TopKAttr::K>(); }
-
-    static const std::vector<std::string> getInputsName(){
-        return {"x", "k"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"values", "indices"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::TopKAttr>::data;
-	}
 };
 
 std::shared_ptr<Node> TopK(const std::string& name = "");
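Assuming ONNX-style TopK semantics for the axis/largest/sorted/k attributes, a worked example of the two outputs:

    // x = {1, 4, 2, 9}, k = 2, axis = -1, largest = true, sorted = true
    // -> "values" = {9, 4}, "indices" = {3, 1} (positions of 9 and 4 in x)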
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 387189622..978234616 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -23,30 +23,7 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-
-namespace Aidge {
-/**
- * @brief implementation of the operator Transpose.
- * @note Since this operator implementation is agnostic to the backend it is
- * located here instead of in aidge_backend.
- */
-class TransposeImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for TransposeImpl.
-     * @param[in] op The Operator instance.
-     * @param[in] backend The backend name (optional).
-     */
-    TransposeImpl(const Operator& op, const std::string& backend = "")
-        : OperatorImpl(op, backend)
-    {}
-
-    /**
-     * @brief Perform the forward operation for the transpose.
-     */
-    void forward() override;
-};
-} // namespace Aidge
+#include "aidge/backend/generic/operator/TransposeImpl.hpp"
 
 #define LIST_TRANSPOSE_ATTR(X) \
     X(OutputDimsOrder, "output_dims_order", std::vector<DimSize_t>)
@@ -66,15 +43,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op,
-                                   std::string,
-                                   std::function<std::shared_ptr<OperatorImpl>(const Transpose_Op&)>> {
+class Transpose_Op : public OperatorTensorWithImpl<Transpose_Op, TransposeImpl> {
 public:
-    /**
-     * @brief Static type string for the Transpose operator.
-     */
-    static const std::string Type;
+    static constexpr const char* const Type = "Transpose";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @enum Attr
@@ -113,12 +86,6 @@ public:
      */
     Transpose_Op(const Transpose_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute the output dimensions during the forward pass.
      * @param allowDataDependency Whether to allow data-dependent dimensions.
@@ -126,19 +93,6 @@ public:
      */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    /**
-     * @brief Set the backend for the Transpose operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the available backends for the Transpose operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -154,22 +108,6 @@ public:
     inline std::vector<DimSize_t>& outputDimsOrder() const noexcept {
         return mAttributes->getAttr<Attr::OutputDimsOrder>();
     }
-
-    /**
-     * @brief Get the input tensor names for the Transpose operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names for the Transpose operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index 9dbc84611..5bac27b9b 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -26,6 +26,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/UnfoldImpl.hpp"
 
 
 #define LIST_UNFOLD_ATTR(X)  \
@@ -45,6 +46,9 @@ namespace Aidge {
 enum class UnfoldAttr {
     GENERATE_LIST_ATTR_ENUM(LIST_UNFOLD_ATTR)
 };
+
+template <DimIdx_t DIM> struct Unfold_Op_Type {};
+template <> struct Unfold_Op_Type<2> { static constexpr const char* const value = "Unfold2D"; };
 }  // namespace Aidge
 
 namespace {
@@ -74,20 +78,21 @@ namespace Aidge {
  * @see Registrable
  */
 template <DimIdx_t DIM>
-class Unfold_Op : public OperatorTensor,
-                  public Registrable<Unfold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM>&)>> {
-public:
-    /**
-     * @brief Static type string for the Unfold operator.
-     */
-    static const std::string Type;
-
+class Unfold_Op : public OperatorTensorWithImpl<Unfold_Op<DIM>, Unfold_OpImpl<DIM>> {
 private:
     using Attributes_ = StaticAttributes<UnfoldAttr, GENERATE_LIST_ATTR_TYPE(LIST_UNFOLD_ATTR)>;
     template <UnfoldAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    using OperatorTensorWithImpl<Unfold_Op<DIM>, Unfold_OpImpl<DIM>>::getInput;
+    using OperatorTensorWithImpl<Unfold_Op<DIM>, Unfold_OpImpl<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<Unfold_Op<DIM>, Unfold_OpImpl<DIM>>::mOutputs;
+
+    static constexpr const char* const Type = Unfold_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Deleted default constructor.
      */
@@ -111,12 +116,6 @@ public:
      */
     Unfold_Op(const Unfold_Op<DIM>& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute the output dimensions during the forward pass.
      * @param[in] allowDataDependency Whether to allow data-dependent dimensions.
@@ -124,19 +123,6 @@ public:
      */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    /**
-     * @brief Set the backend for the Unfold operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the available backends for the Unfold operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -166,22 +152,6 @@ public:
     inline std::array<DimSize_t, DIM>& kernelDims() const {
         return mAttributes->template getAttr<UnfoldAttr::KernelDims>();
     }
-
-    /**
-     * @brief Get the input tensor names for the Unfold operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names for the Unfold operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
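The using-declarations added to Unfold_Op are not cosmetic: unlike the non-template operators above, Unfold_Op is itself a class template, and unqualified names inherited from a dependent base (here OperatorTensorWithImpl<Unfold_Op<DIM>, Unfold_OpImpl<DIM>>) are invisible to two-phase name lookup. A minimal self-contained illustration of the rule:

    #include <iostream>

    template <typename T>
    struct Base {
        int value() const { return 42; }
    };

    template <typename T>
    struct Derived : Base<T> {
        using Base<T>::value;   // without this (or this->value()), the
                                // unqualified call below fails to compile
        int twice() const { return 2 * value(); }
    };

    int main() {
        Derived<int> d;
        std::cout << d.twice() << '\n';  // prints 84
    }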
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index 0ddc80dd1..d3bf1fa6c 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/UnsqueezeImpl.hpp"
 
 
 #define LIST_UNSQUEEZE_ATTR(X)  \
@@ -62,14 +63,7 @@ namespace Aidge {
  * dims_to_unsqueeze[i] < tensor.nbDim() +
  * dims_to_unsqueeze.size()
  */
-class Unsqueeze_Op
-    : public OperatorTensor,
-      public Registrable<Unsqueeze_Op, std::string,
-                         std::function<std::shared_ptr<OperatorImpl>(const Unsqueeze_Op &)>> {
-
-public:
-  static const std::string Type;
-
+class Unsqueeze_Op : public OperatorTensorWithImpl<Unsqueeze_Op, Unsqueeze_OpImpl> {
 private:
   using Attributes_ = StaticAttributes<UnsqueezeAttr, GENERATE_LIST_ATTR_TYPE(LIST_UNSQUEEZE_ATTR)>;
   template <UnsqueezeAttr e>
@@ -77,6 +71,10 @@ private:
   const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+  static constexpr const char* const Type = "Unsqueeze";
+  static constexpr const char* const InputsName[] = {"data_input", "axes_to_unsqueeze"};
+  static constexpr const char* const OutputsName[] = {"unsqueezed"};
+
   Unsqueeze_Op() = delete;
 
   /**
@@ -93,14 +91,6 @@ public:
    */
   Unsqueeze_Op(const Unsqueeze_Op &op);
 
-  /**
-   * @brief Clone the operator using its copy-constructor.
-   * @see Operator::MatMul_Op
-   */
-  std::shared_ptr<Operator> clone() const override final {
-    return std::make_shared<Unsqueeze_Op>(*this);
-  }
-
   /**
    * @brief Compute dimensions for the output Tensor
    */
@@ -108,10 +98,6 @@ public:
 
   bool dimsForwarded() const override final;
 
-  void setBackend(const std::string &name,
-                  DeviceIdx_t device = 0) override final;
-  std::set<std::string> getAvailableBackends() const override;
-
   inline std::shared_ptr<Attributes> attributes() const override {
     return mAttributes;
   }
@@ -124,13 +110,6 @@ public:
   inline std::vector<int8_t> &axes() const noexcept {
     return mAttributes->template getAttr<UnsqueezeAttr::Axes>();
   }
-
-  static const std::vector<std::string> getInputsName() {
-    return {"data_input", "axes_to_unsqueeze"};
-  }
-  static const std::vector<std::string> getOutputsName() {
-    return {"unsqueezed"};
-  }
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
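Assuming ONNX-style Unsqueeze semantics, where each axis indexes a position in the output tensor (which is what the constraint dims_to_unsqueeze[i] < tensor.nbDim() + dims_to_unsqueeze.size() quoted above implies):

    // Input of shape [3, 4], axes = {0, 2}
    // -> "unsqueezed" has shape [1, 3, 1, 4]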
diff --git a/include/aidge/operator/WeightInterleaving.hpp b/include/aidge/operator/WeightInterleaving.hpp
index a8f8c3d74..e7eae855b 100644
--- a/include/aidge/operator/WeightInterleaving.hpp
+++ b/include/aidge/operator/WeightInterleaving.hpp
@@ -39,43 +39,16 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class WeightInterleaving_Op :
-    public OperatorTensor,
-    public Registrable<WeightInterleaving_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const WeightInterleaving_Op&)>>
-{
+class WeightInterleaving_Op : public OperatorTensorWithImpl<WeightInterleaving_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "WeightInterleaving";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    WeightInterleaving_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op WeightInterleaving_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    WeightInterleaving_Op(const WeightInterleaving_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::WeightInterleaving_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
+    WeightInterleaving_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
-
     /**
      * @brief Calculates the required size for the 8-bits`compactData` vector.
      *
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
deleted file mode 100644
index f0b5d13e1..000000000
--- a/python_binding/operator/pybind_Scaling.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2024 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <pybind11/pybind11.h>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/operator/Scaling.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
-
-namespace py = pybind11;
-
-namespace Aidge {
-
-void init_Scaling(py::module& m) {
-    py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, OperatorTensor>(
-        m, "ScalingOp", py::multiple_inheritance(),
-        R"mydelimiter(
-        Initialize a Scaling operator for element-wise tensor scaling.
-
-        This operator scales tensor elements by a specified scaling factor, 
-        optionally constraining the output to a specified bit-width and signedness.
-
-        :param scaling_factor: The scaling factor to apply to tensor elements.
-        :type scaling_factor: float
-        :param nb_bits: The number of bits for quantization of the output. Must be a positive integer.
-        :type nb_bits: int
-        :param is_output_unsigned: Specifies whether the output should be unsigned (True) or signed (False).
-        :type is_output_unsigned: bool
-        )mydelimiter")
-        .def(py::init<float, size_t, bool>(),
-             py::arg("scaling_factor"),
-             py::arg("nb_bits"),
-             py::arg("is_output_unsigned"))
-        .def_static("get_inputs_name", &Scaling_Op::getInputsName)
-        .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
-
-		.def_static("attributes_name", []() {
-            return std::vector<std::string>(std::begin(EnumStrings<ScalingAttr>::data), std::end(EnumStrings<ScalingAttr>::data));
-		})
-        .def_readonly_static("Type", &Scaling_Op::Type);
-
-    declare_registrable<Scaling_Op>(m, "ScalingOp");
-
-    m.def("Scaling", &Scaling,
-          py::arg("scaling_factor") = 1.0f,
-          py::arg("nb_bits") = 8,
-          py::arg("is_output_unsigned") = true,
-          py::arg("name") = "",
-          R"mydelimiter(
-          Initialize a node containing a Scaling operator to scale tensor elements.
-
-          This operator applies a scaling factor to each element of the input tensor. The result 
-          can optionally be quantized to a specific bit-width and constrained to unsigned or signed output.
-
-          :param scaling_factor: The factor by which to scale the tensor elements. Default is 1.0.
-          :type scaling_factor: float
-          :param nb_bits: The number of bits for quantized output. Default is 8.
-          :type nb_bits: int
-          :param is_output_unsigned: Indicates whether the output tensor values should be unsigned. Default is True.
-          :type is_output_unsigned: bool
-          :param name: The name of the node (optional).
-          :type name: str
-          :return: A node containing the Scaling operator.
-          :rtype: :py:class:`ScalingOp`
-          )mydelimiter");
-}
-
-}  // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index f151aaae1..c4534d3ee 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -87,7 +87,6 @@ void init_ReduceSum(py::module&);
 void init_Reshape(py::module&);
 void init_Resize(py::module&);
 void init_Round(py::module&);
-void init_Scaling(py::module&);
 void init_Select(py::module&);
 void init_Shape(py::module&);
 void init_Sigmoid(py::module&);
@@ -198,7 +197,6 @@ void init_Aidge(py::module& m) {
     init_Reshape(m);
     init_Resize(m);
     init_Round(m);
-    init_Scaling(m);
     init_Select(m);
     init_Shape(m);
     init_Sigmoid(m);
diff --git a/src/backend/generic/operator/TransposeImpl.cpp b/src/backend/generic/operator/TransposeImpl.cpp
new file mode 100644
index 000000000..78b868810
--- /dev/null
+++ b/src/backend/generic/operator/TransposeImpl.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/TransposeImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Transpose.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+void TransposeImpl::forward() {
+    const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
+    op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.outputDimsOrder());
+}
+
+} // namespace Aidge
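
Note on the new generic TransposeImpl: forward() delegates entirely to
Tensor::copyTranspose(), which copies the input under the axis permutation
returned by outputDimsOrder(). As a standalone illustration of the convention
this assumes (not Aidge code; permuteDims is a hypothetical helper where
order[i] names the input axis that becomes output axis i):

    #include <cstddef>
    #include <vector>

    std::vector<std::size_t> permuteDims(const std::vector<std::size_t>& inDims,
                                         const std::vector<std::size_t>& order) {
        std::vector<std::size_t> outDims(order.size());
        for (std::size_t i = 0; i < order.size(); ++i) {
            outDims[i] = inDims[order[i]];  // e.g. {2,3,4} with order {1,0,2} -> {3,2,4}
        }
        return outDims;
    }
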
diff --git a/src/operator/Abs.cpp b/src/operator/Abs.cpp
index a5d9a7d30..6e5c5db42 100644
--- a/src/operator/Abs.cpp
+++ b/src/operator/Abs.cpp
@@ -24,6 +24,8 @@ constexpr const char* const Aidge::Abs_Op::Type;
 constexpr const char* const Aidge::Abs_Op::InputsName[];
 constexpr const char* const Aidge::Abs_Op::OutputsName[];
 
+////////////////////////////////////////////////////////////////////////////////
+
 std::shared_ptr<Aidge::Node> Aidge::Abs(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Abs_Op>(), name);
 }
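
Note: the seemingly empty "constexpr const char* const ...;" statements above
are not dead code. Until C++17, a static constexpr data member that is
odr-used (its address taken, or bound to a reference such as a logging
argument) still needs a namespace-scope definition; the initializer stays in
the class. Minimal illustration with a hypothetical type:

    struct Example_Op {
        static constexpr const char* const Type = "Abs";  // in-class initializer
    };
    // Out-of-line definition: required before C++17, redundant but valid after.
    constexpr const char* const Example_Op::Type;
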
diff --git a/src/operator/Atan.cpp b/src/operator/Atan.cpp
index 0467745ba..457f724c4 100644
--- a/src/operator/Atan.cpp
+++ b/src/operator/Atan.cpp
@@ -24,7 +24,7 @@ constexpr const char* const Aidge::Atan_Op::OutputsName[];
 
 Aidge::Atan_Op::Atan_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
-///////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Atan(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Atan_Op>(), name);
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 1b8f815a8..9d70a3e98 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -126,7 +126,7 @@ template class Aidge::AvgPooling_Op<2>;
 template class Aidge::AvgPooling_Op<3>;
 template class Aidge::AvgPooling_Op<4>;
 
-////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::AvgPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
@@ -137,6 +137,7 @@ std::shared_ptr<Aidge::Node> Aidge::AvgPooling(const std::array<Aidge::DimSize_t
     AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", AvgPooling_Op<DIM>::Type);
     return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilations, ceil_mode), name);
 }
+
 template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
 template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
 template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&, const std::array<Aidge::DimSize_t, 3>&, bool);
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index c579dac81..e86b27209 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -70,6 +70,8 @@ template class Aidge::BatchNorm_Op<2>;
 template class Aidge::BatchNorm_Op<3>;
 template class Aidge::BatchNorm_Op<4>;
 
+////////////////////////////////////////////////////////////////////////////////
+
 template <Aidge::DimSize_t DIM>
 inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const Aidge::DimSize_t nbFeatures,
                                        const float epsilon,
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index bcb8855dc..518a2dc24 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -24,8 +24,6 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-
 constexpr const char* const Aidge::Cast_Op::Type;
 constexpr const char* const Aidge::Cast_Op::InputsName[];
 constexpr const char* const Aidge::Cast_Op::OutputsName[];
@@ -38,7 +36,7 @@ Aidge::Cast_Op::Cast_Op(const DataType targetType)
     mOutputs[0]->setDataType(targetType);
 }
 
-Cast_Op::Cast_Op(const Cast_Op& op)
+Aidge::Cast_Op::Cast_Op(const Cast_Op& op)
     : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {}
@@ -56,8 +54,6 @@ void Aidge::Cast_Op::setDataType(const DataType& dataType) const {
 
 ////////////////////////////////////////////////////////////////////////////////
 
-std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name) {
+std::shared_ptr<Aidge::Node> Aidge::Cast(const DataType targetType, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
 }
-
-} // namespace Aidge
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index b7282aa62..19cb13429 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -77,7 +77,7 @@ bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
     return true;
 }
 
-/////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Concat(const Aidge::IOIndex_t nbIn, const std::int32_t axis, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
diff --git a/src/operator/ConstantOfShape.cpp b/src/operator/ConstantOfShape.cpp
index e07e4b0ed..5210d421c 100644
--- a/src/operator/ConstantOfShape.cpp
+++ b/src/operator/ConstantOfShape.cpp
@@ -24,22 +24,20 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
+constexpr const char* const Aidge::ConstantOfShape_Op::Type;
+constexpr const char* const Aidge::ConstantOfShape_Op::InputsName[];
+constexpr const char* const Aidge::ConstantOfShape_Op::OutputsName[];
 
-constexpr const char* const ConstantOfShape_Op::Type;
-constexpr const char* const ConstantOfShape_Op::InputsName[];
-constexpr const char* const ConstantOfShape_Op::OutputsName[];
-
-ConstantOfShape_Op::ConstantOfShape_Op(const Tensor &value)
+Aidge::ConstantOfShape_Op::ConstantOfShape_Op(const Tensor &value)
     : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
       mAttributes(std::make_shared<Attributes_>(
         attr<ConstantOfShapeAttr::Value>(value))) {}
 
-ConstantOfShape_Op::ConstantOfShape_Op(const ConstantOfShape_Op &op)
+Aidge::ConstantOfShape_Op::ConstantOfShape_Op(const ConstantOfShape_Op &op)
     : OperatorTensorWithImpl(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {}
 
-bool ConstantOfShape_Op::forwardDims(bool allowDataDependency) {
+bool Aidge::ConstantOfShape_Op::forwardDims(bool allowDataDependency) {
   if (!inputsAssociated()) {
     return false;
   }
@@ -68,15 +66,15 @@ bool ConstantOfShape_Op::forwardDims(bool allowDataDependency) {
   return true;
 }
 
-void ConstantOfShape_Op::setBackend(const std::string &name,
+void Aidge::ConstantOfShape_Op::setBackend(const std::string &name,
                                        Aidge::DeviceIdx_t device) {
   OperatorTensorWithImpl::setBackend(name, device);
   value().setBackend(name,device);
 }
 
-std::shared_ptr<Node> ConstantOfShape(const Tensor value, const std::string &name) {
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ConstantOfShape(const Tensor value, const std::string &name) {
   return std::make_shared<Node>(std::make_shared<ConstantOfShape_Op>(value),
                                 name);
 }
-
-} // namespace Aidge
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index e7fba24c0..b887e32b7 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -180,7 +180,7 @@ Aidge::DimSize_t Aidge::Conv_Op<DIM>::outChannels() const {
 template class Aidge::Conv_Op<1>;
 template class Aidge::Conv_Op<2>;
 
-/////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::Conv(Aidge::DimSize_t inChannels,
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index 0a8480ace..ccec6e6f2 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -24,20 +24,15 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise" + std::to_string(DIM) + "D";
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::ConvDepthWise_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::ConvDepthWise_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::ConvDepthWise_Op<DIM>::OutputsName[];
 
 template <Aidge::DimIdx_t DIM>
 Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -138,34 +133,10 @@ Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-
-    // By default, automatically set backend for weight and bias inputs
-    if (getInput(1)) {
-        getInput(1)->setBackend(name, device);
-    }
-    else {
-        Log::notice("ConvDepthWise_Op::setBackend(): could not set backend for weight input, because input is not connected");
-    }
-
-    if (getInput(2)) {
-        // Bias is optional
-        getInput(2)->setBackend(name, device);
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::ConvDepthWise_Op<DIM>::getAvailableBackends() const {
-    return Registrar<ConvDepthWise_Op<DIM>>::getKeys();
-}
-
 template class Aidge::ConvDepthWise_Op<1>;
 template class Aidge::ConvDepthWise_Op<2>;
 
-////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise(const Aidge::DimSize_t nbChannels,
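
Note: the setBackend()/getAvailableBackends() bodies deleted from
ConvDepthWise (and from most operators below) were identical up to the
operator type. The patch relies on OperatorTensorWithImpl to provide them
once; its definition is not shown in this section, but judging from the
deleted code it plausibly reduces to a sketch like this (assumption, not the
actual base-class source):

    template <class OP>
    void setBackendSketch(OP& op, const std::string& name, DeviceIdx_t device) {
        SET_IMPL_MACRO(OP, op, name);               // pick the registered backend impl
        op.getOutput(0)->setBackend(name, device);  // propagate to the output tensor
    }

    template <class OP>
    std::set<std::string> availableBackendsSketch() {
        return Registrar<OP>::getKeys();            // all backends registered for OP
    }
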
diff --git a/src/operator/ConvTranspose.cpp b/src/operator/ConvTranspose.cpp
index a048f4049..1a71695e9 100644
--- a/src/operator/ConvTranspose.cpp
+++ b/src/operator/ConvTranspose.cpp
@@ -26,19 +26,15 @@
 
 namespace Aidge {
 
-template <DimIdx_t DIM>
-const std::string ConvTranspose_Op<DIM>::Type =
-    "ConvTranspose" + std::to_string(DIM) + "D";
+template <DimIdx_t DIM> constexpr const char* const ConvTranspose_Op<DIM>::Type;
+template <DimIdx_t DIM> constexpr const char* const ConvTranspose_Op<DIM>::InputsName[];
+template <DimIdx_t DIM> constexpr const char* const ConvTranspose_Op<DIM>::OutputsName[];
 
 template <DimIdx_t DIM>
 ConvTranspose_Op<DIM>::ConvTranspose_Op(const ConvTranspose_Op<DIM> &op)
-    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
-    if (op.mImpl) {
-        SET_IMPL_MACRO(ConvTranspose_Op<DIM>, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
+    : OperatorTensorWithImpl<ConvTranspose_Op<DIM>>(op),
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+{}
 
 template <DimIdx_t DIM>
 bool ConvTranspose_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -195,35 +191,10 @@ ConvTranspose_Op<DIM>::computeReceptiveField(
     return res;
 }
 
-template <DimIdx_t DIM>
-void ConvTranspose_Op<DIM>::setBackend(const std::string &name,
-                                       DeviceIdx_t device) {
-    SET_IMPL_MACRO(ConvTranspose_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-
-    // By default, automatically set backend for weight and bias inputs
-    if (getInput(1)) {
-        getInput(1)->setBackend(name, device);
-    } else {
-        Log::notice("ConvTranspose_Op::setBackend(): could not set backend "
-                    "for weight input, because input is not connected");
-    }
-
-    if (getInput(2)) {
-        // Bias is optional
-        getInput(2)->setBackend(name, device);
-    }
-}
-
-template <DimIdx_t DIM>
-std::set<std::string> ConvTranspose_Op<DIM>::getAvailableBackends() const {
-    return Registrar<ConvTranspose_Op<DIM>>::getKeys();
-}
-
 template class ConvTranspose_Op<1>;
 template class ConvTranspose_Op<2>;
 
-/////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<DimIdx_t, 1>::size_type DIM>
 std::shared_ptr<Node>
diff --git a/src/operator/CryptoHash.cpp b/src/operator/CryptoHash.cpp
index 530e94766..a6dc6513a 100644
--- a/src/operator/CryptoHash.cpp
+++ b/src/operator/CryptoHash.cpp
@@ -20,44 +20,27 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::CryptoHash_Op::Type = "CryptoHash";
+constexpr const char* const Aidge::CryptoHash_Op::Type;
+constexpr const char* const Aidge::CryptoHash_Op::InputsName[];
+constexpr const char* const Aidge::CryptoHash_Op::OutputsName[];
 
 Aidge::CryptoHash_Op::CryptoHash_Op()
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
         attr<CryptoHashAttr::CryptoHashFunction>(CryptoHashFunction::SHA256)))
 {}
 
 Aidge::CryptoHash_Op::CryptoHash_Op(const Aidge::CryptoHash_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(CryptoHash_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::CryptoHash_Op::clone() const {
-    return std::make_shared<CryptoHash_Op>(*this);
-}
+{}
 
 bool Aidge::CryptoHash_Op::forwardDims(bool /*allowDataDependency*/) {
     mOutputs[0]->resize({256 / getDataTypeBitWidth(mOutputs[0]->dataType())});
     return true;
 }
 
-void Aidge::CryptoHash_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(CryptoHash_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::CryptoHash_Op::getAvailableBackends() const {
-    return Registrar<CryptoHash_Op>::getKeys();
-}
-
-///////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::CryptoHash(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<CryptoHash_Op>(), name);
diff --git a/src/operator/DepthToSpace.cpp b/src/operator/DepthToSpace.cpp
index 9ab7034e7..cbaaaca6d 100644
--- a/src/operator/DepthToSpace.cpp
+++ b/src/operator/DepthToSpace.cpp
@@ -21,11 +21,12 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::DepthToSpace_Op::Type = "DepthToSpace";
+constexpr const char* const Aidge::DepthToSpace_Op::Type;
+constexpr const char* const Aidge::DepthToSpace_Op::InputsName[];
+constexpr const char* const Aidge::DepthToSpace_Op::OutputsName[];
 
 Aidge::DepthToSpace_Op::DepthToSpace_Op(const std::uint32_t blockSize, const Aidge::DepthToSpace_Op::Mode mode)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<DepthToSpaceAttr::BlockSize>(blockSize),
         attr<DepthToSpaceAttr::Mode>(mode)))
@@ -34,19 +35,9 @@ Aidge::DepthToSpace_Op::DepthToSpace_Op(const std::uint32_t blockSize, const Aid
 }
 
 Aidge::DepthToSpace_Op::DepthToSpace_Op(const Aidge::DepthToSpace_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(DepthToSpace_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::DepthToSpace_Op::clone() const {
-    return std::make_shared<DepthToSpace_Op>(*this);
-}
+{}
 
 bool Aidge::DepthToSpace_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -68,24 +59,10 @@ bool Aidge::DepthToSpace_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::DepthToSpace_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<DepthToSpace_Op>::exists({name})) {
-        SET_IMPL_MACRO(DepthToSpace_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<DepthToSpace_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::DepthToSpace_Op::getAvailableBackends() const {
-    return Registrar<DepthToSpace_Op>::getKeys();
-}
-
-//////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::DepthToSpace(const std::uint32_t blockSize,
                                     const Aidge::DepthToSpace_Op::Mode mode,
                                     const std::string& name) {
     return std::make_shared<Node>(std::make_shared<DepthToSpace_Op>(blockSize, mode), name);
-}
\ No newline at end of file
+}
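
Note: DepthToSpace (like Flatten, Gather and Memorize below) used to fall
back to a built-in generic implementation when no backend was registered
under the requested name. The registrar-or-fallback choice deleted here read
as follows, and is presumably preserved inside the shared base:

    if (Registrar<DepthToSpace_Op>::exists({name})) {
        SET_IMPL_MACRO(DepthToSpace_Op, *this, name);          // real backend kernel
    } else {
        mImpl = std::make_shared<DepthToSpace_OpImpl>(*this);  // generic fallback
    }
    mOutputs[0]->setBackend(name, device);
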
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 96eea3df9..81fe0e4ba 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -20,7 +20,9 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::Div_Op::Type = "Div";
+constexpr const char* const Aidge::Div_Op::Type;
+constexpr const char* const Aidge::Div_Op::InputsName[];
+constexpr const char* const Aidge::Div_Op::OutputsName[];
 
 bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -51,18 +53,8 @@ bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-
-void Aidge::Div_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Div_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Div_Op::getAvailableBackends() const {
-    return Registrar<Div_Op>::getKeys();
-}
-
-///////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Div(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Div_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Dropout.cpp b/src/operator/Dropout.cpp
index 0063a446e..86eba16a0 100644
--- a/src/operator/Dropout.cpp
+++ b/src/operator/Dropout.cpp
@@ -21,10 +21,12 @@
 
 namespace Aidge {
 
-const std::string Dropout_Op::Type = "Dropout";
+constexpr const char* const Dropout_Op::Type;
+constexpr const char* const Dropout_Op::InputsName[];
+constexpr const char* const Dropout_Op::OutputsName[];
 
 Dropout_Op::Dropout_Op(float probability)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
       mAttributes(std::make_shared<Attributes_>(
           attr<DropoutAttr::Probability>(probability)))
 {
@@ -32,20 +34,9 @@ Dropout_Op::Dropout_Op(float probability)
 }
 
 Dropout_Op::Dropout_Op(const Dropout_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    // Copy constructor implementation
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Dropout_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Operator> Dropout_Op::clone() const {
-    return std::make_shared<Dropout_Op>(*this);
-}
+{}
 
 bool Dropout_Op::forwardDims(bool allowDataDependency) {
     if (!inputsAssociated())
@@ -73,20 +64,11 @@ bool Dropout_Op::forwardDims(bool allowDataDependency) {
     return true;
 }
 
-void Dropout_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(Dropout_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Dropout_Op::getAvailableBackends() const {
-    return Registrar<Dropout_Op>::getKeys();
-}
-
 void Dropout_Op::checkProbability() const {
     AIDGE_ASSERT(probability() >= 0.0f && probability() < 1.0f, "'Probability' attribute must be set in [0.0, 1.0) interval.");
 }
 
-////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Node> Dropout(float probability,
                               const std::string& name) {
diff --git a/src/operator/Equal.cpp b/src/operator/Equal.cpp
index cc0fcd984..1e37d9c83 100644
--- a/src/operator/Equal.cpp
+++ b/src/operator/Equal.cpp
@@ -21,7 +21,9 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Equal_Op::Type = "Equal";
+constexpr const char* const Aidge::Equal_Op::Type;
+constexpr const char* const Aidge::Equal_Op::InputsName[];
+constexpr const char* const Aidge::Equal_Op::OutputsName[];
 
 bool Aidge::Equal_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -51,12 +53,3 @@ bool Aidge::Equal_Op::forwardDims(bool /*allowDataDependency*/) {
 
     return false;
 }
-
-void Aidge::Equal_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Equal_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Equal_Op::getAvailableBackends() const {
-    return Registrar<Equal_Op>::getKeys();
-}
diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp
index bd5f76f8a..a99253648 100644
--- a/src/operator/Erf.cpp
+++ b/src/operator/Erf.cpp
@@ -17,33 +17,12 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Erf_Op::Type = "Erf";
+constexpr const char* const Aidge::Erf_Op::Type;
+constexpr const char* const Aidge::Erf_Op::InputsName[];
+constexpr const char* const Aidge::Erf_Op::OutputsName[];
 
-Aidge::Erf_Op::Erf_Op(const Aidge::Erf_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Erf_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Erf_Op::clone() const {
-    return std::make_shared<Erf_Op>(*this);
-}
-
-void Aidge::Erf_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Erf_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Erf_Op::getAvailableBackends() const {
-    return Registrar<Erf_Op>::getKeys();
-}
-
-/////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Erf(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Erf_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Expand.cpp b/src/operator/Expand.cpp
index 969dd6e59..ad7ef3016 100644
--- a/src/operator/Expand.cpp
+++ b/src/operator/Expand.cpp
@@ -21,19 +21,9 @@
 
 namespace Aidge {
 
-const std::string Expand_Op::Type = "Expand";
-
-Expand_Op::Expand_Op(const Expand_Op &op) : OperatorTensor(op) {
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Expand_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Expand_Op::clone() const {
-    return std::make_shared<Expand_Op>(*this);
-}
+constexpr const char* const Expand_Op::Type;
+constexpr const char* const Expand_Op::InputsName[];
+constexpr const char* const Expand_Op::OutputsName[];
 
 bool Expand_Op::forwardDims(bool allowDataDependency) {
     /////////////////
@@ -92,15 +82,7 @@ bool Expand_Op::forwardDims(bool allowDataDependency) {
     return true;
 }
 
-void Expand_Op::setBackend(const std::string &name,
-                           Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Expand_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Expand_Op::getAvailableBackends() const {
-    return Registrar<Expand_Op>::getKeys();
-}
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Expand(const std::string &name) {
     return std::make_shared<Node>(std::make_shared<Expand_Op>(), name);
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 07208b522..4639fd69a 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -21,11 +21,9 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::FC_Op::Type = "FC";
-
-std::shared_ptr<Aidge::Operator> Aidge::FC_Op::clone() const {
-    return std::make_shared<FC_Op>(*this);
-}
+constexpr const char* const Aidge::FC_Op::Type;
+constexpr const char* const Aidge::FC_Op::InputsName[];
+constexpr const char* const Aidge::FC_Op::OutputsName[];
 
 void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(inputIdx < 3, "Operators {} supports only {} inputs", type(), nbInputs());
@@ -86,27 +84,7 @@ bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(FC_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-
-    // By default, automatically set backend for weight and bias inputs
-    if (getInput(1)) {
-        getInput(1)->setBackend(name, device);
-    }
-    else {
-        Log::notice("FC_Op::setBackend(): could not set backend for weight input, because input is not connected");
-    }
-
-    if (getInput(2)) {
-        // Bias is optional
-        getInput(2)->setBackend(name, device);
-    }
-}
-
-std::set<std::string> Aidge::FC_Op::getAvailableBackends() const {
-    return Registrar<FC_Op>::getKeys();
-}
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::FC(const Aidge::DimSize_t inChannels,
                                        const Aidge::DimSize_t outChannels,
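
Note: FC, ConvDepthWise and ConvTranspose also lose their hand-written
propagation of the backend to the weight and bias inputs. The deleted logic
was, in substance, the block below; presumably the base now applies an
equivalent rule to parameter inputs:

    if (getInput(1)) {
        getInput(1)->setBackend(name, device);  // weight
    } else {
        Log::notice("setBackend(): weight input not connected");
    }
    if (getInput(2)) {
        getInput(2)->setBackend(name, device);  // bias is optional
    }
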
diff --git a/src/operator/Flatten.cpp b/src/operator/Flatten.cpp
index 4d4b6385b..f0107e857 100644
--- a/src/operator/Flatten.cpp
+++ b/src/operator/Flatten.cpp
@@ -23,32 +23,20 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::Flatten_Op::Type = "Flatten";
+constexpr const char* const Aidge::Flatten_Op::Type;
+constexpr const char* const Aidge::Flatten_Op::InputsName[];
+constexpr const char* const Aidge::Flatten_Op::OutputsName[];
 
 Aidge::Flatten_Op::Flatten_Op(const std::int64_t axis)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<FlattenAttr::Axis>(axis)))
-{
-    mImpl = std::make_shared<Flatten_OpImpl>(*this);
-}
+{}
 
 Aidge::Flatten_Op::Flatten_Op(const Aidge::Flatten_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Flatten_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Flatten_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Flatten_Op::clone() const {
-    return std::make_shared<Flatten_Op>(*this);
-}
+{}
 
 bool Aidge::Flatten_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -62,24 +50,10 @@ bool Aidge::Flatten_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::Flatten_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Flatten_Op>::exists({name})){
-        SET_IMPL_MACRO(Flatten_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Flatten_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Flatten_Op::getAvailableBackends() const {
-    return Registrar<Flatten_Op>::getKeys();
-}
-
-//////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Flatten(std::int64_t axis,
                             const std::string &name)
 {
     return std::make_shared<Node>(std::make_shared<Flatten_Op>(axis), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index 50a474cd3..dce0915b8 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -23,26 +23,15 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Fold_Op<DIM>::Type = "Fold" + std::to_string(DIM) + "D";
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Fold_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Fold_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Fold_Op<DIM>::OutputsName[];
 
 template <Aidge::DimIdx_t DIM>
 Aidge::Fold_Op<DIM>::Fold_Op(const Aidge::Fold_Op<DIM> &op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl<Fold_Op<DIM>>(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
-    }
-    else {
-        mImpl = nullptr;
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::Fold_Op<DIM>::clone() const {
-    return std::make_shared<Fold_Op<DIM>>(*this);
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Fold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -76,20 +65,9 @@ bool Aidge::Fold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::Fold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Fold_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::Fold_Op<DIM>::getAvailableBackends() const {
-    return Registrar<Fold_Op<DIM>>::getKeys();
-}
-
 template class Aidge::Fold_Op<2>;
 
-///////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::Fold(const std::array<Aidge::DimSize_t, DIM> &outputDims,
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 410403adc..e2ad84f2d 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -21,35 +21,24 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Gather_Op::Type = "Gather";
-
+constexpr const char* const Aidge::Gather_Op::Type;
+constexpr const char* const Aidge::Gather_Op::InputsName[];
+constexpr const char* const Aidge::Gather_Op::OutputsName[];
 
 Aidge::Gather_Op::Gather_Op(std::int8_t axis,
               const std::vector<int64_t>& indices,
               const std::vector<Aidge::DimSize_t>& gatheredShape)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
     mAttributes(std::make_shared<Attributes_>(
         attr<Attr::Axis>(axis),
         attr<Attr::Indices>(indices),
         attr<Attr::GatheredShape>(gatheredShape)))
-{
-    mImpl = std::make_shared<Gather_OpImpl>(*this);
-}
+{}
 
 Aidge::Gather_Op::Gather_Op(const Aidge::Gather_Op& op)
-    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Gather_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Gather_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Gather_Op::clone() const {
-    return std::make_shared<Gather_Op>(*this);
-}
+    : OperatorTensorWithImpl(op),
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+{}
 
 bool Aidge::Gather_Op::dimsForwarded() const {
     if (getInput(1) && !getInput(1)->undefined()) {
@@ -105,21 +94,7 @@ bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
     return false;
 }
 
-void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Gather_Op>::exists({name})) {
-        SET_IMPL_MACRO(Gather_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Gather_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Gather_Op::getAvailableBackends() const {
-    return Registrar<Gather_Op>::getKeys();
-}
-
-/////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Gather(std::int8_t axis,
                                         const std::vector<int64_t>& indices,
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index e0f7cf34a..867fc5775 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -103,7 +103,7 @@ void Aidge::GenericOperator_Op::setBackend(const std::string & name, DeviceIdx_t
     }
 }
 
-///////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
                                             const std::vector<Aidge::InputCategory>& inputCategory,
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index 57886ec2f..feaae1ea3 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -19,21 +19,9 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling";
-
-Aidge::GlobalAveragePooling_Op::GlobalAveragePooling_Op(const Aidge::GlobalAveragePooling_Op &op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::GlobalAveragePooling_Op::clone() const {
-    return std::make_shared<GlobalAveragePooling_Op>(*this);
-}
+constexpr const char* const Aidge::GlobalAveragePooling_Op::Type;
+constexpr const char* const Aidge::GlobalAveragePooling_Op::InputsName[];
+constexpr const char* const Aidge::GlobalAveragePooling_Op::OutputsName[];
 
 bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -54,16 +42,7 @@ bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::GlobalAveragePooling_Op::getAvailableBackends() const {
-    return Registrar<GlobalAveragePooling_Op>::getKeys();
-}
-
-////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::GlobalAveragePooling(const std::string &name) {
   return std::make_shared<Node>(std::make_shared<GlobalAveragePooling_Op>(), name);
diff --git a/src/operator/GridSample.cpp b/src/operator/GridSample.cpp
index e1b8d1442..c3e3bc405 100644
--- a/src/operator/GridSample.cpp
+++ b/src/operator/GridSample.cpp
@@ -21,15 +21,15 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::GridSample_Op::Type = "GridSample";
-
+constexpr const char* const Aidge::GridSample_Op::Type;
+constexpr const char* const Aidge::GridSample_Op::InputsName[];
+constexpr const char* const Aidge::GridSample_Op::OutputsName[];
 
 Aidge::GridSample_Op::GridSample_Op(
     typename Aidge::GridSample_Op::Mode mode,
     typename Aidge::GridSample_Op::PaddingMode paddingMode,
     bool alignCorners)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Param}, 1),
       mAttributes(std::make_shared<Attributes_>(
         attr<GridSampleAttr::Mode>(mode),
         attr<GridSampleAttr::PaddingMode>(paddingMode),
@@ -40,25 +40,12 @@ Aidge::GridSample_Op::GridSample_Op(
 
 
 Aidge::GridSample_Op::GridSample_Op(const Aidge::GridSample_Op& other)
-    : OperatorTensor(other),
+    : OperatorTensorWithImpl(other),
       mAttributes(std::make_shared<Attributes_>(*other.mAttributes))
-{
-    if (other.mImpl) {
-        SET_IMPL_MACRO(GridSample_Op, *this, other.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
+{}
 
 Aidge::GridSample_Op::~GridSample_Op() noexcept = default;
 
-
-std::shared_ptr<Aidge::Operator> Aidge::GridSample_Op::clone() const {
-    return std::make_shared<GridSample_Op>(*this);
-}
-
-
 bool Aidge::GridSample_Op::forwardDims(bool /*allowDataDependency*/) {
     // TODO: adapt for other formats than NCHW
     if (inputsAssociated()) {
@@ -88,20 +75,7 @@ bool Aidge::GridSample_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-
-
-void Aidge::GridSample_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(GridSample_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::GridSample_Op::getAvailableBackends() const {
-    return Registrar<GridSample_Op>::getKeys();
-}
-
-
-////////////////////////////////////////////////
-
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::GridSample(
                         typename Aidge::GridSample_Op::Mode mode,
diff --git a/src/operator/Heaviside.cpp b/src/operator/Heaviside.cpp
index 3c6fe5495..0c68de6fc 100644
--- a/src/operator/Heaviside.cpp
+++ b/src/operator/Heaviside.cpp
@@ -25,34 +25,19 @@ namespace Aidge {
 // ----------------------------------------------------------- Heaviside_Op
 // class
 
-const std::string Heaviside_Op::Type = "Heaviside";
+constexpr const char* const Heaviside_Op::Type;
+constexpr const char* const Heaviside_Op::InputsName[];
+constexpr const char* const Heaviside_Op::OutputsName[];
 
 Heaviside_Op::Heaviside_Op(float value)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
       mAttributes(
           std::make_shared<Attributes_>(attr<Attr::Value>(value))) {}
 
 Heaviside_Op::Heaviside_Op(const Heaviside_Op &op)
-    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Heaviside_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Heaviside_Op::clone() const {
-    return std::make_shared<Heaviside_Op>(*this);
-}
-
-void Heaviside_Op::setBackend(const std::string &name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(Heaviside_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Heaviside_Op::getAvailableBackends() const {
-    return Registrar<Heaviside_Op>::getKeys();
-}
+    : OperatorTensorWithImpl(op),
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+{}
 
 // --------------------------------------------------------------- Free
 // functions
diff --git a/src/operator/ILayerNorm.cpp b/src/operator/ILayerNorm.cpp
index daa7ecf86..788d69733 100644
--- a/src/operator/ILayerNorm.cpp
+++ b/src/operator/ILayerNorm.cpp
@@ -19,7 +19,9 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ILayerNorm_Op::Type = "ILayerNorm";
+constexpr const char* const Aidge::ILayerNorm_Op::Type;
+constexpr const char* const Aidge::ILayerNorm_Op::InputsName[];
+constexpr const char* const Aidge::ILayerNorm_Op::OutputsName[];
 
 void Aidge::ILayerNorm_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(inputIdx < 3, "Operators {} supports only {} inputs", type(), nbInputs());
@@ -42,15 +44,3 @@ bool Aidge::ILayerNorm_Op::forwardDims(bool /*allowDataDependency*/) {
     }
     return false;
 }
-
-
-void Aidge::ILayerNorm_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(ILayerNorm_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-    getInput(1)->setBackend(name, device);
-    getInput(2)->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ILayerNorm_Op::getAvailableBackends() const {
-    return Registrar<ILayerNorm_Op>::getKeys();
-}
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index 25bb5a5b6..322487184 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -15,32 +15,16 @@
 
 #include "aidge/backend/generic/operator/IdentityImpl.hpp"
 
-
-const std::string Aidge::Identity_Op::Type = "Identity";
+constexpr const char* const Aidge::Identity_Op::Type;
+constexpr const char* const Aidge::Identity_Op::InputsName[];
+constexpr const char* const Aidge::Identity_Op::OutputsName[];
 
 Aidge::Identity_Op::Identity_Op()
-    : OperatorTensor(Type, {InputCategory::Data}, 1)
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1)
 {
-    mImpl = std::make_shared<Identity_OpImpl>(*this);
 }
 
-Aidge::Identity_Op::Identity_Op(const Aidge::Identity_Op& op)
-    : OperatorTensor(op)
-{
-    mImpl = std::make_shared<Identity_OpImpl>(*this, op.backend());
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Identity_Op::clone() const {
-    return std::make_shared<Identity_Op>(*this);
-}
-
-void Aidge::Identity_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Identity_Op::getAvailableBackends() const {
-    return Registrar<Identity_Op>::getKeys();
-}
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Identity(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Identity_Op>(), name);
diff --git a/src/operator/LRN.cpp b/src/operator/LRN.cpp
index 36dde6712..09f7f3518 100644
--- a/src/operator/LRN.cpp
+++ b/src/operator/LRN.cpp
@@ -18,10 +18,12 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::LRN_Op::Type = "LRN";
+constexpr const char* const Aidge::LRN_Op::Type;
+constexpr const char* const Aidge::LRN_Op::InputsName[];
+constexpr const char* const Aidge::LRN_Op::OutputsName[];
 
 Aidge::LRN_Op::LRN_Op(std::int32_t size)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
         attr<Attr::Alpha>(0.0001),
         attr<Attr::Beta>(0.75),
@@ -30,31 +32,12 @@ Aidge::LRN_Op::LRN_Op(std::int32_t size)
 {}
 
 Aidge::LRN_Op::LRN_Op(const Aidge::LRN_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(LRN_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::LRN_Op::clone() const {
-    return std::make_shared<LRN_Op>(*this);
-}
-
-void Aidge::LRN_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<LRN_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::LRN_Op::getAvailableBackends() const {
-    return Registrar<LRN_Op>::getKeys();
-}
+{}
 
-////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::LRN(std::int32_t size, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<LRN_Op>(size), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/LeakyReLU.cpp b/src/operator/LeakyReLU.cpp
index b5e1a9d6a..8a943de10 100644
--- a/src/operator/LeakyReLU.cpp
+++ b/src/operator/LeakyReLU.cpp
@@ -16,33 +16,17 @@
 
 #include "aidge/data/Tensor.hpp"
 
-const std::string Aidge::LeakyReLU_Op::Type = "LeakyReLU";
+constexpr const char* const Aidge::LeakyReLU_Op::Type;
+constexpr const char* const Aidge::LeakyReLU_Op::InputsName[];
+constexpr const char* const Aidge::LeakyReLU_Op::OutputsName[];
 
 Aidge::LeakyReLU_Op::LeakyReLU_Op(const Aidge::LeakyReLU_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
-    if (op.mImpl){
-        SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::LeakyReLU_Op::clone() const {
-    return std::make_shared<LeakyReLU_Op>(*this);
-}
-
-void Aidge::LeakyReLU_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::LeakyReLU_Op::getAvailableBackends() const {
-    return Registrar<LeakyReLU_Op>::getKeys();
-}
-
-/////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::LeakyReLU(float negativeSlope, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
diff --git a/src/operator/Ln.cpp b/src/operator/Ln.cpp
index 90ae8d8c7..dac01db0b 100755
--- a/src/operator/Ln.cpp
+++ b/src/operator/Ln.cpp
@@ -17,33 +17,12 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Ln_Op::Type = "Ln";
+constexpr const char* const Aidge::Ln_Op::Type;
+constexpr const char* const Aidge::Ln_Op::InputsName[];
+constexpr const char* const Aidge::Ln_Op::OutputsName[];
 
-Aidge::Ln_Op::Ln_Op(const Aidge::Ln_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Ln_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Ln_Op::clone() const {
-    return std::make_shared<Ln_Op>(*this);
-}
-
-void Aidge::Ln_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    mImpl = Registrar<Ln_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Ln_Op::getAvailableBackends() const {
-    return Registrar<Ln_Op>::getKeys();
-}
-
-/////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Ln(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Ln_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 8fd2aa068..963c23e3f 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -18,21 +18,9 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::MatMul_Op::Type = "MatMul";
-
-Aidge::MatMul_Op::MatMul_Op(const Aidge::MatMul_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(MatMul_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::MatMul_Op::clone() const {
-    return std::make_shared<MatMul_Op>(*this);
-}
+constexpr const char* const Aidge::MatMul_Op::Type;
+constexpr const char* const Aidge::MatMul_Op::InputsName[];
+constexpr const char* const Aidge::MatMul_Op::OutputsName[];
 
 bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated(false)) {
@@ -92,17 +80,8 @@ bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(MatMul_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::MatMul_Op::getAvailableBackends() const {
-    return Registrar<MatMul_Op>::getKeys();
-}
-
-////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::MatMul(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
index 2ee361340..f04a706d9 100644
--- a/src/operator/MaxPooling.cpp
+++ b/src/operator/MaxPooling.cpp
@@ -19,15 +19,16 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling" + std::to_string(DIM) + "D";
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::MaxPooling_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::MaxPooling_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::MaxPooling_Op<DIM>::OutputsName[];
 
 template <Aidge::DimIdx_t DIM>
 Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
                             const std::array<Aidge::DimSize_t, DIM> &stride_dims,
                             const std::array<Aidge::DimSize_t, DIM> &dilations,
                             bool ceil_mode)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl<MaxPooling_Op<DIM>>(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
     attr<MaxPoolingAttr::KernelDims>(kernel_dims),
     attr<MaxPoolingAttr::StrideDims>(stride_dims),
@@ -37,20 +38,9 @@ Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM>
 
 template <Aidge::DimIdx_t DIM>
 Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const Aidge::MaxPooling_Op<DIM>& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl<MaxPooling_Op<DIM>>(op),
     mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::MaxPooling_Op<DIM>::clone() const {
-    return std::make_shared<MaxPooling_Op<DIM>>(*this);
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::MaxPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -83,22 +73,11 @@ bool Aidge::MaxPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::MaxPooling_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::MaxPooling_Op<DIM>::getAvailableBackends() const {
-    return Registrar<MaxPooling_Op<DIM>>::getKeys();
-}
-
 template class Aidge::MaxPooling_Op<1>;
 template class Aidge::MaxPooling_Op<2>;
 template class Aidge::MaxPooling_Op<3>;
 
-///////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::MaxPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index c3ccc12ac..b3828a5a7 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -20,11 +20,12 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::Memorize_Op::Type = "Memorize";
+constexpr const char* const Aidge::Memorize_Op::Type;
+constexpr const char* const Aidge::Memorize_Op::InputsName[];
+constexpr const char* const Aidge::Memorize_Op::OutputsName[];
 
 Aidge::Memorize_Op::Memorize_Op(const std::uint32_t endStep)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 2),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 2),
         mAttributes(std::make_shared<Attributes_>(
                     attr<Attr::ScheduleStep>(0),
                     attr<Attr::ForwardStep>(0),
@@ -36,22 +37,12 @@ Aidge::Memorize_Op::Memorize_Op(const std::uint32_t endStep)
 }
 
 Aidge::Memorize_Op::Memorize_Op(const Aidge::Memorize_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
-    } else {
-        mImpl = std::make_shared<Memorize_OpImpl>(*this);
-    }
     mOutputs[1] = mOutputs[0];
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Memorize_Op::clone() const {
-    return std::make_shared<Memorize_Op>(*this);
-}
-
-
 void Aidge::Memorize_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
     ++scheduleStep();
@@ -87,16 +78,6 @@ bool Aidge::Memorize_Op::dimsForwarded() const {
     return forwarded;
 }
 
-void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Memorize_Op>::exists({name})){
-        SET_IMPL_MACRO(Memorize_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Memorize_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
 void Aidge::Memorize_Op::forward() {
     OperatorTensor::forward();
     ++forwardStep();
@@ -107,11 +88,7 @@ void Aidge::Memorize_Op::backward() {
     OperatorTensor::backward();
 }
 
-std::set<std::string> Aidge::Memorize_Op::getAvailableBackends() const {
-    return Registrar<Memorize_Op>::getKeys();
-}
-
-/////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Memorize(const std::uint32_t endStep, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
diff --git a/src/operator/Mod.cpp b/src/operator/Mod.cpp
index 673c00225..76d21a156 100644
--- a/src/operator/Mod.cpp
+++ b/src/operator/Mod.cpp
@@ -20,28 +20,20 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::Mod_Op::Type = "Mod";
+constexpr const char* const Aidge::Mod_Op::Type;
+constexpr const char* const Aidge::Mod_Op::InputsName[];
+constexpr const char* const Aidge::Mod_Op::OutputsName[];
 
 Aidge::Mod_Op::Mod_Op()
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
         attr<ModAttr::Fmod>(false)))
 {}
 
 Aidge::Mod_Op::Mod_Op(const Aidge::Mod_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Mod_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Mod_Op::clone() const {
-    return std::make_shared<Mod_Op>(*this);
-}
+{}
 
 bool Aidge::Mod_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -72,17 +64,7 @@ bool Aidge::Mod_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-
-void Aidge::Mod_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Mod_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Mod_Op::getAvailableBackends() const {
-    return Registrar<Mod_Op>::getKeys();
-}
-
-///////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Mod(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Mod_Op>(), name);
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index a637f8331..230894a72 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -16,7 +16,9 @@
 #include "aidge/backend/generic/operator/MoveImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 
-const std::string Aidge::Move_Op::Type = "Move";
+constexpr const char* const Aidge::Move_Op::Type;
+constexpr const char* const Aidge::Move_Op::InputsName[];
+constexpr const char* const Aidge::Move_Op::OutputsName[];
 
 Aidge::Move_Op::Move_Op()
     : OperatorTensor(Type, {InputCategory::Data}, 1)
@@ -58,7 +60,7 @@ std::set<std::string> Aidge::Move_Op::getAvailableBackends() const {
     return backendsList;
 }
 
-////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Move(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Move_Op>(), name);
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index 3f163c9d6..1e8e9d2c0 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -21,21 +21,9 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Mul_Op::Type = "Mul";
-
-Aidge::Mul_Op::Mul_Op(const Aidge::Mul_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Mul_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Mul_Op::clone() const {
-    return std::make_shared<Mul_Op>(*this);
-}
+constexpr const char* const Aidge::Mul_Op::Type;
+constexpr const char* const Aidge::Mul_Op::InputsName[];
+constexpr const char* const Aidge::Mul_Op::OutputsName[];
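+// Copy construction, clone(), setBackend() and getAvailableBackends() are
+// now inherited from OperatorTensorWithImpl.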
 
 bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -66,17 +54,8 @@ bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Mul_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Mul_Op::getAvailableBackends() const {
-    return Registrar<Mul_Op>::getKeys();
-}
-
-///////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Mul(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
index a0b5f2df5..d961bf372 100644
--- a/src/operator/Pad.cpp
+++ b/src/operator/Pad.cpp
@@ -19,13 +19,9 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Pad_Op<DIM>::Type = "Pad" + std::to_string(DIM) + "D";
-
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::Pad_Op<DIM>::clone() const {
-    return std::make_shared<Pad_Op<DIM>>(*this);
-}
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Pad_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Pad_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Pad_Op<DIM>::OutputsName[];
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Pad_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -47,17 +43,6 @@ bool Aidge::Pad_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::Pad_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::Pad_Op<DIM>::getAvailableBackends() const {
-    return Registrar<Pad_Op<DIM>>::getKeys();
-}
-
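+// Explicit instantiations for the supported spatial dimensions.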
 template class Aidge::Pad_Op<1>;
 template class Aidge::Pad_Op<2>;
 
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 01b45e6d3..33271fbb9 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -22,31 +22,19 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::Pop_Op::Type = "Pop";
+constexpr const char* const Aidge::Pop_Op::Type;
+constexpr const char* const Aidge::Pop_Op::InputsName[];
+constexpr const char* const Aidge::Pop_Op::OutputsName[];
 
 Aidge::Pop_Op::Pop_Op()
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0), attr<PopAttr::BackwardStep>(0)))
-{
-    mImpl = std::make_shared<Pop_OpImpl>(*this);
-}
+{}
 
 Aidge::Pop_Op::Pop_Op(const Aidge::Pop_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
     mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Pop_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Pop_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Pop_Op::clone() const {
-    return std::make_shared<Pop_Op>(*this);
-}
+{}
 
 bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -65,20 +53,6 @@ void Aidge::Pop_Op::updateConsummerProducer() {
     mAttributes->template getAttr<PopAttr::BackwardStep>() = 0;
 }
 
-void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Pop_Op>::exists({name})){
-        SET_IMPL_MACRO(Pop_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Pop_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Pop_Op::getAvailableBackends() const {
-    return Registrar<Pop_Op>::getKeys();
-}
-
 void Aidge::Pop_Op::forward() {
     OperatorTensor::forward();
     ++mAttributes->template getAttr<PopAttr::ForwardStep>();
@@ -90,7 +64,7 @@ void Aidge::Pop_Op::backward() {
     --mAttributes->template getAttr<PopAttr::BackwardStep>();
 }
 
-///////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Pop(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Pop_Op>(), name);
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index ada71d6cc..17410d33a 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -20,7 +20,9 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::Pow_Op::Type = "Pow";
+constexpr const char* const Aidge::Pow_Op::Type;
+constexpr const char* const Aidge::Pow_Op::InputsName[];
+constexpr const char* const Aidge::Pow_Op::OutputsName[];
 
 bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -51,17 +53,8 @@ bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Pow_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Pow_Op::getAvailableBackends() const {
-    return Registrar<Pow_Op>::getKeys();
-}
-
-////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Pow(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 505192661..db2d6a3ba 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -23,8 +23,8 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::Producer_Op::Type = "Producer";
+constexpr const char* const Aidge::Producer_Op::Type;
+constexpr const char* const Aidge::Producer_Op::OutputsName[];
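+// Producer_Op has no inputs, hence no InputsName definition.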
 
 template <std::size_t DIM>
 Aidge::Producer_Op::Producer_Op(
@@ -106,7 +106,7 @@ void Aidge::Producer_Op::setOutput(const Aidge::IOIndex_t outputIdx, const std::
     OperatorTensor::setOutput(outputIdx, data);
 }
 
-/////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::Producer(const std::array<Aidge::DimSize_t, DIM> &dims,
diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp
index bda26fa33..ed2f8e48d 100644
--- a/src/operator/ReLU.cpp
+++ b/src/operator/ReLU.cpp
@@ -17,32 +17,11 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ReLU_Op::Type = "ReLU";
+constexpr const char* const Aidge::ReLU_Op::Type;
+constexpr const char* const Aidge::ReLU_Op::InputsName[];
+constexpr const char* const Aidge::ReLU_Op::OutputsName[];
 
-Aidge::ReLU_Op::ReLU_Op(const Aidge::ReLU_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(ReLU_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::ReLU_Op::clone() const {
-    return std::make_shared<ReLU_Op>(*this);
-}
-
-void Aidge::ReLU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(ReLU_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ReLU_Op::getAvailableBackends() const {
-    return Registrar<ReLU_Op>::getKeys();
-}
-
-/////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ReLU(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index ec6e68fbe..7188027cb 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -25,10 +25,12 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
+constexpr const char* const Aidge::ReduceMean_Op::Type;
+constexpr const char* const Aidge::ReduceMean_Op::InputsName[];
+constexpr const char* const Aidge::ReduceMean_Op::OutputsName[];
 
 Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<Attr::Axes>(axes),
         attr<Attr::KeepDims>(keep_dims),
@@ -36,19 +38,9 @@ Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, bool
 {}
 
 Aidge::ReduceMean_Op::ReduceMean_Op(const Aidge::ReduceMean_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::ReduceMean_Op::clone() const {
-    return std::make_shared<ReduceMean_Op>(*this);
-}
+{}
 
 bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -89,18 +81,9 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(ReduceMean_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ReduceMean_Op::getAvailableBackends() const {
-    return Registrar<ReduceMean_Op>::getKeys();
-}
-
 Aidge::ReduceMean_Op::~ReduceMean_Op() noexcept = default;
 
-////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ReduceMean(const std::vector<std::int32_t> &axes,
                                         bool keep_dims,
@@ -108,4 +91,4 @@ std::shared_ptr<Aidge::Node> Aidge::ReduceMean(const std::vector<std::int32_t> &
                                         const std::string& name) {
     AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
     return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims, noop_with_empty_axes), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/ReduceSum.cpp b/src/operator/ReduceSum.cpp
index 73b6722e1..45330ebf3 100644
--- a/src/operator/ReduceSum.cpp
+++ b/src/operator/ReduceSum.cpp
@@ -25,7 +25,9 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ReduceSum_Op::Type = "ReduceSum";
+constexpr const char* const Aidge::ReduceSum_Op::Type;
+constexpr const char* const Aidge::ReduceSum_Op::InputsName[];
+constexpr const char* const Aidge::ReduceSum_Op::OutputsName[];
 
 bool Aidge::ReduceSum_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -65,12 +67,3 @@ bool Aidge::ReduceSum_Op::forwardDims(bool /*allowDataDependency*/) {
     }
     return false;
 }
-
-void Aidge::ReduceSum_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(ReduceSum_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ReduceSum_Op::getAvailableBackends() const {
-    return Registrar<ReduceSum_Op>::getKeys();
-}
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 6800cbe81..6f3686fa3 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -23,8 +23,9 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::Reshape_Op::Type = "Reshape";
+constexpr const char* const Aidge::Reshape_Op::Type;
+constexpr const char* const Aidge::Reshape_Op::InputsName[];
+constexpr const char* const Aidge::Reshape_Op::OutputsName[];
 
 Aidge::Reshape_Op::Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
     : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
@@ -32,7 +33,6 @@ Aidge::Reshape_Op::Reshape_Op(const std::vector<std::int64_t>& shape, bool allow
         attr<ReshapeAttr::Shape>(shape),
         attr<ReshapeAttr::AllowZero>(allowzero)))
 {
-    mImpl = std::make_shared<Reshape_OpImpl>(*this);
 }
 
 Aidge::Reshape_Op::Reshape_Op(const Aidge::Reshape_Op& op)
@@ -120,11 +120,11 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
     return false;
 }
 
-//////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Reshape(const std::vector<std::int64_t>& shape,
                             bool allowzero,
                             const std::string &name)
 {
     return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape, allowzero), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index b2ef56572..946b650e6 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -29,7 +29,9 @@
 
 namespace Aidge {
 
-const std::string Resize_Op::Type = "Resize";
+constexpr const char* const Resize_Op::Type;
+constexpr const char* const Resize_Op::InputsName[];
+constexpr const char* const Resize_Op::OutputsName[];
 
 bool Resize_Op::dimsForwarded() const {
     // in case of ROI add getInput(1) condition
@@ -136,23 +138,6 @@ bool Resize_Op::forwardDims(bool allowDataDependency) {
     return true;
 }
 
-void Resize_Op::setBackend(const std::string &name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(Resize_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-
-    // By default, automatically set backend for all optional inputs: roi, scales and
-    // sizes
-    if (getInput(1)) {
-        getInput(1)->setBackend(name, device);
-    }
-    if (getInput(2)) {
-        getInput(2)->setBackend(name, device);
-    }
-    if (getInput(3)) {
-        getInput(3)->setBackend(name, device);
-    }
-}
-
 std::shared_ptr<Node>
 Resize(std::vector<float> scale,
         std::vector<std::size_t> size,
diff --git a/src/operator/Round.cpp b/src/operator/Round.cpp
index ba4eff9d1..742864c55 100644
--- a/src/operator/Round.cpp
+++ b/src/operator/Round.cpp
@@ -19,31 +19,11 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Round_Op::Type = "Round";
+constexpr const char* const Aidge::Round_Op::Type;
+constexpr const char* const Aidge::Round_Op::InputsName[];
+constexpr const char* const Aidge::Round_Op::OutputsName[];
 
-Aidge::Round_Op::Round_Op(const Aidge::Round_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Round_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-
-std::shared_ptr<Aidge::Operator> Aidge::Round_Op::clone() const {
-    return std::make_shared<Round_Op>(*this);
-}
-
-void Aidge::Round_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Round_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Round_Op::getAvailableBackends() const {
-    return Registrar<Round_Op>::getKeys();
-}
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Round(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Round_Op>(), name);
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
deleted file mode 100644
index 218d25cbd..000000000
--- a/src/operator/Scaling.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include "aidge/operator/Scaling.hpp"
-
-#include <memory>
-#include <string>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/utils/Types.h"
-
-
-//Caution: This operator is now deprecated and should no longer be used. 
-//It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
-
-const std::string Aidge::Scaling_Op::Type = "Scaling";
-
-Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
-        mAttributes(std::make_shared<Attributes_>(
-        attr<ScalingAttr::ScalingFactor>(scalingFactor),
-        attr<ScalingAttr::QuantizedNbBits>(nbBits),
-        attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
-{
-    Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used.\nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
-} 
-
-Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op)
-    : OperatorTensor(op),
-    mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used. \nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
-    if (op.mImpl){
-        SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Scaling_Op::clone() const {
-    return std::make_shared<Scaling_Op>(*this);
-}
-
-void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Scaling_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Scaling_Op::getAvailableBackends() const {
-    return Registrar<Scaling_Op>::getKeys();
-}
-
-////////////////////////////////////////////////
-
-std::shared_ptr<Aidge::Node> Aidge::Scaling(float scalingFactor,
-                                     std::size_t quantizedNbBits,
-                                     bool isOutputUnsigned,
-                                     const std::string& name)
-{
-    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
-}
\ No newline at end of file
diff --git a/src/operator/Select.cpp b/src/operator/Select.cpp
index 6e686ecc4..715a65e19 100644
--- a/src/operator/Select.cpp
+++ b/src/operator/Select.cpp
@@ -21,30 +21,15 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 
-
-const std::string Aidge::Select_Op::Type = "Select";
+constexpr const char* const Aidge::Select_Op::Type;
+constexpr const char* const Aidge::Select_Op::InputsName[];
+constexpr const char* const Aidge::Select_Op::OutputsName[];
 
 Aidge::Select_Op::Select_Op(const Aidge::IOIndex_t nbIn)
-    : OperatorTensor(Type, std::vector<InputCategory>(nbIn + 1, InputCategory::Data), 1)
+    : OperatorTensorWithImpl(Type, std::vector<InputCategory>(nbIn + 1, InputCategory::Data), 1)
 {
     // ctor
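+    // nbIn + 1 input slots: the extra input presumably carries the selection index.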
     AIDGE_ASSERT(nbIn > 1, "Select operator should have at least two inputs.");
-    mImpl = std::make_shared<Select_OpImpl>(*this);
-}
-
-Aidge::Select_Op::Select_Op(const Select_Op& op)
-    : OperatorTensor(op)
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Select_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Select_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Select_Op::clone() const {
-    return std::make_shared<Select_Op>(*this);
 }
 
 bool Aidge::Select_Op::forwardDims(bool /*allowDataDependency*/) {
@@ -65,20 +50,6 @@ bool Aidge::Select_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::Select_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    if (Registrar<Select_Op>::exists({name})){
-        SET_IMPL_MACRO(Select_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Select_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Select_Op::getAvailableBackends() const {
-    return Registrar<Select_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Select(const Aidge::IOIndex_t nbIn, const std::string& name) {
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index 0927d3a6b..7083cab3f 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -21,34 +21,22 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/Log.hpp"
 
-///////////////////////////////////////////////
-
-const std::string Aidge::Shape_Op::Type = "Shape";
+constexpr const char* const Aidge::Shape_Op::Type;
+constexpr const char* const Aidge::Shape_Op::InputsName[];
+constexpr const char* const Aidge::Shape_Op::OutputsName[];
 
 Aidge::Shape_Op::Shape_Op(const std::int64_t start, const std::int64_t end)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<ShapeAttr::Start>(start),
         attr<ShapeAttr::End>(end)))
-{
-    mImpl = std::make_shared<Shape_OpImpl>(*this);
-}
+{}
 
 Aidge::Shape_Op::Shape_Op(const Aidge::Shape_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Shape_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Shape_OpImpl>(*this);
-    }
-}
+{}
 
-std::shared_ptr<Aidge::Operator> Aidge::Shape_Op::clone() const {
-    return std::make_shared<Shape_Op>(*this);
-}
 bool Aidge::Shape_Op::forwardDType(){
     setDataType(NativeType_v<outDType>);
     return true;
@@ -96,16 +84,6 @@ bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
     return true;
 }
 
-void Aidge::Shape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Shape_Op>::exists({name})) {
-        SET_IMPL_MACRO(Shape_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Shape_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
 void Aidge::Shape_Op::setDataType(const Aidge::DataType &datatype) const {
     if (datatype != NativeType_v<outDType>)
         Log::warn("Shape operator output type was forcibly set to {}, as it is the only supported type. "
@@ -115,11 +93,7 @@ void Aidge::Shape_Op::setDataType(const Aidge::DataType &datatype) const {
     getOutput(0)->setDataType(NativeType_v<outDType>);
 }
 
-std::set<std::string> Aidge::Shape_Op::getAvailableBackends() const {
-    return Registrar<Shape_Op>::getKeys();
-}
-
-//////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Shape(const std::int64_t start, const std::int64_t end, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
diff --git a/src/operator/ShiftGELU.cpp b/src/operator/ShiftGELU.cpp
index bd229e6cf..8d41c9318 100644
--- a/src/operator/ShiftGELU.cpp
+++ b/src/operator/ShiftGELU.cpp
@@ -19,35 +19,14 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ShiftGELU_Op::Type = "ShiftGELU";
+constexpr const char* const Aidge::ShiftGELU_Op::Type;
+constexpr const char* const Aidge::ShiftGELU_Op::InputsName[];
+constexpr const char* const Aidge::ShiftGELU_Op::OutputsName[];
 
-Aidge::ShiftGELU_Op::ShiftGELU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+Aidge::ShiftGELU_Op::ShiftGELU_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
-Aidge::ShiftGELU_Op::ShiftGELU_Op(const Aidge::ShiftGELU_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(ShiftGELU_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::ShiftGELU_Op::clone() const {
-    return std::make_shared<ShiftGELU_Op>(*this);
-}
-
-void Aidge::ShiftGELU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(ShiftGELU_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ShiftGELU_Op::getAvailableBackends() const {
-    return Registrar<ShiftGELU_Op>::getKeys();
-}
-
-///////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ShiftGELU(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<ShiftGELU_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/ShiftMax.cpp b/src/operator/ShiftMax.cpp
index 58d4bf461..24bd3a403 100644
--- a/src/operator/ShiftMax.cpp
+++ b/src/operator/ShiftMax.cpp
@@ -19,39 +19,14 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ShiftMax_Op::Type = "ShiftMax";
+constexpr const char* const Aidge::ShiftMax_Op::Type;
+constexpr const char* const Aidge::ShiftMax_Op::InputsName[];
+constexpr const char* const Aidge::ShiftMax_Op::OutputsName[];
 
-Aidge::ShiftMax_Op::ShiftMax_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+Aidge::ShiftMax_Op::ShiftMax_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
-Aidge::ShiftMax_Op::ShiftMax_Op(const Aidge::ShiftMax_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(ShiftMax_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-/**
- * @brief Clone the operator using its copy-constructor.
- * @see Operator::ShiftMax_Op
- */
-std::shared_ptr<Aidge::Operator> Aidge::ShiftMax_Op::clone() const {
-    return std::make_shared<ShiftMax_Op>(*this);
-}
-
-void Aidge::ShiftMax_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(ShiftMax_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ShiftMax_Op::getAvailableBackends() const {
-    return Registrar<ShiftMax_Op>::getKeys();
-}
-
-/////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ShiftMax(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<ShiftMax_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp
index d97f8c523..8cf683b56 100644
--- a/src/operator/Sigmoid.cpp
+++ b/src/operator/Sigmoid.cpp
@@ -18,35 +18,13 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Sigmoid_Op::Type = "Sigmoid";
+constexpr const char* const Aidge::Sigmoid_Op::Type;
+constexpr const char* const Aidge::Sigmoid_Op::InputsName[];
+constexpr const char* const Aidge::Sigmoid_Op::OutputsName[];
 
-Aidge::Sigmoid_Op::Sigmoid_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+Aidge::Sigmoid_Op::Sigmoid_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
-Aidge::Sigmoid_Op::Sigmoid_Op(const Aidge::Sigmoid_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Sigmoid_Op::clone() const {
-    return std::make_shared<Sigmoid_Op>(*this);
-}
-
-
-void Aidge::Sigmoid_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Sigmoid_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Sigmoid_Op::getAvailableBackends() const {
-    return Registrar<Sigmoid_Op>::getKeys();
-}
-
-///////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Sigmoid(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Sigmoid_Op>(), name);
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 60ec176c7..315a1066c 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -26,13 +26,15 @@
 #include "aidge/utils/Registrar.hpp"
 
 
-const std::string Aidge::Slice_Op::Type = "Slice";
+constexpr const char* const Aidge::Slice_Op::Type;
+constexpr const char* const Aidge::Slice_Op::InputsName[];
+constexpr const char* const Aidge::Slice_Op::OutputsName[];
 
 Aidge::Slice_Op::Slice_Op(const std::vector<std::int64_t>& starts,
                         const std::vector<std::int64_t>& ends,
                         const std::vector<std::int8_t>& axes,
                         const std::vector<std::int64_t>& steps)
-    : OperatorTensor(Type,
+    : OperatorTensorWithImpl(Type,
         {InputCategory::Data,
             InputCategory::OptionalData,
             InputCategory::OptionalData,
@@ -45,23 +47,11 @@ Aidge::Slice_Op::Slice_Op(const std::vector<std::int64_t>& starts,
         attr<SliceAttr::Axes>(axes),
         attr<SliceAttr::Steps>(steps)))
 {
-    mImpl = std::make_shared<Slice_OpImpl>(*this);
 }
 
 Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op& op)
-    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Slice_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Slice_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Slice_Op::clone() const {
-    return std::make_shared<Slice_Op>(*this);
-}
+    : OperatorTensorWithImpl(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+{}
 
 bool Aidge::Slice_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined())
@@ -213,21 +203,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
     return false;
 }
 
-void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Slice_Op>::exists({name})){
-        SET_IMPL_MACRO(Slice_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Slice_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Slice_Op::getAvailableBackends() const {
-    return Registrar<Slice_Op>::getKeys();
-}
-
-////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Slice(const std::vector<std::int64_t>& starts,
                                    const std::vector<std::int64_t>& ends,
diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp
index df8a9e021..b43d498bd 100644
--- a/src/operator/Softmax.cpp
+++ b/src/operator/Softmax.cpp
@@ -18,40 +18,23 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Softmax_Op::Type = "Softmax";
+constexpr const char* const Aidge::Softmax_Op::Type;
+constexpr const char* const Aidge::Softmax_Op::InputsName[];
+constexpr const char* const Aidge::Softmax_Op::OutputsName[];
 
 Aidge::Softmax_Op::Softmax_Op(std::int32_t axis)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
         attr<SoftmaxAttr::Axis>(axis)))
 {}
 
 Aidge::Softmax_Op::Softmax_Op(const Aidge::Softmax_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Softmax_Op::clone() const {
-    return std::make_shared<Softmax_Op>(*this);
-}
-
-void Aidge::Softmax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Softmax_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Softmax_Op::getAvailableBackends() const {
-    return Registrar<Softmax_Op>::getKeys();
-}
+{}
 
-////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Softmax(std::int32_t axis, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index 4bdf01b69..7aa732140 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -22,34 +22,24 @@
 #include "aidge/utils/Types.h"
 
 
-const std::string Aidge::Split_Op::Type = "Split";
+constexpr const char* const Aidge::Split_Op::Type;
+constexpr const char* const Aidge::Split_Op::InputsName[];
+constexpr const char* const Aidge::Split_Op::OutputsName[];
 
 Aidge::Split_Op::Split_Op(std::int8_t axis,
                         Aidge::DimSize_t nbOutputs,
                         const std::vector<Aidge::DimSize_t>& split)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
     mAttributes(std::make_shared<Attributes_>(
         attr<SplitAttr::Axis>(axis),
         attr<SplitAttr::Split>(split)))
 {
-    mImpl = std::make_shared<Split_OpImpl>(*this);
 }
 
 Aidge::Split_Op::Split_Op(const Aidge::Split_Op &op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
     mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Split_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Split_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Split_Op::clone() const {
-    return std::make_shared<Split_Op>(*this);
-}
+{}
 
 bool Aidge::Split_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined()))
@@ -121,29 +111,11 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
     return false;
 }
 
-void Aidge::Split_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Split_Op>::exists({name})) {
-        SET_IMPL_MACRO(Split_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Split_OpImpl>(*this);
-    }
-    for (std::size_t i = 0; i < this->nbOutputs(); i++)
-    {
-        mOutputs[i]->setBackend(name, device);
-    }
-
-}
-
-std::set<std::string> Aidge::Split_Op::getAvailableBackends() const {
-    return Registrar<Split_Op>::getKeys();
-}
-
-////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Split(Aidge::DimSize_t nbOutput,
                                    std::int8_t axis,
                                    const std::vector<Aidge::DimSize_t>& split,
                                    const std::string &name) {
     return std::make_shared<Node>(std::make_shared<Split_Op>(axis, nbOutput, split), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp
index bd3286f09..bec6eb447 100644
--- a/src/operator/Sqrt.cpp
+++ b/src/operator/Sqrt.cpp
@@ -19,33 +19,11 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Sqrt_Op::Type = "Sqrt";
+constexpr const char* const Aidge::Sqrt_Op::Type;
+constexpr const char* const Aidge::Sqrt_Op::InputsName[];
+constexpr const char* const Aidge::Sqrt_Op::OutputsName[];
 
-Aidge::Sqrt_Op::Sqrt_Op(const Aidge::Sqrt_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Sqrt_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-
-std::shared_ptr<Aidge::Operator> Aidge::Sqrt_Op::clone() const {
-    return std::make_shared<Sqrt_Op>(*this);
-}
-
-void Aidge::Sqrt_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Sqrt_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Sqrt_Op::getAvailableBackends() const {
-    return Registrar<Sqrt_Op>::getKeys();
-}
-
-////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Sqrt(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Sqrt_Op>(), name);
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
index 53b8e76ed..b97cd8262 100644
--- a/src/operator/Squeeze.cpp
+++ b/src/operator/Squeeze.cpp
@@ -26,29 +26,24 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-const std::string Squeeze_Op::Type = "Squeeze";
+constexpr const char* const Squeeze_Op::Type;
+constexpr const char* const Squeeze_Op::InputsName[];
+constexpr const char* const Squeeze_Op::OutputsName[];
 
 Squeeze_Op::Squeeze_Op(const std::vector<std::int8_t> &axes)
-    : OperatorTensor(
+    : OperatorTensorWithImpl(
         Type,
         {InputCategory::Data, InputCategory::OptionalData},
         1),
     mAttributes(
         std::make_shared<Attributes_>(attr<SqueezeAttr::Axes>(axes)))
 {
-    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
 }
 
 Squeeze_Op::Squeeze_Op(const Squeeze_Op &op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Squeeze_Op, *this, op.backend());
-    } else {
-        mImpl = std::make_shared<Squeeze_OpImpl>(*this);
-    }
-}
+{}
 
 bool Squeeze_Op::dimsForwarded() const {
   if ((getInput(1) && !getInput(1)->undefined())) {
@@ -136,20 +131,6 @@ bool Squeeze_Op::forwardDims(bool allowDataDependency) {
     return true;
 }
 
-void Squeeze_Op::setBackend(const std::string &name,
-                            Aidge::DeviceIdx_t device) {
-  if (Registrar<Squeeze_Op>::exists({name})) {
-    SET_IMPL_MACRO(Squeeze_Op, *this, name);
-  } else {
-    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
-  }
-  mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Squeeze_Op::getAvailableBackends() const {
-  return Registrar<Squeeze_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Node> Squeeze(const std::vector<std::int8_t> axes,
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
index 9e66fac64..893e3f437 100644
--- a/src/operator/Stack.cpp
+++ b/src/operator/Stack.cpp
@@ -22,30 +22,22 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-
-const std::string StackOp::Type = "Stack";
+constexpr const char* const StackOp::Type;
+constexpr const char* const StackOp::InputsName[];
+constexpr const char* const StackOp::OutputsName[];
 
 StackOp::StackOp(std::uint32_t maxElements)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
       mAttributes(std::make_shared<Attributes_>(
           attr<StackAttr::MaxElements>(maxElements),
           attr<StackAttr::BackwardStep>(0),
           attr<StackAttr::ForwardStep>(0))) {
-    mImpl = std::make_shared<StackOpImpl>(*this);
 }
 
 StackOp::StackOp(const Aidge::StackOp &op)
-    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(StackOp, *this, op.backend());
-    } else {
-        mImpl = std::make_shared<StackOpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::StackOp::clone() const {
-    return std::make_shared<StackOp>(*this);
-}
+    : OperatorTensorWithImpl(op),
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+{}
 
 bool Aidge::StackOp::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined()))
@@ -83,19 +75,6 @@ bool Aidge::StackOp::forwardDims(bool allowDataDependency) {
     return false;
 }
 
-void StackOp::setBackend(const std::string &name, DeviceIdx_t device) {
-    if (Registrar<StackOp>::exists({name})) {
-        SET_IMPL_MACRO(StackOp, *this, name);
-    } else {
-        mImpl = std::make_shared<StackOpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> StackOp::getAvailableBackends() const {
-    return Registrar<StackOp>::getKeys();
-}
-
 void StackOp::forward() {
     OperatorTensor::forward();
     ++forwardStep();
@@ -107,6 +86,8 @@ void StackOp::backward() {
     --backwardStep();
 }
 
+////////////////////////////////////////////////////////////////////////////////
+
 std::shared_ptr<Node> Stack(std::uint32_t maxElements,
                             const std::string &name) {
     return std::make_shared<Node>(std::make_shared<StackOp>(maxElements),
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index ca7348b3b..c1c185897 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -22,21 +22,9 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Sub_Op::Type = "Sub";
-
-Aidge::Sub_Op::Sub_Op(const Aidge::Sub_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Sub_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Sub_Op::clone() const {
-    return std::make_shared<Sub_Op>(*this);
-}
+constexpr const char* const Aidge::Sub_Op::Type;
+constexpr const char* const Aidge::Sub_Op::InputsName[];
+constexpr const char* const Aidge::Sub_Op::OutputsName[];
 
 bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -67,16 +55,7 @@ bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Sub_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Sub_Op::getAvailableBackends() const {
-    return Registrar<Sub_Op>::getKeys();
-}
-
-//////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Sub(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp
index fe295ab71..bef62605d 100644
--- a/src/operator/Tanh.cpp
+++ b/src/operator/Tanh.cpp
@@ -18,34 +18,13 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Tanh_Op::Type = "Tanh";
+constexpr const char* const Aidge::Tanh_Op::Type;
+constexpr const char* const Aidge::Tanh_Op::InputsName[];
+constexpr const char* const Aidge::Tanh_Op::OutputsName[];
 
-Aidge::Tanh_Op::Tanh_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+Aidge::Tanh_Op::Tanh_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
-Aidge::Tanh_Op::Tanh_Op(const Aidge::Tanh_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Tanh_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Tanh_Op::clone() const {
-    return std::make_shared<Tanh_Op>(*this);
-}
-
-void Aidge::Tanh_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Tanh_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Tanh_Op::getAvailableBackends() const {
-    return Registrar<Tanh_Op>::getKeys();
-}
-
-////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Tanh(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Tanh_Op>(), name);
diff --git a/src/operator/TopK.cpp b/src/operator/TopK.cpp
index 4d574784f..e9ac96ce4 100644
--- a/src/operator/TopK.cpp
+++ b/src/operator/TopK.cpp
@@ -22,15 +22,16 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-
-const std::string Aidge::TopK_Op::Type = "TopK";
+constexpr const char* const TopK_Op::Type;
+constexpr const char* const TopK_Op::InputsName[];
+constexpr const char* const TopK_Op::OutputsName[];
 
 TopK_Op::TopK_Op(
     int64_t axis,
     bool largest,
     bool sorted,
     IOIndex_t k)
-    : OperatorTensor(Type,
+    : OperatorTensorWithImpl(Type,
                      {InputCategory::Data,
                       InputCategory::OptionalData},
                      2),
@@ -44,15 +45,9 @@ TopK_Op::TopK_Op(
 }
 
 TopK_Op::TopK_Op(const TopK_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(TopK_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
+{}
 
 bool Aidge::TopK_Op::forwardDims(bool allowDataDependency) {
     if (inputsAssociated()) {
@@ -88,21 +83,11 @@ bool Aidge::TopK_Op::forwardDims(bool allowDataDependency) {
     return false;
 }
 
-void Aidge::TopK_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(TopK_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-    mOutputs[1]->setBackend(name, device);
-}
-
 void Aidge::TopK_Op::setDataType(const DataType& dataType) const {
     mOutputs[0]->setDataType(dataType);
     // mOutputs[1] data type is fixed (Int64)
 }
 
-std::set<std::string> Aidge::TopK_Op::getAvailableBackends() const {
-    return Registrar<TopK_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Node> TopK(const std::string& name) {
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index b0c2c11d6..78df803d8 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -23,38 +23,20 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-void Aidge::TransposeImpl::forward() {
-    const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
-    op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.outputDimsOrder());
-}
-
-///////////////////////////////////////////////////
-
-const std::string Aidge::Transpose_Op::Type = "Transpose";
+constexpr const char* const Aidge::Transpose_Op::Type;
+constexpr const char* const Aidge::Transpose_Op::InputsName[];
+constexpr const char* const Aidge::Transpose_Op::OutputsName[];
 
 Aidge::Transpose_Op::Transpose_Op(const std::vector<Aidge::DimSize_t> &outputDimsOrder)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<Attr::OutputDimsOrder>(outputDimsOrder)))
-{
-    mImpl = std::make_shared<TransposeImpl>(*this);
-}
+{}
 
 Aidge::Transpose_Op::Transpose_Op(const Aidge::Transpose_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
     mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<TransposeImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Transpose_Op::clone() const {
-    return std::make_shared<Transpose_Op>(*this);
-}
+{}
 
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -96,23 +78,9 @@ bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::Transpose_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Transpose_Op>::exists({name})){
-        SET_IMPL_MACRO(Transpose_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<TransposeImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Transpose_Op::getAvailableBackends() const {
-    return Registrar<Transpose_Op>::getKeys();
-}
-
-//////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Transpose(const std::vector<Aidge::DimSize_t> &outputDimsOrder,
                                               const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
index 5fac669b8..67cd79b90 100644
--- a/src/operator/Unfold.cpp
+++ b/src/operator/Unfold.cpp
@@ -24,40 +24,26 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Unfold_Op<DIM>::Type = "Unfold";
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Unfold_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Unfold_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Unfold_Op<DIM>::OutputsName[];
 
 template <Aidge::DimIdx_t DIM>
 Aidge::Unfold_Op<DIM>::Unfold_Op(const std::array<Aidge::DimSize_t, DIM> &kernelDims,
                     const std::array<Aidge::DimSize_t, DIM> &strideDims,
                     const std::array<Aidge::DimSize_t, DIM> &dilationDims)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
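+    // Inside a class template the dependent base must be named in full; the second
+    // template argument selects Unfold_OpImpl<DIM> as the default implementation.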
+    : OperatorTensorWithImpl<Unfold_Op<DIM>, Unfold_OpImpl<DIM>>(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<UnfoldAttr::StrideDims>(strideDims),
         attr<UnfoldAttr::DilationDims>(dilationDims),
         attr<UnfoldAttr::KernelDims>(kernelDims)))
-{
-    mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 Aidge::Unfold_Op<DIM>::Unfold_Op(const Aidge::Unfold_Op<DIM> &op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl<Unfold_Op<DIM>, Unfold_OpImpl<DIM>>(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::Unfold_Op<DIM>::clone() const {
-    return std::make_shared<Unfold_Op>(*this);
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Unfold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -83,25 +69,9 @@ bool Aidge::Unfold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::Unfold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Unfold_Op<DIM>>::exists({name})){
-        SET_IMPL_MACRO(Unfold_Op<DIM>, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::Unfold_Op<DIM>::getAvailableBackends() const {
-    return Registrar<Unfold_Op<DIM>>::getKeys();
-}
-
 template class Aidge::Unfold_Op<2>;
 
-///////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::Unfold(const std::array<Aidge::DimSize_t, DIM> &kernelDims,
@@ -115,4 +85,4 @@ std::shared_ptr<Aidge::Node> Aidge::Unfold(const std::array<Aidge::DimSize_t, DI
 template std::shared_ptr<Aidge::Node> Aidge::Unfold<2>(const std::array<Aidge::DimSize_t, 2>&,
                                   const std::string&,
                                   const std::array<Aidge::DimSize_t, 2>&,
-                                  const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
+                                  const std::array<Aidge::DimSize_t, 2>&);
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
index b73b416c9..dcd68fbcc 100644
--- a/src/operator/Unsqueeze.cpp
+++ b/src/operator/Unsqueeze.cpp
@@ -23,28 +23,21 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-const std::string Unsqueeze_Op::Type = "Unsqueeze";
-
+constexpr const char* const Unsqueeze_Op::Type;
+constexpr const char* const Unsqueeze_Op::InputsName[];
+constexpr const char* const Unsqueeze_Op::OutputsName[];
 
 Unsqueeze_Op::Unsqueeze_Op(const std::vector<int8_t> &axes)
-    : OperatorTensor(Type,
+    : OperatorTensorWithImpl(Type,
                     {InputCategory::Data, InputCategory::OptionalData},
                     1),
       mAttributes(std::make_shared<Attributes_>(attr<UnsqueezeAttr::Axes>(axes)))
-{
-    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
-}
+{}
 
 Unsqueeze_Op::Unsqueeze_Op(const Unsqueeze_Op &op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Unsqueeze_Op, *this, op.backend());
-    } else {
-        mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
-    }
-}
+{}
 
 bool Aidge::Unsqueeze_Op::dimsForwarded() const {
   if ((getInput(1) && !getInput(1)->undefined())) {
@@ -124,20 +117,6 @@ bool Unsqueeze_Op::forwardDims(bool allowDataDependency) {
   return true;
 }
 
-void Unsqueeze_Op::setBackend(const std::string &name,
-                              Aidge::DeviceIdx_t device) {
-  if (Registrar<Unsqueeze_Op>::exists({name})) {
-    SET_IMPL_MACRO(Unsqueeze_Op, *this, name);
-  } else {
-    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
-  }
-  mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Unsqueeze_Op::getAvailableBackends() const {
-  return Registrar<Unsqueeze_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes,
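The three constexpr lines added at the top of Unsqueeze.cpp (and mirrored in WeightInterleaving.cpp below) are out-of-line definitions for the static constexpr data members now declared in the headers. Until C++17 made static constexpr members implicitly inline, any ODR-use — such as taking the member's address, which pybind11's def_readonly_static does — requires exactly one namespace-scope definition. Stand-in illustration on a hypothetical class:

    // In a header:
    struct MyOp {
        static constexpr const char* const Type = "MyOp";
        static constexpr const char* const InputsName[] = {"data_input"};
    };

    // In exactly one .cpp; without these, &MyOp::Type fails to link pre-C++17:
    constexpr const char* const MyOp::Type;
    constexpr const char* const MyOp::InputsName[];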
diff --git a/src/operator/WeightInterleaving.cpp b/src/operator/WeightInterleaving.cpp
index 66af1d51f..0852aa852 100644
--- a/src/operator/WeightInterleaving.cpp
+++ b/src/operator/WeightInterleaving.cpp
@@ -21,29 +21,9 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::WeightInterleaving_Op::Type = "WeightInterleaving";
-
-/**
- * @brief Copy-constructor.
- * @param op WeightInterleaving_Op to copy.
- * @details Copies the operator attributes and its output tensor(s), but not
- * its input tensors. The new operator has no associated input.
- */
-Aidge::WeightInterleaving_Op::WeightInterleaving_Op(const WeightInterleaving_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(WeightInterleaving_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-
-std::shared_ptr<Aidge::Operator> Aidge::WeightInterleaving_Op::clone() const {
-    return std::make_shared<WeightInterleaving_Op>(*this);
-}
-
+constexpr const char* const Aidge::WeightInterleaving_Op::Type;
+constexpr const char* const Aidge::WeightInterleaving_Op::InputsName[];
+constexpr const char* const Aidge::WeightInterleaving_Op::OutputsName[];
 
 bool Aidge::WeightInterleaving_Op::forwardDims(bool /*allowDataDependency*/) {
     
@@ -92,21 +72,6 @@ bool Aidge::WeightInterleaving_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-
-void Aidge::WeightInterleaving_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(WeightInterleaving_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::WeightInterleaving_Op::getAvailableBackends() const {
-    return Registrar<WeightInterleaving_Op>::getKeys();
-}
-
-std::shared_ptr<Aidge::Node> Aidge::WeightInterleaving(const std::string& name) {
-    return std::make_shared<Node>(std::make_shared<WeightInterleaving_Op>(), name);
-}
-
-
 std::size_t Aidge::WeightInterleaving_Op::compactDataSize(std::size_t dataSize, std::uint8_t nbBits) {
    AIDGE_ASSERT(nbBits > 0 && nbBits < 8, "nbBits must be between 1 and 7"); // Ensure valid bit width
 
@@ -118,4 +83,10 @@ std::size_t Aidge::WeightInterleaving_Op::compactDataSize(std::size_t dataSize,
     std::size_t requiredSize = (dataSize + nbSlot - 1) / nbSlot;
 
     return requiredSize;
-}
\ No newline at end of file
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::WeightInterleaving(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<WeightInterleaving_Op>(), name);
+}
diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
index a0b166498..9bc0e855e 100644
--- a/src/recipes/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -83,7 +83,6 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         }
     }
 
-    const std::vector<std::string> sliceInputsNames = Slice_Op::getInputsName();
     // coordinates of the first value of the current output slice
     std::vector<DimSize_t> currentFirstDims = std::vector<DimSize_t>(outTensor->nbDims(), 0);
     for (IOIndex_t i = 0; currentFirstDims[axis] < outTensor->dims()[axis]; currentFirstDims[axis] += outputDims[axis], ++i) {
@@ -107,7 +106,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         starts -> setBackend(backend);
         starts -> resize(std::vector<std::size_t>({inputDimsStart.size()}));
         starts -> getImpl() -> copyFromHost(inputDimsStart.data(), inputDimsStart.size());
-        auto startsNode = Producer(starts, slice->name() + "_" + sliceInputsNames[1]);
+        auto startsNode = Producer(starts, slice->name() + "_1");
         startsNode -> addChild(slice, 0, 1);
 
         // Create Slice's Ends producer node
@@ -120,7 +119,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         ends -> setBackend(backend);
         ends -> resize(std::vector<std::size_t>({inputDimsEnd.size()}));
         ends -> getImpl() -> copyFromHost(inputDimsEnd.data(), inputDimsEnd.size());
-        auto endsNode = Producer(ends, slice->name() + "_" + sliceInputsNames[2]);
+        auto endsNode = Producer(ends, slice->name() + "_2");
         endsNode -> addChild(slice, 0, 2);
 
         // Create Slice's Axes producer node
@@ -131,7 +130,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         axes -> setBackend(backend);
         axes -> resize(std::vector<std::size_t>({usedDims.size()}));
         axes -> getImpl() -> copyFromHost(usedDims.data(), usedDims.size());
-        auto axesNode = Producer(axes, slice->name() + "_" + sliceInputsNames[3]);
+        auto axesNode = Producer(axes, slice->name() + "_3");
         axesNode -> addChild(slice, 0, 3);
 
         // Create Slice's Steps producer node
@@ -141,7 +140,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         steps -> setBackend(backend);
         steps -> resize(std::vector<std::size_t>({inputDimsSteps.size()}));
         steps -> getImpl() -> copyFromHost(inputDimsSteps.data(), inputDimsSteps.size());
-        auto stepsNode = Producer(steps, slice->name() + "_" + sliceInputsNames[4]);
+        auto stepsNode = Producer(steps, slice->name() + "_4");
         stepsNode -> addChild(slice, 0, 4);
 
         // auto slice = Slice(inputDimsStart, inputDimsEnd, usedDims, inputDimsSteps);
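With Slice_Op::getInputsName() removed, the tiling recipe now derives the producer names from the input index ("_1" through "_4") rather than from the inputs' symbolic names. If keeping the symbolic names mattered, an equivalent fix would be to index the static array this series introduces, e.g. (sketch):

    auto startsNode = Producer(starts,
        slice->name() + "_" + std::string(Slice_Op::InputsName[1]));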
-- 
GitLab


From 2ad35529adb18149391eeab5436a1382411e0f0c Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Wed, 30 Apr 2025 14:51:31 +0200
Subject: [PATCH 12/12] Fixed Python bindings for the static input/output name arrays

---
 .../operator/pybind_ConvDepthWise.cpp           |  9 ++++++---
 .../operator/pybind_ConvTranspose.cpp           |  8 ++++++--
 python_binding/operator/pybind_CryptoHash.cpp   | 11 +++++++++--
 python_binding/operator/pybind_DepthToSpace.cpp |  9 ++++++---
 python_binding/operator/pybind_Div.cpp          |  8 ++++++--
 python_binding/operator/pybind_Dropout.cpp      | 17 ++++++++---------
 python_binding/operator/pybind_Equal.cpp        |  9 +++++++--
 python_binding/operator/pybind_Erf.cpp          |  8 ++++++--
 python_binding/operator/pybind_Expand.cpp       |  8 ++++++--
 python_binding/operator/pybind_FC.cpp           |  8 ++++++--
 python_binding/operator/pybind_Flatten.cpp      |  8 ++++++--
 python_binding/operator/pybind_Fold.cpp         |  8 ++++++--
 python_binding/operator/pybind_Gather.cpp       |  9 ++++++---
 .../operator/pybind_GlobalAveragePooling.cpp    |  8 ++++++--
 python_binding/operator/pybind_GridSample.cpp   |  9 ++++++---
 python_binding/operator/pybind_Heaviside.cpp    |  9 ++++++---
 python_binding/operator/pybind_Identity.cpp     |  8 ++++++--
 python_binding/operator/pybind_LRN.cpp          |  9 ++++++---
 python_binding/operator/pybind_LeakyReLU.cpp    |  9 ++++++---
 python_binding/operator/pybind_Ln.cpp           |  8 ++++++--
 python_binding/operator/pybind_Matmul.cpp       |  8 ++++++--
 python_binding/operator/pybind_MaxPooling.cpp   |  8 ++++++--
 python_binding/operator/pybind_Memorize.cpp     |  8 ++++++--
 python_binding/operator/pybind_Mod.cpp          |  8 ++++++--
 python_binding/operator/pybind_Mul.cpp          |  8 ++++++--
 python_binding/operator/pybind_Pad.cpp          |  8 ++++++--
 python_binding/operator/pybind_Pop.cpp          |  9 ++++++---
 python_binding/operator/pybind_Pow.cpp          |  8 ++++++--
 python_binding/operator/pybind_Producer.cpp     |  8 ++++++--
 python_binding/operator/pybind_ReLU.cpp         |  8 ++++++--
 python_binding/operator/pybind_ReduceMean.cpp   | 10 +++++++---
 python_binding/operator/pybind_ReduceSum.cpp    |  9 ++++++---
 python_binding/operator/pybind_Reshape.cpp      |  9 ++++++---
 python_binding/operator/pybind_Resize.cpp       |  8 ++++++--
 python_binding/operator/pybind_Round.cpp        |  8 ++++++--
 python_binding/operator/pybind_Select.cpp       |  8 ++++++--
 python_binding/operator/pybind_Shape.cpp        |  9 ++++++---
 python_binding/operator/pybind_Sigmoid.cpp      |  8 ++++++--
 python_binding/operator/pybind_Slice.cpp        |  9 ++++++---
 python_binding/operator/pybind_Softmax.cpp      |  9 ++++++---
 python_binding/operator/pybind_Split.cpp        | 11 +++++++----
 python_binding/operator/pybind_Sqrt.cpp         |  8 ++++++--
 python_binding/operator/pybind_Squeeze.cpp      |  9 ++++++---
 python_binding/operator/pybind_Stack.cpp        |  9 ++++++---
 python_binding/operator/pybind_Sub.cpp          |  8 ++++++--
 python_binding/operator/pybind_Tanh.cpp         |  8 ++++++--
 python_binding/operator/pybind_TopK.cpp         | 15 +++++++--------
 python_binding/operator/pybind_Transpose.cpp    |  8 ++++++--
 python_binding/operator/pybind_Unfold.cpp       |  8 ++++++--
 python_binding/operator/pybind_Unsqueeze.cpp    |  8 ++++++--
 .../operator/pybind_WeightInterleaving.cpp      |  8 ++++++--
 51 files changed, 315 insertions(+), 133 deletions(-)

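Every hunk in this patch replaces the removed getInputsName()/getOutputsName() statics with the same pair of lambdas copying the Op::InputsName / Op::OutputsName arrays into std::vector<std::string>. A hypothetical helper — not part of this patch, names assumed — shows how that repetition could be factored:

    // Binds both accessors for any operator exposing the static
    // InputsName / OutputsName arrays.
    template <class Op, class PyClass>
    PyClass& defIONames(PyClass& cls) {
        return cls
            .def_static("get_inputs_name", []() {
                return std::vector<std::string>(std::begin(Op::InputsName),
                                                std::end(Op::InputsName));
            }, "Get the names of the input tensors.")
            .def_static("get_outputs_name", []() {
                return std::vector<std::string>(std::begin(Op::OutputsName),
                                                std::end(Op::OutputsName));
            }, "Get the names of the output tensors.");
    }

Each pybind_*.cpp would then call defIONames<Div_Op>(cls) and so on.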
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index ce2cdba01..04113a27d 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -54,9 +54,12 @@ void declare_ConvDepthWiseOp(py::module &m) {
     }), py::arg("kernel_dims"),
         py::arg("stride_dims"),
         py::arg("dilation_dims"))
-  .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
-  .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
-
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(ConvDepthWise_Op<DIM>::InputsName), std::end(ConvDepthWise_Op<DIM>::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(ConvDepthWise_Op<DIM>::OutputsName), std::end(ConvDepthWise_Op<DIM>::OutputsName));
+    }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
       return std::vector<std::string>(std::begin(EnumStrings<ConvDepthWiseAttr>::data), std::end(EnumStrings<ConvDepthWiseAttr>::data));
 		})
diff --git a/python_binding/operator/pybind_ConvTranspose.cpp b/python_binding/operator/pybind_ConvTranspose.cpp
index 0f759e3db..6c41d559f 100644
--- a/python_binding/operator/pybind_ConvTranspose.cpp
+++ b/python_binding/operator/pybind_ConvTranspose.cpp
@@ -57,8 +57,12 @@ template <DimIdx_t DIM> void declare_ConvTransposeOp(py::module &m) {
              py::arg("kernel_dims"),
              py::arg("stride_dims") = std::vector<DimSize_t>(DIM, 1),
              py::arg("dilation_dims") = std::vector<DimSize_t>(DIM, 1))
-        .def_static("get_inputs_name", &ConvTranspose_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &ConvTranspose_Op<DIM>::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(ConvTranspose_Op<DIM>::InputsName), std::end(ConvTranspose_Op<DIM>::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(ConvTranspose_Op<DIM>::OutputsName), std::end(ConvTranspose_Op<DIM>::OutputsName));
+        }, "Get the names of the output tensors.")
         .def("in_channels", &ConvTranspose_Op<DIM>::inChannels)
         .def("out_channels", &ConvTranspose_Op<DIM>::outChannels)
         .def_readonly_static("Type", &ConvTranspose_Op<DIM>::Type);
diff --git a/python_binding/operator/pybind_CryptoHash.cpp b/python_binding/operator/pybind_CryptoHash.cpp
index 923f91b60..10913ec9b 100644
--- a/python_binding/operator/pybind_CryptoHash.cpp
+++ b/python_binding/operator/pybind_CryptoHash.cpp
@@ -25,8 +25,15 @@
   
      py::class_<CryptoHash_Op, std::shared_ptr<CryptoHash_Op>, OperatorTensor>(m, "CryptoHashOp", py::multiple_inheritance())
          .def(py::init<>())
-         .def_static("get_inputs_name", &CryptoHash_Op::getInputsName)
-         .def_static("get_outputs_name", &CryptoHash_Op::getOutputsName)
+         .def_static("get_inputs_name", []() {
+             return std::vector<std::string>(std::begin(CryptoHash_Op::InputsName), std::end(CryptoHash_Op::InputsName));
+         }, "Get the names of the input tensors.")
+         .def_static("get_outputs_name", []() {
+             return std::vector<std::string>(std::begin(CryptoHash_Op::OutputsName), std::end(CryptoHash_Op::OutputsName));
+         }, "Get the names of the output tensors.")
+         .def_static("attributes_name", []() {
+           return std::vector<std::string>(std::begin(EnumStrings<CryptoHashAttr>::data), std::end(EnumStrings<CryptoHashAttr>::data));
+         })
          .def_readonly_static("Type", &CryptoHash_Op::Type);
  
      declare_registrable<CryptoHash_Op>(m, "CryptoHashOp");
diff --git a/python_binding/operator/pybind_DepthToSpace.cpp b/python_binding/operator/pybind_DepthToSpace.cpp
index 469a3f264..e6841af7d 100644
--- a/python_binding/operator/pybind_DepthToSpace.cpp
+++ b/python_binding/operator/pybind_DepthToSpace.cpp
@@ -35,9 +35,12 @@ void declare_DepthToSpace(py::module &m) {
     .def(py::init([](const std::uint32_t blockSize, const std::string& mode) {
             return new DepthToSpace_Op(blockSize, stringToMode(mode));
         }), py::arg("block_size"), py::arg("mode") = "CRD")
-    .def_static("get_inputs_name", &DepthToSpace_Op::getInputsName)
-    .def_static("get_outputs_name", &DepthToSpace_Op::getOutputsName)
-
+      .def_static("get_inputs_name", []() {
+          return std::vector<std::string>(std::begin(DepthToSpace_Op::InputsName), std::end(DepthToSpace_Op::InputsName));
+      }, "Get the names of the input tensors.")
+      .def_static("get_outputs_name", []() {
+          return std::vector<std::string>(std::begin(DepthToSpace_Op::OutputsName), std::end(DepthToSpace_Op::OutputsName));
+      }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
       return std::vector<std::string>(std::begin(EnumStrings<DepthToSpaceAttr>::data), std::end(EnumStrings<DepthToSpaceAttr>::data));
 		})
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index ef5a035b1..57148e432 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -37,8 +37,12 @@ void init_Div(py::module& m) {
     :type name: str, Optional
     )mydelimiter")
         .def(py::init<>())
-        .def_static("get_inputs_name", &Div_Op::getInputsName)
-        .def_static("get_outputs_name", &Div_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Div_Op::InputsName), std::end(Div_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Div_Op::OutputsName), std::end(Div_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Div_Op::Type);
 
     declare_registrable<Div_Op>(m, "DivOp");
diff --git a/python_binding/operator/pybind_Dropout.cpp b/python_binding/operator/pybind_Dropout.cpp
index 4925b34a6..bdc9467e4 100644
--- a/python_binding/operator/pybind_Dropout.cpp
+++ b/python_binding/operator/pybind_Dropout.cpp
@@ -23,16 +23,15 @@ void init_Dropout(py::module& m) {
     py::class_<Dropout_Op, std::shared_ptr<Dropout_Op>, OperatorTensor>(
         m, "DropoutOp", py::multiple_inheritance())
         .def(py::init<float>(), py::arg("probability") = 0.5f)
-        .def_static("get_inputs_name", &Dropout_Op::getInputsName)
-        .def_static("get_outputs_name", &Dropout_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Dropout_Op::InputsName), std::end(Dropout_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Dropout_Op::OutputsName), std::end(Dropout_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Dropout_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<DropoutAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
-		})
+          return std::vector<std::string>(std::begin(EnumStrings<DropoutAttr>::data), std::end(EnumStrings<DropoutAttr>::data));
+        })
         .def_readonly_static("Type", &Dropout_Op::Type);
 
     // Declaring the operator as registrable
diff --git a/python_binding/operator/pybind_Equal.cpp b/python_binding/operator/pybind_Equal.cpp
index ef4488edc..a4242a42c 100644
--- a/python_binding/operator/pybind_Equal.cpp
+++ b/python_binding/operator/pybind_Equal.cpp
@@ -22,8 +22,13 @@ void init_Equal(py::module& m) {
     py::class_<Equal_Op, std::shared_ptr<Equal_Op>, OperatorTensor>(m, "Equal_Op", py::multiple_inheritance(),
           R"mydelimiter( Initialize an Equal operator.)mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Equal_Op::getInputsName)
-    .def_static("get_outputs_name", &Equal_Op::getOutputsName);
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Equal_Op::InputsName), std::end(Equal_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Equal_Op::OutputsName), std::end(Equal_Op::OutputsName));
+    }, "Get the names of the output tensors.");
+
     declare_registrable<Equal_Op>(m, "EqualOp");
     m.def("Equal", &Equal, py::arg("name") = "",
 	   R"mydelimiter(
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index 546f97692..4d32691a3 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -28,8 +28,12 @@ void init_Erf(py::module& m) {
             erf(x) = (2 / sqrt(pi)) * integral from 0 to x of exp(-t^2) dt
         )mydelimiter")
         .def(py::init<>())
-        .def_static("get_inputs_name", &Erf_Op::getInputsName)
-        .def_static("get_outputs_name", &Erf_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Erf_Op::InputsName), std::end(Erf_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Erf_Op::OutputsName), std::end(Erf_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Erf_Op::Type);
 
     declare_registrable<Erf_Op>(m, "ErfOp");
diff --git a/python_binding/operator/pybind_Expand.cpp b/python_binding/operator/pybind_Expand.cpp
index c20e47e84..c12b5280a 100644
--- a/python_binding/operator/pybind_Expand.cpp
+++ b/python_binding/operator/pybind_Expand.cpp
@@ -44,8 +44,12 @@ void init_Expand(py::module &m) {
       broadcasting rules
 )mydelimiter")
         .def(py::init<>())
-        .def_static("get_inputs_name", &Expand_Op::getInputsName)
-        .def_static("get_outputs_name", &Expand_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Expand_Op::InputsName), std::end(Expand_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Expand_Op::OutputsName), std::end(Expand_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Expand_Op::Type);
 
     declare_registrable<Expand_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index c29b6e1d3..33dd806d9 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -30,8 +30,12 @@ void declare_FC(py::module &m) {
     :type type : :py:class:`str`
     )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &FC_Op::getInputsName)
-    .def_static("get_outputs_name", &FC_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(FC_Op::InputsName), std::end(FC_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(FC_Op::OutputsName), std::end(FC_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &FC_Op::Type)
     .def("out_channels", &FC_Op::outChannels)
     .def("__repr__", [](FC_Op& b) {
diff --git a/python_binding/operator/pybind_Flatten.cpp b/python_binding/operator/pybind_Flatten.cpp
index 899e5d775..3ba07f205 100644
--- a/python_binding/operator/pybind_Flatten.cpp
+++ b/python_binding/operator/pybind_Flatten.cpp
@@ -31,8 +31,12 @@ void init_Flatten(py::module &m) {
                         between [-r;r-1] with r = input_tensor.nbDims()
 		:type axes : :py:class: List[Int]
 		)mydelimiter")
-      .def("get_inputs_name", &Flatten_Op::getInputsName)
-      .def("get_outputs_name", &Flatten_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Flatten_Op::InputsName), std::end(Flatten_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Flatten_Op::OutputsName), std::end(Flatten_Op::OutputsName));
+        }, "Get the names of the output tensors.")
       .def("axis", &Flatten_Op::axis);
   // Here we bind the constructor of the Flatten Node. We add an argument
   // for each attribute of the operator (in here we only have 'axis') and
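The Flatten hunk also fixes a latent bug, likely the kind of inconsistency the commit subject refers to: the old binding used .def rather than .def_static, exposing the accessors as instance methods on the Python side. Side by side:

    // Before: bound as an instance method, callable only on an object.
    .def("get_inputs_name", &Flatten_Op::getInputsName)

    // After: a true static, consistent with every other operator binding.
    .def_static("get_inputs_name", []() { /* copy InputsName */ })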
diff --git a/python_binding/operator/pybind_Fold.cpp b/python_binding/operator/pybind_Fold.cpp
index ce5167958..1b489c06d 100644
--- a/python_binding/operator/pybind_Fold.cpp
+++ b/python_binding/operator/pybind_Fold.cpp
@@ -46,8 +46,12 @@ void declare_FoldOp(py::module &m) {
             py::arg("kernel_dims"),
             py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
             py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
-        .def_static("get_inputs_name", &Fold_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &Fold_Op<DIM>::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Fold_Op<DIM>::InputsName), std::end(Fold_Op<DIM>::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Fold_Op<DIM>::OutputsName), std::end(Fold_Op<DIM>::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<FoldAttr>::data), std::end(EnumStrings<FoldAttr>::data));
         })
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 0a7358cc5..a0463bb21 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -42,9 +42,12 @@ void init_Gather(py::module& m) {
                 py::arg("axis"),
                 py::arg("indices"),
                 py::arg("gathered_shape"))
-        .def_static("get_inputs_name", &Gather_Op::getInputsName)
-        .def_static("get_outputs_name", &Gather_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Gather_Op::InputsName), std::end(Gather_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Gather_Op::OutputsName), std::end(Gather_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<Gather_Op::Attr>::data), std::end(EnumStrings<Gather_Op::Attr>::data));
 		})
diff --git a/python_binding/operator/pybind_GlobalAveragePooling.cpp b/python_binding/operator/pybind_GlobalAveragePooling.cpp
index 691456027..0f8b68711 100644
--- a/python_binding/operator/pybind_GlobalAveragePooling.cpp
+++ b/python_binding/operator/pybind_GlobalAveragePooling.cpp
@@ -35,8 +35,12 @@ void init_GlobalAveragePooling(py::module &m) {
                              :type name : str
                              )mydelimiter")
       .def(py::init<>())
-      .def_static("get_inputs_name", &GlobalAveragePooling_Op::getInputsName)
-      .def_static("get_outputs_name", &GlobalAveragePooling_Op::getOutputsName)
+      .def_static("get_inputs_name", []() {
+          return std::vector<std::string>(std::begin(GlobalAveragePooling_Op::InputsName), std::end(GlobalAveragePooling_Op::InputsName));
+      }, "Get the names of the input tensors.")
+      .def_static("get_outputs_name", []() {
+          return std::vector<std::string>(std::begin(GlobalAveragePooling_Op::OutputsName), std::end(GlobalAveragePooling_Op::OutputsName));
+      }, "Get the names of the output tensors.")
       .def_readonly_static("Type", &GlobalAveragePooling_Op::Type);
 
   declare_registrable<GlobalAveragePooling_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
index 41250beda..c835e436b 100644
--- a/python_binding/operator/pybind_GridSample.cpp
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -63,9 +63,12 @@ void declare_GridSampleOp(py::module &m) {
         }), py::arg("mode") = "linear",
             py::arg("padding_mode") = "zeros",
             py::arg("align_corners") = false)
-        .def_static("get_inputs_name", &GridSample_Op::getInputsName)
-        .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(GridSample_Op::InputsName), std::end(GridSample_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(GridSample_Op::OutputsName), std::end(GridSample_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<GridSampleAttr>::data), std::end(EnumStrings<GridSampleAttr>::data));
 		})
diff --git a/python_binding/operator/pybind_Heaviside.cpp b/python_binding/operator/pybind_Heaviside.cpp
index 55a0ae1c8..d2a705964 100644
--- a/python_binding/operator/pybind_Heaviside.cpp
+++ b/python_binding/operator/pybind_Heaviside.cpp
@@ -35,9 +35,12 @@ void init_Heaviside(py::module &m) {
           :param name : Name of the node.
           )mydelimiter")
         .def(py::init<float>(), py::arg("value"))
-        .def_static("get_inputs_name", &Heaviside_Op::getInputsName)
-        .def_static("get_outputs_name", &Heaviside_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Heaviside_Op::InputsName), std::end(Heaviside_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Heaviside_Op::OutputsName), std::end(Heaviside_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<Heaviside_Op::Attr>::data), std::end(EnumStrings<Heaviside_Op::Attr>::data));
 		})
diff --git a/python_binding/operator/pybind_Identity.cpp b/python_binding/operator/pybind_Identity.cpp
index 22ddf9402..e6c47b882 100644
--- a/python_binding/operator/pybind_Identity.cpp
+++ b/python_binding/operator/pybind_Identity.cpp
@@ -24,8 +24,12 @@ void init_Identity(py::module& m) {
     A class representing the Identity operator, which returns the input as-is.
     )mydelimiter")
         .def(py::init<>())
-        .def_static("get_inputs_name", &Identity_Op::getInputsName)
-        .def_static("get_outputs_name", &Identity_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Identity_Op::InputsName), std::end(Identity_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Identity_Op::OutputsName), std::end(Identity_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Identity_Op::Type);
 
     m.def("Identity", &Identity, py::arg("name") = "",
diff --git a/python_binding/operator/pybind_LRN.cpp b/python_binding/operator/pybind_LRN.cpp
index 6aad786d7..c3fe248ad 100644
--- a/python_binding/operator/pybind_LRN.cpp
+++ b/python_binding/operator/pybind_LRN.cpp
@@ -28,9 +28,12 @@ void init_LRN(py::module& m) {
     based on its neighbors within a local region defined by the given size parameter.
     )mydelimiter")
         .def(py::init<std::int32_t>(), py::arg("size"))
-        .def_static("get_inputs_name", &LRN_Op::getInputsName)
-        .def_static("get_outputs_name", &LRN_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(LRN_Op::InputsName), std::end(LRN_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(LRN_Op::OutputsName), std::end(LRN_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<LRN_Op::Attr>::data), std::end(EnumStrings<LRN_Op::Attr>::data));
 		})
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 46b906244..15b1182ae 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -28,9 +28,12 @@ void init_LeakyReLU(py::module& m) {
     The negative_slope parameter controls the angle of the negative part of the function.
     )mydelimiter")
         .def(py::init<float>(), py::arg("negative_slope"))
-        .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
-        .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(LeakyReLU_Op::InputsName), std::end(LeakyReLU_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(LeakyReLU_Op::OutputsName), std::end(LeakyReLU_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<LeakyReLU_Op::Attr>::data), std::end(EnumStrings<LeakyReLU_Op::Attr>::data));
 		})
diff --git a/python_binding/operator/pybind_Ln.cpp b/python_binding/operator/pybind_Ln.cpp
index 61fc3583d..78d20861d 100755
--- a/python_binding/operator/pybind_Ln.cpp
+++ b/python_binding/operator/pybind_Ln.cpp
@@ -26,8 +26,12 @@ void init_Ln(py::module& m) {
     The operator computes the element-wise natural logarithm of the input tensor.
     )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Ln_Op::getInputsName)
-    .def_static("get_outputs_name", &Ln_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Ln_Op::InputsName), std::end(Ln_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Ln_Op::OutputsName), std::end(Ln_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Ln_Op::Type);
 
     m.def("Ln", &Ln, py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index 459dcea62..90d3e41ea 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -36,8 +36,12 @@ void init_MatMul(py::module &m) {
     :type name: str, Optional
     )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &MatMul_Op::getInputsName)
-    .def_static("get_outputs_name", &MatMul_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(MatMul_Op::InputsName), std::end(MatMul_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(MatMul_Op::OutputsName), std::end(MatMul_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &MatMul_Op::Type);
 
   declare_registrable<MatMul_Op>(m, "MatMulOp");
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 811543ebc..f1366ddfa 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -50,8 +50,12 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("dilations"),
         py::arg("ceil_mode"))
-  .def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
-  .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(MaxPooling_Op<DIM>::InputsName), std::end(MaxPooling_Op<DIM>::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(MaxPooling_Op<DIM>::OutputsName), std::end(MaxPooling_Op<DIM>::OutputsName));
+    }, "Get the names of the output tensors.")
 
   .def_static("attributes_name", []() {
     return std::vector<std::string>(std::begin(EnumStrings<MaxPoolingAttr>::data), std::end(EnumStrings<MaxPoolingAttr>::data));
diff --git a/python_binding/operator/pybind_Memorize.cpp b/python_binding/operator/pybind_Memorize.cpp
index 647b9b094..34680f13f 100644
--- a/python_binding/operator/pybind_Memorize.cpp
+++ b/python_binding/operator/pybind_Memorize.cpp
@@ -22,8 +22,12 @@ namespace Aidge {
 void init_Memorize(py::module& m) {
     py::class_<Memorize_Op, std::shared_ptr<Memorize_Op>, OperatorTensor>(m, "MemorizeOp", py::multiple_inheritance())
         .def(py::init<const std::uint32_t>(), py::arg("end_step"))
-        .def_static("get_inputs_name", &Memorize_Op::getInputsName)
-        .def_static("get_outputs_name", &Memorize_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Memorize_Op::InputsName), std::end(Memorize_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Memorize_Op::OutputsName), std::end(Memorize_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
 			return std::vector<std::string>(std::begin(EnumStrings<Memorize_Op::Attr>::data), std::end(EnumStrings<Memorize_Op::Attr>::data));
 		});
diff --git a/python_binding/operator/pybind_Mod.cpp b/python_binding/operator/pybind_Mod.cpp
index aa88f2068..058c56e2a 100644
--- a/python_binding/operator/pybind_Mod.cpp
+++ b/python_binding/operator/pybind_Mod.cpp
@@ -33,8 +33,12 @@
      :type name : str
      )mydelimiter")
          .def(py::init<>())
-         .def_static("get_inputs_name", &Mod_Op::getInputsName)
-         .def_static("get_outputs_name", &Mod_Op::getOutputsName)
+         .def_static("get_inputs_name", []() {
+             return std::vector<std::string>(std::begin(Mod_Op::InputsName), std::end(Mod_Op::InputsName));
+         }, "Get the names of the input tensors.")
+         .def_static("get_outputs_name", []() {
+             return std::vector<std::string>(std::begin(Mod_Op::OutputsName), std::end(Mod_Op::OutputsName));
+         }, "Get the names of the output tensors.")
          .def_readonly_static("Type", &Mod_Op::Type);
  
      declare_registrable<Mod_Op>(m, "ModOp");
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 3cdcec20b..0fd18be57 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -27,8 +27,12 @@ void init_Mul(py::module& m) {
     :type name: str, Optional
     )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Mul_Op::getInputsName)
-    .def_static("get_outputs_name", &Mul_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Mul_Op::InputsName), std::end(Mul_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Mul_Op::OutputsName), std::end(Mul_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Mul_Op::Type);
     declare_registrable<Mul_Op>(m, "MulOp");
 
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 4a42a9539..8032bcfce 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -48,8 +48,12 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("beginEndTuples"),
         py::arg("borderType") = PadBorderType::Constant,
         py::arg("borderValue") = 0.0)
-    .def_static("get_inputs_name", &Pad_Op<DIM>::getInputsName)
-    .def_static("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Pad_Op<DIM>::InputsName), std::end(Pad_Op<DIM>::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Pad_Op<DIM>::OutputsName), std::end(Pad_Op<DIM>::OutputsName));
+    }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
 			return std::vector<std::string>(std::begin(EnumStrings<PadAttr>::data), std::end(EnumStrings<PadAttr>::data));
 		})
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index 1be6d753e..30c6b93d4 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -21,9 +21,12 @@ namespace Aidge {
 void init_Pop(py::module& m) {
     py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor>(m, "PopOp", py::multiple_inheritance())
     .def(py::init<>())
-    .def_static("get_inputs_name", &Pop_Op::getInputsName)
-    .def_static("get_outputs_name", &Pop_Op::getOutputsName)
-
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Pop_Op::InputsName), std::end(Pop_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Pop_Op::OutputsName), std::end(Pop_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 	.def_static("attributes_name", []() {
 		return std::vector<std::string>(std::begin(EnumStrings<PopAttr>::data), std::end(EnumStrings<PopAttr>::data));
 	})
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index c112f895e..e34750f90 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -37,8 +37,12 @@ void init_Pow(py::module& m) {
     :type name: str, optional
     )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Pow_Op::getInputsName)
-    .def_static("get_outputs_name", &Pow_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Pow_Op::InputsName), std::end(Pow_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Pow_Op::OutputsName), std::end(Pow_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Pow_Op::Type);
     declare_registrable<Pow_Op>(m, "PowOp");
 
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 3467ed970..1f9dcf5c0 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -37,8 +37,12 @@ void init_Producer(py::module &m) {
         py::multiple_inheritance())
         .def(py::init<const std::shared_ptr<Tensor>, bool>(), py::arg("tensor"), py::arg("constant"))
         .def("dims", &Producer_Op::dims)
-        .def_static("get_inputs_name", &Producer_Op::getInputsName)
-        .def_static("get_outputs_name", &Producer_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>();
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Producer_Op::OutputsName), std::end(Producer_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Producer_Op::Type);
 
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index 41ef91ed9..4dcd6c9d2 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -31,8 +31,12 @@ void init_ReLU(py::module& m) {
         :type name : str
         )mydelimiter")
         .def(py::init<>())
-        .def_static("get_inputs_name", &ReLU_Op::getInputsName)
-        .def_static("get_outputs_name", &ReLU_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(ReLU_Op::InputsName), std::end(ReLU_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(ReLU_Op::OutputsName), std::end(ReLU_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &ReLU_Op::Type);
 
     declare_registrable<ReLU_Op>(m, "ReLUOp");
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 4ee843810..cb1631f63 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -38,9 +38,13 @@ void declare_ReduceMeanOp(py::module &m) {
 		:type noop_with_empty_axes: bool
 		)mydelimiter")
     .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes") = std::vector<std::int32_t>(), py::arg("keep_dims") = true, py::arg("noop_with_empty_axes") = false)
-    .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
-    .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
-	.def_static("attributes_name", []() {
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(ReduceMean_Op::InputsName), std::end(ReduceMean_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(ReduceMean_Op::OutputsName), std::end(ReduceMean_Op::OutputsName));
+    }, "Get the names of the output tensors.")
+    .def_static("attributes_name", []() {
 		return std::vector<std::string>(std::begin(EnumStrings<ReduceMean_Op::Attr>::data), std::end(EnumStrings<ReduceMean_Op::Attr>::data));
 	})
     .def_readonly_static("Type", &ReduceMean_Op::Type)
diff --git a/python_binding/operator/pybind_ReduceSum.cpp b/python_binding/operator/pybind_ReduceSum.cpp
index 4d9d56c74..0d131edde 100644
--- a/python_binding/operator/pybind_ReduceSum.cpp
+++ b/python_binding/operator/pybind_ReduceSum.cpp
@@ -40,9 +40,12 @@ void init_ReduceSum(py::module &m) {
 			:type noop_with_empty_axes: bool
 		)mydelimiter")
     .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes"), py::arg("keep_dims"), py::arg("noop_with_empty_axes"))
-    .def_static("get_inputs_name", &ReduceSum_Op::getInputsName)
-    .def_static("get_outputs_name", &ReduceSum_Op::getOutputsName)
-
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(ReduceSum_Op::InputsName), std::end(ReduceSum_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(ReduceSum_Op::OutputsName), std::end(ReduceSum_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 	.def_static("attributes_name", []() {
 		return std::vector<std::string>(std::begin(EnumStrings<ReduceSum_Op::Attr>::data), std::end(EnumStrings<ReduceSum_Op::Attr>::data));
 	})
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index 6f3fce2d2..f78fa0aa4 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -33,9 +33,12 @@ void init_Reshape(py::module& m) {
         :type allowzero: bool
         )mydelimiter")
     .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
-    .def_static("get_inputs_name", &Reshape_Op::getInputsName)
-    .def_static("get_outputs_name", &Reshape_Op::getOutputsName)
-
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Reshape_Op::InputsName), std::end(Reshape_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Reshape_Op::OutputsName), std::end(Reshape_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
       return std::vector<std::string>(std::begin(EnumStrings<ReshapeAttr>::data), std::end(EnumStrings<ReshapeAttr>::data));
 		})
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
index 137366ad3..bcf9090dd 100644
--- a/python_binding/operator/pybind_Resize.cpp
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -26,8 +26,12 @@ void init_Resize(py::module &m) {
   py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(
           m, "ResizeOp", py::multiple_inheritance())
         .def(py::init<Interpolation::CoordinateTransformation, Interpolation::Mode, float, PadBorderType>(), py::arg("coordinate_transformation_mode"), py::arg("interpolation_mode"), py::arg("cubic_coeff_a") = -0.75f, py::arg("padding_mode") = PadBorderType::Edge)
-        .def_static("get_inputs_name", &Resize_Op::getInputsName)
-        .def_static("get_outputs_name", &Resize_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Resize_Op::InputsName), std::end(Resize_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Resize_Op::OutputsName), std::end(Resize_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<ResizeAttr>::data), std::end(EnumStrings<ResizeAttr>::data));
 		})
diff --git a/python_binding/operator/pybind_Round.cpp b/python_binding/operator/pybind_Round.cpp
index c055ab7fd..aac5d8d40 100644
--- a/python_binding/operator/pybind_Round.cpp
+++ b/python_binding/operator/pybind_Round.cpp
@@ -29,8 +29,12 @@ void init_Round(py::module& m) {
         :type name: str
         )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Round_Op::getInputsName)
-    .def_static("get_outputs_name", &Round_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Round_Op::InputsName), std::end(Round_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Round_Op::OutputsName), std::end(Round_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Round_Op::Type);
 
     declare_registrable<Round_Op>(m, "RoundOp");
diff --git a/python_binding/operator/pybind_Select.cpp b/python_binding/operator/pybind_Select.cpp
index 0cb858acd..de580d8ed 100644
--- a/python_binding/operator/pybind_Select.cpp
+++ b/python_binding/operator/pybind_Select.cpp
@@ -29,8 +29,12 @@ void init_Select(py::module& m) {
         )mydelimiter")
         .def(py::init<const IOIndex_t>(),
              py::arg("nb_inputs"))
-        .def_static("get_inputs_name", &Select_Op::getInputsName)
-        .def_static("get_outputs_name", &Select_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Select_Op::InputsName), std::end(Select_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Select_Op::OutputsName), std::end(Select_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Select_Op::Type);
 
     declare_registrable<Select_Op>(m, "SelectOp");
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
index dced85174..9287d8ef8 100644
--- a/python_binding/operator/pybind_Shape.cpp
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -32,9 +32,12 @@ void init_Shape(py::module& m) {
         :type end: int
         )mydelimiter")
         .def(py::init<const std::int64_t, const std::int64_t>(), py::arg("start"), py::arg("end"))
-        .def_static("get_inputs_name", &Shape_Op::getInputsName)
-        .def_static("get_outputs_name", &Shape_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Shape_Op::InputsName), std::end(Shape_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Shape_Op::OutputsName), std::end(Shape_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<ShapeAttr>::data), std::end(EnumStrings<ShapeAttr>::data));
 		})
diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp
index b061d806f..1ec08205a 100644
--- a/python_binding/operator/pybind_Sigmoid.cpp
+++ b/python_binding/operator/pybind_Sigmoid.cpp
@@ -30,8 +30,12 @@ void init_Sigmoid(py::module& m) {
         :type name : str
         )mydelimiter")
         .def(py::init<>())
-        .def_static("get_inputs_name", &Sigmoid_Op::getInputsName)
-        .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Sigmoid_Op::InputsName), std::end(Sigmoid_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Sigmoid_Op::OutputsName), std::end(Sigmoid_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Sigmoid_Op::Type);
 
 
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index 7c4449605..ed4fa0e2f 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -43,9 +43,12 @@ void init_Slice(py::module& m) {
                   py::arg("ends"),
                   py::arg("axes") = std::vector<std::int8_t>(),
                   py::arg("steps") = std::vector<std::int64_t>())
-    .def_static("get_inputs_name", &Slice_Op::getInputsName)
-    .def_static("get_outputs_name", &Slice_Op::getOutputsName)
-
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Slice_Op::InputsName), std::end(Slice_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Slice_Op::OutputsName), std::end(Slice_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
       return std::vector<std::string>(std::begin(EnumStrings<SliceAttr>::data), std::end(EnumStrings<SliceAttr>::data));
 		})
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 7fbd3851a..c5379d421 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -28,9 +28,12 @@ void init_Softmax(py::module& m) {
             :type axis: int
         )mydelimiter")
         .def(py::init<std::int32_t>(), py::arg("axis"))
-        .def_static("get_inputs_name", &Softmax_Op::getInputsName)
-        .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Softmax_Op::InputsName), std::end(Softmax_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Softmax_Op::OutputsName), std::end(Softmax_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<SoftmaxAttr>::data), std::end(EnumStrings<SoftmaxAttr>::data));
 		})
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
index 643c6cca5..70720829e 100644
--- a/python_binding/operator/pybind_Split.cpp
+++ b/python_binding/operator/pybind_Split.cpp
@@ -33,10 +33,13 @@ void init_Split(py::module& m) {
     .def(py::init<DimSize_t, std::int8_t, std::vector<DimSize_t>&>(),
             py::arg("nb_outputs"),
             py::arg("axis"),
             py::arg("split"))
-    .def_static("get_inputs_name", &Split_Op::getInputsName)
-    .def_static("get_outputs_name", &Split_Op::getOutputsName)
-
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Split_Op::InputsName), std::end(Split_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Split_Op::OutputsName), std::end(Split_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
       return std::vector<std::string>(std::begin(EnumStrings<SplitAttr>::data), std::end(EnumStrings<SplitAttr>::data));
 		})
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index d383ae0a4..4ed6a8bdf 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -24,8 +24,12 @@ void init_Sqrt(py::module& m) {
         This operator computes the square root of each element in the input tensor. The input values must be non-negative.
         )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Sqrt_Op::getInputsName)
-    .def_static("get_outputs_name", &Sqrt_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Sqrt_Op::InputsName), std::end(Sqrt_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Sqrt_Op::OutputsName), std::end(Sqrt_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Sqrt_Op::Type);
 
     declare_registrable<Sqrt_Op>(m, "SqrtOp");
diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
index 22779cd12..7a06240c3 100644
--- a/python_binding/operator/pybind_Squeeze.cpp
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -32,9 +32,12 @@ void init_Squeeze(py::module &m) {
     				& r in [-128 , 127]
     :type axes: :py:class: List[Int]
     )mydelimiter")
-    .def_static("get_inputs_name", &Squeeze_Op::getInputsName)
-    .def_static("get_outputs_name", &Squeeze_Op::getOutputsName)
-
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Squeeze_Op::InputsName), std::end(Squeeze_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Squeeze_Op::OutputsName), std::end(Squeeze_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
       return std::vector<std::string>(std::begin(EnumStrings<SqueezeAttr>::data), std::end(EnumStrings<SqueezeAttr>::data));
 		})
diff --git a/python_binding/operator/pybind_Stack.cpp b/python_binding/operator/pybind_Stack.cpp
index 8a20fb0f5..641475efb 100644
--- a/python_binding/operator/pybind_Stack.cpp
+++ b/python_binding/operator/pybind_Stack.cpp
@@ -24,9 +24,12 @@ void init_Stack(py::module &m) {
         py::multiple_inheritance(),
         R"mydelimiter(Initialize a Stack operator.)mydelimiter")
         .def(py::init<const std::uint32_t>(), py::arg("max_elements"))
-        .def_static("get_inputs_name", &StackOp::getInputsName)
-        .def_static("get_outputs_name", &StackOp::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(StackOp::InputsName), std::end(StackOp::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(StackOp::OutputsName), std::end(StackOp::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<StackAttr>::data), std::end(EnumStrings<StackAttr>::data));
 		})
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index b94de2f52..046b1a6de 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -36,8 +36,12 @@ void init_Sub(py::module& m) {
         :type name: str, optional
         )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Sub_Op::getInputsName)
-    .def_static("get_outputs_name", &Sub_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Sub_Op::InputsName), std::end(Sub_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Sub_Op::OutputsName), std::end(Sub_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Sub_Op::Type);
     declare_registrable<Sub_Op>(m, "SubOp");
     m.def("Sub", &Sub, py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Tanh.cpp b/python_binding/operator/pybind_Tanh.cpp
index 6c0d026e6..3f103b0db 100644
--- a/python_binding/operator/pybind_Tanh.cpp
+++ b/python_binding/operator/pybind_Tanh.cpp
@@ -29,8 +29,12 @@ void init_Tanh(py::module& m) {
         :type name : str
         )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Tanh_Op::getInputsName)
-    .def_static("get_outputs_name", &Tanh_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Tanh_Op::InputsName), std::end(Tanh_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Tanh_Op::OutputsName), std::end(Tanh_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Tanh_Op::Type);
 
     m.def("Tanh", &Tanh, py::arg("name") = "",
diff --git a/python_binding/operator/pybind_TopK.cpp b/python_binding/operator/pybind_TopK.cpp
index 314a3283b..8c06d0801 100644
--- a/python_binding/operator/pybind_TopK.cpp
+++ b/python_binding/operator/pybind_TopK.cpp
@@ -21,15 +21,14 @@ namespace Aidge {
 void init_TopK(py::module& m) {
     py::class_<TopK_Op, std::shared_ptr<TopK_Op>, OperatorTensor>(m, "TopKOp", py::multiple_inheritance())
     .def(py::init<int64_t, bool, bool, IOIndex_t>(), py::arg("axis") = -1, py::arg("largest") = true, py::arg("sorted") = true, py::arg("k") = 0)
-    .def_static("get_inputs_name", &TopK_Op::getInputsName)
-    .def_static("get_outputs_name", &TopK_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(TopK_Op::InputsName), std::end(TopK_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(TopK_Op::OutputsName), std::end(TopK_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_static("attributes_name", []() {
-        std::vector<std::string> result;
-        auto attributes = TopK_Op::attributesName();
-        for (size_t i = 0; i < size(EnumStrings<TopKAttr>::data); ++i) {
-            result.emplace_back(attributes[i]);
-        }
-        return result;
+        return std::vector<std::string>(std::begin(EnumStrings<TopKAttr>::data), std::end(EnumStrings<TopKAttr>::data));
     })
     .def_readonly_static("Type", &TopK_Op::Type);
 
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index ec020681b..34ec49baa 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -36,8 +36,12 @@ void declare_Transpose(py::module &m) {
 		:type output_dims_order : :py:class: List[Int]
 		)mydelimiter")
     .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order")=std::vector<std::size_t>())
-    .def_static("get_inputs_name", &Transpose_Op::getInputsName)
-    .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Transpose_Op::InputsName), std::end(Transpose_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Transpose_Op::OutputsName), std::end(Transpose_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 	.def_static("attributes_name", []() {
 		return std::vector<std::string>(std::begin(EnumStrings<Transpose_Op::Attr>::data), std::end(EnumStrings<Transpose_Op::Attr>::data));
 	})
diff --git a/python_binding/operator/pybind_Unfold.cpp b/python_binding/operator/pybind_Unfold.cpp
index 6530b2c0d..05347ea3d 100644
--- a/python_binding/operator/pybind_Unfold.cpp
+++ b/python_binding/operator/pybind_Unfold.cpp
@@ -41,8 +41,12 @@ void declare_UnfoldOp(py::module &m) {
         }), py::arg("kernel_dims"),
             py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
             py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
-        .def_static("get_inputs_name", &Unfold_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &Unfold_Op<DIM>::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Unfold_Op<DIM>::InputsName), std::end(Unfold_Op<DIM>::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Unfold_Op<DIM>::OutputsName), std::end(Unfold_Op<DIM>::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<UnfoldAttr>::data), std::end(EnumStrings<UnfoldAttr>::data));
         })
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index 09d65f728..a43878a5b 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -28,8 +28,12 @@ void init_Unsqueeze(py::module &m) {
             :type axes: :py:class: List[Int]
 		)mydelimiter")
       // Here we bind the methods of the Unsqueeze_Op that will want to access
-      .def_static("get_inputs_name", &Unsqueeze_Op::getInputsName)
-      .def_static("get_outputs_name", &Unsqueeze_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Unsqueeze_Op::InputsName), std::end(Unsqueeze_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Unsqueeze_Op::OutputsName), std::end(Unsqueeze_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_static("attributes_name", []() {
             return std::vector<std::string>(std::begin(EnumStrings<UnsqueezeAttr>::data), std::end(EnumStrings<UnsqueezeAttr>::data));
         })
diff --git a/python_binding/operator/pybind_WeightInterleaving.cpp b/python_binding/operator/pybind_WeightInterleaving.cpp
index 25b423bd6..81bb69d11 100644
--- a/python_binding/operator/pybind_WeightInterleaving.cpp
+++ b/python_binding/operator/pybind_WeightInterleaving.cpp
@@ -19,8 +19,12 @@ namespace Aidge {
 void declare_WeightInterleaving(py::module &m) {
   py::class_<WeightInterleaving_Op, std::shared_ptr<WeightInterleaving_Op>, OperatorTensor>(m, "WeightInterleavingOp", py::multiple_inheritance())
     .def(py::init<>())
-    .def_static("get_inputs_name", &WeightInterleaving_Op::getInputsName)
-    .def_static("get_outputs_name", &WeightInterleaving_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(WeightInterleaving_Op::InputsName), std::end(WeightInterleaving_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(WeightInterleaving_Op::OutputsName), std::end(WeightInterleaving_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &WeightInterleaving_Op::Type)
 
     .def("__repr__", [](WeightInterleaving_Op& b) {
-- 
GitLab
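
For reference, a minimal usage sketch of the rebound statics as seen from Python, assuming the bindings are compiled into the usual `aidge_core` module (the module name and the exact list contents are assumptions for illustration, not taken from this patch):

    # Hypothetical usage sketch; `aidge_core` and the returned entries
    # are assumptions, shown only to illustrate the rebinding above.
    import aidge_core

    # get_inputs_name()/get_outputs_name() now build Python lists from the
    # static InputsName/OutputsName arrays instead of forwarding to the
    # removed getInputsName()/getOutputsName() methods.
    print(aidge_core.TopKOp.get_inputs_name())
    print(aidge_core.TopKOp.get_outputs_name())

    # attributes_name() is bound the same way, converting the
    # EnumStrings<TopKAttr>::data array into a Python list of strings.
    print(aidge_core.TopKOp.attributes_name())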