diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 509aa1e81250fc5386fbb96a338b5a39a82ede93..744fdd7583a4b451a49e8e63fed601414ff8bce1 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -73,7 +73,6 @@
 #include "aidge/operator/Shape.hpp"
 #include "aidge/operator/ShiftMax.hpp"
 #include "aidge/operator/ShiftGELU.hpp"
-#include "aidge/operator/Scaling.hpp"
 #include "aidge/operator/Slice.hpp"
 #include "aidge/operator/Softmax.hpp"
 #include "aidge/operator/Split.hpp"
diff --git a/include/aidge/backend/generic/operator/TransposeImpl.hpp b/include/aidge/backend/generic/operator/TransposeImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e9bc8d3d0d5033bc39e07d11816001844054a131
--- /dev/null
+++ b/include/aidge/backend/generic/operator/TransposeImpl.hpp
@@ -0,0 +1,45 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_TRANSPOSEIMPL_H_
+#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_TRANSPOSEIMPL_H_
+
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+/**
+ * @brief Implementation of the Transpose operator.
+ * @note Since this operator implementation is backend-agnostic, it is
+ * located here instead of in aidge_backend.
+ */
+class TransposeImpl : public OperatorImpl {
+public:
+    /**
+     * @brief Constructor for TransposeImpl.
+     * @param[in] op The Operator instance.
+     * @param[in] backend The backend name (optional).
+     */
+    TransposeImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend)
+    {}
+
+    /**
+     * @brief Perform the forward operation for the transpose.
+     */
+    void forward() override;
+};
+} // namespace Aidge
+
+#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_TRANSPOSEIMPL_H_
diff --git a/include/aidge/operator/Abs.hpp b/include/aidge/operator/Abs.hpp
index d70387ed013c793c22ca436f19deac5812da3765..af926b25a4f8819aec13f478cb0941f9bfdbe5a4 100644
--- a/include/aidge/operator/Abs.hpp
+++ b/include/aidge/operator/Abs.hpp
@@ -36,40 +36,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Abs_Op : public OperatorTensor,
-    public Registrable<Abs_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const Abs_Op&)>> {
+class Abs_Op : public OperatorTensorWithImpl<Abs_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Abs";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Abs_Op();
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Abs_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Abs_Op(const Abs_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Abs_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Abs_Op>(*this);
-    }
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Abs(const std::string& name = "");
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index fcd154b6e1abe0d3bcfc6e72d8c078b3277f9b47..ed64b97f52f901626b0c0cec0b99d019c5f16851 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -45,51 +45,16 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Add_Op : public OperatorTensor,
-    public Registrable<Add_Op,
-                       std::string,
-                       std::function<std::shared_ptr<OperatorImpl>(const Add_Op&)>>
-{
+
+class Add_Op : public OperatorTensorWithImpl<Add_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Add";
+    static constexpr const char* const InputsName[] = {"data_input_0", "data_input_n"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Add_Op();
 
-    /**
-     * @brief Copy-constructor.
-     * @param op Add_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Add_Op(const Add_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Add_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    // Data operator[](const char* inputName) override final {
-    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
-    //         (strcmp(inputName, "weight") ? mInputs[1] :
-    //         (strcmp(inputName, "bias") ? mInputs[2] :
-    //         nullptr));
-    //     assert((in!=nullptr) && "No such parameter");
-    //     return *in;
-    // }
-
-
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input_0", "data_input_n"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Add(const std::string& name = "");
diff --git a/include/aidge/operator/And.hpp b/include/aidge/operator/And.hpp
index b9f75f991ab9f06b219a7e879e2b6903a60f91de..22efbb029cb595550fa15f4bf674451a823f2b4a 100644
--- a/include/aidge/operator/And.hpp
+++ b/include/aidge/operator/And.hpp
@@ -46,40 +46,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class And_Op : public OperatorTensor,
-    public Registrable<And_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const And_Op&)>> {
+class And_Op : public OperatorTensorWithImpl<And_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "And";
+    static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     And_Op();
 
-    /**
-     * @brief Copy-constructor.
-     * @param op And_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    And_Op(const And_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::And_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<And_Op>(*this);
-    }
-
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input_1", "data_input_2"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> And(const std::string& name = "");
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index cc520d3fbebe42539c68a3a260e7c6ab2e1ab829..5802806c99a096c8f55f145d15dc629ba30a3ee9 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -82,13 +82,7 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ArgMax_Op : public OperatorTensor,
-                public Registrable<ArgMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ArgMax_Op &)>> {
-
-public:
-    /// The type of the operator as a string.
-    static const std::string Type;
-
+class ArgMax_Op : public OperatorTensorWithImpl<ArgMax_Op> {
 private:
     using Attributes_ = StaticAttributes<ArgMaxAttr,
                             GENERATE_LIST_ATTR_TYPE(LIST_ARGMAX_ATTR)
@@ -99,6 +93,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "ArgMax";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     ArgMax_Op() = delete;
 
     /**
@@ -119,12 +117,6 @@ public:
      */
     ArgMax_Op(const ArgMax_Op& op);
 
-    /**
-     * @brief Creates a copy of the current ArgMax operator.
-     * @return A shared pointer to the new ArgMax operator instance.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Performs dimension inference for the ArgMax operation.
      * @param[in] allowDataDependency Whether data dependency is allowed during dimension inference.
@@ -132,19 +124,6 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Sets the backend for the operator.
-     * @param name The name of the backend.
-     * @param device The device index on which the backend operates (default is 0).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Retrieves a list of available backends for the ArgMax operator.
-     * @return A set of strings representing the available backends.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Gets the attribute storage for the ArgMax operator.
      * @return A shared pointer to the attribute storage.
@@ -168,30 +147,6 @@ public:
      * @return A reference to the selectLastIndex attribute.
      */
     inline bool& selectLastIndex() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::SelectLastIndex>(); }
-
-    /**
-     * @brief Returns the names of the input tensors for the ArgMax operator.
-     * @return A vector of strings containing the input names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Returns the names of the output tensors for the ArgMax operator.
-     * @return A vector of strings containing the output names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ArgMaxAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Atan.hpp b/include/aidge/operator/Atan.hpp
index 6f81ab0a878a0c253b98580ecbefaf2f821a543f..a0aed085a92898a7f9fe83a6d597baf75652bca7 100644
--- a/include/aidge/operator/Atan.hpp
+++ b/include/aidge/operator/Atan.hpp
@@ -37,39 +37,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Atan_Op : public OperatorTensor,
-    public Registrable<Atan_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const Atan_Op&)>>
-{
+class Atan_Op : public OperatorTensorWithImpl<Atan_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Atan";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Atan_Op();
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Atan_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Atan_Op(const Atan_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Atan_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Atan(const std::string& name = "");
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 86e1946fa24ebee98c6ed865253589fec69324a9..a2a7c0ee71b1e3d8b7ca7e1d217f9ae448317d46 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -63,6 +63,12 @@ constexpr const char* const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
 }
 
 namespace Aidge {
+template <DimIdx_t DIM> struct AvgPooling_Op_Type {};
+template <> struct AvgPooling_Op_Type<1> { static constexpr const char* const value = "AvgPooling1D"; };
+template <> struct AvgPooling_Op_Type<2> { static constexpr const char* const value = "AvgPooling2D"; };
+template <> struct AvgPooling_Op_Type<3> { static constexpr const char* const value = "AvgPooling3D"; };
+template <> struct AvgPooling_Op_Type<4> { static constexpr const char* const value = "AvgPooling4D"; };
+
 /**
  * @brief Class representing an Average Pooling operation.
  *
@@ -93,15 +99,7 @@ namespace Aidge {
  */
 
 template <DimIdx_t DIM>
-class AvgPooling_Op : public OperatorTensor,
-                public Registrable<AvgPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>> {
-
-public:
-    /**
-     * @brief Type identifier for the AvgPooling operation.
-     */
-    static const std::string Type;
-
+class AvgPooling_Op : public OperatorTensorWithImpl<AvgPooling_Op<DIM>> {
 private:
     /**
      * @brief Static attributes representing kernel and stride dimensions.
@@ -118,6 +116,17 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    using OperatorTensorWithImpl<AvgPooling_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<AvgPooling_Op<DIM>>::getOutput;
+    using OperatorTensorWithImpl<AvgPooling_Op<DIM>>::OperatorTensorWithImpl;
+    using OperatorTensorWithImpl<AvgPooling_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<AvgPooling_Op<DIM>>::mOutputs;
+    using OperatorTensorWithImpl<AvgPooling_Op<DIM>>::dimsForwarded;
+
+    static constexpr const char* const Type = AvgPooling_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Default constructor is deleted.
      */
@@ -144,12 +153,6 @@ public:
      */
     AvgPooling_Op(const AvgPooling_Op<DIM>& op);
 
-    /**
-     * @brief Clones the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override final;
-
     /**
      * @brief Calculates the output dimensions based on the input dimensions and operator attributes.
      * @param[in] allowDataDependency If true, considers data-dependent operations. Defaults to false.
@@ -169,19 +172,6 @@ public:
                           const std::vector<DimSize_t>& outputDims,
                           const IOIndex_t outputIdx = 0) const override final;
 
-    /**
-     * @brief Sets the backend for the operation.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index. Defaults to 0.
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Retrieves the available backends for the operation.
-     * @return A set of strings representing the available backends.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Accessor for the operation attributes.
      * @return Shared pointer to the attributes.
@@ -211,30 +201,6 @@ public:
      * @return Boolean value indicating whether ceil mode is enabled.
      */
     inline bool& ceilMode() const { return mAttributes->template getAttr<AvgPoolingAttr::CeilMode>(); }
-
-    /**
-     * @brief Retrieves the names of the input tensors.
-     * @return A vector of strings representing the input tensors names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Retrieves the names of the output tensors.
-     * @return A vector of strings representing the output tensors names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::AvgPoolingAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index f3a1a8d563aef98f41e5e1ce0df154313f11831b..1221930cd1d93ece9047fc9f3b4d90f0be6522b3 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -56,6 +56,10 @@ constexpr const char* const EnumStrings<Aidge::BatchNormAttr>::data[] = {
 }
 
 namespace Aidge {
+template <DimIdx_t DIM> struct BatchNorm_Op_Type {};
+template <> struct BatchNorm_Op_Type<2> { static constexpr const char* const value = "BatchNorm2D"; };
+template <> struct BatchNorm_Op_Type<3> { static constexpr const char* const value = "BatchNorm3D"; };
+template <> struct BatchNorm_Op_Type<4> { static constexpr const char* const value = "BatchNorm4D"; };
 /**
  * @class BatchNorm_Op
  * @brief Implements the Batch Normalization (BN) operation, a technique used to normalize the inputs of a layer.
@@ -76,12 +80,7 @@ namespace Aidge {
  * @see Registrable
  */
 template <DimIdx_t DIM>
-class BatchNorm_Op : public OperatorTensor,
-                public Registrable<BatchNorm_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>> {
-
-public:
-    static const std::string Type;
-
+class BatchNorm_Op : public OperatorTensorWithImpl<BatchNorm_Op<DIM>> {
 private:
     using Attributes_ = StaticAttributes<BatchNormAttr,
                             GENERATE_LIST_ATTR_TYPE(LIST_BATCHNORM_ATTR)
@@ -92,6 +91,19 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::getOutput;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::nbInputs;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::inputCategory;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::OperatorTensorWithImpl;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::mOutputs;
+    using OperatorTensorWithImpl<BatchNorm_Op<DIM>>::dimsForwarded;
+
+    static constexpr const char* const Type = BatchNorm_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input", "scale", "shift", "mean", "variance"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     BatchNorm_Op() = delete;
 
     /**
@@ -108,17 +120,8 @@ public:
      */
     BatchNorm_Op(const BatchNorm_Op<DIM>& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::BatchNorm_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -138,22 +141,6 @@ public:
      * @brief Get whether the operator is in training mode.
      */
     inline bool& trainingMode() const { return mAttributes->template getAttr<BatchNormAttr::TrainingMode>(); }
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "scale", "shift", "mean", "variance"};
-    }
-
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::BatchNormAttr>::data;
-	}
 };
 
 extern template class Aidge::BatchNorm_Op<2>;
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index 69892c1b4662dd9ef2a41c56f9726eff8d9e3fed..00e979d93ba3cf69ed98b7d827ea7a8bccd72ae8 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -57,9 +57,7 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class BitShift_Op : public OperatorTensor,
-    public Registrable<BitShift_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const BitShift_Op&)>> {
-
+class BitShift_Op : public OperatorTensorWithImpl<BitShift_Op> {
 public:
     /**
      * @enum BitShiftDirection
@@ -67,11 +65,6 @@ public:
      */
     enum BitShiftDirection { left, right };
 
-    /**
-     * @brief Type identifier for the operator.
-     */
-    static const std::string Type;
-
 private:
     using Attributes_ = StaticAttributes<BitShiftAttr,
                             GENERATE_LIST_ATTR_TYPE(LIST_BITSHIFT_ATTR)
@@ -83,6 +76,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "BitShift";
+    static constexpr const char* const InputsName[] = {"InputTensor", "ShiftAmount"};
+    static constexpr const char* const OutputsName[] = {"OutputTensor"};
+
     /**
      * @brief Constructor to initialize the `BitShift_Op` with a shift direction.
      * @param[in] direction The direction of the bitwise shift (left or right).
@@ -95,29 +92,8 @@ public:
      */
     BitShift_Op(const BitShift_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<BitShift_Op>(*this);
-    }
-
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Set the backend to be used for this operator.
-     * @param[in] name Backend name.
-     * @param[in] device Device index (default: 0).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the set of available backends for this operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -131,6 +107,7 @@ public:
     inline BitShiftDirection& direction() const noexcept {
         return mAttributes->template getAttr<BitShiftAttr::BitShiftdirection>();
     }
+
     /**
      * @brief Retrieve the rounding flag.
      * @return A boolean (True: Apply bitshift rounding).
@@ -138,30 +115,6 @@ public:
     inline bool rounding() const noexcept {
         return mAttributes->template getAttr<BitShiftAttr::Rounding>();
     }
-
-    /**
-     * @brief Get the names of the input tensors.
-     * @return A vector containing the input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return { "InputTensor", "ShiftAmount" };
-    }
-
-    /**
-     * @brief Get the names of the output tensors.
-     * @return A vector containing the output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return { "OutputTensor" };
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::BitShiftAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 2adbcad3392c3033f78caa923fb3cd0b78f03703..ae996a64aeeaa5378a6c24113eaee9e5b2bb6fef 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -22,6 +22,7 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/CastImpl.hpp"
 
 
 #define LIST_CAST_ATTR(X)  \
@@ -61,15 +62,7 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Cast_Op : public OperatorTensor,
-    public Registrable<Cast_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Cast_Op&)>> {
-
-public:
-    /**
-     * @brief Type string identifying this operator.
-     */
-    static const std::string Type;
-
+class Cast_Op : public OperatorTensorWithImpl<Cast_Op, Cast_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<CastAttr,
         GENERATE_LIST_ATTR_TYPE(LIST_CAST_ATTR)
@@ -81,6 +74,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Cast";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Deleted default constructor.
      */
@@ -104,19 +101,6 @@ public:
      */
     bool forwardDType() override final;
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Cast_Op>(*this);
-    }
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Access the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -135,30 +119,6 @@ public:
      * @param dataType Data type to set.
      */
     virtual void setDataType(const DataType& dataType) const override;
-
-    /**
-     * @brief Get the input tensor names for the Cast operator.
-     * @return A vector containing the input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names for the Cast operator.
-     * @return A vector containing the output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::CastAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
index 8d29eecff869403cab99b2bdb1847e2d9c970499..e7b3121487c003a054d9c24fa19b689c9a0636d0 100644
--- a/include/aidge/operator/Clip.hpp
+++ b/include/aidge/operator/Clip.hpp
@@ -70,15 +70,7 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Clip_Op : public OperatorTensor,
-    public Registrable<Clip_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Clip_Op&)>> {
-
-public:
-    /**
-     * @brief Type string identifying this operator.
-     */
-    static const std::string Type;
-
+class Clip_Op : public OperatorTensorWithImpl<Clip_Op> {
 private:
     using Attributes_ = StaticAttributes<ClipAttr,
         GENERATE_LIST_ATTR_TYPE(LIST_CLIP_ATTR)
@@ -90,6 +82,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Clip";
+    static constexpr const char* const InputsName[] = {"data_input", "min_empty_tensor", "max_empty_tensor"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Deleted default constructor.
      */
@@ -108,16 +104,6 @@ public:
      */
     Clip_Op(const Clip_Op& op);
 
-    virtual ~Clip_Op() noexcept;
-
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Clip_Op>(*this);
-    }
-
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -127,11 +113,6 @@ public:
      */
     bool forwardDType() override final;
 
-    /**
-     * @brief Setter to specify the backend to use.
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
     /**
      * @brief Access the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -149,32 +130,6 @@ public:
      * @return Reference to the maximum value.
      */
     float& max() const;
-
-    std::set<std::string> getAvailableBackends() const override;
-
-    /**
-     * @brief Get the input tensor names for the Clip operator.
-     * @return A vector containing the input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return { "data_input", "min_empty_tensor", "max_empty_tensor" };
-    }
-
-    /**
-     * @brief Get the output tensor names for the Clip operator.
-     * @return A vector containing the output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return { "data_output" };
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ClipAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index f14e0404f4d8d847b24d6e0aa8d9f94da3715c32..4928aa0509e81de2203509ce87cf0e5fdb38f481 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -24,6 +24,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/ConcatImpl.hpp"
 
 
 #define LIST_CONCAT_ATTR(X)  \
@@ -72,15 +73,7 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Concat_Op : public OperatorTensor,
-    public Registrable<Concat_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Concat_Op&)>> {
-
-public:
-    /**
-     * @brief Type identifier for the Concat operator.
-     */
-    static const std::string Type;
-
+class Concat_Op : public OperatorTensorWithImpl<Concat_Op, Concat_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<ConcatAttr, GENERATE_LIST_ATTR_TYPE(LIST_CONCAT_ATTR)>;
 
@@ -90,6 +83,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Concat";
+    static constexpr const char* const InputsName[] = {"data_input_0", "data_input_n"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Default constructor is deleted to enforce explicit initialization.
      */
@@ -109,12 +106,6 @@ public:
      */
     Concat_Op(const Concat_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Forward the dimensions of the operator's inputs and outputs.
      * @param[in] allowDataDependency Allow data dependency during dimension propagation.
@@ -122,19 +113,6 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Set the backend for the operator.
-     * @param[in] name Backend name.
-     * @param[in] device Device index (default: 0).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the set of available backends for the operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the Concat operator.
      * @return A shared pointer to the attributes.
@@ -146,30 +124,6 @@ public:
      * @return A reference to the axis attribute.
      */
     inline std::int32_t& axis() const { return mAttributes->template getAttr<ConcatAttr::Axis>(); }
-
-    /**
-     * @brief Get the names of the input tensors.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return { "data_input_0", "data_input_n" };
-    }
-
-    /**
-     * @brief Get the names of the output tensors.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return { "data_output" };
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ConcatAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
index a5d30f696e1fe4a0fb83157fed214d7f5fe79210..5343980a8f0d59de655ec49b06072d1f97068b2f 100644
--- a/include/aidge/operator/ConstantOfShape.hpp
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -55,16 +55,7 @@ namespace Aidge {
  * @brief This operator's purpose is to generate a tensor of shape given via
  * input and filled with a given value set via attribute.
  */
-class ConstantOfShape_Op
-    : public OperatorTensor,
-      public Registrable<ConstantOfShape_Op, std::string,
-                         std::function<std::shared_ptr<OperatorImpl>(
-                             const ConstantOfShape_Op &)>> {
-
-public:
-  // name of the type of the operation
-  static const std::string Type;
-
+class ConstantOfShape_Op : public OperatorTensorWithImpl<ConstantOfShape_Op> {
 private:
   using Attributes_ = StaticAttributes<ConstantOfShapeAttr, GENERATE_LIST_ATTR_TYPE(LIST_CONSTANTOFSHAPE_ATTR)>;
   template <ConstantOfShapeAttr e>
@@ -72,6 +63,10 @@ private:
   const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "ConstantOfShape";
+    static constexpr const char* const InputsName[] = {"input"};
+    static constexpr const char* const OutputsName[] = {"constant_of_shape"};
+
   /**
    * @brief constructor for ConstantOfShape_op
    * @param[in] value : a scalar tensor which holds the value that will
@@ -87,14 +82,6 @@ public:
    */
   ConstantOfShape_Op(const ConstantOfShape_Op &op);
 
-  /**
-   * @brief Clone the operator using its copy-constructor.
-   * @see Operator::MatMul_Op
-   */
-  std::shared_ptr<Operator> clone() const override final {
-    return std::make_shared<ConstantOfShape_Op>(*this);
-  }
-
   /**
    * @brief Compute dimensions for the output Tensor
    * @param allowDataDependency specify if the output shape of this operator
@@ -104,28 +91,12 @@ public:
 
   void setBackend(const std::string &name,
                   DeviceIdx_t device = 0) override final;
-  std::set<std::string> getAvailableBackends() const override;
 
   inline std::shared_ptr<Attributes> attributes() const override {
     return mAttributes;
   }
   inline Tensor &value() const noexcept {
     return mAttributes->template getAttr<ConstantOfShapeAttr::Value>();
-  }
-
-    static const std::vector<std::string> getInputsName() noexcept {
-        return {"input"};
-    }
-    static const std::vector<std::string> getOutputsName() noexcept {
-        return {"constant_of_shape"};
-    }
-
-    /**
-     * @brief Retrieves the names of the attributes for the operator.
-     * @return A vector containing the attributes name.
-     */
-    static constexpr const char* const* attributesName() noexcept {
-        return EnumStrings<Aidge::ConstantOfShapeAttr>::data;
     }
 };
 
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index b94a6ecadea161261e97d9baaa47c77b90a07f10..65affa029dfa706280546f52251c9c62792087dc 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -45,6 +45,11 @@ enum class ConvAttr {
     GENERATE_LIST_ATTR_ENUM(LIST_CONV_ATTR)
 };
 
+template <DimIdx_t DIM> struct Conv_Op_Type {};
+template <> struct Conv_Op_Type<1> { static constexpr const char* const value = "Conv1D"; };
+template <> struct Conv_Op_Type<2> { static constexpr const char* const value = "Conv2D"; };
+template <> struct Conv_Op_Type<3> { static constexpr const char* const value = "Conv3D"; };
+
 /**
  * @class Conv_Op
  * @brief Convolution operator for performing a multi-dimensional convolution.
@@ -74,22 +79,28 @@ enum class ConvAttr {
  * @see Registrable
  */
 template <DimIdx_t DIM>
-class Conv_Op : public OperatorTensor,
-                public Registrable<Conv_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>> {
-
-public:
-    static const std::string Type;
-
+class Conv_Op : public OperatorTensorWithImpl<Conv_Op<DIM>> {
+private:
     // Use the external enum so that Aidge::Conv_Op<DIM>::Attr is valid.
     using Attr = ConvAttr;
 
-private:
     using Attributes_ = StaticAttributes<Attr, GENERATE_LIST_ATTR_TYPE(LIST_CONV_ATTR)>;
     template <Attr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    using OperatorTensorWithImpl<Conv_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<Conv_Op<DIM>>::getOutput;
+    using OperatorTensorWithImpl<Conv_Op<DIM>>::OperatorTensorWithImpl;
+    using OperatorTensorWithImpl<Conv_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<Conv_Op<DIM>>::mOutputs;
+    using OperatorTensorWithImpl<Conv_Op<DIM>>::dimsForwarded;
+
+    static constexpr const char* const Type = Conv_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input", "weight", "bias"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     Conv_Op() = delete;
 
     /**
@@ -101,7 +112,7 @@ public:
     constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                       const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
+        : OperatorTensorWithImpl<Conv_Op<DIM>>(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<Attr::StrideDims>(strideDims),
             attr<Attr::DilationDims>(dilationDims),
@@ -115,15 +126,6 @@ public:
      */
     Conv_Op(const Conv_Op<DIM>& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned Conv_Op object.
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Conv_Op<DIM>>(*this);
-    }
-
-
     /**
      * @brief Compute forward dimensions for the operator.
      * @param allowDataDependency Flag to allow data dependency in dimension calculation.
@@ -143,19 +145,6 @@ public:
                           const std::vector<DimSize_t>& outputDims,
                           const IOIndex_t outputIdx = 0) const override;
 
-    /**
-     * @brief Set the backend for the operator.
-     * @param name The name of the backend.
-     * @param device The device index (default is 0).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the list of available backends for the operator.
-     * @return A set of available backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the number of input channels.
      * @return The number of input channels.
@@ -193,20 +182,6 @@ public:
      * @return The kernel dimensions as a reference.
      */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<Attr::KernelDims>(); }
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input", "weight", "bias"};
-    }
-
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -268,11 +243,6 @@ constexpr const char* const EnumStrings<Aidge::ConvAttr>::data[] = {
 };
 }
 
-template <Aidge::DimIdx_t DIM>
-constexpr const char* const* Aidge::Conv_Op<DIM>::attributesName() {
-    return EnumStrings<Aidge::Conv_Op<DIM>::Attr>::data;
-}
-
 extern template class Aidge::Conv_Op<1>;
 extern template class Aidge::Conv_Op<2>;
 extern template class Aidge::Conv_Op<3>;
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 341b6f76647059e94613feb0b87dfb3a0187d875..f2dc3a9aa56bcafdf734d8d0fadb556701b163d1 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -45,6 +45,10 @@ namespace Aidge {
 enum class ConvDepthWiseAttr {
     GENERATE_LIST_ATTR_ENUM(LIST_CONVDEPTHWISE_ATTR)
 };
+
+template <DimIdx_t DIM> struct ConvDepthWise_Op_Type {};
+template <> struct ConvDepthWise_Op_Type<1> { static constexpr const char* const value = "ConvDepthWise1D"; };
+template <> struct ConvDepthWise_Op_Type<2> { static constexpr const char* const value = "ConvDepthWise2D"; };
 }  // namespace Aidge
 
 namespace {
@@ -78,12 +82,7 @@ namespace Aidge {
  * @see Registrable
  */
 template <DimIdx_t DIM>
-class ConvDepthWise_Op : public OperatorTensor,
-                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>> {
-
-public:
-    static const std::string Type;
-
+class ConvDepthWise_Op : public OperatorTensorWithImpl<ConvDepthWise_Op<DIM>> {
 private:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
                             GENERATE_LIST_ATTR_TYPE(LIST_CONVDEPTHWISE_ATTR)
@@ -95,6 +94,17 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    using OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>::getOutput;
+    using OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>::OperatorTensorWithImpl;
+    using OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>::mOutputs;
+    using OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>::dimsForwarded;
+
+    static constexpr const char* const Type = ConvDepthWise_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input", "weight", "bias"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     ConvDepthWise_Op() = delete;
 
     /**
@@ -106,7 +116,7 @@ public:
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernelDims,
                                const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
+        : OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ConvDepthWiseAttr::StrideDims>(strideDims),
             attr<ConvDepthWiseAttr::DilationDims>(dilationDims),
@@ -120,14 +130,6 @@ public:
      */
     ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned ConvDepthWise_Op object.
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
-    }
-
     /**
      * @brief Compute forward dimensions for the operator.
      * @param[in] allowDataDependency Flag to allow data dependency in dimension calculation.
@@ -147,19 +149,6 @@ public:
                           const std::vector<DimSize_t>& outputDims,
                           const IOIndex_t outputIdx = 0) const override;
 
-    /**
-     * @brief Set the backend for the operator.
-     * @param[in] name The name of the backend.
-     * @param[in] device The device index (default is 0).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the list of available backends for the operator.
-     * @return A set of available backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the number of input channels.
      * @return The number of input channels.
@@ -195,30 +184,6 @@ public:
      * @return The kernel dimensions as a reference.
      */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>(); }
-
-    /**
-     * @brief Get the names of the inputs.
-     * @return A vector containing the input names.
-     */
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input", "weight", "bias"};
-    }
-
-    /**
-     * @brief Get the names of the outputs.
-     * @return A vector containing the output names.
-     */
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ConvDepthWiseAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/ConvTranspose.hpp b/include/aidge/operator/ConvTranspose.hpp
index b58ce706a2cc1c6cb739d57e0e1c2bbc348e975f..982dd94c9632f1548f2df01cccb7edf4f4210d99 100644
--- a/include/aidge/operator/ConvTranspose.hpp
+++ b/include/aidge/operator/ConvTranspose.hpp
@@ -30,17 +30,13 @@
 namespace Aidge {
 enum class ConvTransposeAttr { StrideDims, DilationDims, KernelDims };
 
-template <DimIdx_t DIM>
-class ConvTranspose_Op
-    : public OperatorTensor,
-      public Registrable<ConvTranspose_Op<DIM>,
-                         std::string,
-                         std::function<std::shared_ptr<OperatorImpl>(
-                             const ConvTranspose_Op<DIM> &)>> {
-
-  public:
-    static const std::string Type;
+template <DimIdx_t DIM> struct ConvTranspose_Op_Type {};
+template <> struct ConvTranspose_Op_Type<1> { static constexpr const char* const value = "ConvTranspose1D"; };
+template <> struct ConvTranspose_Op_Type<2> { static constexpr const char* const value = "ConvTranspose2D"; };
+template <> struct ConvTranspose_Op_Type<3> { static constexpr const char* const value = "ConvTranspose3D"; };
 
+template <DimIdx_t DIM>
+class ConvTranspose_Op : public OperatorTensorWithImpl<ConvTranspose_Op<DIM>> {
   private:
     using Attributes_ = StaticAttributes<ConvTransposeAttr,
                                          std::array<DimSize_t, DIM>,
@@ -51,6 +47,18 @@ class ConvTranspose_Op
     const std::shared_ptr<Attributes_> mAttributes;
 
   public:
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::getOutput;
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::OperatorTensorWithImpl;
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::mOutputs;
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::dimsForwarded;
+    using OperatorTensorWithImpl<ConvTranspose_Op<DIM>>::type;
+
+    static constexpr const char* const Type = ConvTranspose_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input", "weight", "bias"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     ConvTranspose_Op() = delete;
 
     constexpr explicit ConvTranspose_Op(
@@ -59,7 +67,7 @@ class ConvTranspose_Op
             create_array<DimSize_t, DIM>(1),
         const std::array<DimSize_t, DIM> &dilationDims =
             create_array<DimSize_t, DIM>(1))
-    : OperatorTensor(Type,
+    : OperatorTensorWithImpl<ConvTranspose_Op<DIM>>(Type,
                      {InputCategory::Data,
                       InputCategory::Param,
                       InputCategory::OptionalParam},
@@ -77,14 +85,6 @@ class ConvTranspose_Op
      */
     ConvTranspose_Op(const ConvTranspose_Op<DIM> &op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Conv_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ConvTranspose_Op<DIM>>(*this);
-    }
-
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
@@ -92,9 +92,6 @@ class ConvTranspose_Op
                           const std::vector<DimSize_t> &outputDims,
                           const IOIndex_t outputIdx = 0) const override;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
     DimSize_t inChannels() const {
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(
@@ -130,13 +127,6 @@ class ConvTranspose_Op
     inline std::array<DimSize_t, DIM> &kernelDims() const {
         return mAttributes->template getAttr<ConvTransposeAttr::KernelDims>();
     }
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "weight", "bias"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/CryptoHash.hpp b/include/aidge/operator/CryptoHash.hpp
index 266adecd38c1ef86d6282471de53b97b05c46edc..a95d8451a2ba0e7ef421018c9e0400664c008a5f 100644
--- a/include/aidge/operator/CryptoHash.hpp
+++ b/include/aidge/operator/CryptoHash.hpp
@@ -46,18 +46,17 @@ enum class CryptoHashFunction {
  * @see OperatorTensor
  * @see Registrable
  */
-class CryptoHash_Op : public OperatorTensor,
-    public Registrable<CryptoHash_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const CryptoHash_Op&)>> {
-
-public:
-    static const std::string Type;
-
+class CryptoHash_Op : public OperatorTensorWithImpl<CryptoHash_Op> {
 private:
     using Attributes_ = StaticAttributes<CryptoHashAttr, CryptoHashFunction>;
     template <CryptoHashAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "CryptoHash";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     CryptoHash_Op();
 
     /**
@@ -68,17 +67,8 @@ public:
      */
     CryptoHash_Op(const CryptoHash_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::CryptoHash_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -90,13 +80,6 @@ public:
      * @return Reference to the `crypto_hash_function` attribute.
      */
     inline CryptoHashFunction& cryptoHashFunction() const noexcept { return mAttributes->getAttr<CryptoHashAttr::CryptoHashFunction>(); }
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> CryptoHash(const std::string& name = "");
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
index 9d530159f39c3399dd00759f55697ed5911aef79..6dd0590b9dc43d2fcb9bb4300b272e0d423890fe 100644
--- a/include/aidge/operator/DepthToSpace.hpp
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -21,6 +21,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/DepthToSpaceImpl.hpp"
 
 
 #define LIST_DEPTHTOSPACE_ATTR(X)               \
@@ -69,16 +70,8 @@ namespace Aidge{
  * @see OperatorTensor
  * @see Registrable
  */
-class DepthToSpace_Op : public OperatorTensor,
-                public Registrable<DepthToSpace_Op,
-                    std::string,
-                    std::function<std::shared_ptr<OperatorImpl>(const DepthToSpace_Op &)>> {
+class DepthToSpace_Op : public OperatorTensorWithImpl<DepthToSpace_Op, DepthToSpace_OpImpl> {
 public:
-    /**
-     * @brief The type identifier for the DepthToSpace operator.
-     */
-    static const std::string Type;
-
     /**
      * @enum Mode
      * @brief Defines the modes for depth-to-space transformation.
@@ -92,6 +85,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "DepthToSpace";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     DepthToSpace_Op() = delete;
 
     /**
@@ -109,27 +106,8 @@ public:
      */
     DepthToSpace_Op(const DepthToSpace_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::DepthToSpace_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    /**
-     * @brief Set the backend for this operator.
-     * @param name Backend name.
-     * @param device Device index for the backend.
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the available backends for this operator.
-     * @return A set of strings representing available backends.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return Shared pointer to the attributes.
@@ -147,30 +125,6 @@ public:
      * @return Depth-to-space mode.
      */
     inline Mode& mode() const { return mAttributes->template getAttr<DepthToSpaceAttr::Mode>(); }
-
-    /**
-     * @brief Get the input tensor names.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::DepthToSpaceAttr>::data;
-	}
 };
 
 /**
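
For reference, a shape example for the transformation (assuming ONNX-style DepthToSpace semantics; the Mode attribute only changes element ordering, not the output shape):

    // With block size b = 2:
    //   input  (N, C, H, W)          = (1, 8, 4, 4)
    //   output (N, C/(b*b), H*b, W*b) = (1, 2, 8, 8)
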
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 5dec988145874af9312d3ac8133c850c490bf0e7..e8dc95caf067a978b83735cb572395a96ad1a671 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -46,49 +46,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Div_Op : public OperatorTensor,
-    public Registrable<Div_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Div_Op&)>> {
-
+class Div_Op : public OperatorTensorWithImpl<Div_Op> {
 public:
-    static const std::string Type;
-
-    Div_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Div_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Div_Op(const Div_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Div_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    static constexpr const char* const Type = "Div";
+    static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Div_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Div_Op>(*this);
-    }
+    Div_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input_1", "data_input_2"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Div(const std::string& name = "");
diff --git a/include/aidge/operator/Dropout.hpp b/include/aidge/operator/Dropout.hpp
index 4d7465db2f960432beefcdd89576b72ea3e45485..d15569c869b99fd0a36a56149471db69da6c8b5a 100644
--- a/include/aidge/operator/Dropout.hpp
+++ b/include/aidge/operator/Dropout.hpp
@@ -31,26 +31,21 @@ enum class DropoutAttr {
 };
 
-// Define the Dropout_Op class, inheriting from OperatorTensor and Registrable
+// Define the Dropout_Op class, now built on the OperatorTensorWithImpl helper
-class Dropout_Op : public OperatorTensor,
-                  public Registrable<Dropout_Op,
-                                     std::string,
-                                     std::function<std::shared_ptr<OperatorImpl>(const Dropout_Op&)>> {
-public:
-    static const std::string Type;
-
+class Dropout_Op : public OperatorTensorWithImpl<Dropout_Op> {
 private:
     using Attributes_ = StaticAttributes<DropoutAttr,  GENERATE_LIST_ATTR_TYPE(LIST_DROPOUT_ATTR)>;
     template <DropoutAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Dropout";
+    static constexpr const char* const InputsName[] = {"data_input", "probability"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     Dropout_Op(float probability = 0.5f);
 
     Dropout_Op(const Dropout_Op& op);
 
-
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Propagates dimensions through the Dropout operation.
      * This function updates the output Tensors' dimensions based on the input Tensors.
@@ -66,28 +61,10 @@ public:
      */
     bool forwardDims(bool allowDataDependency = true) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    std::set<std::string> getAvailableBackends() const override;
-
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
     inline float& probability() const noexcept { return mAttributes -> getAttr<DropoutAttr::Probability>(); }
 
-    // Input/Output names for the operator
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input", "probability"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
-
-    /**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
-
 private:
     void checkProbability() const;
 };
@@ -107,8 +84,4 @@ constexpr const char* const EnumStrings<Aidge::DropoutAttr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::Dropout_Op::attributesName() {
-    return EnumStrings<Aidge::DropoutAttr>::data;
-}
-
 #endif /* AIDGE_CORE_OPERATOR_DROPOUT_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Equal.hpp b/include/aidge/operator/Equal.hpp
index 12bc9af7812aedf52a4502f270e136c65a4a9756..655598d364276db9ae8f116276c6c7a9dd1585b7 100644
--- a/include/aidge/operator/Equal.hpp
+++ b/include/aidge/operator/Equal.hpp
@@ -27,51 +27,19 @@ namespace Aidge {
 /**
  * @brief Tensor element-wise logical equal operation.
  */
-class Equal_Op : public OperatorTensor,
-    public Registrable<Equal_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Equal_Op&)>> {
+class Equal_Op : public OperatorTensorWithImpl<Equal_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Equal";
+    static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @brief Compute element-wise Equal operation on two given inputs.
      * @details supports broadcasting of both operands.
      */
-    Equal_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
-     * but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
-     */
-    Equal_Op(const Equal_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Equal_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Equal_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Equal_Op>(*this);
-    }
+    Equal_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input_1", "data_input_2"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 inline std::shared_ptr<Node> Equal(const std::string& name = "") {
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index c17e8075fbb48bb14f075d323b24b4539a69cf9a..438add606847016baaa41855c2e16b505c41603f 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -37,39 +37,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Erf_Op : public OperatorTensor,
-    public Registrable<Erf_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const Erf_Op&)>>
-{
+class Erf_Op : public OperatorTensorWithImpl<Erf_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Erf";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    Erf_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Erf_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Erf_Op(const Erf_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Erf_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
+    Erf_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 };
 
 std::shared_ptr<Node> Erf(const std::string& name = "");
diff --git a/include/aidge/operator/Expand.hpp b/include/aidge/operator/Expand.hpp
index 95ca72a2782959f036f09a7948c6fda46fbdf9f0..ed963c2836ddc42577aa1e34cc4496d53a785fe6 100644
--- a/include/aidge/operator/Expand.hpp
+++ b/include/aidge/operator/Expand.hpp
@@ -43,14 +43,11 @@ namespace Aidge {
  * @see https://onnx.ai/onnx/repo-docs/Broadcasting.html for detailed ONNX
  * broadcasting rules
  */
-class Expand_Op
-    : public OperatorTensor,
-      public Registrable<
-          Expand_Op,
-          std::string,
-          std::function<std::shared_ptr<OperatorImpl>(const Expand_Op &)>> {
-  public:
-    static const std::string Type;
+class Expand_Op : public OperatorTensorWithImpl<Expand_Op> {
+public:
+    static constexpr const char* const Type = "Expand";
+    static constexpr const char* const InputsName[] = {"data", "shape"};
+    static constexpr const char* const OutputsName[] = {"output"};
 
     /**
      * @brief Operator that broadcasts an input tensor to a larger provided
@@ -72,26 +69,11 @@ class Expand_Op
      * broadcasting rules
      */
     Expand_Op()
-        : OperatorTensor(Type,
+        : OperatorTensorWithImpl(Type,
                          {InputCategory::Data, InputCategory::Data},
                          1) {}
 
-    Expand_Op(const Expand_Op &op);
-
-    std::shared_ptr<Operator> clone() const override;
-
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string &name,
-                    DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data", "shape"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"output"};
-    }
 };
 
 /**
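
A concrete shape example for the broadcast described above (standard ONNX Expand semantics, for illustration):

    //   data  shape : (3, 1)
    //   shape input : [2, 1, 6]
    //   output shape: (2, 3, 6)   // per-dimension broadcast of the two shapes
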
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index e513c3059fa3819ac81f26a373b92de9d3cba3fc..8b83c0bd6c25d646f8986d39cd7cf454ff5ec224 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -50,15 +50,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class FC_Op : public OperatorTensor,
-              public Registrable<FC_Op,
-                                 std::string,
-                                 std::function<std::shared_ptr<OperatorImpl>(const FC_Op &)>> {
+class FC_Op : public OperatorTensorWithImpl<FC_Op> {
 public:
-    /**
-     * @brief Static type identifier for the FC operator.
-     */
-    static const std::string Type;
+    static constexpr const char* const Type = "FC";
+    static constexpr const char* const InputsName[] = {"data_input", "weight", "bias"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @brief Default constructor for the FC operator.
@@ -66,33 +62,9 @@ public:
      * Initializes the operator with a type identifier and input categories.
      */
     FC_Op()
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1)
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1)
     {}
 
-    /**
-     * @brief Copy constructor.
-     *
-     * Copies the attributes and output tensor(s) of the operator, but does not
-     * copy input tensors. The new operator instance has no associated inputs.
-     *
-     * @param op The `FC_Op` instance to copy.
-     */
-    FC_Op(const FC_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(FC_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clones the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override final;
-
     /**
      * @brief Associates an input tensor with the operator.
      *
@@ -121,22 +93,6 @@ public:
      */
     bool forwardDType() override final;
 
-    /**
-     * @brief Sets the backend for the operator.
-     *
-     * Configures the backend used for computation.
-     *
-     * @param[in] name Name of the backend.
-     * @param[in] device Index of the target device (default is 0).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Retrieves the available backends for the operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Gets the number of input channels for the FC operator.
      *
@@ -166,22 +122,6 @@ public:
         }
         return getInput(1)->template dims<2>()[0];
     }
-
-    /**
-     * @brief Retrieves the input tensor names for the FC operator.
-     * @return A vector of input tensor names: `{"data_input", "weight", "bias"}`.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "weight", "bias"};
-    }
-
-    /**
-     * @brief Retrieves the output tensor names for the FC operator.
-     * @return A vector of output tensor names: `{"data_output"}`.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Flatten.hpp b/include/aidge/operator/Flatten.hpp
index 11775aafbbb987ee3c6f922472992ec0789893bf..abcd35ba1f7dfb2629d2050e6623d913560d7afa 100644
--- a/include/aidge/operator/Flatten.hpp
+++ b/include/aidge/operator/Flatten.hpp
@@ -21,6 +21,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/FlattenImpl.hpp"
 
 
 #define LIST_FLATTEN_ATTR(X)  \
@@ -64,21 +65,17 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Flatten_Op : public OperatorTensor,
-                   public Registrable<Flatten_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Flatten_Op&)>> {
-
-public:
-    /**
-     * @brief The type identifier for the Flatten operator.
-     */
-    static const std::string Type;
-
+class Flatten_Op : public OperatorTensorWithImpl<Flatten_Op, Flatten_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<FlattenAttr, GENERATE_LIST_ATTR_TYPE(LIST_FLATTEN_ATTR)>;
     template <FlattenAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Flatten";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Deleted default constructor.
      */
@@ -96,12 +93,6 @@ public:
      */
     Flatten_Op(const Flatten_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute the forward dimensions.
      * @param[in] allowDataDependency Whether to allow data dependency in computation.
@@ -109,19 +100,6 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Set the backend for the operator.
-     * @param[in] name The name of the backend.
-     * @param[in] device Optional. The device index.
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the set of available backends for the operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -133,30 +111,6 @@ public:
      * @return A reference to the axis attribute.
      */
     inline std::int64_t& axis() const { return mAttributes->template getAttr<FlattenAttr::Axis>(); }
-
-    /**
-     * @brief Get the names of the input tensors.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the names of the output tensors.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::FlattenAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index 9b71057fb20327c7c37d3ac9aa49d021e7c244cc..c5bdf27b4da91f4ce41d992b33732641ff44458b 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -48,6 +48,9 @@ namespace Aidge {
 enum class FoldAttr {
     GENERATE_LIST_ATTR_ENUM(LIST_FOLD_ATTR)
 };
+
+template <DimIdx_t DIM> struct Fold_Op_Type {};
+template <> struct Fold_Op_Type<2> { static constexpr const char* const value = "Fold2D"; };
 }  // namespace Aidge
 
 namespace {
@@ -91,12 +94,7 @@ namespace Aidge {
  */
 
 template <DimIdx_t DIM>
-class Fold_Op : public OperatorTensor,
-                public Registrable<Fold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)>> {
-
-public:
-    static const std::string Type;
-
+class Fold_Op : public OperatorTensorWithImpl<Fold_Op<DIM>> {
 private:
     using Attributes_ = StaticAttributes<FoldAttr, GENERATE_LIST_ATTR_TYPE(LIST_FOLD_ATTR)>;
 
@@ -104,6 +102,14 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    using OperatorTensorWithImpl<Fold_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<Fold_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<Fold_Op<DIM>>::mOutputs;
+
+    static constexpr const char* const Type = Fold_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     Fold_Op() = delete;
 
     /**
@@ -117,7 +123,7 @@ public:
                       const std::array<DimSize_t, DIM>& kernelDims,
                       const std::array<DimSize_t, DIM>& strideDims = create_array<DimSize_t,DIM>(1),
                       const std::array<DimSize_t, DIM>& dilationDims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
+        : OperatorTensorWithImpl<Fold_Op<DIM>>(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<FoldAttr::OutputDims>(outputDims),
             attr<FoldAttr::StrideDims>(strideDims),
@@ -132,11 +138,6 @@ public:
      */
     Fold_Op(const Fold_Op<DIM>& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute forward dimensions for the operator.
      * @param allowDataDependency Flag to allow data dependency in dimension calculation.
@@ -144,19 +145,6 @@ public:
      */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    /**
-     * @brief Set the backend for the operator.
-     * @param name Name of the backend.
-     * @param device Index of the device.
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the available backends for this operator.
-     * @return A set of available backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return Shared pointer to the attributes.
@@ -186,30 +174,6 @@ public:
      * @return Kernel dimensions.
      */
     inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<FoldAttr::KernelDims>(); }
-
-    /**
-     * @brief Get the input names for the Fold operation.
-     * @return List of input names.
-     */
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output names for the Fold operation.
-     * @return List of output names.
-     */
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::FoldAttr>::data;
-	}
 };
 
 /**
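
For illustration (not part of the patch): because Type is now an in-class constexpr constant, dimension-templated operators can no longer define it out of line per specialisation, hence the small Fold_Op_Type trait introduced above; the MaxPooling hunk further down uses the same trick. A minimal sketch of how the trait resolves:

    // The DIM-specific type name is picked up at compile time:
    constexpr const char* fold2dType = Fold_Op_Type<2>::value;   // "Fold2D"
    // Instantiating an unspecialised dimension (e.g. Fold_Op_Type<3>::value) fails to
    // compile, which presumably acts as a guard against unsupported dimensions.
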
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 8bd8239ec664a7bcb9d520c3dc37488f932437bb..3a887aeb0516f4525eac77d96c7b6aafc38b9b34 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/GatherImpl.hpp"
 
 
 #define LIST_GATHER_ATTR(X)  \
@@ -44,13 +45,8 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Gather_Op : public OperatorTensor,
-                  public Registrable<Gather_Op,
-                                     std::string,
-                                     std::function<std::shared_ptr<OperatorImpl>(const Gather_Op&)>> {
+class Gather_Op : public OperatorTensorWithImpl<Gather_Op, Gather_OpImpl> {
 public:
-    static const std::string Type;
-
     /**
      * @enum Attr
      * @brief Attributes for the Gather operation.
@@ -70,6 +66,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Gather";
+    static constexpr const char* const InputsName[] = {"data_input", "indices"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Default constructor is deleted.
      */
@@ -93,11 +93,6 @@ public:
      */
     Gather_Op(const Gather_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Check if dimensions have been forwarded.
      * @return True if dimensions have been forwarded, false otherwise.
@@ -111,19 +106,6 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Set the backend for the operator.
-     * @param name The name of the backend.
-     * @param device Optional. The device index.
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the available backends for the operator.
-     * @return A set of available backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -147,28 +129,6 @@ public:
      * @return The gathered shape attribute.
      */
     inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes->getAttr<Attr::GatheredShape>(); }
-
-    /**
-     * @brief Get the input tensor names.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "indices"};
-    }
-
-    /**
-     * @brief Get the output tensor names.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -200,10 +160,6 @@ constexpr const char* const EnumStrings<Aidge::Gather_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::Gather_Op::attributesName() {
-    return EnumStrings<Aidge::Gather_Op::Attr>::data;
-}
-
 #undef LIST_GATHER_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 0cfc16ccafcb79b041d3b964fd51c756fec63060..fd215e53c080387f230e03c312de1ce13a97bdd2 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -45,41 +45,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class GlobalAveragePooling_Op
-    : public OperatorTensor,
-      public Registrable<GlobalAveragePooling_Op, std::string,
-                         std::function<std::shared_ptr<OperatorImpl>(
-                             const GlobalAveragePooling_Op &)>> {
+class GlobalAveragePooling_Op : public OperatorTensorWithImpl<GlobalAveragePooling_Op> {
 public:
-	static const std::string Type;
+    static constexpr const char* const Type = "GlobalAveragePooling";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-	GlobalAveragePooling_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-	/**
-	 * @brief Copy-constructor.
-	 * @param op GlobalAveragePooling_Op to copy.
-	 * @details Copies the operator attributes and its output tensor(s), but not
-	 * its input tensors. The new operator has no associated input.
-	 */
-	GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op);
-
-	/**
-	 * @brief Clone the operator using its copy-constructor.
-	 * @see Operator::GlobalAveragePooling_Op
-	 */
-	std::shared_ptr<Operator> clone() const override;
+	GlobalAveragePooling_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
 	bool forwardDims(bool allowDataDependency = false) override final;
-
-	void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-	std::set<std::string> getAvailableBackends() const override;
-
-	static const std::vector<std::string> getInputsName() {
-	return {"data_input"};
-	}
-	static const std::vector<std::string> getOutputsName() {
-	return {"data_output"};
-	}
 };
 
 std::shared_ptr<Node> GlobalAveragePooling(const std::string &name = "");
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index 348b7b20bf0299f6a5d22c37fdadabb366ed9427..1d6a83223a9927142c71f3f689f02f10bf16d3a5 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -83,11 +83,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class GridSample_Op : public OperatorTensor,
-	public Registrable<GridSample_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const GridSample_Op&)>> {
-
+class GridSample_Op : public OperatorTensorWithImpl<GridSample_Op> {
 public:
-	static const std::string Type;
+    static constexpr const char* const Type = "GridSample";
+    static constexpr const char* const InputsName[] = {"data_input", "grid_field"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
 	/**
 	 * @enum Mode
@@ -129,12 +129,6 @@ public:
 	 */
 	~GridSample_Op() noexcept;
 
-	/**
-	 * @brief Clone the operator using its copy-constructor.
-	 * @return A shared pointer to the cloned operator.
-	 */
-	std::shared_ptr<Operator> clone() const override;
-
 	/**
 	 * @brief Determines whether dimensions can be forwarded.
 	 * @param allowDataDependencies Allow data-dependent dimensions.
@@ -142,19 +136,6 @@ public:
 	 */
 	bool forwardDims(bool /*allowDataDependencies*/ = false) override final;
 
-	/**
-	 * @brief Sets the backend for execution.
-	 * @param name Backend name.
-	 * @param device Device index.
-	 */
-	void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-	/**
-	 * @brief Retrieves the available backends.
-	 * @return A set of available backend names.
-	 */
-    std::set<std::string> getAvailableBackends() const override;
-
 	/**
 	 * @brief Retrieves the operator's attributes.
 	 * @return Shared pointer to the attributes.
@@ -178,30 +159,6 @@ public:
 	 * @return True if corners are aligned.
 	 */
 	inline bool alignCorners() const { return mAttributes->template getAttr<GridSampleAttr::AlignCorners>(); }
-
-	/**
-	 * @brief Retrieves the input names for GridSample.
-	 * @return Vector of input tensor names.
-	 */
-	static const std::vector<std::string> getInputsName() {
-		return {"data_input", "grid_field"};
-	}
-
-	/**
-	 * @brief Retrieves the output names for GridSample.
-	 * @return Vector of output tensor names.
-	 */
-	static const std::vector<std::string> getOutputsName() {
-		return {"data_output"};
-	}
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::GridSampleAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Heaviside.hpp b/include/aidge/operator/Heaviside.hpp
index 49f9059033b2816b594802b1fcfaa4340418f883..d72f73aa65e2f75b1c67466830c3c3fa058ba64c 100644
--- a/include/aidge/operator/Heaviside.hpp
+++ b/include/aidge/operator/Heaviside.hpp
@@ -43,11 +43,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Heaviside_Op
-    : public OperatorTensor,
-      public Registrable<Heaviside_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Heaviside_Op &)>> {
+class Heaviside_Op : public OperatorTensorWithImpl<Heaviside_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Heaviside";
+    static constexpr const char* const InputsName[] = {"data_input", "data_values"};
+    static constexpr const char* const OutputsName[] = {"output"};
 
     /**
      * @enum Attr
@@ -80,46 +80,6 @@ public:
      */
     Heaviside_Op(const Heaviside_Op &op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Set the backend for this operator.
-     * @param name The backend name.
-     * @param device The device index (default is 0).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the set of available backends.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
-    /**
-     * @brief Get the input names required by this operator.
-     * @return A vector containing the input names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "data_values"};
-    }
-
-    /**
-     * @brief Get the output names generated by this operator.
-     * @return A vector containing the output names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -161,10 +121,6 @@ constexpr const char* const EnumStrings<Aidge::Heaviside_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::Heaviside_Op::attributesName() {
-    return EnumStrings<Aidge::Heaviside_Op::Attr>::data;
-}
-
 #undef LIST_HEAVISIDE_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_HEAVISIDE_H_ */
diff --git a/include/aidge/operator/ILayerNorm.hpp b/include/aidge/operator/ILayerNorm.hpp
index dc90b762260cb4498d15aa18024684b98b162175..eda2c3df5f4010d14b93925a3b5eb08e259f6383 100644
--- a/include/aidge/operator/ILayerNorm.hpp
+++ b/include/aidge/operator/ILayerNorm.hpp
@@ -39,43 +39,19 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ILayerNorm_Op : public OperatorTensor,
-    public Registrable<ILayerNorm_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ILayerNorm_Op&)>> {
-
+class ILayerNorm_Op : public OperatorTensorWithImpl<ILayerNorm_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "ILayerNorm";
+    static constexpr const char* const InputsName[] = {"data_input", "weight", "bias"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @brief Default constructor.
      */
     ILayerNorm_Op()
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::Param}, 1)
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Param, InputCategory::Param}, 1)
     {}
 
-    /**
-     * @brief Copy-constructor.
-     * @param[in] op ILayerNorm_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not its input tensors. 
-     * The new operator has no associated input.
-     */
-    ILayerNorm_Op(const ILayerNorm_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ILayerNorm_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ILayerNorm_Op>(*this);
-    }
-
     /**
      * @brief Associates an input tensor with the operator.
      * @param inputIdx The index of the input.
@@ -89,25 +65,6 @@ public:
      * @return True if propagation is successful, false otherwise.
      */
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    /**
-     * @brief Gets the names of the input tensors.
-     * @return A vector containing the names of input tensors.
-     */
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input", "weight", "bias"};
-    }
-
-    /**
-     * @brief Gets the names of the output tensors.
-     * @return A vector containing the names of output tensors.
-     */
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 /**
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index a0200db6f6001c23dda644e2513253687432463d..0f79b253fe21233cdf3e229aedae7f6c1b94d3dd 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -24,6 +24,7 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/backend/generic/operator/IdentityImpl.hpp"
 
 namespace Aidge {
 
@@ -34,35 +35,13 @@ namespace Aidge {
- * Has we need to update this class to remove the use of Impl.
+ * As we need to update this class to remove the use of Impl.
  *
  */
-class Identity_Op : public OperatorTensor,
-    public Registrable<Identity_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Identity_Op&)>> {
+class Identity_Op : public OperatorTensorWithImpl<Identity_Op, Identity_OpImpl> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Identity";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Identity_Op();
-
-    /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
-     * but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
-     */
-    Identity_Op(const Identity_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Identity_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Identity(const std::string& name = "");
diff --git a/include/aidge/operator/LRN.hpp b/include/aidge/operator/LRN.hpp
index b1cbc143dd592271ebb982a81eb9350b0ea04a70..1ad63caf307727e19cb0f9d080c56fbf68d99d74 100644
--- a/include/aidge/operator/LRN.hpp
+++ b/include/aidge/operator/LRN.hpp
@@ -57,16 +57,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class LRN_Op : public OperatorTensor,
-                public Registrable<LRN_Op,
-                                   std::string,
-                                   std::function<std::shared_ptr<OperatorImpl>(const LRN_Op&)>> {
-
+class LRN_Op : public OperatorTensorWithImpl<LRN_Op> {
 public:
-    /**
-     * @brief Static type string for the LRN operator.
-     */
-    static const std::string Type;
+    static constexpr const char* const Type = "LRN";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @enum Attr
@@ -106,25 +101,6 @@ public:
      */
     LRN_Op(const LRN_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Set the backend for the LRN operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the available backends for the LRN operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -154,28 +130,6 @@ public:
      * @return Reference to the `size` attribute.
      */
     inline std::int32_t& size() const noexcept { return mAttributes->getAttr<Attr::Size>(); }
-
-    /**
-     * @brief Get the input tensor names for the LRN operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names for the LRN operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -199,10 +153,6 @@ constexpr const char* const EnumStrings<Aidge::LRN_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::LRN_Op::attributesName() {
-    return EnumStrings<Aidge::LRN_Op::Attr>::data;
-}
-
 #undef LIST_LRN_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_LRN_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 867f324d3044cdc8ebd440dfebd5547f6936f47f..4a267b8a5a5477be0141bf81d961082607c77847 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -41,11 +41,11 @@ namespace Aidge{
  * @see OperatorTensor
  * @see Registrable
  */
-class LeakyReLU_Op : public OperatorTensor,
-    public Registrable<LeakyReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>> {
-
+class LeakyReLU_Op : public OperatorTensorWithImpl<LeakyReLU_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "LeakyReLU";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @enum LeakyReLUAttr
@@ -74,7 +74,7 @@ public:
      * @param[in] negativeSlope The slope for negative input values.
      */
     LeakyReLU_Op(float negativeSlope)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
+        : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
           mAttributes(
             std::make_shared<Attributes_>(
                 attr<Attr::NegativeSlope>(negativeSlope)))
@@ -88,14 +88,6 @@ public:
      */
     LeakyReLU_Op(const LeakyReLU_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -105,28 +97,6 @@ public:
      * @brief Get the negative slope value.
      */
     inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<Attr::NegativeSlope>(); }
-
-    /**
-     * @brief Get the names of the input tensors.
-     * @return A vector containing the names of input tensors.
-     */
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the names of the output tensors.
-     * @return A vector containing the names of output tensors.
-     */
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -149,10 +119,6 @@ constexpr const char* const EnumStrings<Aidge::LeakyReLU_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::LeakyReLU_Op::attributesName() {
-    return EnumStrings<Attr>::data;
-}
-
 #undef LIST_LEAKYRELU_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_LEAKYRELU_H_ */
diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Ln.hpp
index 4a78db4391e0f69023b0de8c843264352c9ca3fb..d03c0ba8320118aa3323a061a20dc3b53927785e 100755
--- a/include/aidge/operator/Ln.hpp
+++ b/include/aidge/operator/Ln.hpp
@@ -37,40 +37,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Ln_Op : public OperatorTensor,
-    public Registrable<Ln_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::unique_ptr<OperatorImpl>(const Ln_Op&)>>
-{
+class Ln_Op : public OperatorTensorWithImpl<Ln_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Ln";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    Ln_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Ln_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Ln_Op(const Ln_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Ln_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
+    Ln_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 };
 
 std::shared_ptr<Node> Ln(const std::string& name = "");
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 0313815ee2d2dd568f7dc9b9b496e234349e657f..66a063bdb25c7b239ab718c1f3c1d3aff3c7e2d0 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -51,28 +51,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class MatMul_Op : public OperatorTensor,
-              public Registrable<MatMul_Op,
-                                 std::string,
-                                 std::function<std::shared_ptr<OperatorImpl>(const MatMul_Op &)>> {
+class MatMul_Op : public OperatorTensorWithImpl<MatMul_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "MatMul";
+    static constexpr const char* const InputsName[] = {"data_input1", "data_input2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    MatMul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op MatMul_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    MatMul_Op(const MatMul_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::MatMul_Op
-     */
-    std::shared_ptr<Operator> clone() const override final;
+    MatMul_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Compute dimensions for the output Tensor following the same rules as
@@ -86,17 +71,6 @@ public:
      * dimensions (D) -> (D,1). The appended 1 is removed after computation.
      */
     bool forwardDims(bool allowDataDependency = false) override final;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input1", "data_input2"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> MatMul(const std::string& name = "");
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 01104262147dc461259c3de17e3b3ec3383328b4..968d92beff66ef52aaef839db74c2587e0530e79 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -56,6 +56,11 @@ namespace Aidge {
 enum class MaxPoolingAttr {
     GENERATE_LIST_ATTR_ENUM(LIST_MAXPOOLING_ATTR)
 };
+
+template <DimIdx_t DIM> struct MaxPooling_Op_Type {};
+template <> struct MaxPooling_Op_Type<1> { static constexpr const char* const value = "MaxPooling1D"; };
+template <> struct MaxPooling_Op_Type<2> { static constexpr const char* const value = "MaxPooling2D"; };
+template <> struct MaxPooling_Op_Type<3> { static constexpr const char* const value = "MaxPooling3D"; };
 } // namespace Aidge
 
 namespace {
@@ -98,14 +103,7 @@ namespace Aidge {
  */
 
 template <DimIdx_t DIM>
-class MaxPooling_Op : public OperatorTensor,
-                public Registrable<MaxPooling_Op<DIM>,
-                                   std::string,
-                                   std::function<std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>>
-{
-public:
-    static const std::string Type; ///< Static identifier for this operator type.
-
+class MaxPooling_Op : public OperatorTensorWithImpl<MaxPooling_Op<DIM>> {
 private:
     using Attributes_ = StaticAttributes<MaxPoolingAttr,
                                 GENERATE_LIST_ATTR_TYPE(LIST_MAXPOOLING_ATTR)
@@ -115,6 +113,14 @@ private:
     const std::shared_ptr<Attributes_> mAttributes; ///< Shared pointer to operator attributes.
 
 public:
+    using OperatorTensorWithImpl<MaxPooling_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<MaxPooling_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<MaxPooling_Op<DIM>>::mOutputs;
+
+    static constexpr const char* const Type = MaxPooling_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     MaxPooling_Op() = delete; ///< Deleted default constructor.
 
     /**
@@ -137,12 +143,6 @@ public:
      */
     MaxPooling_Op(const MaxPooling_Op<DIM>& op);
 
-    /**
-     * @brief Clones the operator using the copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Computes output tensor dimensions based on input dimensions and operator attributes.
      * @param[in] allowDataDependency If true, dimensions may depend on input data; otherwise, strictly attribute-based.
@@ -150,19 +150,6 @@ public:
      */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    /**
-     * @brief Sets the backend implementation for this operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index where the backend will run (default: 0).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Retrieves the list of available backend implementations for this operator.
-     * @return A set of available backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Accessor for operator attributes.
      * @return A shared pointer to the attributes object.
@@ -192,26 +179,6 @@ public:
      * @return Boolean value indicating whether ceil mode is enabled.
      */
     inline bool& ceilMode() const { return mAttributes->template getAttr<MaxPoolingAttr::CeilMode>(); }
-
-    /**
-     * @brief Retrieves the names of the input tensors.
-     * @return A vector of input tensors names.
-     */
-    static const std::vector<std::string> getInputsName(){ return {"data_input"}; }
-
-    /**
-     * @brief Retrieves the names of the output tensors.
-     * @return A vector of output tensors names.
-     */
-    static const std::vector<std::string> getOutputsName(){ return {"data_output"}; }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::MaxPoolingAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 99ac6341886a257d4a85520f8b0be47539598c72..b2937e5caa8345825c851e80460ca69fc4f3ae89 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -22,6 +22,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/MemorizeImpl.hpp"
 
 
 #define LIST_MEMORIZE_ATTR(X)                        \
@@ -47,10 +48,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Memorize_Op : public OperatorTensor,
-    public Registrable<Memorize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Memorize_Op&)>> {
+class Memorize_Op : public OperatorTensorWithImpl<Memorize_Op, Memorize_OpImpl> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Memorize";
+    static constexpr const char* const InputsName[] = {"data_input", "data_input_init"};
+    static constexpr const char* const OutputsName[] = {"data_output", "data_output_rec"};
 
     /**
      * @enum Attr
@@ -86,25 +88,6 @@ public:
      */
     Memorize_Op(const Memorize_Op& op);
 
-    /**
-     * @brief Clone the operator by creating a copy of it.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Assign a specific backend and device for computation.
-     * @param name Name of the backend.
-     * @param device The device index (default is 0).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the list of available backends compatible with this operator.
-     * @return A set of strings representing backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Perform dimension inference for the operator, optionally allowing
      * data dependency during the process.
@@ -146,28 +129,6 @@ public:
      * @return A reference to the end step value.
      */
     inline std::uint32_t& endStep() const { return mAttributes->template getAttr<Attr::EndStep>(); }
-
-    /**
-     * @brief Retrieve the names of the operator's input tensors.
-     * @return A vector of strings representing input tensor names.
-     */
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input", "data_input_init"};
-    }
-
-    /**
-     * @brief Retrieve the names of the operator's output tensors.
-     * @return A vector of strings representing output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output", "data_output_rec"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -189,10 +150,6 @@ constexpr const char* const EnumStrings<Aidge::Memorize_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::Memorize_Op::attributesName() {
-    return EnumStrings<Aidge::Memorize_Op::Attr>::data;
-}
-
 #undef LIST_MEMORIZE_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_MEMORIZE_H_ */
diff --git a/include/aidge/operator/Mod.hpp b/include/aidge/operator/Mod.hpp
index 56a9381e088b413b77da7f7c0c75df8c51a5ba42..c005bf80c39b4f43bc1f2b930764c8d86a99f731 100644
--- a/include/aidge/operator/Mod.hpp
+++ b/include/aidge/operator/Mod.hpp
@@ -56,18 +56,17 @@ enum class ModAttr {
  * @see OperatorTensor
  * @see Registrable
  */
-class Mod_Op : public OperatorTensor,
-    public Registrable<Mod_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Mod_Op&)>> {
-
-public:
-    static const std::string Type;
-
+class Mod_Op : public OperatorTensorWithImpl<Mod_Op> {
 private:
     using Attributes_ = StaticAttributes<ModAttr, bool>;
     template <ModAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Mod";
+    static constexpr const char* const InputsName[] = {"dividend", "divisor"};
+    static constexpr const char* const OutputsName[] = {"remainder"};
+
     Mod_Op();
 
     /**
@@ -78,17 +77,8 @@ public:
      */
     Mod_Op(const Mod_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Mod_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -100,13 +90,6 @@ public:
      * @return Reference to the `fmod` attribute.
      */
     inline bool& fmod() const noexcept { return mAttributes->getAttr<ModAttr::Fmod>(); }
-
-    static const std::vector<std::string> getInputsName(){
-        return {"dividend", "divisor"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"remainder"};
-    }
 };
 
 std::shared_ptr<Node> Mod(const std::string& name = "");
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index b516ef54959c1ae22f607fddea0de94a7436b365..c02daecbc4a33d952ec5748f205fe7bb14482110 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -38,7 +38,9 @@ namespace Aidge {
 class Move_Op : public OperatorTensor,
     public Registrable<Move_Op, std::tuple<std::string, std::string>, std::function<std::unique_ptr<OperatorImpl>(const Move_Op&)>> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Move";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Move_Op();
 
@@ -58,13 +60,6 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
     std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Move(const std::string& name = "");
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 913fa05b4dadab05addaa2837bfbd25d25711716..067dbebd050e37e7f28e1097070bd0cb915a2634 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -46,38 +46,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Mul_Op : public OperatorTensor,
-    public Registrable<Mul_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Mul_Op&)>> {
+class Mul_Op : public OperatorTensorWithImpl<Mul_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Mul";
+    static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    Mul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Mul_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Mul_Op(const Mul_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Mul_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
+    Mul_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input_1", "data_input_2"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Mul(const std::string& name = "");
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index bbd6b49fa1df92261753eed42a4850ab2fe3de21..8f2930e143141e6a54fa535f7238a92de40f8491 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -366,12 +366,6 @@ public:
      */
     inline bool isBackEdge(IOIndex_t inputIdx) const { return mBackEdges.find(inputIdx) != mBackEdges.end(); }
 
-    /** @brief Returns an empty vector of input names. */
-    static const std::vector<std::string> getInputsName() { return {}; }
-
-    /** @brief Returns an empty vector of output names. */
-    static const std::vector<std::string> getOutputsName() { return {}; }
-
 #ifdef PYBIND
     /**
      * @brief Returns a string representation of the operator (for Python bindings).
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 3ba37cbbb6754a61aa03b812e7adfdcd4899e96c..217acf986c916e330e4b267654c717ad5ab76426 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -16,14 +16,14 @@
 #include <string>
 #include <vector>
 
+#include "aidge/data/Tensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
 
-class Tensor;
-
 /**
  * @class OperatorTensor
  * @brief Base class for all operators that work with tensor inputs and outputs.
@@ -225,6 +225,76 @@ protected:
     bool inputsAssociated(bool checkNonEmpty = true) const;
 };
 
+
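+/**
+ * @class OperatorTensorWithImpl
+ * @brief Convenience base class for tensor operators that factorizes the
+ * boilerplate previously duplicated in every operator: registration of
+ * backend implementations, clone(), setBackend() and getAvailableBackends().
+ * @tparam T The concrete operator type (CRTP), e.g. `class Mul_Op : public OperatorTensorWithImpl<Mul_Op>`.
+ * @tparam DEF_IMPL Optional backend-agnostic default implementation
+ * (e.g. `Pop_OpImpl` for `Pop_Op`); defaults to `OperatorImpl`, meaning that
+ * no default implementation is instantiated.
+ * @see OperatorTensor
+ * @see Registrable
+ */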
+template <class T, class DEF_IMPL = OperatorImpl>
+class OperatorTensorWithImpl : public OperatorTensor,
+                               public Registrable<T, std::string, std::function<std::shared_ptr<OperatorImpl>(const T&)>>
+{
+public:
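+    /**
+     * @brief Constructor.
+     * @param[in] type Type string of the operator.
+     * @param[in] inputsCategory Category of each input.
+     * @param[in] nbOut Number of output tensors.
+     * @details When a backend-agnostic default implementation DEF_IMPL is
+     * provided, it is instantiated immediately.
+     */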
+    OperatorTensorWithImpl(const std::string& type, const std::vector<InputCategory>& inputsCategory,
+        const IOIndex_t nbOut): OperatorTensor(type, inputsCategory, nbOut)
+    {
+        if (!std::is_same<DEF_IMPL, OperatorImpl>::value) {
+            mImpl = std::make_shared<DEF_IMPL>(*static_cast<T*>(this));
+        }
+    }
+
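+    /**
+     * @brief Copy-constructor.
+     * @param op T operator instance to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The implementation is recreated from the copied
+     * operator's backend if one is set; otherwise DEF_IMPL is used (or none).
+     */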
+    OperatorTensorWithImpl(const T& op)
+        : OperatorTensor(op)
+    {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(T, *static_cast<T*>(this), op.backend());
+        }
+        else if (!std::is_same<DEF_IMPL, OperatorImpl>::value) {
+            mImpl = std::make_shared<DEF_IMPL>(*static_cast<T*>(this));
+        }
+        else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy constructor.
+     * @return A shared pointer to the cloned T object.
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<T>(*static_cast<const T*>(this));
+    }
+
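+    /**
+     * @brief Assign a specific backend and device for computation.
+     * @details Uses the backend implementation registered for T under the given
+     * name; when a default implementation DEF_IMPL is available, it is used as
+     * a fallback if no such registration exists. The backend is also propagated
+     * to the output tensors and to the connected Param inputs.
+     * @param name Name of the backend.
+     * @param device The device index (default is 0).
+     */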
+    virtual void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
+        if (!std::is_same<DEF_IMPL, OperatorImpl>::value) {
+            if (Registrar<T>::exists({name})){
+                SET_IMPL_MACRO(T, *static_cast<T*>(this), name);
+            }
+            else {
+                mImpl = std::make_shared<DEF_IMPL>(*static_cast<T*>(this));
+            }
+        }
+        else {
+            SET_IMPL_MACRO(T, *static_cast<T*>(this), name);
+        }
+
+        for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
+            mOutputs[i]->setBackend(name, device);
+        }
+
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            if (inputCategory(i) == InputCategory::Param || inputCategory(i) == InputCategory::OptionalParam) {
+                if (getInput(i)) {
+                    getInput(i)->setBackend(name, device);
+                }
+                else if (inputCategory(i) != InputCategory::OptionalParam) {
+                    Log::notice("{}_Op::setBackend(): could not set backend for input #{}, because input is not connected", type(), i);
+                }
+            }
+        }
+    }
+
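+    /**
+     * @brief Get the list of available backends compatible with this operator.
+     * @return A set of strings representing backend names.
+     */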
+    std::set<std::string> getAvailableBackends() const override {
+        return Registrar<T>::getKeys();
+    }
+};
+
 }  // namespace Aidge
 
 #endif  // AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 630655ad2139028edc34df6b967206381969c07e..5beecc0167c645c81d739919c662c1ce0838ce47 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -54,6 +54,11 @@ enum class PadBorderType {
     Wrap,     ///< Values wrap around the tensor dimensions.
     Zero      ///< All out-of-bound values are set to 0.
 };
+
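+/**
+ * @brief Trait mapping the spatial dimension DIM to the corresponding Pad
+ * operator type string ("Pad1D", "Pad2D" or "Pad3D").
+ */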
+template <DimIdx_t DIM> struct Pad_Op_Type {};
+template <> struct Pad_Op_Type<1> { static constexpr const char* const value = "Pad1D"; };
+template <> struct Pad_Op_Type<2> { static constexpr const char* const value = "Pad2D"; };
+template <> struct Pad_Op_Type<3> { static constexpr const char* const value = "Pad3D"; };
 } // namespace Aidge
 
 namespace {
@@ -127,14 +132,7 @@ namespace Aidge {
  * batch and channel consistency, or aligning tensor dimensions in machine learning workflows.
  */
 template <DimIdx_t DIM>
-class Pad_Op : public OperatorTensor,
-               public Registrable<Pad_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pad_Op<DIM>&)>> {
-public:
-    /**
-     * @brief Static string indicating the type of the operator.
-     */
-    static const std::string Type;
-
+class Pad_Op : public OperatorTensorWithImpl<Pad_Op<DIM>> {
 private:
     using Attributes_ = StaticAttributes<PadAttr, GENERATE_LIST_ATTR_TYPE(LIST_PAD_ATTR)>;
     template <PadAttr e>
@@ -143,6 +141,15 @@ private:
     const std::shared_ptr<Attributes_> mAttributes; ///< Holds operator attributes.
 
 public:
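+    // Bring dependent base-class members into scope (the base is a class
+    // template, so unqualified lookup does not find them otherwise).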
+    using OperatorTensorWithImpl<Pad_Op<DIM>>::getInput;
+    using OperatorTensorWithImpl<Pad_Op<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<Pad_Op<DIM>>::mOutputs;
+    using OperatorTensorWithImpl<Pad_Op<DIM>>::type;
+
+    static constexpr const char* const Type = Pad_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Deleted default constructor.
      */
@@ -157,7 +164,7 @@ public:
     constexpr Pad_Op(const std::array<DimSize_t, 2 * DIM>& beginEndTuples,
                      PadBorderType borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
+        : OperatorTensorWithImpl<Pad_Op<DIM>>(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
               attr<PadAttr::BeginEndBorders>(beginEndTuples),
               attr<PadAttr::BorderType>(borderType),
@@ -169,15 +176,9 @@ public:
      * @details Copies operator attributes and its output tensors, but not its input tensors. The new operator has no associated input.
      */
     Pad_Op(const Pad_Op& op)
-        : OperatorTensor(op),
+        : OperatorTensorWithImpl<Pad_Op<DIM>>(op),
           mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {}
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute output dimensions during the forward pass.
      * @param[in] allowDataDependency Flag indicating whether to allow data-dependent dimensions.
@@ -185,19 +186,6 @@ public:
      */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    /**
-     * @brief Set the backend for the Pad operator.
-     * @param name Name of the backend.
-     * @param device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the available backends for the Pad operator.
-     * @return A set of available backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -227,30 +215,6 @@ public:
     inline double& borderValue() const noexcept {
         return mAttributes->template getAttr<PadAttr::BorderValue>();
     }
-
-    /**
-     * @brief Get the input tensor names.
-     * @return Vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names.
-     * @return Vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::PadAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index e8d4269f2f25c7020aae6ed7306fd338ab95770a..05510062ed36efa22d5f201386996124473e0808 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -22,6 +22,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/PopImpl.hpp"
 
 
 #define LIST_POP_ATTR(X)  \
@@ -65,17 +66,17 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Pop_Op : public OperatorTensor,
-    public Registrable<Pop_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Pop_Op&)>> {
-public:
-    static const std::string Type;
-
+class Pop_Op : public OperatorTensorWithImpl<Pop_Op, Pop_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<PopAttr, GENERATE_LIST_ATTR_TYPE(LIST_POP_ATTR)>;
     template <PopAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Pop";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Default constructor for the `Pop` operator.
      */
@@ -88,25 +89,6 @@ public:
      */
     Pop_Op(const Pop_Op& op);
 
-    /**
-     * @brief Clone the operator by creating a copy of it.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Assign a specific backend and device for computation.
-     * @param name Name of the backend.
-     * @param device The device index (default is 0).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the list of available backends compatible with this operator.
-     * @return A set of strings representing backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Perform dimension inference for the operator, optionally allowing
      * data dependency during the process.
@@ -145,30 +127,6 @@ public:
     inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<PopAttr::ForwardStep>(); }
 
     inline std::uint32_t& backwardStep() const { return mAttributes->template getAttr<PopAttr::BackwardStep>(); }
-
-    /**
-     * @brief Retrieve the names of the operator's input tensors.
-     * @return A vector of strings representing input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Retrieve the names of the operator's output tensors.
-     * @return A vector of strings representing output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::PopAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index 5d0afc79d75f81d926a86a60cb73506347ddbd79..18b1f41b6bc58359bb383d1ac7d5e697e4fecc71 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -46,49 +46,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Pow_Op : public OperatorTensor,
-    public Registrable<Pow_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pow_Op&)>> {
+class Pow_Op : public OperatorTensorWithImpl<Pow_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Pow";
+    static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    Pow_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Pow_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Pow_Op(const Pow_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Pow_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Pow_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Pow_Op>(*this);
-    }
+    Pow_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input_1", "data_input_2"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Pow(const std::string& name = "");
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index ae88c0c714ec5a1ce3a5b39e290d2566e49d9f4b..a61186accf95f3fa78d5355c9d896a4f89177b16 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -81,7 +81,8 @@ class Producer_Op
                          std::function<std::shared_ptr<OperatorImpl>( const Producer_Op& )>>
 {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Producer";
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
 private:
     using Attributes_ = StaticAttributes<ProducerAttr,
@@ -216,20 +217,6 @@ public:
      */
     inline bool dimsForwarded() const noexcept override final { return true; }
 
-    /**
-     * @brief Retrieves the names of the inputs for the operator.
-     *
-     * @return An empty vector, as `Producer_Op` takes no inputs.
-     */
-    static const std::vector<std::string> getInputsName() { return {}; }
-
-    /**
-     * @brief Retrieves the names of the outputs for the operator.
-     *
-     * @return A vector containing the output name "data_output".
-     */
-    static const std::vector<std::string> getOutputsName() { return {"data_output"}; }
-
     /**
      * @brief Sets the output tensor for the operator.
      *
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index a9a84a3ee80eea5c0032fa08bce4ab96c44dba04..1e2c24d05cff6c08162f366594cac73ba028282f 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -37,41 +37,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ReLU_Op :
-    public OperatorTensor,
-    public Registrable<ReLU_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>>
-{
+class ReLU_Op : public OperatorTensorWithImpl<ReLU_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "ReLU";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    ReLU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op ReLU_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    ReLU_Op(const ReLU_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::ReLU_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
+    ReLU_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 };
 
 std::shared_ptr<Node> ReLU(const std::string& name = "");
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index cdb139f96f4bb33b9a22479a2f996d71abf85f0e..ef1909d214fa7956e92970604f64ac962119c38b 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -58,11 +58,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ReduceMean_Op : public OperatorTensor,
-                public Registrable<ReduceMean_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>> {
-
+class ReduceMean_Op : public OperatorTensorWithImpl<ReduceMean_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "ReduceMean";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @enum Attr
@@ -109,11 +109,6 @@ public:
      */
     ReduceMean_Op(const ReduceMean_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute the output dimensions during the forward pass.
      * @param[in] allowDataDependency Whether to allow data-dependent dimensions.
@@ -121,14 +116,6 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Set the backend for the Reshape operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -149,20 +136,6 @@ public:
      */
     inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<Attr::NoopWithEmptyAxes>(); }
 
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
-
     virtual ~ReduceMean_Op() noexcept;
 };
 
@@ -194,10 +167,6 @@ constexpr const char* const EnumStrings<Aidge::ReduceMean_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::ReduceMean_Op::attributesName(){
-    return EnumStrings<Aidge::ReduceMean_Op::Attr>::data;
-}
-
 #undef LIST_REDUCEMEAN_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index 9900a79d38d02590c13fa7f20353e441deeb9b78..183caf65480cae41fdc5f1f8ab21b2405428041a 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -57,11 +57,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ReduceSum_Op : public OperatorTensor,
-                public Registrable<ReduceSum_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceSum_Op &)>> {
-
+class ReduceSum_Op : public OperatorTensorWithImpl<ReduceSum_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "ReduceSum";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @enum Attr
@@ -97,7 +97,7 @@ public:
      * and if false, we reduce on all axes
      */
     ReduceSum_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
+        : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<Attr::Axes>(axes),
             attr<Attr::KeepDims>(keep_dims),
@@ -111,29 +111,12 @@ public:
      * its input tensors. The new operator has no associated input.
      */
     ReduceSum_Op(const ReduceSum_Op& op)
-        : OperatorTensor(op),
+        : OperatorTensorWithImpl(op),
           mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ReduceSum_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::ReduceSum_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ReduceSum_Op>(*this);
-    }
+    {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -153,20 +136,6 @@ public:
      * @brief Get the behavior when axes are empty.
      */
     inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<Attr::NoopWithEmptyAxes>(); }
-
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -201,10 +170,6 @@ constexpr const char* const EnumStrings<Aidge::ReduceSum_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::ReduceSum_Op::attributesName() {
-    return EnumStrings<Aidge::ReduceSum_Op::Attr>::data;
-}
-
 #undef LIST_REDUCESUM_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCESUM_H_ */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 4d0e21e27696262924fd5bd99673aea99081b750..54c1e92d8cf0b4c2813394eefb07f1b467bebebe 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -21,6 +21,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/ReshapeImpl.hpp"
 
 
 #define LIST_RESHAPE_ATTR(X)  \
@@ -66,21 +67,17 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Reshape_Op : public OperatorTensor,
-                   public Registrable<Reshape_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Reshape_Op&)>> {
-
-public:
-    /**
-     * @brief Static type string for the Reshape operator.
-     */
-    static const std::string Type;
-
+class Reshape_Op : public OperatorTensorWithImpl<Reshape_Op, Reshape_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<ReshapeAttr, GENERATE_LIST_ATTR_TYPE(LIST_RESHAPE_ATTR)>;
     template <ReshapeAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Reshape";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Deleted default constructor.
      */
@@ -101,12 +98,6 @@ public:
      */
     Reshape_Op(const Reshape_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Check whether the dimensions have been forwarded successfully.
      * @return True if dimensions were successfully forwarded.
@@ -126,18 +117,6 @@ public:
      * @return True if successful, false otherwise.
      */
     bool forwardDType() override final;
-    /**
-     * @brief Set the backend for the Reshape operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the available backends for the Reshape operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
 
     /**
      * @brief Get the attributes of the operator.
@@ -156,30 +135,6 @@ public:
      * @return Reference to the AllowZero attribute.
      */
     inline bool& allowZero() const { return mAttributes->template getAttr<ReshapeAttr::AllowZero>(); }
-
-    /**
-     * @brief Get the input tensor names for the Reshape operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names for the Reshape operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ReshapeAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 6af6f85a9137570c8380fc1af89d362da99bffa7..921b86d467d3fc1eb6ebefb97aa7b4d850b7c9d0 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -99,13 +99,7 @@ namespace Aidge {
  * @param InterpolationMode type of interpolation (currently only support cubic
  * interpolation)
  */
-class Resize_Op
-    : public OperatorTensor,
-      public Registrable<
-          Resize_Op,
-          std::string,
-          std::function<std::shared_ptr<OperatorImpl>(const Resize_Op &)>> {
-
+class Resize_Op : public OperatorTensorWithImpl<Resize_Op> {
 private:
     using Attributes_ =
         StaticAttributes<ResizeAttr,
@@ -115,7 +109,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Resize";
+    static constexpr const char* const InputsName[] = {"data_input", "roi ", "scales", "sizes"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief creates a resize operator
      * This node can take 4 different inputs, more details in the class
@@ -138,7 +135,7 @@ public:
         Interpolation::Mode interpol_mode = Interpolation::Mode::RoundPreferFloor,
         float cubic_coef_a = -.75f,
         PadBorderType paddingMode = PadBorderType::Edge)
-        : OperatorTensor(Type,
+        : OperatorTensorWithImpl(Type,
                          {InputCategory::Data,
                           InputCategory::OptionalData,
                           InputCategory::OptionalData,
@@ -157,31 +154,12 @@ public:
      * @param op Operator to copy.
      */
     Resize_Op(const Resize_Op &op)
-        : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Resize_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Resize_Op
-     */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<Resize_Op>(*this);
-    }
+        : OperatorTensorWithImpl(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+    {}
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string &name,
-                    DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override {
-        return Registrar<Resize_Op>::getKeys();
-    }
-
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
     inline Interpolation::CoordinateTransformation
@@ -198,22 +176,6 @@ public:
     inline PadBorderType paddingMode() const {
         return mAttributes->template getAttr<ResizeAttr::PaddingMode>();
     }
-
-    static const std::vector<std::string> getInputsName() {
-        //  roi, scales, sizes, even if considered as const parameters/input
-        return {"data_input", "roi ", "scales", "sizes"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ResizeAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Round.hpp b/include/aidge/operator/Round.hpp
index 3a5bb0859392670e1c6710b115bef72c3c52074a..acfc9f2a81bb39475d126ec125a4bb533d881289 100644
--- a/include/aidge/operator/Round.hpp
+++ b/include/aidge/operator/Round.hpp
@@ -36,38 +36,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Round_Op : public OperatorTensor,
-                public Registrable<Round_Op,  // <Op, backend, implementation creation function>
-                                std::string,
-                                std::function<std::shared_ptr<OperatorImpl>(const Round_Op&)>>
-{
+class Round_Op : public OperatorTensorWithImpl<Round_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Round";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    Round_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Round_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Round_Op(const Round_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Round_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
+    Round_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 };
 
 std::shared_ptr<Node> Round(const std::string& name = "");
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
deleted file mode 100644
index c5264fe551bf6ab0d18010b37bb66782170cee74..0000000000000000000000000000000000000000
--- a/include/aidge/operator/Scaling.hpp
+++ /dev/null
@@ -1,168 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CORE_OPERATOR_SCALING_H_
-#define AIDGE_CORE_OPERATOR_SCALING_H_
-
-#include <cstddef>  // std::size_t
-#include <vector>
-#include <memory>
-
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/graph/Node.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
-#include "aidge/utils/Types.h"
-
-// Caution: This operator is now deprecated and should no longer be used.
-// It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
-
-#define LIST_SCALING_ATTR(X) \
-    X(ScalingFactor, "scaling_factor", float), \
-    X(QuantizedNbBits, "quantized_nb_bits", std::size_t), \
-    X(IsOutputUnsigned, "is_output_unsigned", bool)
-
-namespace Aidge {
-/**
- * @enum ScalingAttr
- * @brief Attributes for the Scaling operation.
- *
- * - ScalingFactor: Floating-point scaling factor applied to the input tensor.
- * - QuantizedNbBits: Specifies the bit-width used for quantization.
- * - IsOutputUnsigned: Indicates whether the quantized output values are unsigned.
- */
-enum class ScalingAttr {
-    GENERATE_LIST_ATTR_ENUM(LIST_SCALING_ATTR)
-};
-} // namespace Aidge
-
-namespace {
-template <>
-struct EnumStrings<Aidge::ScalingAttr> {
-    static const char* const data[];
-};
-constexpr const char* const EnumStrings<Aidge::ScalingAttr>::data[] = {
-    GENERATE_LIST_ATTR_STR(LIST_SCALING_ATTR)
-};
-}
-
-namespace Aidge {
-/**
- * @brief Description of a scaling operation to scale and quantize input tensors.
- *
- * The `Scaling_Op` class applies a scaling factor to the input tensor, quantizes
- * the scaled values to a specified bit-width, and outputs either signed or unsigned integers
- * based on the configuration.
- *
- * The input and output Tensors have the same dimensions.
- *
- * ### Deprecation Notice
- * This operator is deprecated and has been replaced by the `Quantizer` MetaOperator.
- * It is retained for backward compatibility and should not be used in new implementations.
- *
- * @see OperatorTensor
- * @see Registrable
- */
-class Scaling_Op
-    : public OperatorTensor,
-      public Registrable<Scaling_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Scaling_Op&)>> {
-
-public:
-    static const std::string Type;
-
-private:
-    using Attributes_ = StaticAttributes<ScalingAttr, GENERATE_LIST_ATTR_TYPE(LIST_SCALING_ATTR)>;
-    template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
-    const std::shared_ptr<Attributes_> mAttributes;
-
-public:
-    Scaling_Op() = delete;
-
-    /**
-     * @brief Constructor for the Scaling operator.
-     * @param[in] scalingFactor Scaling factor to be applied to the input tensor.
-     * @param[in] nbBits Number of bits for quantization.
-     * @param[in] isOutputUnsigned Flag indicating whether the output should be unsigned.
-     */
-    Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned);
-
-    /**
-     * @brief Copy-constructor.
-     * @param[in] op Scaling_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not its input tensors.
-     * The new operator has no associated input.
-     */
-    Scaling_Op(const Scaling_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    /**
-     * @brief Get the attributes of the operator.
-     */
-    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-
-    /**
-     * @brief Get the scaling factor.
-     */
-    inline float& scalingFactor() const noexcept { return mAttributes -> getAttr<ScalingAttr::ScalingFactor>(); }
-
-    /**
-     * @brief Get the number of quantization bits.
-     */
-    inline std::size_t& quantizedNbBits() const noexcept { return mAttributes -> getAttr<ScalingAttr::QuantizedNbBits>(); }
-
-    /**
-     * @brief Check if the output is unsigned.
-     */
-    inline bool& isOutputUnsigned() const noexcept { return mAttributes -> getAttr<ScalingAttr::IsOutputUnsigned>(); }
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ScalingAttr>::data;
-	}
-};
-
-/**
- * @brief Apply a scaling and quantization operation on a tensor.
- *
- * @param[in] scalingFactor Scaling factor to apply to the input tensor.
- * @param[in] quantizedNbBits Number of bits for quantization.
- * @param[in] isOutputUnsigned Whether the quantized output should be unsigned.
- * @param[in] name Name of the Operator.
- * @return std::shared_ptr<Node> Node containing the Operator.
- */
-std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
-                                     std::size_t quantizedNbBits = 8,
-                                     bool isOutputUnsigned = true,
-                                     const std::string& name = "");
-} // namespace Aidge
-
-#undef LIST_SCALING_ATTR
-
-#endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
diff --git a/include/aidge/operator/Select.hpp b/include/aidge/operator/Select.hpp
index cd0a56bb905ff8f2a626f0b735cde73c266c7738..a877c34cdce0a7ca7161f662fbed414ad8dacc74 100644
--- a/include/aidge/operator/Select.hpp
+++ b/include/aidge/operator/Select.hpp
@@ -21,6 +21,7 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/backend/generic/operator/SelectImpl.hpp"
 
 namespace Aidge {
 
@@ -29,41 +30,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Select_Op : public OperatorTensor,
-    public Registrable<Select_Op,
-                       std::string,
-                       std::function<std::shared_ptr<OperatorImpl>(const Select_Op&)>>
-{
+class Select_Op : public OperatorTensorWithImpl<Select_Op, Select_OpImpl> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Select";
+    static constexpr const char* const InputsName[] = {"select", "data_input_0", "data_input_n"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Select_Op(const Aidge::IOIndex_t nbIn);
 
-    /**
-     * @brief Copy-constructor.
-     * @param op Select_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Select_Op(const Select_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Select_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool forwardDims(bool allowDataDependency = false) override final;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName() {
-        return {"select", "data_input_0", "data_input_n"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Select(const IOIndex_t nbIn, const std::string& name = "");
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 37968c2f335a4c3ab5b15a4e2336d1eadf7e48d7..b16c2b63d713993fddb2a6041339bf3aa8612fc1 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/ShapeImpl.hpp"
 
 
 #define LIST_SHAPE_ATTR(X) \
@@ -63,17 +64,7 @@ namespace Aidge {
  * @example Input: Tensor with shape `[4, 5, 6, 7]`, `start=1`, `end=3` -> Output: `[5, 6]`
  * @example Input: Tensor with shape `[4, 5, 6]`, `start=0`, `end=-1` (default) -> Output: `[4, 5, 6]`
  */
-class Shape_Op : public OperatorTensor,
-                public Registrable<Shape_Op,
-                                   std::string,
-                                   std::function<std::shared_ptr<OperatorImpl>(const Shape_Op&)>> {
-
-public:
-    /**
-     * @brief Static type string for the Shape operator.
-     */
-    static const std::string Type;
-
+class Shape_Op : public OperatorTensorWithImpl<Shape_Op, Shape_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<ShapeAttr, GENERATE_LIST_ATTR_TYPE(LIST_SHAPE_ATTR)>;
     template <ShapeAttr e> using attr = typename Attributes_::template attr<e>;
@@ -82,6 +73,10 @@ private:
     using outDType = cpptype_t<DataType::Int64>;
 
 public:
+    static constexpr const char* const Type = "Shape";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Constructor for the Shape operator.
      * @param[in] start Start index for slicing dimensions.
@@ -97,12 +92,6 @@ public:
      */
     Shape_Op(const Shape_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute the output dimensions during the forward pass.
      * @param allowDataDependency Whether to allow data-dependent dimensions.
@@ -116,21 +105,8 @@ public:
      */
     bool forwardDType() override final;
 
-    /**
-     * @brief Set the backend for the Shape operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
     void setDataType(const Aidge::DataType &datatype) const override;
 
-    /**
-     * @brief Get the available backends for the Shape operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -148,30 +124,6 @@ public:
      * @return Reference to the end index attribute.
      */
     inline std::int64_t& end() const noexcept { return mAttributes->getAttr<ShapeAttr::End>(); }
-
-    /**
-     * @brief Get the input tensor names for the Shape operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names for the Shape operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::ShapeAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
index 2375f845f2b05ceadfa4463eba00fc0c9eeb0302..428ed7e14495da40666c6c4a2f04a42ace7175f2 100644
--- a/include/aidge/operator/ShiftGELU.hpp
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -50,44 +50,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ShiftGELU_Op : public OperatorTensor,
-    public Registrable<ShiftGELU_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)>>
-{
+class ShiftGELU_Op : public OperatorTensorWithImpl<ShiftGELU_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "ShiftGELU";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     ShiftGELU_Op();
-
-    /**
-     * @brief Copy-constructor.
-     * @param op ShiftGELU_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    ShiftGELU_Op(const ShiftGELU_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::ShiftGELU_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Set the backend for the Reshape operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> ShiftGELU(const std::string& name = "");
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
index fc76e300527ca8b6f7a7097a8f7b2c11155fee56..81be1b15b3399b3fc45d37db3ca98f78f5a27a62 100644
--- a/include/aidge/operator/ShiftMax.hpp
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -51,37 +51,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class ShiftMax_Op : public OperatorTensor,
-    public Registrable<ShiftMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftMax_Op&)>> {
+class ShiftMax_Op : public OperatorTensorWithImpl<ShiftMax_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "ShiftMax";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     ShiftMax_Op();
-
-    /**
-     * @brief Copy-constructor.
-     * @param op ShiftMax_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    ShiftMax_Op(const ShiftMax_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::ShiftMax_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> ShiftMax(const std::string& name = "");
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index 0208a7f600e95ea27f8af3da8417c85126a29f00..b9bbcbe4d0752813c932db5cca402ad3486c496e 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -36,39 +36,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Sigmoid_Op : public OperatorTensor,
-    public Registrable<Sigmoid_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const Sigmoid_Op&)>>
-{
+class Sigmoid_Op : public OperatorTensorWithImpl<Sigmoid_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Sigmoid";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Sigmoid_Op();
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Sigmoid_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Sigmoid_Op(const Sigmoid_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Sigmoid_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Sigmoid(const std::string& name = "");
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 434fb8ab96bc6bb681aa27c44b7ff3f4c63e273d..e10843d09d43f7aeb467727114d4044cf49e793d 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -22,6 +22,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/SliceImpl.hpp"
 
 
 #define LIST_SLICE_ATTR(X) \
@@ -89,12 +90,7 @@ namespace Aidge{
  * @see OperatorTensor
  * @see Registrable
  */
-class Slice_Op : public OperatorTensor,
-                public Registrable<Slice_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Slice_Op &)>> {
-
-public:
-    static const std::string Type;
-
+class Slice_Op : public OperatorTensorWithImpl<Slice_Op, Slice_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<SliceAttr, GENERATE_LIST_ATTR_TYPE(LIST_SLICE_ATTR)>;
     template <SliceAttr e>
@@ -102,6 +98,10 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Slice";
+    static constexpr const char* const InputsName[] = {"data_input", "starts", "ends", "axes", "steps"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     Slice_Op() = delete;
 
     /**
@@ -124,19 +124,10 @@ public:
      */
     Slice_Op(const Slice_Op &op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool dimsForwarded() const override final;
 
     bool forwardDims(bool allowDataDependency = true) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -161,22 +152,6 @@ public:
      * @brief Get the steps for the slice operation.
      */
     inline std::vector<std::int64_t>& steps() const noexcept { return mAttributes -> getAttr<SliceAttr::Steps>(); }
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "starts", "ends", "axes", "steps"};
-    }
-
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::SliceAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index b0c6a2edae7ab9bec4a5f45746f2bc9258b6eb29..97b64c528cdd64c6c0ccb1521a79b8695db80966 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -68,23 +68,17 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Softmax_Op : public OperatorTensor,
-                   public Registrable<Softmax_Op,
-                                      std::string,
-                                      std::function<std::shared_ptr<OperatorImpl>(const Softmax_Op&)>> {
-
-public:
-    /**
-     * @brief Static type string for the Softmax operator.
-     */
-    static const std::string Type;
-
+class Softmax_Op : public OperatorTensorWithImpl<Softmax_Op> {
 private:
     using Attributes_ = StaticAttributes<SoftmaxAttr, GENERATE_LIST_ATTR_TYPE(LIST_SOFTMAX_ATTR)>;
     template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Softmax";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Deleted default constructor.
      */
@@ -104,25 +98,6 @@ public:
      */
     Softmax_Op(const Softmax_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Set the backend for the Softmax operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the available backends for the Softmax operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -134,30 +109,6 @@ public:
      * @return Reference to the axis attribute.
      */
     inline std::int32_t& axis() const noexcept { return mAttributes->getAttr<SoftmaxAttr::Axis>(); }
-
-    /**
-     * @brief Get the input names for the Softmax operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output names for the Softmax operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::SoftmaxAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index e9e43a350eadac3bd15bf2afdcec4370b697e55a..b09e77f7b8ace5e15348cec0522f622a1c84cbe3 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -22,6 +22,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/SplitImpl.hpp"
 
 
 #define LIST_SPLIT_ATTR(X) \
@@ -82,21 +83,17 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Split_Op : public OperatorTensor,
-                 public Registrable<Split_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Split_Op &)>> {
-
-public:
-    /**
-     * @brief Static type string for the Split operator.
-     */
-    static const std::string Type;
-
+class Split_Op : public OperatorTensorWithImpl<Split_Op, Split_OpImpl> {
 private:
     using Attributes_ = StaticAttributes<SplitAttr,GENERATE_LIST_ATTR_TYPE(LIST_SPLIT_ATTR)>;
     template <SplitAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Split";
+    static constexpr const char* const InputsName[] = {"data_input", "split"};
+    static constexpr const char* const OutputsName[] = {"data_output_0", "data_output_n"};
+
     Split_Op() = delete;
 
     /**
@@ -115,27 +112,9 @@ public:
      */
     Split_Op(const Split_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Set the backend for the Split operator.
-     * @param[in] name Backend name.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the available backends for the Split operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      */
@@ -150,30 +129,6 @@ public:
      * @brief Get the sizes of each split.
      */
     inline std::vector<DimSize_t>& split() const { return mAttributes->template getAttr<SplitAttr::Split>(); }
-
-    /**
-     * @brief Get the input names for the Split operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "split"};
-    }
-
-    /**
-     * @brief Get the output names for the Split operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output_0", "data_output_n"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::SplitAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 8b9eb1b787f6cc74c97fb25c1b3c1f7d5daf7123..caf534b057ca23ac5afd08687f2b8d9255c83f2e 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -35,38 +35,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Sqrt_Op : public OperatorTensor,
-                public Registrable<Sqrt_Op,  // <Op, backend, implementation creation function>
-                                std::string,
-                                std::function<std::shared_ptr<OperatorImpl>(const Sqrt_Op&)>> {
+class Sqrt_Op : public OperatorTensorWithImpl<Sqrt_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Sqrt";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    Sqrt_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Sqrt_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Sqrt_Op(const Sqrt_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Sqrt_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
+    Sqrt_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 };
 
 std::shared_ptr<Node> Sqrt(const std::string& name = "");
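The Sqrt_Op hunk above is representative of the header-side refactor as a whole: the Registrable boilerplate, the copy constructor, clone(), setBackend() and getAvailableBackends() declarations all move into the OperatorTensorWithImpl base, and the tensor names become constexpr arrays on the class. A minimal sketch, assuming only what this patch shows (the new InputsName/OutputsName arrays), of how code that relied on the removed getInputsName()/getOutputsName() statics can recover the same std::vector<std::string>:

```cpp
#include <iterator>
#include <string>
#include <vector>

#include "aidge/operator/Sqrt.hpp"

// Hedged sketch: rebuilds the vector the removed static used to return,
// directly from the constexpr name array introduced above (the same
// conversion the updated Python bindings perform further down).
std::vector<std::string> sqrtInputNames() {
    return {std::begin(Aidge::Sqrt_Op::InputsName),
            std::end(Aidge::Sqrt_Op::InputsName)};
}
```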
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 03db92a844594bc45af65cc3c77e7956a38e2cad..89da2f77f8c91ba90db2118fd9d41efa1242899c 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -24,6 +24,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/SqueezeImpl.hpp"
 
 
 #define LIST_SQUEEZE_ATTR(X) \
@@ -68,21 +69,17 @@ namespace Aidge {
  * @example Calling squeeze() with no argument will result in the removal of
  * every 1-sized dimension in the tensor.
  */
-class Squeeze_Op
-    : public OperatorTensor,
-      public Registrable<Squeeze_Op, std::string,
-                         std::function<std::shared_ptr<OperatorImpl>(const Squeeze_Op &)>> {
-
-public:
-  static const std::string
-      Type; // name of the type of the operation (Here "Squeeze")
-
+class Squeeze_Op : public OperatorTensorWithImpl<Squeeze_Op, Squeeze_OpImpl> {
 private:
   using Attributes_ = StaticAttributes<SqueezeAttr, std::vector<std::int8_t>>;
   template <SqueezeAttr e> using attr = typename Attributes_::template attr<e>;
   const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    static constexpr const char* const Type = "Squeeze";
+    static constexpr const char* const InputsName[] = {"data_input", "axes_to_squeeze"};
+    static constexpr const char* const OutputsName[] = {"squeezed"};
+
   /**
    * @brief constructor for Squeeze op
    * @param[in] axes around which perform the operation
@@ -97,24 +94,12 @@ public:
    */
   Squeeze_Op(const Squeeze_Op &op);
 
-  /**
-   * @brief Clone the operator using its copy-constructor.
-   * @see Operator::MatMul_Op
-   */
-  std::shared_ptr<Operator> clone() const override final {
-    return std::make_shared<Squeeze_Op>(*this);
-  }
-
   /**
    * @brief Compute dimensions for the output Tensor
    */
   bool forwardDims(bool allowDataDependency = false) override final;
   bool dimsForwarded() const override final;
 
-  void setBackend(const std::string &name,
-                  DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
   inline std::shared_ptr<Attributes> attributes() const override {
     return mAttributes;
   }
@@ -126,21 +111,6 @@ public:
     inline std::vector<std::int8_t> &axes() const noexcept {
         return mAttributes->template getAttr<SqueezeAttr::Axes>();
     }
-
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "axes_to_squeeze"};
-    }
-    static const std::vector<std::string> getOutputsName() {
-        return {"squeezed"};
-    }
-
-    /**
-     * @brief Retrieves the names of the attributes for the operator.
-     * @return A vector containing the attributes name.
-     */
-    static constexpr const char* const* attributesName(){
-        return EnumStrings<Aidge::SqueezeAttr>::data;
-    }
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
index e0d741226af66c0ed85323fe85ff5bf437003a5d..42c26690decd7d4337e076206606352bf0556a0a 100644
--- a/include/aidge/operator/Stack.hpp
+++ b/include/aidge/operator/Stack.hpp
@@ -21,6 +21,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/StackImpl.hpp"
 
 namespace Aidge {
 
@@ -69,8 +70,7 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class StackOp : public OperatorTensor,
-    public Registrable<StackOp, std::string, std::function<std::unique_ptr<OperatorImpl>(const StackOp&)>> {
+class StackOp : public OperatorTensorWithImpl<StackOp, StackOpImpl> {
 private:
     using Attributes_ = StaticAttributes<StackAttr,
             GENERATE_LIST_ATTR_TYPE(LIST_STACK_ATTR)
@@ -79,7 +79,9 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Stack";
+    static constexpr const char* const InputsName[] = {"data_input", "max_elements"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @brief Constructs a new Stack Operator.
@@ -94,25 +96,6 @@ public:
      */
     StackOp(const StackOp& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-    /**
-     * @brief Assign a specific backend and device for computation.
-     * @param name Name of the backend.
-     * @param device The device index (default is 0).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-
-    /**
-     * @brief Get the list of available backends compatible with this operator.
-     * @return A set of strings representing backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Check if dimensions have been forwarded successfully.
      * @return True if dimensions are forwarded.
@@ -168,31 +151,6 @@ public:
     inline std::uint32_t& backwardStep() const {
         return mAttributes->template getAttr<StackAttr::BackwardStep>();
     }
-
-
-    /**
-     * @brief Retrieve the names of the operator's input tensors.
-     * @return A vector of strings representing input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input", "max_elements"};
-    }
-
-    /**
-     * @brief Retrieve the names of the operator's output tensors.
-     * @return A vector of strings representing output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::StackAttr>::data;
-	}
 };
 
 /**
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 73cce3f554f3ea7bbd802ee239de49077a676823..685fde9e83f92c0e08bdcdfbb18c867fd8b51fd1 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -46,40 +46,15 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Sub_Op : public OperatorTensor,
-    public Registrable<Sub_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Sub_Op&)>> {
+class Sub_Op : public OperatorTensorWithImpl<Sub_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Sub";
+    static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-public:
-    Sub_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Sub_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Sub_Op(const Sub_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Sub_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
+    Sub_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input_1", "data_input_2"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Sub(const std::string& name = "");
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index 71b1511d962318eb784c23611dda1f346281d7ae..9df5b2a7cc957fd8cab8432355070253e66e7158 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -37,41 +37,13 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Tanh_Op : 
-    public OperatorTensor,
-    public Registrable<Tanh_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::unique_ptr<OperatorImpl>(const Tanh_Op&)>>
-{
+class Tanh_Op : public OperatorTensorWithImpl<Tanh_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "Tanh";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     Tanh_Op();
-
-    /**
-     * @brief Copy-constructor.
-     * @param op Tanh_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    Tanh_Op(const Tanh_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Tanh_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
-
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
 };
 
 std::shared_ptr<Node> Tanh(const std::string& name = "");
diff --git a/include/aidge/operator/TopK.hpp b/include/aidge/operator/TopK.hpp
index e1aa193bb0b7720fce0d1161d3a352f2e8109324..073ecc198e66765b559cc209229e519e11e72886 100644
--- a/include/aidge/operator/TopK.hpp
+++ b/include/aidge/operator/TopK.hpp
@@ -57,8 +57,7 @@ constexpr const char* const EnumStrings<Aidge::TopKAttr>::data[] = {
 
 namespace Aidge {
 
-class TopK_Op : public OperatorTensor,
-    public Registrable<TopK_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const TopK_Op&)>> {
+class TopK_Op : public OperatorTensorWithImpl<TopK_Op> {
 private:
     using Attributes_ =
         StaticAttributes<TopKAttr,
@@ -68,7 +67,9 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "TopK";
+    static constexpr const char* const InputsName[] = {"x", "k"};
+    static constexpr const char* const OutputsName[] = {"values", "indices"};
 
     TopK_Op(int64_t axis = -1,
         bool largest = true,
@@ -83,40 +84,15 @@ public:
      */
     TopK_Op(const TopK_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::TopK_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<TopK_Op>(*this);
-    }
-
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
     void setDataType(const DataType& dataType) const override final;
-    std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline int64_t& axis() const { return mAttributes->template getAttr<TopKAttr::Axis>(); }
     inline bool& largest() const { return mAttributes->template getAttr<TopKAttr::Largest>(); }
     inline bool& sorted() const { return mAttributes->template getAttr<TopKAttr::Sorted>(); }
     inline IOIndex_t& k() const { return mAttributes->template getAttr<TopKAttr::K>(); }
-
-    static const std::vector<std::string> getInputsName(){
-        return {"x", "k"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"values", "indices"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::TopKAttr>::data;
-	}
 };
 
 std::shared_ptr<Node> TopK(const std::string& name = "");
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 25d8d92f67901dbeb7cf0610a0f818cdbf60b0bd..978234616fd2c3fcf4d7f2003690ed00d3f8ed95 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -23,30 +23,7 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-
-namespace Aidge {
-/**
- * @brief implementation of the operator Transpose.
- * @note Since this operator implementation is agnostic to the backend it is
- * located here instead of in aidge_backend.
- */
-class TransposeImpl : public OperatorImpl {
-public:
-    /**
-     * @brief Constructor for TransposeImpl.
-     * @param[in] op The Operator instance.
-     * @param[in] backend The backend name (optional).
-     */
-    TransposeImpl(const Operator& op, const std::string& backend = "")
-        : OperatorImpl(op, backend)
-    {}
-
-    /**
-     * @brief Perform the forward operation for the transpose.
-     */
-    void forward() override;
-};
-} // namespace Aidge
+#include "aidge/backend/generic/operator/TransposeImpl.hpp"
 
 #define LIST_TRANSPOSE_ATTR(X) \
     X(OutputDimsOrder, "output_dims_order", std::vector<DimSize_t>)
@@ -66,15 +43,11 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op,
-                                   std::string,
-                                   std::function<std::shared_ptr<OperatorImpl>(const Transpose_Op&)>> {
+class Transpose_Op : public OperatorTensorWithImpl<Transpose_Op, TransposeImpl> {
 public:
-    /**
-     * @brief Static type string for the Transpose operator.
-     */
-    static const std::string Type;
+    static constexpr const char* const Type = "Transpose";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
     /**
      * @enum Attr
@@ -113,12 +86,6 @@ public:
      */
     Transpose_Op(const Transpose_Op& op);
 
-    /**
-     * @brief Clone the operator using its copy constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute the output dimensions during the forward pass.
      * @param allowDataDependency Whether to allow data-dependent dimensions.
@@ -126,19 +93,6 @@ public:
      */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    /**
-     * @brief Set the backend for the Transpose operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the available backends for the Transpose operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -154,28 +108,6 @@ public:
     inline std::vector<DimSize_t>& outputDimsOrder() const noexcept {
         return mAttributes->getAttr<Attr::OutputDimsOrder>();
     }
-
-    /**
-     * @brief Get the input tensor names for the Transpose operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names for the Transpose operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -200,10 +132,6 @@ constexpr const char* const EnumStrings<Aidge::Transpose_Op::Attr>::data[] = {
 };
 }
 
-constexpr const char* const* Aidge::Transpose_Op::attributesName() {
-    return EnumStrings<Aidge::Transpose_Op::Attr>::data;
-}
-
 #undef LIST_TRANSPOSE_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_TRANSPOSE_H_ */
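Transpose is the one operator whose backend-agnostic implementation class moves as well: TransposeImpl leaves the operator header for aidge/backend/generic/operator/TransposeImpl.hpp and is handed to the base as the second template argument, so the operator keeps its default implementation. Because Transpose.hpp now includes the relocated header, code that reached TransposeImpl through the operator header should still compile; a small sketch under that assumption:

```cpp
#include <memory>

#include "aidge/operator/Transpose.hpp"  // now pulls in the relocated TransposeImpl.hpp

// Compile-compatibility sketch only: the declaration has moved, the usage has not.
std::unique_ptr<Aidge::OperatorImpl> makeTransposeImpl(const Aidge::Operator& op) {
    return std::make_unique<Aidge::TransposeImpl>(op);
}
```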
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index 664dafc93b570baf4b24216a4818e6716065330c..5bac27b9b295022673e03a7f79bcae65045eab4a 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -26,6 +26,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/UnfoldImpl.hpp"
 
 
 #define LIST_UNFOLD_ATTR(X)  \
@@ -45,6 +46,9 @@ namespace Aidge {
 enum class UnfoldAttr {
     GENERATE_LIST_ATTR_ENUM(LIST_UNFOLD_ATTR)
 };
+
+template <DimIdx_t DIM> struct Unfold_Op_Type {};
+template <> struct Unfold_Op_Type<2> { static constexpr const char* const value = "Unfold2D"; };
 }  // namespace Aidge
 
 namespace {
@@ -74,20 +78,21 @@ namespace Aidge {
  * @see Registrable
  */
 template <DimIdx_t DIM>
-class Unfold_Op : public OperatorTensor,
-                  public Registrable<Unfold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM>&)>> {
-public:
-    /**
-     * @brief Static type string for the Unfold operator.
-     */
-    static const std::string Type;
-
+class Unfold_Op : public OperatorTensorWithImpl<Unfold_Op<DIM>, Unfold_OpImpl<DIM>> {
 private:
     using Attributes_ = StaticAttributes<UnfoldAttr, GENERATE_LIST_ATTR_TYPE(LIST_UNFOLD_ATTR)>;
     template <UnfoldAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+    using OperatorTensorWithImpl<Unfold_Op<DIM>, Unfold_OpImpl<DIM>>::getInput;
+    using OperatorTensorWithImpl<Unfold_Op<DIM>, Unfold_OpImpl<DIM>>::inputsAssociated;
+    using OperatorTensorWithImpl<Unfold_Op<DIM>, Unfold_OpImpl<DIM>>::mOutputs;
+
+    static constexpr const char* const Type = Unfold_Op_Type<DIM>::value;
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
+
     /**
      * @brief Deleted default constructor.
      */
@@ -111,12 +116,6 @@ public:
      */
     Unfold_Op(const Unfold_Op<DIM>& op);
 
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @return A shared pointer to the cloned operator.
-     */
-    std::shared_ptr<Operator> clone() const override;
-
     /**
      * @brief Compute the output dimensions during the forward pass.
      * @param[in] allowDataDependency Whether to allow data-dependent dimensions.
@@ -124,19 +123,6 @@ public:
      */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    /**
-     * @brief Set the backend for the Unfold operator.
-     * @param[in] name Name of the backend.
-     * @param[in] device Device index (optional).
-     */
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
-
-    /**
-     * @brief Get the available backends for the Unfold operator.
-     * @return A set of backend names.
-     */
-    std::set<std::string> getAvailableBackends() const override;
-
     /**
      * @brief Get the attributes of the operator.
      * @return A shared pointer to the attributes.
@@ -166,30 +152,6 @@ public:
     inline std::array<DimSize_t, DIM>& kernelDims() const {
         return mAttributes->template getAttr<UnfoldAttr::KernelDims>();
     }
-
-    /**
-     * @brief Get the input tensor names for the Unfold operator.
-     * @return A vector of input tensor names.
-     */
-    static const std::vector<std::string> getInputsName() {
-        return {"data_input"};
-    }
-
-    /**
-     * @brief Get the output tensor names for the Unfold operator.
-     * @return A vector of output tensor names.
-     */
-    static const std::vector<std::string> getOutputsName() {
-        return {"data_output"};
-    }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::UnfoldAttr>::data;
-	}
 };
 
 /**
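The using-declarations added to Unfold_Op are not cosmetic. Unfold_Op is itself a class template, so members inherited from its dependent OperatorTensorWithImpl base are not found by unqualified lookup inside the template unless they are re-introduced (or spelled this->getInput(...)). A self-contained illustration of that rule, with toy types rather than Aidge code:

```cpp
#include <iostream>

template <typename T>
struct Base {
    int value() const { return 42; }
};

template <typename T>
struct Derived : Base<T> {
    using Base<T>::value;              // without this, the unqualified call below fails
    int doubled() const { return 2 * value(); }
};

int main() {
    Derived<float> d;
    std::cout << d.doubled() << '\n';  // prints 84
}
```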
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index 4a66c37b2d56962259e9e00c75987a54194f2eb4..d3bf1fa6c0ee617b617b49536e2f533508d8f4ac 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/generic/operator/UnsqueezeImpl.hpp"
 
 
 #define LIST_UNSQUEEZE_ATTR(X)  \
@@ -62,14 +63,7 @@ namespace Aidge {
  * dims_to_unsqueeze[i] < tensor.nbDim() +
  * dims_to_unsqueeze.size()
  */
-class Unsqueeze_Op
-    : public OperatorTensor,
-      public Registrable<Unsqueeze_Op, std::string,
-                         std::function<std::shared_ptr<OperatorImpl>(const Unsqueeze_Op &)>> {
-
-public:
-  static const std::string Type;
-
+class Unsqueeze_Op : public OperatorTensorWithImpl<Unsqueeze_Op, Unsqueeze_OpImpl> {
 private:
   using Attributes_ = StaticAttributes<UnsqueezeAttr, GENERATE_LIST_ATTR_TYPE(LIST_UNSQUEEZE_ATTR)>;
   template <UnsqueezeAttr e>
@@ -77,6 +71,10 @@ private:
   const std::shared_ptr<Attributes_> mAttributes;
 
 public:
+  static constexpr const char* const Type = "Unsqueeze";
+  static constexpr const char* const InputsName[] = {"data_input", "axes_to_unsqueeze"};
+  static constexpr const char* const OutputsName[] = {"unsqueezed"};
+
   Unsqueeze_Op() = delete;
 
   /**
@@ -93,14 +91,6 @@ public:
    */
   Unsqueeze_Op(const Unsqueeze_Op &op);
 
-  /**
-   * @brief Clone the operator using its copy-constructor.
-   * @see Operator::MatMul_Op
-   */
-  std::shared_ptr<Operator> clone() const override final {
-    return std::make_shared<Unsqueeze_Op>(*this);
-  }
-
   /**
    * @brief Compute dimensions for the output Tensor
    */
@@ -108,10 +98,6 @@ public:
 
   bool dimsForwarded() const override final;
 
-  void setBackend(const std::string &name,
-                  DeviceIdx_t device = 0) override final;
-  std::set<std::string> getAvailableBackends() const override;
-
   inline std::shared_ptr<Attributes> attributes() const override {
     return mAttributes;
   }
@@ -124,21 +110,6 @@ public:
   inline std::vector<int8_t> &axes() const noexcept {
     return mAttributes->template getAttr<UnsqueezeAttr::Axes>();
   }
-
-  static const std::vector<std::string> getInputsName() {
-    return {"data_input", "axes_to_unsqueeze"};
-  }
-  static const std::vector<std::string> getOutputsName() {
-    return {"unsqueezed"};
-  }
-
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static constexpr const char* const* attributesName(){
-		return EnumStrings<Aidge::UnsqueezeAttr>::data;
-	}
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
diff --git a/include/aidge/operator/WeightInterleaving.hpp b/include/aidge/operator/WeightInterleaving.hpp
index a8f8c3d743aaef11bea0bbc03c949907348a7d7c..e7eae855b9bac51f882469fd9a29bdf98d9de2b8 100644
--- a/include/aidge/operator/WeightInterleaving.hpp
+++ b/include/aidge/operator/WeightInterleaving.hpp
@@ -39,43 +39,16 @@ namespace Aidge {
  * @see OperatorTensor
  * @see Registrable
  */
-class WeightInterleaving_Op :
-    public OperatorTensor,
-    public Registrable<WeightInterleaving_Op,  // <Op, backend, implementation creation function>
-        std::string,
-        std::function<std::shared_ptr<OperatorImpl>(const WeightInterleaving_Op&)>>
-{
+class WeightInterleaving_Op : public OperatorTensorWithImpl<WeightInterleaving_Op> {
 public:
-    static const std::string Type;
+    static constexpr const char* const Type = "WeightInterleaving";
+    static constexpr const char* const InputsName[] = {"data_input"};
+    static constexpr const char* const OutputsName[] = {"data_output"};
 
-    WeightInterleaving_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
-
-    /**
-     * @brief Copy-constructor.
-     * @param op WeightInterleaving_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not
-     * its input tensors. The new operator has no associated input.
-     */
-    WeightInterleaving_Op(const WeightInterleaving_Op& op);
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::WeightInterleaving_Op
-     */
-    std::shared_ptr<Operator> clone() const override;
+    WeightInterleaving_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
-
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
-    }
-    static const std::vector<std::string> getOutputsName(){
-        return {"data_output"};
-    }
-
     /**
      * @brief Calculates the required size for the 8-bits`compactData` vector.
      *
diff --git a/python_binding/operator/pybind_Abs.cpp b/python_binding/operator/pybind_Abs.cpp
index 8df1bfd13bb8720e84e5595cca2c6419f2737293..0c49061b7e55670943121f2fc787e31d7aa7d41b 100644
--- a/python_binding/operator/pybind_Abs.cpp
+++ b/python_binding/operator/pybind_Abs.cpp
@@ -22,8 +22,12 @@ namespace Aidge {
 void init_Abs(py::module& m) {
     py::class_<Abs_Op, std::shared_ptr<Abs_Op>, OperatorTensor>(m, "AbsOp", py::multiple_inheritance())
     .def(py::init<>())
-    .def_static("get_inputs_name", &Abs_Op::getInputsName)
-    .def_static("get_outputs_name", &Abs_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Abs_Op::InputsName), std::end(Abs_Op::InputsName));
+    })
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Abs_Op::OutputsName), std::end(Abs_Op::OutputsName));
+    })
     .def_readonly_static("Type", &Abs_Op::Type);
     declare_registrable<Abs_Op>(m, "AbsOp");
 
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 566f7bc88c4250c52a4219f7d6cb71a2111bedc9..819d7f78dc1a15a8a945b8d14fc270524ee2f1ea 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -38,8 +38,12 @@ void declare_Add(py::module &m) {
     :type name: str, optional
     )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Add_Op::getInputsName)
-    .def_static("get_outputs_name", &Add_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Add_Op::InputsName), std::end(Add_Op::InputsName));
+    })
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Add_Op::OutputsName), std::end(Add_Op::OutputsName));
+    })
     .def_readonly_static("Type", &Add_Op::Type);
 
   declare_registrable<Add_Op>(m, "AddOp");
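On the binding side the change is mechanical: every .def_static("get_inputs_name", &Op::getInputsName) pair becomes a lambda that materialises a std::vector<std::string> from the corresponding constexpr array, plus a .def_readonly_static("Type", ...) where it was missing. The conversion is identical in every file; the following helper is hypothetical (toNameVector is not part of this patch) and is shown only to make explicit what each lambda computes:

```cpp
#include <cstddef>
#include <iterator>
#include <string>
#include <vector>

// Hypothetical helper, not in the patch: turn a bounded
// "const char* const [N]" name array into the vector returned to Python.
template <std::size_t N>
std::vector<std::string> toNameVector(const char* const (&names)[N]) {
    return std::vector<std::string>(std::begin(names), std::end(names));
}

// A binding could then read, for example:
//   .def_static("get_inputs_name", []() { return toNameVector(Add_Op::InputsName); })
```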
diff --git a/python_binding/operator/pybind_And.cpp b/python_binding/operator/pybind_And.cpp
index bd3366ef843efde47ef03f86f23e8a2b4df15a01..9d2d410fa32d07ddb85bb17047fa68ad4e50b0dd 100644
--- a/python_binding/operator/pybind_And.cpp
+++ b/python_binding/operator/pybind_And.cpp
@@ -31,8 +31,13 @@ void init_And(py::module& m) {
         :type name : str
         )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &And_Op::getInputsName)
-    .def_static("get_outputs_name", &And_Op::getOutputsName);
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(And_Op::InputsName), std::end(And_Op::InputsName));
+    })
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(And_Op::OutputsName), std::end(And_Op::OutputsName));
+    })
+    .def_readonly_static("Type", &And_Op::Type);
 
     declare_registrable<And_Op>(m, "AndOp");
 
diff --git a/python_binding/operator/pybind_ArgMax.cpp b/python_binding/operator/pybind_ArgMax.cpp
index 75f3257499fc2edf5007aaa51c1198d39182d880..6e1b7e31512f20bfdb7addb6506d56043d2092c8 100644
--- a/python_binding/operator/pybind_ArgMax.cpp
+++ b/python_binding/operator/pybind_ArgMax.cpp
@@ -41,15 +41,15 @@ void init_ArgMax(py::module &m) {
 			:type select_last_index: bool
 		)mydelimiter")
     .def(py::init<std::int32_t, bool, bool>(), py::arg("axis"), py::arg("keep_dims"), py::arg("select_last_index"))
-    .def_static("get_inputs_name", &ArgMax_Op::getInputsName)
-    .def_static("get_outputs_name", &ArgMax_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(ArgMax_Op::InputsName), std::end(ArgMax_Op::InputsName));
+    })
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(ArgMax_Op::OutputsName), std::end(ArgMax_Op::OutputsName));
+    })
+    .def_readonly_static("Type", &ArgMax_Op::Type)
 	.def_static("attributes_name", []() {
-		std::vector<std::string> result;
-		auto attributes = ArgMax_Op::attributesName();
-		for (size_t i = 0; i < size(EnumStrings<ArgMaxAttr>::data); ++i) {
-			result.emplace_back(attributes[i]);
-		}
-		return result;
+		return std::vector<std::string>(std::begin(EnumStrings<ArgMaxAttr>::data), std::end(EnumStrings<ArgMaxAttr>::data));
 	})
     ;
   declare_registrable<ArgMax_Op>(m, pyClassName);
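The attributes_name bindings follow the same simplification: rather than walking the pointer returned by the removed attributesName() helper, they build the vector directly from EnumStrings<Attr>::data, which is exactly the array that helper returned, so the list visible from Python is unchanged. A stand-alone sketch of the equivalence, using a stand-in attribute array rather than Aidge code:

```cpp
#include <cstddef>
#include <iterator>
#include <string>
#include <vector>

constexpr const char* const kAttrs[] = {"axis", "keep_dims", "select_last_index"};

int main() {
    // Old style: index the array up to its known size.
    std::vector<std::string> oldStyle;
    for (std::size_t i = 0; i < std::size(kAttrs); ++i) {
        oldStyle.emplace_back(kAttrs[i]);
    }
    // New style: construct from the array's begin/end directly.
    std::vector<std::string> newStyle(std::begin(kAttrs), std::end(kAttrs));
    return oldStyle == newStyle ? 0 : 1;  // identical contents
}
```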
diff --git a/python_binding/operator/pybind_Atan.cpp b/python_binding/operator/pybind_Atan.cpp
index 6f2e00333e674998beebc0d3655c1a279eae2036..3f05fc1489dca8d0ee4de9075db47f321efb35ed 100644
--- a/python_binding/operator/pybind_Atan.cpp
+++ b/python_binding/operator/pybind_Atan.cpp
@@ -27,8 +27,13 @@ void init_Atan(py::module& m) {
         :type type : :py:class:`str`
         )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Atan_Op::getInputsName)
-    .def_static("get_outputs_name", &Atan_Op::getOutputsName);
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Atan_Op::InputsName), std::end(Atan_Op::InputsName));
+    })
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Atan_Op::OutputsName), std::end(Atan_Op::OutputsName));
+    })
+    .def_readonly_static("Type", &Atan_Op::Type);
 
     declare_registrable<Atan_Op>(m, "AtanOp");
 
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 6130fc2717b0505de41648e5d617b570f7feca5c..9b025d819f1ff4adc924d94dac9d1b1f715f2082 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -60,18 +60,17 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
             py::arg("stride_dims") = create_array<DimSize_t, DIM>(1),
             py::arg("dilations") = create_array<DimSize_t, DIM>(1),
             py::arg("ceil_mode") = false)
-    .def_static("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
-    .def_static("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(AvgPooling_Op<DIM>::InputsName), std::end(AvgPooling_Op<DIM>::InputsName));
+    })
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(AvgPooling_Op<DIM>::OutputsName), std::end(AvgPooling_Op<DIM>::OutputsName));
+    })
+    .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = AvgPooling_Op<DIM>::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<AvgPoolingAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
-		})
-    .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type);
+			return std::vector<std::string>(std::begin(EnumStrings<AvgPoolingAttr>::data), std::end(EnumStrings<AvgPoolingAttr>::data));
+		});
 
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 4bcb94c4a78d96828b010e2448bd52f3d2486384..f869d8e2eab9e7459500475bd8f064574c672c1f 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -40,18 +40,17 @@ void declare_BatchNormOp(py::module& m) {
             py::arg("epsilon"),
             py::arg("momentum"),
             py::arg("training_mode"))
-        .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(BatchNorm_Op<DIM>::InputsName), std::end(BatchNorm_Op<DIM>::InputsName));
+        })
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(BatchNorm_Op<DIM>::OutputsName), std::end(BatchNorm_Op<DIM>::OutputsName));
+        })
+        .def_readonly_static("Type", &BatchNorm_Op<DIM>::Type)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = BatchNorm_Op<DIM>::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<BatchNormAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
-		})
-        .def_readonly_static("Type", &BatchNorm_Op<DIM>::Type);
+			return std::vector<std::string>(std::begin(EnumStrings<BatchNormAttr>::data), std::end(EnumStrings<BatchNormAttr>::data));
+		});
 
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_BitShift.cpp b/python_binding/operator/pybind_BitShift.cpp
index 7c5e5ff315d127d6492d027220cb8bd539002cb8..87d40322555e30fc2a5488409d349f9a876ca3b8 100644
--- a/python_binding/operator/pybind_BitShift.cpp
+++ b/python_binding/operator/pybind_BitShift.cpp
@@ -11,59 +11,60 @@
 
  #include <pybind11/pybind11.h>
 
- #include <string>
- #include "aidge/backend/OperatorImpl.hpp"
- #include "aidge/data/Tensor.hpp"
- #include "aidge/operator/BitShift.hpp"
- #include "aidge/operator/OperatorTensor.hpp"
- #include "aidge/utils/Types.h"
- 
- namespace py = pybind11;
- namespace Aidge {
- 
- void init_BitShift(py::module &m) {
-     // Binding for BitShiftOp class
-     auto pyBitShiftOp = py::class_<BitShift_Op, std::shared_ptr<BitShift_Op>, OperatorTensor>(m, "BitShiftOp", py::multiple_inheritance(),R"mydelimiter(
-         BitShiftOp is a tensor operator that performs bitwise shifts on tensor elements.
-         This class allows shifting tensor values either to the left or right based on the 
-         specified direction. The direction can be accessed and controlled using the 
-         BitShiftDirection enum.
-         :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
-         :type direction: BitShiftDirection
-         :param rounding: flag to apply bitshift rounding
-         :type rounding: boolean
-         :param name: name of the node.
-     )mydelimiter")
-         .def(py::init<BitShift_Op::BitShiftDirection,bool>(), py::arg("direction"),py::arg("rounding") = false)
-         .def_static("get_inputs_name", &BitShift_Op::getInputsName, "Get the names of the input tensors.")
-         .def_static("get_outputs_name", &BitShift_Op::getOutputsName, "Get the names of the output tensors.")
-         .def_static("attributes_name", []() {
-             std::vector<std::string> result;
-             auto attributes = BitShift_Op::attributesName();
-             for (size_t i = 0; i < size(EnumStrings<BitShiftAttr>::data); ++i) {
-                 result.emplace_back(attributes[i]);
-             }
-             return result;
-         });
- 
-     // Enum binding under BitShiftOp class
-     py::enum_<BitShift_Op::BitShiftDirection>(pyBitShiftOp, "BitShiftDirection")
-         .value("Right", BitShift_Op::BitShiftDirection::right)
-         .value("Left", BitShift_Op::BitShiftDirection::left)
-         .export_values();
- 
-     // Binding for the BitShift function
-     m.def("BitShift", &BitShift, py::arg("direction") = BitShift_Op::BitShiftDirection::right,py::arg("rounding") = false, py::arg("name") = "",
-         R"mydelimiter(
-         BitShiftOp is a tensor operator that performs bitwise shifts on tensor elements.
-         This class allows shifting tensor values either to the left or right based on the 
-         specified direction. The direction can be accessed and controlled using the 
-         BitShiftDirection enum.
-         :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
-         :type direction: BitShiftDirection
-         :param rounding: flag to apply bitshift rounding
-         :type rounding: boolean
-         :param name: name of the node.
-     )mydelimiter");
- }
- } // namespace Aidge
\ No newline at end of file
+#include <string>
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/BitShift.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_BitShift(py::module &m) {
+    // Binding for BitShiftOp class
+    auto pyBitShiftOp = py::class_<BitShift_Op, std::shared_ptr<BitShift_Op>, OperatorTensor>(m, "BitShiftOp", py::multiple_inheritance(),R"mydelimiter(
+        BitShiftOp is a tensor operator that performs bitwise shifts on tensor elements.
+        This class allows shifting tensor values either to the left or right based on the 
+        specified direction. The direction can be accessed and controlled using the 
+        BitShiftDirection enum.
+        :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
+        :type direction: BitShiftDirection
+        :param rounding: flag to apply bitshift rounding
+        :type rounding: boolean
+        :param name: name of the node.
+    )mydelimiter")
+        .def(py::init<BitShift_Op::BitShiftDirection,bool>(), py::arg("direction"),py::arg("rounding") = false)
+        .def("direction", &BitShift_Op::direction, "Get the direction of the bit shift (left or right).")
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(BitShift_Op::InputsName), std::end(BitShift_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(BitShift_Op::OutputsName), std::end(BitShift_Op::OutputsName));
+        }, "Get the names of the output tensors.")
+        .def_readonly_static("Type", &BitShift_Op::Type)
+        .def_static("attributes_name", []() {
+            return std::vector<std::string>(std::begin(EnumStrings<BitShiftAttr>::data), std::end(EnumStrings<BitShiftAttr>::data));
+        });
+
+    // Enum binding under BitShiftOp class
+    py::enum_<BitShift_Op::BitShiftDirection>(pyBitShiftOp, "BitShiftDirection")
+        .value("Right", BitShift_Op::BitShiftDirection::right)
+        .value("Left", BitShift_Op::BitShiftDirection::left)
+        .export_values();
+
+    // Binding for the BitShift function
+    m.def("BitShift", &BitShift, py::arg("direction") = BitShift_Op::BitShiftDirection::right, py::arg("rounding") = false, py::arg("name") = "",
+        R"mydelimiter(
+        BitShiftOp is a tensor operator that performs bitwise shifts on tensor elements.
+        This class allows shifting tensor values either to the left or right based on the 
+        specified direction. The direction can be accessed and controlled using the 
+        BitShiftDirection enum.
+        :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
+        :type direction: BitShiftDirection
+        :param rounding: flag to apply bitshift rounding
+        :type rounding: boolean
+        :param name: name of the node.
+    )mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Cast.cpp b/python_binding/operator/pybind_Cast.cpp
index 1e0ad7f9b27b94016ff28d868f4a74a8e37fadf1..2aca399f9310771fde7ee8c88c3eb95921847c5a 100644
--- a/python_binding/operator/pybind_Cast.cpp
+++ b/python_binding/operator/pybind_Cast.cpp
@@ -31,15 +31,15 @@ void init_Cast(py::module &m) {
     )mydelimiter")
         .def(py::init<DataType>(), py::arg("target_type"))
         .def("target_type", &Cast_Op::targetType, "Get the targeted type, output tensor data type")
-        .def_static("get_inputs_name", &Cast_Op::getInputsName, "Get the names of the input tensors.")
-        .def_static("get_outputs_name", &Cast_Op::getOutputsName, "Get the names of the output tensors.")
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Cast_Op::InputsName), std::end(Cast_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Cast_Op::OutputsName), std::end(Cast_Op::OutputsName));
+        }, "Get the names of the output tensors.")
+        .def_readonly_static("Type", &Cast_Op::Type)
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Cast_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<CastAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+			return std::vector<std::string>(std::begin(EnumStrings<CastAttr>::data), std::end(EnumStrings<CastAttr>::data));
 		});
 
     // Binding for the Cast function
diff --git a/python_binding/operator/pybind_Clip.cpp b/python_binding/operator/pybind_Clip.cpp
index a22a002d470261ba0ab88286891674c63a1cf691..c5b6cc22a2db0c7a1e17eb6a9e515943370c0374 100644
--- a/python_binding/operator/pybind_Clip.cpp
+++ b/python_binding/operator/pybind_Clip.cpp
@@ -31,16 +31,16 @@ void init_Clip(py::module& m) {
         :type max : :py:class:`float`
         )mydelimiter")
     .def(py::init<float, float>(), py::arg("min") = std::numeric_limits<float>::lowest(), py::arg("max") = std::numeric_limits<float>::max())
-    .def_static("get_inputs_name", &Clip_Op::getInputsName)
-    .def_static("get_outputs_name", &Clip_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Clip_Op::InputsName), std::end(Clip_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Clip_Op::OutputsName), std::end(Clip_Op::OutputsName));
+    }, "Get the names of the output tensors.")
+    .def_readonly_static("Type", &Clip_Op::Type)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Clip_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ClipAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+			return std::vector<std::string>(std::begin(EnumStrings<ClipAttr>::data), std::end(EnumStrings<ClipAttr>::data));
 		})
     .def("min", &Clip_Op::min, py::return_value_policy::reference_internal)
     .def("max", &Clip_Op::max, py::return_value_policy::reference_internal);
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 236f1692263e94a8fdf4278f18f61d71e247e1df..112cedb10ef8b11e2ccaad0d587d95c2bb5c317c 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -32,18 +32,16 @@ void init_Concat(py::module& m) {
         .def(py::init<const IOIndex_t, const int>(),
              py::arg("nb_inputs"),
              py::arg("axis") = 0)
-        .def_static("get_inputs_name", &Concat_Op::getInputsName)
-        .def_static("get_outputs_name", &Concat_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Concat_Op::InputsName), std::end(Concat_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Concat_Op::OutputsName), std::end(Concat_Op::OutputsName));
+        }, "Get the names of the output tensors.")
+        .def_readonly_static("Type", &Concat_Op::Type)
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Concat_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ConcatAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
-		})
-        .def_readonly_static("Type", &Concat_Op::Type);
+			return std::vector<std::string>(std::begin(EnumStrings<ConcatAttr>::data), std::end(EnumStrings<ConcatAttr>::data));
+		});
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
 
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
index b185f2f80a70faab7cd5269d43ba695466449654..ffd2b85a36e3ea5f8f1b0173c162e2e5fbb6e827 100644
--- a/python_binding/operator/pybind_ConstantOfShape.cpp
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -31,15 +31,15 @@ void init_ConstantOfShape(py::module &m) {
                      that will fill the output tensor.
       :type value : :py:class:`Tensor`
       )mydelimiter")
-      .def_static("get_inputs_name", &ConstantOfShape_Op::getInputsName)
-      .def_static("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
+      .def_static("get_inputs_name", []() {
+          return std::vector<std::string>(std::begin(ConstantOfShape_Op::InputsName), std::end(ConstantOfShape_Op::InputsName));
+      }, "Get the names of the input tensors.")
+      .def_static("get_outputs_name", []() {
+          return std::vector<std::string>(std::begin(ConstantOfShape_Op::OutputsName), std::end(ConstantOfShape_Op::OutputsName));
+      }, "Get the names of the output tensors.")
+      .def_readonly_static("Type", &ConstantOfShape_Op::Type)
       .def_static("attributes_name", []() {
-        std::vector<std::string> result;
-        auto attributes = ConstantOfShape_Op::attributesName();
-        for (size_t i = 0; i < size(EnumStrings<ConstantOfShapeAttr>::data); ++i) {
-          result.emplace_back(attributes[i]);
-        }
-        return result;
+        return std::vector<std::string>(std::begin(EnumStrings<ConstantOfShapeAttr>::data), std::end(EnumStrings<ConstantOfShapeAttr>::data));
       })
       .def("value", &ConstantOfShape_Op::value);
 
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index e65a74c0c65ae413e8f76a87e52644690634cfef..20e7b411d6c4b8d522fa3eb9b14b0511f97bdb11 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -41,20 +41,19 @@ void declare_ConvOp(py::module &m) {
         }), py::arg("kernel_dims"),
             py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
             py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
-        .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Conv_Op<DIM>::InputsName), std::end(Conv_Op<DIM>::InputsName));
+        })
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Conv_Op<DIM>::OutputsName), std::end(Conv_Op<DIM>::OutputsName));
+        })
+        .def_readonly_static("Type", &Conv_Op<DIM>::Type)
 
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Conv_Op<DIM>::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ConvAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<ConvAttr>::data), std::end(EnumStrings<ConvAttr>::data));
 		})
         .def("in_channels", &Conv_Op<DIM>::inChannels)
         .def("out_channels", &Conv_Op<DIM>::outChannels)
-        .def_readonly_static("Type", &Conv_Op<DIM>::Type)
         ;
 
   declare_registrable<Conv_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 7ddbefd3dea69be8bedb750c5686e13811151c04..04113a27d14d0365d7f8080096138a3d9ac8399f 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -54,16 +54,14 @@ void declare_ConvDepthWiseOp(py::module &m) {
     }), py::arg("kernel_dims"),
         py::arg("stride_dims"),
         py::arg("dilation_dims"))
-  .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
-  .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
-
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(ConvDepthWise_Op<DIM>::InputsName), std::end(ConvDepthWise_Op<DIM>::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(ConvDepthWise_Op<DIM>::OutputsName), std::end(ConvDepthWise_Op<DIM>::OutputsName));
+    }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = ConvDepthWise_Op<DIM>::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ConvDepthWiseAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-				return result;
+      return std::vector<std::string>(std::begin(EnumStrings<ConvDepthWiseAttr>::data), std::end(EnumStrings<ConvDepthWiseAttr>::data));
 		})
   .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels)
   .def_readonly_static("Type", &ConvDepthWise_Op<DIM>::Type);
diff --git a/python_binding/operator/pybind_ConvTranspose.cpp b/python_binding/operator/pybind_ConvTranspose.cpp
index 854e0aea11e9dd76ed78da371b131179e45ab737..e86712f6dbb4cc2b619a58a8f1ff25edead53619 100644
--- a/python_binding/operator/pybind_ConvTranspose.cpp
+++ b/python_binding/operator/pybind_ConvTranspose.cpp
@@ -57,8 +57,12 @@ template <DimIdx_t DIM> void declare_ConvTransposeOp(py::module &m) {
              py::arg("kernel_dims"),
              py::arg("stride_dims") = std::vector<DimSize_t>(DIM, 1),
              py::arg("dilation_dims") = std::vector<DimSize_t>(DIM, 1))
-        .def_static("get_inputs_name", &ConvTranspose_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &ConvTranspose_Op<DIM>::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(ConvTranspose_Op<DIM>::InputsName), std::end(ConvTranspose_Op<DIM>::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(ConvTranspose_Op<DIM>::OutputsName), std::end(ConvTranspose_Op<DIM>::OutputsName));
+        }, "Get the names of the output tensors.")
         .def("in_channels", &ConvTranspose_Op<DIM>::inChannels)
         .def("out_channels", &ConvTranspose_Op<DIM>::outChannels)
         .def_readonly_static("Type", &ConvTranspose_Op<DIM>::Type);
diff --git a/python_binding/operator/pybind_CryptoHash.cpp b/python_binding/operator/pybind_CryptoHash.cpp
index 923f91b604ea4990916525f2768cbb60e8eb7f9a..10913ec9b8bde7c9327d0dbadcd3995342939353 100644
--- a/python_binding/operator/pybind_CryptoHash.cpp
+++ b/python_binding/operator/pybind_CryptoHash.cpp
@@ -25,8 +25,15 @@
   
      py::class_<CryptoHash_Op, std::shared_ptr<CryptoHash_Op>, OperatorTensor>(m, "CryptoHashOp", py::multiple_inheritance())
          .def(py::init<>())
-         .def_static("get_inputs_name", &CryptoHash_Op::getInputsName)
-         .def_static("get_outputs_name", &CryptoHash_Op::getOutputsName)
+         .def_static("get_inputs_name", []() {
+             return std::vector<std::string>(std::begin(CryptoHash_Op::InputsName), std::end(CryptoHash_Op::InputsName));
+         }, "Get the names of the input tensors.")
+         .def_static("get_outputs_name", []() {
+             return std::vector<std::string>(std::begin(CryptoHash_Op::OutputsName), std::end(CryptoHash_Op::OutputsName));
+         }, "Get the names of the output tensors.")
+         .def_static("attributes_name", []() {
+           return std::vector<std::string>(std::begin(EnumStrings<CryptoHashAttr>::data), std::end(EnumStrings<CryptoHashAttr>::data));
+         })
          .def_readonly_static("Type", &CryptoHash_Op::Type);
  
      declare_registrable<CryptoHash_Op>(m, "CryptoHashOp");
diff --git a/python_binding/operator/pybind_DepthToSpace.cpp b/python_binding/operator/pybind_DepthToSpace.cpp
index d33386711784f64c97535194366522f04f76f39c..e6841af7db2ba2188ada1756fc1420e1279b3140 100644
--- a/python_binding/operator/pybind_DepthToSpace.cpp
+++ b/python_binding/operator/pybind_DepthToSpace.cpp
@@ -35,16 +35,14 @@ void declare_DepthToSpace(py::module &m) {
     .def(py::init([](const std::uint32_t blockSize, const std::string& mode) {
             return new DepthToSpace_Op(blockSize, stringToMode(mode));
         }), py::arg("block_size"), py::arg("mode") = "CRD")
-    .def_static("get_inputs_name", &DepthToSpace_Op::getInputsName)
-    .def_static("get_outputs_name", &DepthToSpace_Op::getOutputsName)
-
+      .def_static("get_inputs_name", []() {
+          return std::vector<std::string>(std::begin(DepthToSpace_Op::InputsName), std::end(DepthToSpace_Op::InputsName));
+      }, "Get the names of the input tensors.")
+      .def_static("get_outputs_name", []() {
+          return std::vector<std::string>(std::begin(DepthToSpace_Op::OutputsName), std::end(DepthToSpace_Op::OutputsName));
+      }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = DepthToSpace_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<DepthToSpaceAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+      return std::vector<std::string>(std::begin(EnumStrings<DepthToSpaceAttr>::data), std::end(EnumStrings<DepthToSpaceAttr>::data));
 		})
     .def_readonly_static("Type", &DepthToSpace_Op::Type)
     .def("__repr__", [](DepthToSpace_Op& b) {
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index ef5a035b1904570e86fe628b14d1ae734f84a878..57148e4321ead83b75f09a69d5f3cc794443a88c 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -37,8 +37,12 @@ void init_Div(py::module& m) {
     :type name: str, Optional
     )mydelimiter")
         .def(py::init<>())
-        .def_static("get_inputs_name", &Div_Op::getInputsName)
-        .def_static("get_outputs_name", &Div_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Div_Op::InputsName), std::end(Div_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Div_Op::OutputsName), std::end(Div_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Div_Op::Type);
 
     declare_registrable<Div_Op>(m, "DivOp");
diff --git a/python_binding/operator/pybind_Dropout.cpp b/python_binding/operator/pybind_Dropout.cpp
index 4925b34a62a4dc5c07f5c0acfa12c8662df85fb3..bdc9467e4c876d21cc58ce40a1994d92c1e85ec2 100644
--- a/python_binding/operator/pybind_Dropout.cpp
+++ b/python_binding/operator/pybind_Dropout.cpp
@@ -23,16 +23,15 @@ void init_Dropout(py::module& m) {
     py::class_<Dropout_Op, std::shared_ptr<Dropout_Op>, OperatorTensor>(
         m, "DropoutOp", py::multiple_inheritance())
         .def(py::init<float>(), py::arg("probability") = 0.5f)
-        .def_static("get_inputs_name", &Dropout_Op::getInputsName)
-        .def_static("get_outputs_name", &Dropout_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Dropout_Op::InputsName), std::end(Dropout_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Dropout_Op::OutputsName), std::end(Dropout_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Dropout_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<DropoutAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
-		})
+          return std::vector<std::string>(std::begin(EnumStrings<DropoutAttr>::data), std::end(EnumStrings<DropoutAttr>::data));
+        })
         .def_readonly_static("Type", &Dropout_Op::Type);
 
     // Declaring the operator as registrable
diff --git a/python_binding/operator/pybind_Equal.cpp b/python_binding/operator/pybind_Equal.cpp
index ef4488edce3c096c368f43a07de6b0d65f368013..a4242a42cb9ba293d61d4c9d2d7e7d97718ad606 100644
--- a/python_binding/operator/pybind_Equal.cpp
+++ b/python_binding/operator/pybind_Equal.cpp
@@ -22,8 +22,13 @@ void init_Equal(py::module& m) {
     py::class_<Equal_Op, std::shared_ptr<Equal_Op>, OperatorTensor>(m, "Equal_Op", py::multiple_inheritance(),
           R"mydelimiter( Initialize an Equal operator.)mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Equal_Op::getInputsName)
-    .def_static("get_outputs_name", &Equal_Op::getOutputsName);
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Equal_Op::InputsName), std::end(Equal_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Equal_Op::OutputsName), std::end(Equal_Op::OutputsName));
+    }, "Get the names of the output tensors.");
+
     declare_registrable<Equal_Op>(m, "EqualOp");
     m.def("Equal", &Equal, py::arg("name") = "",
 	   R"mydelimiter(
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index 546f9769226b1c3c251062d5e8ccfb25cacbd5ce..4d32691a3fc253832fc07289068074eeb3e87ce8 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -28,8 +28,12 @@ void init_Erf(py::module& m) {
             erf(x) = (2 / sqrt(pi)) * integral from 0 to x of exp(-t^2) dt
         )mydelimiter")
         .def(py::init<>())
-        .def_static("get_inputs_name", &Erf_Op::getInputsName)
-        .def_static("get_outputs_name", &Erf_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Erf_Op::InputsName), std::end(Erf_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Erf_Op::OutputsName), std::end(Erf_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Erf_Op::Type);
 
     declare_registrable<Erf_Op>(m, "ErfOp");
diff --git a/python_binding/operator/pybind_Expand.cpp b/python_binding/operator/pybind_Expand.cpp
index c20e47e849bbfad7331b8f5f2de82bc32bda033e..c12b5280ae71161bdc16e9132b68d2c4a0a58fce 100644
--- a/python_binding/operator/pybind_Expand.cpp
+++ b/python_binding/operator/pybind_Expand.cpp
@@ -44,8 +44,12 @@ void init_Expand(py::module &m) {
       broadcasting rules
 )mydelimiter")
         .def(py::init<>())
-        .def_static("get_inputs_name", &Expand_Op::getInputsName)
-        .def_static("get_outputs_name", &Expand_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Expand_Op::InputsName), std::end(Expand_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Expand_Op::OutputsName), std::end(Expand_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Expand_Op::Type);
 
     declare_registrable<Expand_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index c29b6e1d3723f03f6a9c9b1f03156b42160c6cf3..33dd806d92a61b21e08c52e5d27d989e2568660a 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -30,8 +30,12 @@ void declare_FC(py::module &m) {
     :type type : :py:class:`str`
     )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &FC_Op::getInputsName)
-    .def_static("get_outputs_name", &FC_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(FC_Op::InputsName), std::end(FC_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(FC_Op::OutputsName), std::end(FC_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &FC_Op::Type)
     .def("out_channels", &FC_Op::outChannels)
     .def("__repr__", [](FC_Op& b) {
diff --git a/python_binding/operator/pybind_Flatten.cpp b/python_binding/operator/pybind_Flatten.cpp
index 899e5d7758d6e4737f89b4308872bb0926f1f98f..3ba07f20591c0bb1368079016214a0872533a05e 100644
--- a/python_binding/operator/pybind_Flatten.cpp
+++ b/python_binding/operator/pybind_Flatten.cpp
@@ -31,8 +31,12 @@ void init_Flatten(py::module &m) {
                         between [-r;r-1] with r = input_tensor.nbDims()
 		:type axes : :py:class: List[Int]
 		)mydelimiter")
-      .def("get_inputs_name", &Flatten_Op::getInputsName)
-      .def("get_outputs_name", &Flatten_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Flatten_Op::InputsName), std::end(Flatten_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Flatten_Op::OutputsName), std::end(Flatten_Op::OutputsName));
+        }, "Get the names of the output tensors.")
       .def("axis", &Flatten_Op::axis);
   // Here we bind the constructor of the Flatten Node. We add an argument
   // for each attribute of the operator (in here we only have 'axis') and
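Besides adopting the new name arrays, the Flatten hunk corrects the registration itself: the old bindings used .def, which registers get_inputs_name/get_outputs_name as instance methods, while every other operator exposes them with .def_static so they can be called directly on the class. A minimal pybind11 illustration of the difference (hypothetical class Baz, assuming the usual module object m; not part of this patch):

struct Baz {};

py::class_<Baz>(m, "Baz")
    .def(py::init<>())
    // Instance method: bound to an object, called from Python as Baz().describe()
    .def("describe", [](const Baz&) { return std::string("an instance"); })
    // Static method: no instance needed, called from Python as Baz.type_name()
    .def_static("type_name", []() { return std::string("Baz"); });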
diff --git a/python_binding/operator/pybind_Fold.cpp b/python_binding/operator/pybind_Fold.cpp
index 747abc1611a1a51d9b317de365b5036436b1494a..1b489c06d35d0cffd3534fd55839e9c0e95dd396 100644
--- a/python_binding/operator/pybind_Fold.cpp
+++ b/python_binding/operator/pybind_Fold.cpp
@@ -46,17 +46,15 @@ void declare_FoldOp(py::module &m) {
             py::arg("kernel_dims"),
             py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
             py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
-        .def_static("get_inputs_name", &Fold_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &Fold_Op<DIM>::getOutputsName)
-
-		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Fold_Op<DIM>::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<FoldAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
-		})
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Fold_Op<DIM>::InputsName), std::end(Fold_Op<DIM>::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Fold_Op<DIM>::OutputsName), std::end(Fold_Op<DIM>::OutputsName));
+        }, "Get the names of the output tensors.")
+        .def_static("attributes_name", []() {
+            return std::vector<std::string>(std::begin(EnumStrings<FoldAttr>::data), std::end(EnumStrings<FoldAttr>::data));
+        })
         .def_readonly_static("Type", &Fold_Op<DIM>::Type)
         ;
 
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 1c1f027dc56627a1fdb5292b5ec94197ad9c2d29..a0463bb21fcf466c686db93c5c45743688a1330e 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -42,16 +42,14 @@ void init_Gather(py::module& m) {
                 py::arg("axis"),
                 py::arg("indices"),
                 py::arg("gathered_shape"))
-        .def_static("get_inputs_name", &Gather_Op::getInputsName)
-        .def_static("get_outputs_name", &Gather_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Gather_Op::InputsName), std::end(Gather_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Gather_Op::OutputsName), std::end(Gather_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Gather_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<Gather_Op::Attr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<Gather_Op::Attr>::data), std::end(EnumStrings<Gather_Op::Attr>::data));
 		})
         .def_readonly_static("Type", &Gather_Op::Type);
 
diff --git a/python_binding/operator/pybind_GlobalAveragePooling.cpp b/python_binding/operator/pybind_GlobalAveragePooling.cpp
index 691456027bb1536c7b27d6ce9a3546dbf59cffb9..0f8b68711c7a09aaae2f8b4803876c9b6414fb3b 100644
--- a/python_binding/operator/pybind_GlobalAveragePooling.cpp
+++ b/python_binding/operator/pybind_GlobalAveragePooling.cpp
@@ -35,8 +35,12 @@ void init_GlobalAveragePooling(py::module &m) {
                              :type name : str
                              )mydelimiter")
       .def(py::init<>())
-      .def_static("get_inputs_name", &GlobalAveragePooling_Op::getInputsName)
-      .def_static("get_outputs_name", &GlobalAveragePooling_Op::getOutputsName)
+      .def_static("get_inputs_name", []() {
+          return std::vector<std::string>(std::begin(GlobalAveragePooling_Op::InputsName), std::end(GlobalAveragePooling_Op::InputsName));
+      }, "Get the names of the input tensors.")
+      .def_static("get_outputs_name", []() {
+          return std::vector<std::string>(std::begin(GlobalAveragePooling_Op::OutputsName), std::end(GlobalAveragePooling_Op::OutputsName));
+      }, "Get the names of the output tensors.")
       .def_readonly_static("Type", &GlobalAveragePooling_Op::Type);
 
   declare_registrable<GlobalAveragePooling_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
index f4f0335fd11f2bc083dbc3d5b318818983949298..c835e436bf3d98e482fe8afa09ac5f3b5e38b1d8 100644
--- a/python_binding/operator/pybind_GridSample.cpp
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -63,16 +63,14 @@ void declare_GridSampleOp(py::module &m) {
         }), py::arg("mode") = "linear",
             py::arg("padding_mode") = "zeros",
             py::arg("align_corners") = false)
-        .def_static("get_inputs_name", &GridSample_Op::getInputsName)
-        .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(GridSample_Op::InputsName), std::end(GridSample_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(GridSample_Op::OutputsName), std::end(GridSample_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = GridSample_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<GridSampleAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<GridSampleAttr>::data), std::end(EnumStrings<GridSampleAttr>::data));
 		})
         .def_readonly_static("Type", &GridSample_Op::Type)
         ;
diff --git a/python_binding/operator/pybind_Heaviside.cpp b/python_binding/operator/pybind_Heaviside.cpp
index 078b766a09306ea2808827243ea10f119d281604..d2a7059648b43a9e92127258339d693da7012a1b 100644
--- a/python_binding/operator/pybind_Heaviside.cpp
+++ b/python_binding/operator/pybind_Heaviside.cpp
@@ -35,16 +35,14 @@ void init_Heaviside(py::module &m) {
           :param name : Name of the node.
           )mydelimiter")
         .def(py::init<float>(), py::arg("value"))
-        .def_static("get_inputs_name", &Heaviside_Op::getInputsName)
-        .def_static("get_outputs_name", &Heaviside_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Heaviside_Op::InputsName), std::end(Heaviside_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Heaviside_Op::OutputsName), std::end(Heaviside_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Heaviside_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<Heaviside_Op::Attr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<Heaviside_Op::Attr>::data), std::end(EnumStrings<Heaviside_Op::Attr>::data));
 		})
         .def_readonly_static("Type", &Heaviside_Op::Type);
 
diff --git a/python_binding/operator/pybind_Identity.cpp b/python_binding/operator/pybind_Identity.cpp
index 22ddf940213d3a18ec42a6cdbd346a47384a5a26..e6c47b882a679d8ac12df425bb679901ac4d66da 100644
--- a/python_binding/operator/pybind_Identity.cpp
+++ b/python_binding/operator/pybind_Identity.cpp
@@ -24,8 +24,12 @@ void init_Identity(py::module& m) {
     A class representing the Identity operator, which returns the input as-is.
     )mydelimiter")
         .def(py::init<>())
-        .def_static("get_inputs_name", &Identity_Op::getInputsName)
-        .def_static("get_outputs_name", &Identity_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Identity_Op::InputsName), std::end(Identity_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Identity_Op::OutputsName), std::end(Identity_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Identity_Op::Type);
 
     m.def("Identity", &Identity, py::arg("name") = "",
diff --git a/python_binding/operator/pybind_LRN.cpp b/python_binding/operator/pybind_LRN.cpp
index 8807eb04081396b03db1076cf2e4b83a222b5f09..c3fe248ad47ee2f7e3ee07da2d6df6f317f74c84 100644
--- a/python_binding/operator/pybind_LRN.cpp
+++ b/python_binding/operator/pybind_LRN.cpp
@@ -28,16 +28,14 @@ void init_LRN(py::module& m) {
     based on its neighbors within a local region defined by the given size parameter.
     )mydelimiter")
         .def(py::init<std::int32_t>(), py::arg("size"))
-        .def_static("get_inputs_name", &LRN_Op::getInputsName)
-        .def_static("get_outputs_name", &LRN_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(LRN_Op::InputsName), std::end(LRN_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(LRN_Op::OutputsName), std::end(LRN_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = LRN_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<LRN_Op::Attr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-				return result;
+            return std::vector<std::string>(std::begin(EnumStrings<LRN_Op::Attr>::data), std::end(EnumStrings<LRN_Op::Attr>::data));
 		})
         .def_readonly_static("Type", &LRN_Op::Type);
 
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 8bc120c8aa3e585a3e792ab3337fc9c602f6afe9..15b1182aeb857562428a93dcff51f6c856218e42 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -28,16 +28,14 @@ void init_LeakyReLU(py::module& m) {
     The negative_slope parameter controls the angle of the negative part of the function.
     )mydelimiter")
         .def(py::init<float>(), py::arg("negative_slope"))
-        .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
-        .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(LeakyReLU_Op::InputsName), std::end(LeakyReLU_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(LeakyReLU_Op::OutputsName), std::end(LeakyReLU_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = LeakyReLU_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<LeakyReLU_Op::Attr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<LeakyReLU_Op::Attr>::data), std::end(EnumStrings<LeakyReLU_Op::Attr>::data));
 		})
         .def_readonly_static("Type", &LeakyReLU_Op::Type);
 
diff --git a/python_binding/operator/pybind_Ln.cpp b/python_binding/operator/pybind_Ln.cpp
index 61fc3583d478dddd9a9eb05101ce2e07e07e9759..78d20861d7b4d08593f52065d0ac4c498dceb62f 100755
--- a/python_binding/operator/pybind_Ln.cpp
+++ b/python_binding/operator/pybind_Ln.cpp
@@ -26,8 +26,12 @@ void init_Ln(py::module& m) {
     The operator computes the element-wise natural logarithm of the input tensor.
     )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Ln_Op::getInputsName)
-    .def_static("get_outputs_name", &Ln_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Ln_Op::InputsName), std::end(Ln_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Ln_Op::OutputsName), std::end(Ln_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Ln_Op::Type);
 
     m.def("Ln", &Ln, py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index 459dcea625a720f5b9e2775a66ed57307144a88d..90d3e41ea405ef6db4fdefbcca6b8a99a57de7a2 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -36,8 +36,12 @@ void init_MatMul(py::module &m) {
     :type name: str, Optional
     )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &MatMul_Op::getInputsName)
-    .def_static("get_outputs_name", &MatMul_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(MatMul_Op::InputsName), std::end(MatMul_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(MatMul_Op::OutputsName), std::end(MatMul_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &MatMul_Op::Type);
 
   declare_registrable<MatMul_Op>(m, "MatMulOp");
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 953e56ebec8fc0a8d030f6cf9d79c9359848fa05..f1366ddfafd57a66336b0fb5699e01af42d660df 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -50,16 +50,15 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("dilations"),
         py::arg("ceil_mode"))
-  .def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
-  .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(MaxPooling_Op<DIM>::InputsName), std::end(MaxPooling_Op<DIM>::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(MaxPooling_Op<DIM>::OutputsName), std::end(MaxPooling_Op<DIM>::OutputsName));
+    }, "Get the names of the output tensors.")
 
   .def_static("attributes_name", []() {
-    std::vector<std::string> result;
-    auto attributes = MaxPooling_Op<DIM>::attributesName();
-    for (size_t i = 0; i < size(EnumStrings<MaxPoolingAttr>::data); ++i) {
-      result.emplace_back(attributes[i]);
-    }
-    return result;
+    return std::vector<std::string>(std::begin(EnumStrings<MaxPoolingAttr>::data), std::end(EnumStrings<MaxPoolingAttr>::data));
   })
   .def_readonly_static("Type", &MaxPooling_Op<DIM>::Type);
   
diff --git a/python_binding/operator/pybind_Memorize.cpp b/python_binding/operator/pybind_Memorize.cpp
index ed87f68c77d196da91f31bb8730f93c26da5938f..34680f13fdfcb988ca948570f5ede378ad6b5483 100644
--- a/python_binding/operator/pybind_Memorize.cpp
+++ b/python_binding/operator/pybind_Memorize.cpp
@@ -22,15 +22,14 @@ namespace Aidge {
 void init_Memorize(py::module& m) {
     py::class_<Memorize_Op, std::shared_ptr<Memorize_Op>, OperatorTensor>(m, "MemorizeOp", py::multiple_inheritance())
         .def(py::init<const std::uint32_t>(), py::arg("end_step"))
-        .def_static("get_inputs_name", &Memorize_Op::getInputsName)
-        .def_static("get_outputs_name", &Memorize_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Memorize_Op::InputsName), std::end(Memorize_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Memorize_Op::OutputsName), std::end(Memorize_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Memorize_Op::attributesName();
-			for (size_t i = 0;i < size(EnumStrings<Memorize_Op::Attr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+			return std::vector<std::string>(std::begin(EnumStrings<Memorize_Op::Attr>::data), std::end(EnumStrings<Memorize_Op::Attr>::data));
 		});
 
     declare_registrable<Memorize_Op>(m, "MemorizeOp");
diff --git a/python_binding/operator/pybind_Mod.cpp b/python_binding/operator/pybind_Mod.cpp
index aa88f2068ff8a55ae3cc1fbfec6190f8a03334e7..058c56e2af49f21fca14d68a092dc25363510b96 100644
--- a/python_binding/operator/pybind_Mod.cpp
+++ b/python_binding/operator/pybind_Mod.cpp
@@ -33,8 +33,12 @@
      :type name : str
      )mydelimiter")
          .def(py::init<>())
-         .def_static("get_inputs_name", &Mod_Op::getInputsName)
-         .def_static("get_outputs_name", &Mod_Op::getOutputsName)
+         .def_static("get_inputs_name", []() {
+             return std::vector<std::string>(std::begin(Mod_Op::InputsName), std::end(Mod_Op::InputsName));
+         }, "Get the names of the input tensors.")
+         .def_static("get_outputs_name", []() {
+             return std::vector<std::string>(std::begin(Mod_Op::OutputsName), std::end(Mod_Op::OutputsName));
+         }, "Get the names of the output tensors.")
          .def_readonly_static("Type", &Mod_Op::Type);
  
      declare_registrable<Mod_Op>(m, "ModOp");
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 3cdcec20b0115c8c5d167c045f3b2a399699328a..0fd18be578ba29d132f714f5f9b1e745e8ea6ac5 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -27,8 +27,12 @@ void init_Mul(py::module& m) {
     :type name: str, Optional
     )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Mul_Op::getInputsName)
-    .def_static("get_outputs_name", &Mul_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Mul_Op::InputsName), std::end(Mul_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Mul_Op::OutputsName), std::end(Mul_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Mul_Op::Type);
     declare_registrable<Mul_Op>(m, "MulOp");
 
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 7b37bb20677f8c426adba6c84ac206aa94cc140b..8032bcfce091c5a5c72f9b3df1c9c269ed2d6edd 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -48,15 +48,14 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("beginEndTuples"),
         py::arg("borderType") = PadBorderType::Constant,
         py::arg("borderValue") = 0.0)
-    .def_static("get_inputs_name", &Pad_Op<DIM>::getInputsName)
-    .def_static("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Pad_Op<DIM>::InputsName), std::end(Pad_Op<DIM>::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Pad_Op<DIM>::OutputsName), std::end(Pad_Op<DIM>::OutputsName));
+    }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Pad_Op<DIM>::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<PadAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+			return std::vector<std::string>(std::begin(EnumStrings<PadAttr>::data), std::end(EnumStrings<PadAttr>::data));
 		})
     .def_readonly_static("Type", &Pad_Op<DIM>::Type);
 
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index 20606d24df7716cc410a141971e569f960e472a8..30c6b93d43f32b7ec3f559af5c43e914747766e6 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -21,16 +21,14 @@ namespace Aidge {
 void init_Pop(py::module& m) {
     py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor>(m, "PopOp", py::multiple_inheritance())
     .def(py::init<>())
-    .def_static("get_inputs_name", &Pop_Op::getInputsName)
-    .def_static("get_outputs_name", &Pop_Op::getOutputsName)
-
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Pop_Op::InputsName), std::end(Pop_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Pop_Op::OutputsName), std::end(Pop_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 	.def_static("attributes_name", []() {
-		std::vector<std::string> result;
-		auto attributes = Pop_Op::attributesName();
-		for (size_t i = 0; i < size(EnumStrings<PopAttr>::data); ++i) {
-			result.emplace_back(attributes[i]);
-		}
-		return result;
+		return std::vector<std::string>(std::begin(EnumStrings<PopAttr>::data), std::end(EnumStrings<PopAttr>::data));
 	})
     .def_readonly_static("Type", &Pop_Op::Type);
 
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index c112f895ecb79d6f8f98cb655705c70703e2cafd..e34750f90664c598675658b8c319938f12f6e027 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -37,8 +37,12 @@ void init_Pow(py::module& m) {
     :type name: str, optional
     )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Pow_Op::getInputsName)
-    .def_static("get_outputs_name", &Pow_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Pow_Op::InputsName), std::end(Pow_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Pow_Op::OutputsName), std::end(Pow_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Pow_Op::Type);
     declare_registrable<Pow_Op>(m, "PowOp");
 
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 3467ed970c3f830298b46897717d123a0ab11800..1f9dcf5c00eeb735511f783c9ac417b94cc9b84f 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -37,8 +37,12 @@ void init_Producer(py::module &m) {
         py::multiple_inheritance())
         .def(py::init<const std::shared_ptr<Tensor>, bool>(), py::arg("tensor"), py::arg("constant"))
         .def("dims", &Producer_Op::dims)
-        .def_static("get_inputs_name", &Producer_Op::getInputsName)
-        .def_static("get_outputs_name", &Producer_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>();
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Producer_Op::OutputsName), std::end(Producer_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Producer_Op::Type);
 
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index 41ef91ed9383f72bc9b3bb3971dedbec0256c7b0..4dcd6c9d2c5e0477c4fa316f7588ca8693404ff5 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -31,8 +31,12 @@ void init_ReLU(py::module& m) {
         :type name : str
         )mydelimiter")
         .def(py::init<>())
-        .def_static("get_inputs_name", &ReLU_Op::getInputsName)
-        .def_static("get_outputs_name", &ReLU_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(ReLU_Op::InputsName), std::end(ReLU_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(ReLU_Op::OutputsName), std::end(ReLU_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &ReLU_Op::Type);
 
     declare_registrable<ReLU_Op>(m, "ReLUOp");
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 546ea1cfe537fb31fdf1fffbf8c2811dcc0c73a5..cb1631f631b3db749b48e16a2bca25363905b1b5 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -38,15 +38,14 @@ void declare_ReduceMeanOp(py::module &m) {
 		:type noop_with_empty_axes: bool
 		)mydelimiter")
     .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes") = std::vector<std::int32_t>(), py::arg("keep_dims") = true, py::arg("noop_with_empty_axes") = false)
-    .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
-    .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
-	.def_static("attributes_name", []() {
-		std::vector<std::string> result;
-		auto attributes = ReduceMean_Op::attributesName();
-		for (size_t i = 0; i < size(EnumStrings<ReduceMean_Op::Attr>::data); ++i) {
-			result.emplace_back(attributes[i]);
-		}
-		return result;
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(ReduceMean_Op::InputsName), std::end(ReduceMean_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(ReduceMean_Op::OutputsName), std::end(ReduceMean_Op::OutputsName));
+    }, "Get the names of the output tensors.")
+    .def_static("attributes_name", []() {
+		return std::vector<std::string>(std::begin(EnumStrings<ReduceMean_Op::Attr>::data), std::end(EnumStrings<ReduceMean_Op::Attr>::data));
 	})
     .def_readonly_static("Type", &ReduceMean_Op::Type)
     ;
diff --git a/python_binding/operator/pybind_ReduceSum.cpp b/python_binding/operator/pybind_ReduceSum.cpp
index 7517c62d2082215a25a3f632a5bc59555319fa57..0d131edde24bf8437f1903d0fcdfd598b29d1a67 100644
--- a/python_binding/operator/pybind_ReduceSum.cpp
+++ b/python_binding/operator/pybind_ReduceSum.cpp
@@ -40,16 +40,14 @@ void init_ReduceSum(py::module &m) {
 			:type noop_with_empty_axes: bool
 		)mydelimiter")
     .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes"), py::arg("keep_dims"), py::arg("noop_with_empty_axes"))
-    .def_static("get_inputs_name", &ReduceSum_Op::getInputsName)
-    .def_static("get_outputs_name", &ReduceSum_Op::getOutputsName)
-
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(ReduceSum_Op::InputsName), std::end(ReduceSum_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(ReduceSum_Op::OutputsName), std::end(ReduceSum_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 	.def_static("attributes_name", []() {
-		std::vector<std::string> result;
-		auto attributes = ReduceSum_Op::attributesName();
-		for (size_t i = 0; i < size(EnumStrings<ReduceSum_Op::Attr>::data); ++i) {
-			result.emplace_back(attributes[i]);
-		}
-		return result;
+		return std::vector<std::string>(std::begin(EnumStrings<ReduceSum_Op::Attr>::data), std::end(EnumStrings<ReduceSum_Op::Attr>::data));
 	})
     ;
   declare_registrable<ReduceSum_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index d263796ce016e4218807926781f6382b998f7e38..f78fa0aa4938aa0076db60bf986ff731060bb3b1 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -33,16 +33,14 @@ void init_Reshape(py::module& m) {
         :type allowzero: bool
         )mydelimiter")
     .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
-    .def_static("get_inputs_name", &Reshape_Op::getInputsName)
-    .def_static("get_outputs_name", &Reshape_Op::getOutputsName)
-
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Reshape_Op::InputsName), std::end(Reshape_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Reshape_Op::OutputsName), std::end(Reshape_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Reshape_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ReshapeAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+      return std::vector<std::string>(std::begin(EnumStrings<ReshapeAttr>::data), std::end(EnumStrings<ReshapeAttr>::data));
 		})
     .def_readonly_static("Type", &Reshape_Op::Type);
 
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
index 10a60e1f947a98d0325c72096a287df5fbe77d77..bcf9090ddbff7218d6f8a23da1236a44449345ec 100644
--- a/python_binding/operator/pybind_Resize.cpp
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -26,15 +26,14 @@ void init_Resize(py::module &m) {
   py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(
           m, "ResizeOp", py::multiple_inheritance())
         .def(py::init<Interpolation::CoordinateTransformation, Interpolation::Mode, float, PadBorderType>(), py::arg("coordinate_transformation_mode"), py::arg("interpolation_mode"), py::arg("cubic_coeff_a") = -0.75f, py::arg("padding_mode") = PadBorderType::Edge)
-        .def_static("get_inputs_name", &Resize_Op::getInputsName)
-        .def_static("get_outputs_name", &Resize_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Resize_Op::InputsName), std::end(Resize_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Resize_Op::OutputsName), std::end(Resize_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Resize_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ResizeAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-		    return result;
+            return std::vector<std::string>(std::begin(EnumStrings<ResizeAttr>::data), std::end(EnumStrings<ResizeAttr>::data));
 		})
         .def_readonly_static("Type", &Resize_Op::Type);
 
diff --git a/python_binding/operator/pybind_Round.cpp b/python_binding/operator/pybind_Round.cpp
index c055ab7fde3eb021a693a4de40a71bafbfd87d5a..aac5d8d40d483f6d785880d11f2f08ccfd3a78d4 100644
--- a/python_binding/operator/pybind_Round.cpp
+++ b/python_binding/operator/pybind_Round.cpp
@@ -29,8 +29,12 @@ void init_Round(py::module& m) {
         :type name: str
         )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Round_Op::getInputsName)
-    .def_static("get_outputs_name", &Round_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Round_Op::InputsName), std::end(Round_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Round_Op::OutputsName), std::end(Round_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Round_Op::Type);
 
     declare_registrable<Round_Op>(m, "RoundOp");
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
deleted file mode 100644
index ba975bb0616131b045f3a3076ffc595f69d8aa90..0000000000000000000000000000000000000000
--- a/python_binding/operator/pybind_Scaling.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2024 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <pybind11/pybind11.h>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/operator/Scaling.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
-
-namespace py = pybind11;
-
-namespace Aidge {
-
-void init_Scaling(py::module& m) {
-    py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, OperatorTensor>(
-        m, "ScalingOp", py::multiple_inheritance(),
-        R"mydelimiter(
-        Initialize a Scaling operator for element-wise tensor scaling.
-
-        This operator scales tensor elements by a specified scaling factor, 
-        optionally constraining the output to a specified bit-width and signedness.
-
-        :param scaling_factor: The scaling factor to apply to tensor elements.
-        :type scaling_factor: float
-        :param nb_bits: The number of bits for quantization of the output. Must be a positive integer.
-        :type nb_bits: int
-        :param is_output_unsigned: Specifies whether the output should be unsigned (True) or signed (False).
-        :type is_output_unsigned: bool
-        )mydelimiter")
-        .def(py::init<float, size_t, bool>(),
-             py::arg("scaling_factor"),
-             py::arg("nb_bits"),
-             py::arg("is_output_unsigned"))
-        .def_static("get_inputs_name", &Scaling_Op::getInputsName)
-        .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
-
-		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Scaling_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ScalingAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
-		})
-        .def_readonly_static("Type", &Scaling_Op::Type);
-
-    declare_registrable<Scaling_Op>(m, "ScalingOp");
-
-    m.def("Scaling", &Scaling,
-          py::arg("scaling_factor") = 1.0f,
-          py::arg("nb_bits") = 8,
-          py::arg("is_output_unsigned") = true,
-          py::arg("name") = "",
-          R"mydelimiter(
-          Initialize a node containing a Scaling operator to scale tensor elements.
-
-          This operator applies a scaling factor to each element of the input tensor. The result 
-          can optionally be quantized to a specific bit-width and constrained to unsigned or signed output.
-
-          :param scaling_factor: The factor by which to scale the tensor elements. Default is 1.0.
-          :type scaling_factor: float
-          :param nb_bits: The number of bits for quantized output. Default is 8.
-          :type nb_bits: int
-          :param is_output_unsigned: Indicates whether the output tensor values should be unsigned. Default is True.
-          :type is_output_unsigned: bool
-          :param name: The name of the node (optional).
-          :type name: str
-          :return: A node containing the Scaling operator.
-          :rtype: :py:class:`ScalingOp`
-          )mydelimiter");
-}
-
-}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Select.cpp b/python_binding/operator/pybind_Select.cpp
index 0cb858acd776bf553462439d487626ed2ab42463..de580d8edc4eb2cc608d6f5a92e40374c095b4b2 100644
--- a/python_binding/operator/pybind_Select.cpp
+++ b/python_binding/operator/pybind_Select.cpp
@@ -29,8 +29,12 @@ void init_Select(py::module& m) {
         )mydelimiter")
         .def(py::init<const IOIndex_t>(),
              py::arg("nb_inputs"))
-        .def_static("get_inputs_name", &Select_Op::getInputsName)
-        .def_static("get_outputs_name", &Select_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Select_Op::InputsName), std::end(Select_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Select_Op::OutputsName), std::end(Select_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Select_Op::Type);
 
     declare_registrable<Select_Op>(m, "SelectOp");
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
index 3c8974bf0e572322dd4ddc0641f35b7ecbe7b56f..9287d8ef8c1e8f6e2fb56fca33b0fe107d5c6e36 100644
--- a/python_binding/operator/pybind_Shape.cpp
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -32,16 +32,14 @@ void init_Shape(py::module& m) {
         :type end: int
         )mydelimiter")
         .def(py::init<const std::int64_t, const std::int64_t>(), py::arg("start"), py::arg("end"))
-        .def_static("get_inputs_name", &Shape_Op::getInputsName)
-        .def_static("get_outputs_name", &Shape_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Shape_Op::InputsName), std::end(Shape_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Shape_Op::OutputsName), std::end(Shape_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Shape_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<ShapeAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<ShapeAttr>::data), std::end(EnumStrings<ShapeAttr>::data));
 		})
         .def_readonly_static("Type", &Shape_Op::Type);
 
diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp
index b061d806f24de3e778e745a7a880cbdcd3d9dbb2..1ec08205a55b0aaea256b8626f33d2a77c97e25a 100644
--- a/python_binding/operator/pybind_Sigmoid.cpp
+++ b/python_binding/operator/pybind_Sigmoid.cpp
@@ -30,8 +30,12 @@ void init_Sigmoid(py::module& m) {
         :type name : str
         )mydelimiter")
         .def(py::init<>())
-        .def_static("get_inputs_name", &Sigmoid_Op::getInputsName)
-        .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Sigmoid_Op::InputsName), std::end(Sigmoid_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Sigmoid_Op::OutputsName), std::end(Sigmoid_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_readonly_static("Type", &Sigmoid_Op::Type);
 
 
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index 1cfd63f656f2fb9594dc6c4ee3a2591efa1ad25f..ed4fa0e2fbc565112f4c344cd0c6eaed7a23667f 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -43,16 +43,14 @@ void init_Slice(py::module& m) {
                   py::arg("ends"),
                   py::arg("axes") = std::vector<std::int8_t>(),
                   py::arg("steps") = std::vector<std::int64_t>())
-    .def_static("get_inputs_name", &Slice_Op::getInputsName)
-    .def_static("get_outputs_name", &Slice_Op::getOutputsName)
-
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Slice_Op::InputsName), std::end(Slice_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Slice_Op::OutputsName), std::end(Slice_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Slice_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<SliceAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+      return std::vector<std::string>(std::begin(EnumStrings<SliceAttr>::data), std::end(EnumStrings<SliceAttr>::data));
 		})
     .def_readonly_static("Type", &Slice_Op::Type);
 
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 7a4a687fd812c8d0366a435d2670a5e0110022f6..c5379d421e5a3b61c8cb4ee23eef184c248772f4 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -28,16 +28,14 @@ void init_Softmax(py::module& m) {
             :type axis: int
         )mydelimiter")
         .def(py::init<std::int32_t>(), py::arg("axis"))
-        .def_static("get_inputs_name", &Softmax_Op::getInputsName)
-        .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Softmax_Op::InputsName), std::end(Softmax_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Softmax_Op::OutputsName), std::end(Softmax_Op::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Softmax_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<SoftmaxAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<SoftmaxAttr>::data), std::end(EnumStrings<SoftmaxAttr>::data));
 		})
         .def_readonly_static("Type", &Softmax_Op::Type);
     declare_registrable<Softmax_Op>(m, "SoftmaxOp");
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
index 052fa277e400d0ca25d7c123384e84f6ad607628..70720829e5345b5824891c8c42ec5030e82e590f 100644
--- a/python_binding/operator/pybind_Split.cpp
+++ b/python_binding/operator/pybind_Split.cpp
@@ -33,17 +33,15 @@ void init_Split(py::module& m) {
     .def(py::init<DimSize_t, std::int8_t, std::vector<DimSize_t>&>(),
             py::arg("nb_outputs"),
             py::arg("axis"),
-            py::arg("split"))
-    .def_static("get_inputs_name", &Split_Op::getInputsName)
-    .def_static("get_outputs_name", &Split_Op::getOutputsName)
-
+        py::arg("split"))
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Split_Op::InputsName), std::end(Split_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Split_Op::OutputsName), std::end(Split_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Split_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<SplitAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<SplitAttr>::data), std::end(EnumStrings<SplitAttr>::data));
 		})
     .def_readonly_static("Type", &Split_Op::Type);
 
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index d383ae0a40585a1928fedb33874baf7f21f1dedd..4ed6a8bdf6764c0809cf401f88670cd73e1f6da5 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -24,8 +24,12 @@ void init_Sqrt(py::module& m) {
         This operator computes the square root of each element in the input tensor. The input values must be non-negative.
         )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Sqrt_Op::getInputsName)
-    .def_static("get_outputs_name", &Sqrt_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Sqrt_Op::InputsName), std::end(Sqrt_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Sqrt_Op::OutputsName), std::end(Sqrt_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Sqrt_Op::Type);
 
     declare_registrable<Sqrt_Op>(m, "SqrtOp");
diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
index 7808c78da081f11875df2d3755506ecaccc03181..7a06240c317f687b8ffb3895be68b7a22a2b4f57 100644
--- a/python_binding/operator/pybind_Squeeze.cpp
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -32,16 +32,14 @@ void init_Squeeze(py::module &m) {
     				& r in [-128 , 127]
     :type axes: :py:class: List[Int]
     )mydelimiter")
-    .def_static("get_inputs_name", &Squeeze_Op::getInputsName)
-    .def_static("get_outputs_name", &Squeeze_Op::getOutputsName)
-
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Squeeze_Op::InputsName), std::end(Squeeze_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Squeeze_Op::OutputsName), std::end(Squeeze_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Squeeze_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<SqueezeAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<SqueezeAttr>::data), std::end(EnumStrings<SqueezeAttr>::data));
 		})
     .def("axes", &Squeeze_Op::axes);
 
diff --git a/python_binding/operator/pybind_Stack.cpp b/python_binding/operator/pybind_Stack.cpp
index c27853fad27ce2ee90a3504a48dfe579065f1a52..641475efb3816a214a597e1d1e629ae41adcad0a 100644
--- a/python_binding/operator/pybind_Stack.cpp
+++ b/python_binding/operator/pybind_Stack.cpp
@@ -24,16 +24,14 @@ void init_Stack(py::module &m) {
         py::multiple_inheritance(),
         R"mydelimiter(Initialize a Stack operator.)mydelimiter")
         .def(py::init<const std::uint32_t>(), py::arg("max_elements"))
-        .def_static("get_inputs_name", &StackOp::getInputsName)
-        .def_static("get_outputs_name", &StackOp::getOutputsName)
-
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(StackOp::InputsName), std::end(StackOp::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(StackOp::OutputsName), std::end(StackOp::OutputsName));
+        }, "Get the names of the output tensors.")
 		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = StackOp::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<StackAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
+            return std::vector<std::string>(std::begin(EnumStrings<StackAttr>::data), std::end(EnumStrings<StackAttr>::data));
 		})
         .def_readonly_static("Type", &StackOp::Type);
 
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index b94de2f52be60e003c5b9707a0ac4fdc21ea969c..046b1a6deef41447c4d185eddc577448b80c04de 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -36,8 +36,12 @@ void init_Sub(py::module& m) {
         :type name: str, optional
         )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Sub_Op::getInputsName)
-    .def_static("get_outputs_name", &Sub_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Sub_Op::InputsName), std::end(Sub_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Sub_Op::OutputsName), std::end(Sub_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Sub_Op::Type);
     declare_registrable<Sub_Op>(m, "SubOp");
     m.def("Sub", &Sub, py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Tanh.cpp b/python_binding/operator/pybind_Tanh.cpp
index 6c0d026e65bbcd0e9e2f39fdabd25ca6024f148f..3f103b0db5715360f1e4aa522ab83d6cd5a29711 100644
--- a/python_binding/operator/pybind_Tanh.cpp
+++ b/python_binding/operator/pybind_Tanh.cpp
@@ -29,8 +29,12 @@ void init_Tanh(py::module& m) {
         :type name : str
         )mydelimiter")
     .def(py::init<>())
-    .def_static("get_inputs_name", &Tanh_Op::getInputsName)
-    .def_static("get_outputs_name", &Tanh_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Tanh_Op::InputsName), std::end(Tanh_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Tanh_Op::OutputsName), std::end(Tanh_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &Tanh_Op::Type);
 
     m.def("Tanh", &Tanh, py::arg("name") = "",
diff --git a/python_binding/operator/pybind_TopK.cpp b/python_binding/operator/pybind_TopK.cpp
index 314a3283baf251171904c497aa93cc9da282d0d0..8c06d08010267816452afbb9973952a871640422 100644
--- a/python_binding/operator/pybind_TopK.cpp
+++ b/python_binding/operator/pybind_TopK.cpp
@@ -21,15 +21,14 @@ namespace Aidge {
 void init_TopK(py::module& m) {
     py::class_<TopK_Op, std::shared_ptr<TopK_Op>, OperatorTensor>(m, "TopKOp", py::multiple_inheritance())
     .def(py::init<int64_t, bool, bool, IOIndex_t>(), py::arg("axis") = -1, py::arg("largest") = true, py::arg("sorted") = true, py::arg("k") = 0)
-    .def_static("get_inputs_name", &TopK_Op::getInputsName)
-    .def_static("get_outputs_name", &TopK_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(TopK_Op::InputsName), std::end(TopK_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(TopK_Op::OutputsName), std::end(TopK_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_static("attributes_name", []() {
-        std::vector<std::string> result;
-        auto attributes = TopK_Op::attributesName();
-        for (size_t i = 0; i < size(EnumStrings<TopKAttr>::data); ++i) {
-            result.emplace_back(attributes[i]);
-        }
-        return result;
+        return std::vector<std::string>(std::begin(EnumStrings<TopKAttr>::data), std::end(EnumStrings<TopKAttr>::data));
     })
     .def_readonly_static("Type", &TopK_Op::Type);
 
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index 344817976cebc0d79f960f838c005f09aa0d5c3e..34ec49baa7b3ba26695f6b3c30bb170f477843c8 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -36,15 +36,14 @@ void declare_Transpose(py::module &m) {
 		:type output_dims_order : :py:class: List[Int]
 		)mydelimiter")
     .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order")=std::vector<std::size_t>())
-    .def_static("get_inputs_name", &Transpose_Op::getInputsName)
-    .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(Transpose_Op::InputsName), std::end(Transpose_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(Transpose_Op::OutputsName), std::end(Transpose_Op::OutputsName));
+    }, "Get the names of the output tensors.")
 	.def_static("attributes_name", []() {
-		std::vector<std::string> result;
-		auto attributes = Transpose_Op::attributesName();
-		for (size_t i = 0; i < size(EnumStrings<Transpose_Op::Attr>::data); ++i) {
-			result.emplace_back(attributes[i]);
-		}
-		return result;
+		return std::vector<std::string>(std::begin(EnumStrings<Transpose_Op::Attr>::data), std::end(EnumStrings<Transpose_Op::Attr>::data));
 	})
     .def_readonly_static("Type", &Transpose_Op::Type);
   declare_registrable<Transpose_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_Unfold.cpp b/python_binding/operator/pybind_Unfold.cpp
index 86f8f26d63ef1c59ad68842a6e7291fa4706e8e1..05347ea3d112e2e9d8242f95b927a58566899dbb 100644
--- a/python_binding/operator/pybind_Unfold.cpp
+++ b/python_binding/operator/pybind_Unfold.cpp
@@ -41,17 +41,15 @@ void declare_UnfoldOp(py::module &m) {
         }), py::arg("kernel_dims"),
             py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
             py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
-        .def_static("get_inputs_name", &Unfold_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &Unfold_Op<DIM>::getOutputsName)
-
-		.def_static("attributes_name", []() {
-			std::vector<std::string> result;
-			auto attributes = Unfold_Op<DIM>::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<UnfoldAttr>::data); ++i) {
-				result.emplace_back(attributes[i]);
-			}
-			return result;
-		})
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Unfold_Op<DIM>::InputsName), std::end(Unfold_Op<DIM>::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Unfold_Op<DIM>::OutputsName), std::end(Unfold_Op<DIM>::OutputsName));
+        }, "Get the names of the output tensors.")
+        .def_static("attributes_name", []() {
+            return std::vector<std::string>(std::begin(EnumStrings<UnfoldAttr>::data), std::end(EnumStrings<UnfoldAttr>::data));
+        })
         .def_readonly_static("Type", &Unfold_Op<DIM>::Type)
         ;
 
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index 1ef94202cba1fe53e63a30780e95689526ec900a..a43878a5b2faa1ed286cfedc095dbed5e50b2b1c 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -28,15 +28,14 @@ void init_Unsqueeze(py::module &m) {
             :type axes: :py:class: List[Int]
 		)mydelimiter")
       // Here we bind the methods of the Unsqueeze_Op that will want to access
-      .def_static("get_inputs_name", &Unsqueeze_Op::getInputsName)
-      .def_static("get_outputs_name", &Unsqueeze_Op::getOutputsName)
+        .def_static("get_inputs_name", []() {
+            return std::vector<std::string>(std::begin(Unsqueeze_Op::InputsName), std::end(Unsqueeze_Op::InputsName));
+        }, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", []() {
+            return std::vector<std::string>(std::begin(Unsqueeze_Op::OutputsName), std::end(Unsqueeze_Op::OutputsName));
+        }, "Get the names of the output tensors.")
         .def_static("attributes_name", []() {
-            std::vector<std::string> result;
-            auto attributes = Unsqueeze_Op::attributesName();
-            for (size_t i = 0; i < size(EnumStrings<UnsqueezeAttr>::data); ++i) {
-                result.emplace_back(attributes[i]);
-            }
-            return result;
+            return std::vector<std::string>(std::begin(EnumStrings<UnsqueezeAttr>::data), std::end(EnumStrings<UnsqueezeAttr>::data));
         })
       .def_readonly_static("Type", &Unsqueeze_Op::Type)
       ;
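
Every binding file in this patch applies the same substitution: the former getInputsName()/getOutputsName() static member functions become lambdas that materialise the new constexpr InputsName/OutputsName arrays into std::vector<std::string>, and attributes_name() collapses into a single range construction over EnumStrings<...>::data. The following is a minimal, self-contained pybind11 sketch of that pattern, not Aidge code; DummyOp and dummy_ops are placeholder names, and C++17 is assumed so the in-class constexpr arrays are implicitly inline.

    #include <pybind11/pybind11.h>
    #include <pybind11/stl.h>   // converts std::vector<std::string> into a Python list
    #include <iterator>
    #include <string>
    #include <vector>

    namespace py = pybind11;

    struct DummyOp {
        static constexpr const char* const InputsName[]  = {"data_input"};
        static constexpr const char* const OutputsName[] = {"data_output"};
    };

    PYBIND11_MODULE(dummy_ops, m) {
        py::class_<DummyOp>(m, "DummyOp")
            .def(py::init<>())
            .def_static("get_inputs_name", []() {
                return std::vector<std::string>(std::begin(DummyOp::InputsName),
                                                std::end(DummyOp::InputsName));
            }, "Get the names of the input tensors.")
            .def_static("get_outputs_name", []() {
                return std::vector<std::string>(std::begin(DummyOp::OutputsName),
                                                std::end(DummyOp::OutputsName));
            }, "Get the names of the output tensors.");
    }

From Python the observable API is unchanged: DummyOp.get_inputs_name() still returns ["data_input"].
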
diff --git a/python_binding/operator/pybind_WeightInterleaving.cpp b/python_binding/operator/pybind_WeightInterleaving.cpp
index 25b423bd66503b39f031695121cf673c45c34bbe..81bb69d11b8fafb22be670e11261bf25de5cf717 100644
--- a/python_binding/operator/pybind_WeightInterleaving.cpp
+++ b/python_binding/operator/pybind_WeightInterleaving.cpp
@@ -19,8 +19,12 @@ namespace Aidge {
 void declare_WeightInterleaving(py::module &m) {
   py::class_<WeightInterleaving_Op, std::shared_ptr<WeightInterleaving_Op>, OperatorTensor>(m, "WeightInterleavingOp", py::multiple_inheritance())
     .def(py::init<>())
-    .def_static("get_inputs_name", &WeightInterleaving_Op::getInputsName)
-    .def_static("get_outputs_name", &WeightInterleaving_Op::getOutputsName)
+    .def_static("get_inputs_name", []() {
+        return std::vector<std::string>(std::begin(WeightInterleaving_Op::InputsName), std::end(WeightInterleaving_Op::InputsName));
+    }, "Get the names of the input tensors.")
+    .def_static("get_outputs_name", []() {
+        return std::vector<std::string>(std::begin(WeightInterleaving_Op::OutputsName), std::end(WeightInterleaving_Op::OutputsName));
+    }, "Get the names of the output tensors.")
     .def_readonly_static("Type", &WeightInterleaving_Op::Type)
 
     .def("__repr__", [](WeightInterleaving_Op& b) {
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index f151aaae10fb1a2b2e5aa6e76a33ce031d64d7c4..c4534d3ee4bdfda14e0c11e40c0ded7c2fab421a 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -87,7 +87,6 @@ void init_ReduceSum(py::module&);
 void init_Reshape(py::module&);
 void init_Resize(py::module&);
 void init_Round(py::module&);
-void init_Scaling(py::module&);
 void init_Select(py::module&);
 void init_Shape(py::module&);
 void init_Sigmoid(py::module&);
@@ -198,7 +197,6 @@ void init_Aidge(py::module& m) {
     init_Reshape(m);
     init_Resize(m);
     init_Round(m);
-    init_Scaling(m);
     init_Select(m);
     init_Shape(m);
     init_Sigmoid(m);
diff --git a/src/backend/generic/operator/TransposeImpl.cpp b/src/backend/generic/operator/TransposeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..78b868810b830c44c1167b6c73faf7aad4e47663
--- /dev/null
+++ b/src/backend/generic/operator/TransposeImpl.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/generic/operator/TransposeImpl.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Transpose.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace Aidge {
+
+void Aidge::TransposeImpl::forward() {
+    const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
+    op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.outputDimsOrder());
+}
+
+} // namespace Aidge
diff --git a/src/operator/Abs.cpp b/src/operator/Abs.cpp
index 86d4dd94bbb78239eb5a6ee27cf68d8876944423..956f57421ffa355dbadd829586ec2f42196d3ed3 100644
--- a/src/operator/Abs.cpp
+++ b/src/operator/Abs.cpp
@@ -9,43 +9,29 @@
  *
  ********************************************************************************/
 
-#include "aidge/operator/Abs.hpp"
-
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
 #include <string>
+#include <vector>
 
 #include "aidge/data/Tensor.hpp"
-#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Abs.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
 
-namespace Aidge {
-
-const std::string Abs_Op::Type = "Abs";
-
-Abs_Op::Abs_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+constexpr const char* const Aidge::Abs_Op::Type;
+constexpr const char* const Aidge::Abs_Op::InputsName[];
+constexpr const char* const Aidge::Abs_Op::OutputsName[];
 
-Abs_Op::Abs_Op(const Abs_Op& op)
-    : OperatorTensor(op)
+Aidge::Abs_Op::Abs_Op()
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1)
 {
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Abs_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-void Abs_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(Abs_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Abs_Op::getAvailableBackends() const {
-    return Registrar<Abs_Op>::getKeys();
+    // ctor
 }
 
 ////////////////////////////////////////////////////////////////////////////////
 
-std::shared_ptr<Node> Abs(const std::string& name) {
+std::shared_ptr<Aidge::Node> Aidge::Abs(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Abs_Op>(), name);
 }
-
-} // namespace Aidge
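
The seemingly empty statements added at the top of Abs.cpp (constexpr const char* const Aidge::Abs_Op::Type; and the two name arrays) are out-of-line definitions: before C++17, a constexpr static data member declared in a header is only a declaration with an initializer, and it needs exactly one definition in some translation unit if it is odr-used, for example when its address is taken or when it is passed to std::begin/std::end. Since C++17 such members are implicitly inline, so the definition is redundant but still legal. A short illustrative sketch outside Aidge, with SomeOp as a placeholder:

    // Header part: declaration with initializer.
    struct SomeOp {
        static constexpr const char* const Type = "Some";
        static constexpr const char* const InputsName[] = {"data_input"};
    };

    // In exactly one .cpp: definitions without an initializer. Needed when the
    // members are odr-used and the code must still build as C++14.
    constexpr const char* const SomeOp::Type;
    constexpr const char* const SomeOp::InputsName[];
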
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index f6fd0cd9fc647e29402d36f1f6838642e099ae6c..461c6f3dc233429db3060d291d97312e255e1e3b 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -20,28 +20,16 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 
-const std::string Aidge::Add_Op::Type = "Add";
+constexpr const char* const Aidge::Add_Op::Type;
+constexpr const char* const Aidge::Add_Op::InputsName[];
+constexpr const char* const Aidge::Add_Op::OutputsName[];
 
 Aidge::Add_Op::Add_Op()
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1)
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1)
 {
     // ctor
 }
 
-Aidge::Add_Op::Add_Op(const Add_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Add_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Add_Op::clone() const {
-    return std::make_shared<Add_Op>(*this);
-}
-
 bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
@@ -78,15 +66,6 @@ bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(Add_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Add_Op::getAvailableBackends() const {
-    return Registrar<Add_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Add(const std::string& name) {
diff --git a/src/operator/And.cpp b/src/operator/And.cpp
index 1d496e11e33e74c8d0b218c93a1d1623382a8746..1934e3e250eed95cb29165478d2c762e83ba7da4 100644
--- a/src/operator/And.cpp
+++ b/src/operator/And.cpp
@@ -21,22 +21,14 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
+constexpr const char* const Aidge::And_Op::Type;
+constexpr const char* const Aidge::And_Op::InputsName[];
+constexpr const char* const Aidge::And_Op::OutputsName[];
 
-const std::string Aidge::And_Op::Type = "And";
-
-And_Op::And_Op()
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1)
-{}
-
-And_Op::And_Op(const And_Op& op)
-    : OperatorTensor(op)
+Aidge::And_Op::And_Op()
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1)
 {
-    if (op.mImpl) {
-        SET_IMPL_MACRO(And_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
+    // ctor
 }
 
 bool Aidge::And_Op::forwardDims(bool /*allowDataDependency*/) {
@@ -68,19 +60,8 @@ bool Aidge::And_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::And_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(And_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::And_Op::getAvailableBackends() const {
-    return Registrar<And_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
-std::shared_ptr<Node> And(const std::string& name) {
+std::shared_ptr<Aidge::Node> Aidge::And(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<And_Op>(), name);
 }
-
-} // namespace AIdge
\ No newline at end of file
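
In the operator sources above (Abs, Add, And, and the files that follow), the hand-written copy constructor, clone(), setBackend() and getAvailableBackends() disappear because the operators now derive from OperatorTensorWithImpl<Derived>. That base class is not shown in this excerpt, so the snippet below is only a rough CRTP sketch of the kind of boilerplate such a helper can centralise; every name in it is a placeholder, not the actual Aidge implementation.

    #include <memory>
    #include <set>
    #include <string>

    struct OperatorBase {
        virtual ~OperatorBase() = default;
        virtual std::unique_ptr<OperatorBase> clone() const = 0;
        virtual std::set<std::string> availableBackends() const = 0;
    };

    // CRTP helper: the concrete operator type is known at compile time, so
    // clone() and the backend lookup can be written once instead of per operator.
    template <class Derived>
    struct WithImpl : OperatorBase {
        std::unique_ptr<OperatorBase> clone() const override {
            return std::make_unique<Derived>(static_cast<const Derived&>(*this));
        }
        std::set<std::string> availableBackends() const override {
            return {"cpu"};  // stand-in for a per-operator registrar query
        }
    };

    struct AddLike : WithImpl<AddLike> {};  // each operator only declares itself
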
diff --git a/src/operator/ArgMax.cpp b/src/operator/ArgMax.cpp
index 5abb4e9693e01d37e174b8c1f10bd45bb8f7d27d..873848024784160ee4e9df5cd90783d79a197354 100644
--- a/src/operator/ArgMax.cpp
+++ b/src/operator/ArgMax.cpp
@@ -22,10 +22,12 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ArgMax_Op::Type = "ArgMax";
+constexpr const char* const Aidge::ArgMax_Op::Type;
+constexpr const char* const Aidge::ArgMax_Op::InputsName[];
+constexpr const char* const Aidge::ArgMax_Op::OutputsName[];
 
 Aidge::ArgMax_Op::ArgMax_Op(std::int32_t axis, bool keep_dims, bool select_last_index)
-: OperatorTensor(Type, {InputCategory::Data}, 1),
+: OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
   mAttributes(std::make_shared<Attributes_>(
     attr<ArgMaxAttr::Axis>(axis),
     attr<ArgMaxAttr::KeepDims>(keep_dims),
@@ -33,19 +35,9 @@ Aidge::ArgMax_Op::ArgMax_Op(std::int32_t axis, bool keep_dims, bool select_last_
 {}
 
 Aidge::ArgMax_Op::ArgMax_Op(const Aidge::ArgMax_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(ArgMax_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::ArgMax_Op::clone() const {
-    return std::make_shared<ArgMax_Op>(*this);
-}
+{}
 
 bool Aidge::ArgMax_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -69,15 +61,6 @@ bool Aidge::ArgMax_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::ArgMax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(ArgMax_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ArgMax_Op::getAvailableBackends() const {
-    return Registrar<ArgMax_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ArgMax(std::int32_t axis,
diff --git a/src/operator/Atan.cpp b/src/operator/Atan.cpp
index c0a494ee66fb11bcccac21141da30df5546f0b3c..457f724c44d2ff83e045157248848abb87dc1901 100644
--- a/src/operator/Atan.cpp
+++ b/src/operator/Atan.cpp
@@ -18,35 +18,13 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Atan_Op::Type = "Atan";
+constexpr const char* const Aidge::Atan_Op::Type;
+constexpr const char* const Aidge::Atan_Op::InputsName[];
+constexpr const char* const Aidge::Atan_Op::OutputsName[];
 
-Aidge::Atan_Op::Atan_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+Aidge::Atan_Op::Atan_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
-Aidge::Atan_Op::Atan_Op(const Aidge::Atan_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Atan_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Atan_Op::clone() const {
-    return std::make_shared<Atan_Op>(*this);
-}
-
-
-void Aidge::Atan_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Atan_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Atan_Op::getAvailableBackends() const {
-    return Registrar<Atan_Op>::getKeys();
-}
-
-///////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Atan(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Atan_Op>(), name);
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 25eb5933002569fdf0ec118ee09e499768264996..9d70a3e989e46277fd43997a04088c0f479b059e 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -23,16 +23,16 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling" + std::to_string(DIM) + "D";
-
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::AvgPooling_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::AvgPooling_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::AvgPooling_Op<DIM>::OutputsName[];
 
 template <Aidge::DimIdx_t DIM>
 Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
     const std::array<DimSize_t, DIM> &stride_dims,
     const std::array<DimSize_t, DIM> &dilations,
     bool ceil_mode)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl<AvgPooling_Op<DIM>>(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
     attr<AvgPoolingAttr::StrideDims>(stride_dims),
     attr<AvgPoolingAttr::KernelDims>(kernel_dims),
@@ -42,20 +42,9 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const std::array<DimSize_t, DIM> &kerne
 
 template <Aidge::DimIdx_t DIM>
 Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl<AvgPooling_Op<DIM>>(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::AvgPooling_Op<DIM>::clone() const {
-    return std::make_shared<AvgPooling_Op<DIM>>(*this);
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -132,24 +121,12 @@ Aidge::AvgPooling_Op<DIM>::computeReceptiveField(const std::vector<Aidge::DimSiz
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
 }
 
-
-template <Aidge::DimIdx_t DIM>
-void Aidge::AvgPooling_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::AvgPooling_Op<DIM>::getAvailableBackends() const {
-    return Registrar<AvgPooling_Op<DIM>>::getKeys();
-}
-
 template class Aidge::AvgPooling_Op<1>;
 template class Aidge::AvgPooling_Op<2>;
 template class Aidge::AvgPooling_Op<3>;
 template class Aidge::AvgPooling_Op<4>;
 
-////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::AvgPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
@@ -160,6 +137,7 @@ std::shared_ptr<Aidge::Node> Aidge::AvgPooling(const std::array<Aidge::DimSize_t
     AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", AvgPooling_Op<DIM>::Type);
     return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilations, ceil_mode), name);
 }
+
 template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
 template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
 template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&, const std::array<Aidge::DimSize_t, 3>&, bool);
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index 3d58d6ce397ac8c2601b1fce543ca127abf3aaca..e86b27209821c139a91336e79dfd287468e2d819 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -23,12 +23,13 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm" + std::to_string(DIM) + "D";
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::BatchNorm_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::BatchNorm_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::BatchNorm_Op<DIM>::OutputsName[];
 
 template <Aidge::DimIdx_t DIM>
 Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(float epsilon, float momentum, bool trainingMode)
-    : OperatorTensor(Type,
+    : OperatorTensorWithImpl<BatchNorm_Op<DIM>>(Type,
                         {InputCategory::Data,
                         InputCategory::Param,
                         InputCategory::Param,
@@ -44,20 +45,9 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(float epsilon, float momentum, bool train
 
 template <Aidge::DimIdx_t DIM>
 Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl<BatchNorm_Op<DIM>>(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::BatchNorm_Op<DIM>::clone() const {
-    return std::make_shared<BatchNorm_Op<DIM>>(*this);
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -76,50 +66,12 @@ bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::BatchNorm_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-
-    // By default, automatically set backend for scale, shift, mean and variance
-    if (getInput(1)) {
-        getInput(1)->setBackend(name, device);
-    }
-    else {
-        Log::notice("BatchNorm_Op::setBackend(): could not set backend for scale input, because input is not connected");
-    }
-
-    if (getInput(2)) {
-        getInput(2)->setBackend(name, device);
-    }
-    else {
-        Log::notice("BatchNorm_Op::setBackend(): could not set backend for shift input, because input is not connected");
-    }
-
-    if (getInput(3)) {
-        getInput(3)->setBackend(name, device);
-    }
-    else {
-        Log::notice("BatchNorm_Op::setBackend(): could not set backend for variance input, because input is not connected");
-    }
-
-    if (getInput(4)) {
-        getInput(4)->setBackend(name, device);
-    }
-    else {
-        Log::notice("BatchNorm_Op::setBackend(): could not set backend for mean input, because input is not connected");
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::BatchNorm_Op<DIM>::getAvailableBackends() const {
-    return Registrar<BatchNorm_Op<DIM>>::getKeys();
-}
-
 template class Aidge::BatchNorm_Op<2>;
 template class Aidge::BatchNorm_Op<3>;
 template class Aidge::BatchNorm_Op<4>;
 
+////////////////////////////////////////////////////////////////////////////////
+
 template <Aidge::DimSize_t DIM>
 inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const Aidge::DimSize_t nbFeatures,
                                        const float epsilon,
diff --git a/src/operator/BitShift.cpp b/src/operator/BitShift.cpp
index c2fa39d0bd47b9f8d89e9dc5a18ffeb8a132fc18..6f13147ff551bb17ed57cf0f9315727e6de87ef2 100644
--- a/src/operator/BitShift.cpp
+++ b/src/operator/BitShift.cpp
@@ -21,29 +21,23 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
+constexpr const char* const Aidge::BitShift_Op::Type;
+constexpr const char* const Aidge::BitShift_Op::InputsName[];
+constexpr const char* const Aidge::BitShift_Op::OutputsName[];
 
-const std::string BitShift_Op::Type = "BitShift";
-
-BitShift_Op::BitShift_Op(BitShiftDirection direction, bool rounding)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1),
+Aidge::BitShift_Op::BitShift_Op(BitShiftDirection direction, bool rounding)
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1),
       mAttributes(std::make_shared<Attributes_>(
         attr<BitShiftAttr::BitShiftdirection>(direction),
         attr<BitShiftAttr::Rounding>(rounding)))
 {}
 
-BitShift_Op::BitShift_Op(const BitShift_Op& op)
-    : OperatorTensor(op),
+Aidge::BitShift_Op::BitShift_Op(const BitShift_Op& op)
+    : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(BitShift_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
+{}
 
-bool BitShift_Op::forwardDims(bool /*allowDataDependency*/) {
+bool Aidge::BitShift_Op::forwardDims(bool /*allowDataDependency*/) {
     if (!inputsAssociated()) {
     return false;
     }
@@ -73,20 +67,8 @@ bool BitShift_Op::forwardDims(bool /*allowDataDependency*/) {
     return true;
 }
 
-
-void BitShift_Op::setBackend(const std::string &name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(BitShift_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> BitShift_Op::getAvailableBackends() const {
-    return Registrar<BitShift_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
-std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direction, bool rounding, const std::string& name) {
+std::shared_ptr<Aidge::Node> Aidge::BitShift(const BitShift_Op::BitShiftDirection direction, bool rounding, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<BitShift_Op>(direction, rounding), name);
 }
-
-} // namespace Aidge
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index ac9d55247762c8a4c6181ce8266da82357e81c75..518a2dc245682458f43498e0d71f4345a21b6191 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -24,30 +24,22 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
+constexpr const char* const Aidge::Cast_Op::Type;
+constexpr const char* const Aidge::Cast_Op::InputsName[];
+constexpr const char* const Aidge::Cast_Op::OutputsName[];
 
-const std::string Cast_Op::Type = "Cast";
-
-Cast_Op::Cast_Op(const DataType targetType)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+Aidge::Cast_Op::Cast_Op(const DataType targetType)
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
       mAttributes(std::make_shared<Attributes_>(
         attr<CastAttr::TargetType>(targetType)))
 {
-    mImpl = std::make_shared<Cast_OpImpl>(*this);
     mOutputs[0]->setDataType(targetType);
 }
 
-Cast_Op::Cast_Op(const Cast_Op& op)
-    : OperatorTensor(op),
+Aidge::Cast_Op::Cast_Op(const Cast_Op& op)
+    : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Cast_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Cast_OpImpl>(*this);
-    }
-}
+{}
 
 bool Aidge::Cast_Op::forwardDType(){
     mOutputs[0]->setDataType(mAttributes->getAttr<CastAttr::TargetType>());
@@ -60,24 +52,8 @@ void Aidge::Cast_Op::setDataType(const DataType& dataType) const {
     }
 }
 
-void Cast_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    if (Registrar<Cast_Op>::exists({name})) {
-        SET_IMPL_MACRO(Cast_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Cast_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Cast_Op::getAvailableBackends() const {
-    return Registrar<Cast_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
-std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name) {
+std::shared_ptr<Aidge::Node> Aidge::Cast(const DataType targetType, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
 }
-
-} // namespace Aidge
diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp
index 0f8d2d5f7c3db076cde396ccab3db88c96d7aff2..a1bb7fd1707527eff4389df1caaf8c3d7c7502f1 100644
--- a/src/operator/Clip.cpp
+++ b/src/operator/Clip.cpp
@@ -17,28 +17,20 @@
 #include "aidge/utils/Types.h"
 #include "aidge/operator/Clip.hpp"
 
-namespace Aidge {
+constexpr const char* const Aidge::Clip_Op::Type;
+constexpr const char* const Aidge::Clip_Op::InputsName[];
+constexpr const char* const Aidge::Clip_Op::OutputsName[];
 
-const std::string Clip_Op::Type = "Clip";
-
-Clip_Op::Clip_Op(float min, float max)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
+Aidge::Clip_Op::Clip_Op(float min, float max)
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
         mAttributes(std::make_shared<Attributes_>(attr<ClipAttr::Min>(min), attr<ClipAttr::Max>(max))) {}
 
-Clip_Op::Clip_Op(const Clip_Op& op)
-    : OperatorTensor(op),
+Aidge::Clip_Op::Clip_Op(const Clip_Op& op)
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Clip_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-Clip_Op::~Clip_Op() noexcept = default;
+{}
 
-bool Clip_Op::dimsForwarded() const {
+bool Aidge::Clip_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined())
         || (getInput(2) && !getInput(2)->undefined()))
     {
@@ -107,20 +99,9 @@ float& Aidge::Clip_Op::max() const {
     return mAttributes->getAttr<ClipAttr::Max>();
 }
 
-
-void Aidge::Clip_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Clip_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-std::set<std::string> Clip_Op::getAvailableBackends() const {
-    return Registrar<Clip_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
-std::shared_ptr<Node> Clip(const std::string &name, float min, float max)
+std::shared_ptr<Aidge::Node> Aidge::Clip(const std::string &name, float min, float max)
 {
     return std::make_shared<Node>(std::make_shared<Clip_Op>(min, max), name);
 }
-
-} // namespace Aidge
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 8cc4f5f3b1e258830814e4ebca9c164122b6e79e..19cb13429fee8b9f603281c618cab1951d70dc41 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -20,34 +20,24 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Concat_Op::Type = "Concat";
+constexpr const char* const Aidge::Concat_Op::Type;
+constexpr const char* const Aidge::Concat_Op::InputsName[];
+constexpr const char* const Aidge::Concat_Op::OutputsName[];
 
 Aidge::Concat_Op::Concat_Op(const Aidge::IOIndex_t nbIn, const std::int32_t axis)
-    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
+    : OperatorTensorWithImpl(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<ConcatAttr::Axis>(axis)))
 {
     if (nbIn == 0) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
     }
-    mImpl = std::make_shared<Concat_OpImpl>(*this);
 }
 
 Aidge::Concat_Op::Concat_Op(const Aidge::Concat_Op& op)
-    : OperatorTensor(op),
-        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Concat_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Concat_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Concat_Op::clone() const {
-    return std::make_shared<Concat_Op>(*this);
-}
+    : OperatorTensorWithImpl(op),
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+{}
 
 bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
     if (!inputsAssociated()) {
@@ -87,21 +77,7 @@ bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
     return true;
 }
 
-void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    if (Registrar<Concat_Op>::exists({name})) {
-        SET_IMPL_MACRO(Concat_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Concat_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Concat_Op::getAvailableBackends() const {
-    return Registrar<Concat_Op>::getKeys();
-}
-
-/////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Concat(const Aidge::IOIndex_t nbIn, const std::int32_t axis, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
diff --git a/src/operator/ConstantOfShape.cpp b/src/operator/ConstantOfShape.cpp
index 2e9adb8181511d2851a84f6965051bbe3e402f0e..5210d421c568d7c5f6f3c37aa004240aa55b2f49 100644
--- a/src/operator/ConstantOfShape.cpp
+++ b/src/operator/ConstantOfShape.cpp
@@ -24,26 +24,20 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
+constexpr const char* const Aidge::ConstantOfShape_Op::Type;
+constexpr const char* const Aidge::ConstantOfShape_Op::InputsName[];
+constexpr const char* const Aidge::ConstantOfShape_Op::OutputsName[];
 
-const std::string ConstantOfShape_Op::Type = "ConstantOfShape";
-
-ConstantOfShape_Op::ConstantOfShape_Op(const Tensor &value)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+Aidge::ConstantOfShape_Op::ConstantOfShape_Op(const Tensor &value)
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
       mAttributes(std::make_shared<Attributes_>(
         attr<ConstantOfShapeAttr::Value>(value))) {}
 
-ConstantOfShape_Op::ConstantOfShape_Op(const ConstantOfShape_Op &op)
-    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(ConstantOfShape_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
+Aidge::ConstantOfShape_Op::ConstantOfShape_Op(const ConstantOfShape_Op &op)
+    : OperatorTensorWithImpl(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+{}
 
-bool ConstantOfShape_Op::forwardDims(bool allowDataDependency) {
+bool Aidge::ConstantOfShape_Op::forwardDims(bool allowDataDependency) {
   if (!inputsAssociated()) {
     return false;
   }
@@ -72,22 +66,15 @@ bool ConstantOfShape_Op::forwardDims(bool allowDataDependency) {
   return true;
 }
 
-void ConstantOfShape_Op::setBackend(const std::string &name,
+void Aidge::ConstantOfShape_Op::setBackend(const std::string &name,
                                        Aidge::DeviceIdx_t device) {
-  SET_IMPL_MACRO(ConstantOfShape_Op, *this, name);
-  mOutputs[0]->setBackend(name, device);
+  OperatorTensorWithImpl::setBackend(name, device);
   value().setBackend(name,device);
 }
 
-std::set<std::string> Aidge::ConstantOfShape_Op::getAvailableBackends() const {
-  return Registrar<ConstantOfShape_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
-std::shared_ptr<Node> ConstantOfShape(const Tensor value, const std::string &name) {
+std::shared_ptr<Aidge::Node> Aidge::ConstantOfShape(const Tensor value, const std::string &name) {
   return std::make_shared<Node>(std::make_shared<ConstantOfShape_Op>(value),
                                 name);
 }
-
-} // namespace Aidge
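
ConstantOfShape_Op keeps a setBackend() override because its Value attribute owns a Tensor that must follow the operator onto the new backend; the override now delegates the generic part to the base class before touching the attribute. A reduced sketch of that delegate-then-extend pattern, using placeholder types rather than the Aidge classes:

    #include <string>

    struct FakeTensor {
        void setBackend(const std::string& /*name*/, int /*device*/) {}
    };

    struct BaseOp {
        virtual ~BaseOp() = default;
        virtual void setBackend(const std::string& /*name*/, int /*device*/) {
            // generic handling: outputs and implementation selection
        }
    };

    struct ConstantLikeOp : BaseOp {
        FakeTensor mValue;  // attribute-held tensor, analogous to value()
        void setBackend(const std::string& name, int device) override {
            BaseOp::setBackend(name, device);  // generic handling first
            mValue.setBackend(name, device);   // then move the attribute tensor too
        }
    };
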
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 89a7236e3e8bfb81030398118cb70c6c677c3689..923b33fff091b74430cac1a57be43e8ef5e3e96f 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -24,20 +24,14 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Conv_Op<DIM>::Type = "Conv" + std::to_string(DIM) + "D";
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Conv_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Conv_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Conv_Op<DIM>::OutputsName[];
 
 template <Aidge::DimIdx_t DIM>
 Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
-    : OperatorTensor(op),
-      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
+    : OperatorTensorWithImpl<Conv_Op<DIM>>(op),
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -45,7 +39,7 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
         return false;
     // first check weight since it defines inChannels and outChannels
     if (getInput(0)->nbDims() != (DIM+2)) {
-        Log::error("Wrong number of dimensions for input '{}'.", getInputsName()[0]);
+        Log::error("Wrong number of dimensions for input '{}'.", InputsName[0]);
         return false;
     }
     if(getInput(0)->dataFormat() == Aidge::DataFormat::NHWC) {
@@ -161,25 +155,6 @@ Aidge::Conv_Op<DIM>::computeReceptiveField(
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-
-    // By default, automatically set backend for weight and bias inputs
-    if (getInput(1)) {
-        getInput(1)->setBackend(name, device);
-    }
-    else {
-        Log::notice("Conv_Op::setBackend(): could not set backend for weight input, because input is not connected");
-    }
-
-    if (getInput(2)) {
-        // Bias is optional
-        getInput(2)->setBackend(name, device);
-    }
-}
-
 template <Aidge::DimIdx_t DIM>
 Aidge::DimSize_t Aidge::Conv_Op<DIM>::inChannels() const {
     if (!getInput(1)) {
@@ -202,16 +177,11 @@ Aidge::DimSize_t Aidge::Conv_Op<DIM>::outChannels() const {
     return getInput(1)->template dims<DIM+2>()[0];
 }
 
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::Conv_Op<DIM>::getAvailableBackends() const {
-    return Registrar<Conv_Op<DIM>>::getKeys();
-}
-
 template class Aidge::Conv_Op<1>;
 template class Aidge::Conv_Op<2>;
 template class Aidge::Conv_Op<3>;
 
-/////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::Conv(Aidge::DimSize_t inChannels,
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index 0a8480acec5cade021e5e3405212b2c446914a31..ccec6e6f280ef4d3da386673e81884319fba4333 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -24,20 +24,15 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise" + std::to_string(DIM) + "D";
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::ConvDepthWise_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::ConvDepthWise_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::ConvDepthWise_Op<DIM>::OutputsName[];
 
 template <Aidge::DimIdx_t DIM>
 Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl<ConvDepthWise_Op<DIM>>(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -138,34 +133,10 @@ Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-
-    // By default, automatically set backend for weight and bias inputs
-    if (getInput(1)) {
-        getInput(1)->setBackend(name, device);
-    }
-    else {
-        Log::notice("ConvDepthWise_Op::setBackend(): could not set backend for weight input, because input is not connected");
-    }
-
-    if (getInput(2)) {
-        // Bias is optional
-        getInput(2)->setBackend(name, device);
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::ConvDepthWise_Op<DIM>::getAvailableBackends() const {
-    return Registrar<ConvDepthWise_Op<DIM>>::getKeys();
-}
-
 template class Aidge::ConvDepthWise_Op<1>;
 template class Aidge::ConvDepthWise_Op<2>;
 
-////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise(const Aidge::DimSize_t nbChannels,
diff --git a/src/operator/ConvTranspose.cpp b/src/operator/ConvTranspose.cpp
index e315ad8cc1488597038760effcd487f1ff2426a1..1d8ea654cce84e968e385d887b4881819d71fab7 100644
--- a/src/operator/ConvTranspose.cpp
+++ b/src/operator/ConvTranspose.cpp
@@ -26,19 +26,15 @@
 
 namespace Aidge {
 
-template <DimIdx_t DIM>
-const std::string ConvTranspose_Op<DIM>::Type =
-    "ConvTranspose" + std::to_string(DIM) + "D";
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::ConvTranspose_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::ConvTranspose_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::ConvTranspose_Op<DIM>::OutputsName[];
 
 template <DimIdx_t DIM>
 ConvTranspose_Op<DIM>::ConvTranspose_Op(const ConvTranspose_Op<DIM> &op)
-    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
-    if (op.mImpl) {
-        SET_IMPL_MACRO(ConvTranspose_Op<DIM>, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
+    : OperatorTensorWithImpl<ConvTranspose_Op<DIM>>(op),
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+{}
 
 template <DimIdx_t DIM>
 bool ConvTranspose_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -195,36 +191,11 @@ ConvTranspose_Op<DIM>::computeReceptiveField(
     return res;
 }
 
-template <DimIdx_t DIM>
-void ConvTranspose_Op<DIM>::setBackend(const std::string &name,
-                                       DeviceIdx_t device) {
-    SET_IMPL_MACRO(ConvTranspose_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-
-    // By default, automatically set backend for weight and bias inputs
-    if (getInput(1)) {
-        getInput(1)->setBackend(name, device);
-    } else {
-        Log::notice("ConvTranspose_Op::setBackend(): could not set backend "
-                    "for weight input, because input is not connected");
-    }
-
-    if (getInput(2)) {
-        // Bias is optional
-        getInput(2)->setBackend(name, device);
-    }
-}
-
-template <DimIdx_t DIM>
-std::set<std::string> ConvTranspose_Op<DIM>::getAvailableBackends() const {
-    return Registrar<ConvTranspose_Op<DIM>>::getKeys();
-}
-
 template class ConvTranspose_Op<1>;
 template class ConvTranspose_Op<2>;
 template class ConvTranspose_Op<3>;
 
-/////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<DimIdx_t, 1>::size_type DIM>
 std::shared_ptr<Node>
diff --git a/src/operator/CryptoHash.cpp b/src/operator/CryptoHash.cpp
index 530e94766e7d790e64dae70239bdf3ef6eed3a53..a6dc6513a74ee64c2e50f6bd4a43e301b8155038 100644
--- a/src/operator/CryptoHash.cpp
+++ b/src/operator/CryptoHash.cpp
@@ -20,44 +20,27 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::CryptoHash_Op::Type = "CryptoHash";
+constexpr const char* const Aidge::CryptoHash_Op::Type;
+constexpr const char* const Aidge::CryptoHash_Op::InputsName[];
+constexpr const char* const Aidge::CryptoHash_Op::OutputsName[];
 
 Aidge::CryptoHash_Op::CryptoHash_Op()
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
         attr<CryptoHashAttr::CryptoHashFunction>(CryptoHashFunction::SHA256)))
 {}
 
 Aidge::CryptoHash_Op::CryptoHash_Op(const Aidge::CryptoHash_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(CryptoHash_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::CryptoHash_Op::clone() const {
-    return std::make_shared<CryptoHash_Op>(*this);
-}
+{}
 
 bool Aidge::CryptoHash_Op::forwardDims(bool /*allowDataDependency*/) {
     mOutputs[0]->resize({256 / getDataTypeBitWidth(mOutputs[0]->dataType())});
     return true;
 }
 
-void Aidge::CryptoHash_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(CryptoHash_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::CryptoHash_Op::getAvailableBackends() const {
-    return Registrar<CryptoHash_Op>::getKeys();
-}
-
-///////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::CryptoHash(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<CryptoHash_Op>(), name);
diff --git a/src/operator/DepthToSpace.cpp b/src/operator/DepthToSpace.cpp
index 9ab7034e7bdc80396742ce5d436b7a768b3686f0..cbaaaca6ded8f555f0f3068b60a1c1bde788731f 100644
--- a/src/operator/DepthToSpace.cpp
+++ b/src/operator/DepthToSpace.cpp
@@ -21,11 +21,12 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::DepthToSpace_Op::Type = "DepthToSpace";
+constexpr const char* const Aidge::DepthToSpace_Op::Type;
+constexpr const char* const Aidge::DepthToSpace_Op::InputsName[];
+constexpr const char* const Aidge::DepthToSpace_Op::OutputsName[];
 
 Aidge::DepthToSpace_Op::DepthToSpace_Op(const std::uint32_t blockSize, const Aidge::DepthToSpace_Op::Mode mode)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<DepthToSpaceAttr::BlockSize>(blockSize),
         attr<DepthToSpaceAttr::Mode>(mode)))
@@ -34,19 +35,9 @@ Aidge::DepthToSpace_Op::DepthToSpace_Op(const std::uint32_t blockSize, const Aid
 }
 
 Aidge::DepthToSpace_Op::DepthToSpace_Op(const Aidge::DepthToSpace_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(DepthToSpace_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::DepthToSpace_Op::clone() const {
-    return std::make_shared<DepthToSpace_Op>(*this);
-}
+{}
 
 bool Aidge::DepthToSpace_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -68,24 +59,10 @@ bool Aidge::DepthToSpace_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::DepthToSpace_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<DepthToSpace_Op>::exists({name})) {
-        SET_IMPL_MACRO(DepthToSpace_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<DepthToSpace_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::DepthToSpace_Op::getAvailableBackends() const {
-    return Registrar<DepthToSpace_Op>::getKeys();
-}
-
-//////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::DepthToSpace(const std::uint32_t blockSize,
                                     const Aidge::DepthToSpace_Op::Mode mode,
                                     const std::string& name) {
     return std::make_shared<Node>(std::make_shared<DepthToSpace_Op>(blockSize, mode), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 96eea3df966b273445be8a6e9d9a5acf2d6fafb2..81fe0e4bad5da9d3ead52ea241480c2982a55812 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -20,7 +20,9 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::Div_Op::Type = "Div";
+constexpr const char* const Aidge::Div_Op::Type;
+constexpr const char* const Aidge::Div_Op::InputsName[];
+constexpr const char* const Aidge::Div_Op::OutputsName[];
 
 bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -51,18 +53,8 @@ bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-
-void Aidge::Div_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Div_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Div_Op::getAvailableBackends() const {
-    return Registrar<Div_Op>::getKeys();
-}
-
-///////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Div(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Div_Op>(), name);
-}
\ No newline at end of file
+}
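
The empty-looking definitions such as constexpr const char* const Aidge::Div_Op::Type; are the pre-C++17 out-of-line definitions required when a static constexpr data member is odr-used; the values themselves presumably come from in-class initializers in the corresponding headers, which are not part of this hunk. A minimal, self-contained sketch of the rule, with toy names rather than Aidge code:

// Pre-C++17: a static constexpr data member that is odr-used (bound to a
// reference, passed by address, ...) still needs an out-of-line definition,
// which carries no initializer. In C++17 and later such members are
// implicitly inline and these definitions become redundant.
#include <iostream>

struct Op {
    // In-class declarations with initializers (the Aidge headers are assumed
    // to declare Type / InputsName / OutputsName in this way).
    static constexpr const char* const Type = "Div";
    static constexpr const char* const InputsName[] = {"data_input_1", "data_input_2"};
};

// Out-of-line definitions; the values and the array bound are taken from the
// in-class declarations above.
constexpr const char* const Op::Type;
constexpr const char* const Op::InputsName[];

int main() {
    const char* const& ref = Op::Type;              // odr-use of Type
    std::cout << ref << ' ' << Op::InputsName[1] << '\n';
    return 0;
}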
diff --git a/src/operator/Dropout.cpp b/src/operator/Dropout.cpp
index 0063a446ee3741691f5c963935f8e9a3fd116d0f..86eba16a006988415776785b8c0f6482afde7d02 100644
--- a/src/operator/Dropout.cpp
+++ b/src/operator/Dropout.cpp
@@ -21,10 +21,12 @@
 
 namespace Aidge {
 
-const std::string Dropout_Op::Type = "Dropout";
+constexpr const char* const Dropout_Op::Type;
+constexpr const char* const Dropout_Op::InputsName[];
+constexpr const char* const Dropout_Op::OutputsName[];
 
 Dropout_Op::Dropout_Op(float probability)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
       mAttributes(std::make_shared<Attributes_>(
           attr<DropoutAttr::Probability>(probability)))
 {
@@ -32,20 +34,9 @@ Dropout_Op::Dropout_Op(float probability)
 }
 
 Dropout_Op::Dropout_Op(const Dropout_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    // Copy constructor implementation
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Dropout_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Operator> Dropout_Op::clone() const {
-    return std::make_shared<Dropout_Op>(*this);
-}
+{}
 
 bool Dropout_Op::forwardDims(bool allowDataDependency) {
     if (!inputsAssociated())
@@ -73,20 +64,11 @@ bool Dropout_Op::forwardDims(bool allowDataDependency) {
     return true;
 }
 
-void Dropout_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(Dropout_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Dropout_Op::getAvailableBackends() const {
-    return Registrar<Dropout_Op>::getKeys();
-}
-
 void Dropout_Op::checkProbability() const {
     AIDGE_ASSERT(probability() >= 0.0f && probability() < 1.0f, "'Probability' attribute must be set in [0.0, 1.0) interval.");
 }
 
-////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Node> Dropout(float probability,
                               const std::string& name) {
diff --git a/src/operator/Equal.cpp b/src/operator/Equal.cpp
index cc0fcd984062baeac3da47d03a3d64cda63eada3..1e37d9c832bfe599792d244d31e4e5caf9ca06d7 100644
--- a/src/operator/Equal.cpp
+++ b/src/operator/Equal.cpp
@@ -21,7 +21,9 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Equal_Op::Type = "Equal";
+constexpr const char* const Aidge::Equal_Op::Type;
+constexpr const char* const Aidge::Equal_Op::InputsName[];
+constexpr const char* const Aidge::Equal_Op::OutputsName[];
 
 bool Aidge::Equal_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -51,12 +53,3 @@ bool Aidge::Equal_Op::forwardDims(bool /*allowDataDependency*/) {
 
     return false;
 }
-
-void Aidge::Equal_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Equal_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Equal_Op::getAvailableBackends() const {
-    return Registrar<Equal_Op>::getKeys();
-}
diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp
index bd5f76f8aa7c0889311e4f922fec8d20168e24b5..a992536485ace865d67e9c2d7a31c987107c0ad9 100644
--- a/src/operator/Erf.cpp
+++ b/src/operator/Erf.cpp
@@ -17,33 +17,12 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Erf_Op::Type = "Erf";
+constexpr const char* const Aidge::Erf_Op::Type;
+constexpr const char* const Aidge::Erf_Op::InputsName[];
+constexpr const char* const Aidge::Erf_Op::OutputsName[];
 
-Aidge::Erf_Op::Erf_Op(const Aidge::Erf_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Erf_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Erf_Op::clone() const {
-    return std::make_shared<Erf_Op>(*this);
-}
-
-void Aidge::Erf_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Erf_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Erf_Op::getAvailableBackends() const {
-    return Registrar<Erf_Op>::getKeys();
-}
-
-/////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Erf(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Erf_Op>(), name);
-}
\ No newline at end of file
+}
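
Every file touched here drops the same four pieces of boilerplate: the impl-copying constructor, clone(), setBackend() and getAvailableBackends(). They are presumably provided once by the OperatorTensorWithImpl<Derived> base the operators now derive from; that header is not shown in this diff, so the following is only a simplified, self-contained sketch of the CRTP pattern, with toy stand-ins for OperatorImpl and Registrar rather than the real Aidge classes.

// Illustrative only: one CRTP base supplying clone(), setBackend() and
// getAvailableBackends() for every derived operator. Names other than the
// removed member functions are invented for this sketch.
#include <functional>
#include <map>
#include <memory>
#include <set>
#include <string>

struct OperatorImpl { virtual ~OperatorImpl() = default; };

template <class Op>
struct Registrar {                                  // toy stand-in
    using Creator = std::function<std::shared_ptr<OperatorImpl>(const Op&)>;
    static std::map<std::string, Creator>& map() {
        static std::map<std::string, Creator> m;
        return m;
    }
    static std::set<std::string> getKeys() {
        std::set<std::string> keys;
        for (const auto& kv : map()) keys.insert(kv.first);
        return keys;
    }
};

template <class Derived>
class OperatorTensorWithImpl {                      // sketch, not the real base
public:
    std::shared_ptr<Derived> clone() const {
        return std::make_shared<Derived>(static_cast<const Derived&>(*this));
    }
    void setBackend(const std::string& name) {
        mImpl = Registrar<Derived>::map().at(name)(static_cast<const Derived&>(*this));
    }
    std::set<std::string> getAvailableBackends() const {
        return Registrar<Derived>::getKeys();
    }
protected:
    std::shared_ptr<OperatorImpl> mImpl;
};

struct Erf_Op : OperatorTensorWithImpl<Erf_Op> {
    static constexpr const char* const Type = "Erf";
};

int main() {
    Erf_Op op;
    auto copy = op.clone();                         // no per-operator clone() needed
    return (copy && op.getAvailableBackends().empty()) ? 0 : 1;
}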
diff --git a/src/operator/Expand.cpp b/src/operator/Expand.cpp
index 969dd6e59b1d6b4399a991b498cb4ae046aa7754..ad7ef3016a6cec299e3c066bc8bc7f5f7cecc67d 100644
--- a/src/operator/Expand.cpp
+++ b/src/operator/Expand.cpp
@@ -21,19 +21,9 @@
 
 namespace Aidge {
 
-const std::string Expand_Op::Type = "Expand";
-
-Expand_Op::Expand_Op(const Expand_Op &op) : OperatorTensor(op) {
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Expand_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Expand_Op::clone() const {
-    return std::make_shared<Expand_Op>(*this);
-}
+constexpr const char* const Expand_Op::Type;
+constexpr const char* const Expand_Op::InputsName[];
+constexpr const char* const Expand_Op::OutputsName[];
 
 bool Expand_Op::forwardDims(bool allowDataDependency) {
     /////////////////
@@ -92,15 +82,7 @@ bool Expand_Op::forwardDims(bool allowDataDependency) {
     return true;
 }
 
-void Expand_Op::setBackend(const std::string &name,
-                           Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Expand_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Expand_Op::getAvailableBackends() const {
-    return Registrar<Expand_Op>::getKeys();
-}
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Expand(const std::string &name) {
     return std::make_shared<Node>(std::make_shared<Expand_Op>(), name);
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 07208b5221326eaf1c0cfd8829c97dc4543c659b..4639fd69a05ac931a3fa89267b092037ba50cad7 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -21,11 +21,9 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::FC_Op::Type = "FC";
-
-std::shared_ptr<Aidge::Operator> Aidge::FC_Op::clone() const {
-    return std::make_shared<FC_Op>(*this);
-}
+constexpr const char* const Aidge::FC_Op::Type;
+constexpr const char* const Aidge::FC_Op::InputsName[];
+constexpr const char* const Aidge::FC_Op::OutputsName[];
 
 void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(inputIdx < 3, "Operators {} supports only {} inputs", type(), nbInputs());
@@ -86,27 +84,7 @@ bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(FC_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-
-    // By default, automatically set backend for weight and bias inputs
-    if (getInput(1)) {
-        getInput(1)->setBackend(name, device);
-    }
-    else {
-        Log::notice("FC_Op::setBackend(): could not set backend for weight input, because input is not connected");
-    }
-
-    if (getInput(2)) {
-        // Bias is optional
-        getInput(2)->setBackend(name, device);
-    }
-}
-
-std::set<std::string> Aidge::FC_Op::getAvailableBackends() const {
-    return Registrar<FC_Op>::getKeys();
-}
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::FC(const Aidge::DimSize_t inChannels,
                                        const Aidge::DimSize_t outChannels,
diff --git a/src/operator/Flatten.cpp b/src/operator/Flatten.cpp
index 4d4b6385bfc766283045042e82f66e3f450654e4..f0107e857c887bc21209b9a129de2622f7d01e93 100644
--- a/src/operator/Flatten.cpp
+++ b/src/operator/Flatten.cpp
@@ -23,32 +23,20 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::Flatten_Op::Type = "Flatten";
+constexpr const char* const Aidge::Flatten_Op::Type;
+constexpr const char* const Aidge::Flatten_Op::InputsName[];
+constexpr const char* const Aidge::Flatten_Op::OutputsName[];
 
 Aidge::Flatten_Op::Flatten_Op(const std::int64_t axis)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<FlattenAttr::Axis>(axis)))
-{
-    mImpl = std::make_shared<Flatten_OpImpl>(*this);
-}
+{}
 
 Aidge::Flatten_Op::Flatten_Op(const Aidge::Flatten_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Flatten_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Flatten_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Flatten_Op::clone() const {
-    return std::make_shared<Flatten_Op>(*this);
-}
+{}
 
 bool Aidge::Flatten_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -62,24 +50,10 @@ bool Aidge::Flatten_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::Flatten_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Flatten_Op>::exists({name})){
-        SET_IMPL_MACRO(Flatten_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Flatten_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Flatten_Op::getAvailableBackends() const {
-    return Registrar<Flatten_Op>::getKeys();
-}
-
-//////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Flatten(std::int64_t axis,
                             const std::string &name)
 {
     return std::make_shared<Node>(std::make_shared<Flatten_Op>(axis), name);
-}
\ No newline at end of file
+}
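
Flatten is one of the operators in this patch (with Gather, Pop, DepthToSpace, Memorize and Identity) whose setBackend() used to fall back to a backend-agnostic implementation, Flatten_OpImpl, when no backend was registered under the requested name. That fallback no longer appears here, so it is presumably selected through the shared base or a registered generic implementation; the hunk does not show which. Below is a small, self-contained sketch of the register-or-fallback behaviour the removed code implemented, with invented names.

// Illustration of the old Flatten_Op::setBackend() behaviour: use the
// registered creator when one exists for the backend name, otherwise fall
// back to a generic, backend-agnostic implementation.
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Impl { virtual ~Impl() = default; virtual const char* name() const = 0; };
struct GenericFlattenImpl : Impl { const char* name() const override { return "generic"; } };
struct CudaFlattenImpl    : Impl { const char* name() const override { return "cuda"; } };

using Creator = std::function<std::shared_ptr<Impl>()>;

std::shared_ptr<Impl> makeImpl(const std::map<std::string, Creator>& registry,
                               const std::string& backend) {
    auto it = registry.find(backend);
    if (it != registry.end())
        return it->second();                           // registered backend
    return std::make_shared<GenericFlattenImpl>();     // backend-agnostic fallback
}

int main() {
    std::map<std::string, Creator> registry{
        {"cuda", [] { return std::make_shared<CudaFlattenImpl>(); }}};
    std::cout << makeImpl(registry, "cuda")->name() << '\n';   // prints: cuda
    std::cout << makeImpl(registry, "cpu")->name() << '\n';    // prints: generic
    return 0;
}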
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index 50a474cd37d5e28edd5512d0725337ea6923e229..dce0915b81643377b9d33e2f38f84d3ab14d1c6a 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -23,26 +23,15 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Fold_Op<DIM>::Type = "Fold" + std::to_string(DIM) + "D";
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Fold_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Fold_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Fold_Op<DIM>::OutputsName[];
 
 template <Aidge::DimIdx_t DIM>
 Aidge::Fold_Op<DIM>::Fold_Op(const Aidge::Fold_Op<DIM> &op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl<Fold_Op<DIM>>(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
-    }
-    else {
-        mImpl = nullptr;
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::Fold_Op<DIM>::clone() const {
-    return std::make_shared<Fold_Op<DIM>>(*this);
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Fold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -76,20 +65,9 @@ bool Aidge::Fold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::Fold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Fold_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::Fold_Op<DIM>::getAvailableBackends() const {
-    return Registrar<Fold_Op<DIM>>::getKeys();
-}
-
 template class Aidge::Fold_Op<2>;
 
-///////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::Fold(const std::array<Aidge::DimSize_t, DIM> &outputDims,
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 410403adcf1794411a1f4a7de43b67601abd74fa..e2ad84f2d70155a02aa97fed637adf1fdecd9c72 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -21,35 +21,24 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Gather_Op::Type = "Gather";
-
+constexpr const char* const Aidge::Gather_Op::Type;
+constexpr const char* const Aidge::Gather_Op::InputsName[];
+constexpr const char* const Aidge::Gather_Op::OutputsName[];
 
 Aidge::Gather_Op::Gather_Op(std::int8_t axis,
               const std::vector<int64_t>& indices,
               const std::vector<Aidge::DimSize_t>& gatheredShape)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
     mAttributes(std::make_shared<Attributes_>(
         attr<Attr::Axis>(axis),
         attr<Attr::Indices>(indices),
         attr<Attr::GatheredShape>(gatheredShape)))
-{
-    mImpl = std::make_shared<Gather_OpImpl>(*this);
-}
+{}
 
 Aidge::Gather_Op::Gather_Op(const Aidge::Gather_Op& op)
-    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Gather_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Gather_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Gather_Op::clone() const {
-    return std::make_shared<Gather_Op>(*this);
-}
+    : OperatorTensorWithImpl(op),
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+{}
 
 bool Aidge::Gather_Op::dimsForwarded() const {
     if (getInput(1) && !getInput(1)->undefined()) {
@@ -105,21 +94,7 @@ bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
     return false;
 }
 
-void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Gather_Op>::exists({name})) {
-        SET_IMPL_MACRO(Gather_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Gather_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Gather_Op::getAvailableBackends() const {
-    return Registrar<Gather_Op>::getKeys();
-}
-
-/////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Gather(std::int8_t axis,
                                         const std::vector<int64_t>& indices,
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index e0f7cf34a91268c33395dfc94d20c25b4cb0e3d1..867fc57758fd079af9c7c4162e99115bcc17fd4a 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -103,7 +103,7 @@ void Aidge::GenericOperator_Op::setBackend(const std::string & name, DeviceIdx_t
     }
 }
 
-///////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
                                             const std::vector<Aidge::InputCategory>& inputCategory,
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index 57886ec2faec86bc5d3a515ed685fdcfd0e15e4e..feaae1ea392139898370e16ab6df64ffe1756172 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -19,21 +19,9 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling";
-
-Aidge::GlobalAveragePooling_Op::GlobalAveragePooling_Op(const Aidge::GlobalAveragePooling_Op &op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::GlobalAveragePooling_Op::clone() const {
-    return std::make_shared<GlobalAveragePooling_Op>(*this);
-}
+constexpr const char* const Aidge::GlobalAveragePooling_Op::Type;
+constexpr const char* const Aidge::GlobalAveragePooling_Op::InputsName[];
+constexpr const char* const Aidge::GlobalAveragePooling_Op::OutputsName[];
 
 bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -54,16 +42,7 @@ bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::GlobalAveragePooling_Op::getAvailableBackends() const {
-    return Registrar<GlobalAveragePooling_Op>::getKeys();
-}
-
-////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::GlobalAveragePooling(const std::string &name) {
   return std::make_shared<Node>(std::make_shared<GlobalAveragePooling_Op>(), name);
diff --git a/src/operator/GridSample.cpp b/src/operator/GridSample.cpp
index e1b8d1442848a4a2a0f1807ad45280a7db70b068..c3e3bc4054717baec05bc38cca8bbb09448dbe4d 100644
--- a/src/operator/GridSample.cpp
+++ b/src/operator/GridSample.cpp
@@ -21,15 +21,15 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::GridSample_Op::Type = "GridSample";
-
+constexpr const char* const Aidge::GridSample_Op::Type;
+constexpr const char* const Aidge::GridSample_Op::InputsName[];
+constexpr const char* const Aidge::GridSample_Op::OutputsName[];
 
 Aidge::GridSample_Op::GridSample_Op(
     typename Aidge::GridSample_Op::Mode mode,
     typename Aidge::GridSample_Op::PaddingMode paddingMode,
     bool alignCorners)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Param}, 1),
       mAttributes(std::make_shared<Attributes_>(
         attr<GridSampleAttr::Mode>(mode),
         attr<GridSampleAttr::PaddingMode>(paddingMode),
@@ -40,25 +40,12 @@ Aidge::GridSample_Op::GridSample_Op(
 
 
 Aidge::GridSample_Op::GridSample_Op(const Aidge::GridSample_Op& other)
-    : OperatorTensor(other),
+    : OperatorTensorWithImpl(other),
       mAttributes(std::make_shared<Attributes_>(*other.mAttributes))
-{
-    if (other.mImpl) {
-        SET_IMPL_MACRO(GridSample_Op, *this, other.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
+{}
 
 Aidge::GridSample_Op::~GridSample_Op() noexcept = default;
 
-
-std::shared_ptr<Aidge::Operator> Aidge::GridSample_Op::clone() const {
-    return std::make_shared<GridSample_Op>(*this);
-}
-
-
 bool Aidge::GridSample_Op::forwardDims(bool /*allowDataDependency*/) {
     // TODO: adapt for other formats than NCHW
     if (inputsAssociated()) {
@@ -88,20 +75,7 @@ bool Aidge::GridSample_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-
-
-void Aidge::GridSample_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(GridSample_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::GridSample_Op::getAvailableBackends() const {
-    return Registrar<GridSample_Op>::getKeys();
-}
-
-
-////////////////////////////////////////////////
-
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::GridSample(
                         typename Aidge::GridSample_Op::Mode mode,
diff --git a/src/operator/Heaviside.cpp b/src/operator/Heaviside.cpp
index 3c6fe5495653e0db5876e957b4be0bd378971819..0c68de6fcd39f44efca4cdbc1ef13c39fdc3ca32 100644
--- a/src/operator/Heaviside.cpp
+++ b/src/operator/Heaviside.cpp
@@ -25,34 +25,19 @@ namespace Aidge {
 // ----------------------------------------------------------- Heaviside_Op
 // class
 
-const std::string Heaviside_Op::Type = "Heaviside";
+constexpr const char* const Heaviside_Op::Type;
+constexpr const char* const Heaviside_Op::InputsName[];
+constexpr const char* const Heaviside_Op::OutputsName[];
 
 Heaviside_Op::Heaviside_Op(float value)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
       mAttributes(
           std::make_shared<Attributes_>(attr<Attr::Value>(value))) {}
 
 Heaviside_Op::Heaviside_Op(const Heaviside_Op &op)
-    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Heaviside_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Heaviside_Op::clone() const {
-    return std::make_shared<Heaviside_Op>(*this);
-}
-
-void Heaviside_Op::setBackend(const std::string &name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(Heaviside_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Heaviside_Op::getAvailableBackends() const {
-    return Registrar<Heaviside_Op>::getKeys();
-}
+    : OperatorTensorWithImpl(op),
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+{}
 
 // --------------------------------------------------------------- Free
 // functions
diff --git a/src/operator/ILayerNorm.cpp b/src/operator/ILayerNorm.cpp
index daa7ecf86b7ea9a9b10b962d356581f926e92eed..788d697337f04e4373fa6600392ab2c60482efd2 100644
--- a/src/operator/ILayerNorm.cpp
+++ b/src/operator/ILayerNorm.cpp
@@ -19,7 +19,9 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ILayerNorm_Op::Type = "ILayerNorm";
+constexpr const char* const Aidge::ILayerNorm_Op::Type;
+constexpr const char* const Aidge::ILayerNorm_Op::InputsName[];
+constexpr const char* const Aidge::ILayerNorm_Op::OutputsName[];
 
 void Aidge::ILayerNorm_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(inputIdx < 3, "Operators {} supports only {} inputs", type(), nbInputs());
@@ -42,15 +44,3 @@ bool Aidge::ILayerNorm_Op::forwardDims(bool /*allowDataDependency*/) {
     }
     return false;
 }
-
-
-void Aidge::ILayerNorm_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(ILayerNorm_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-    getInput(1)->setBackend(name, device);
-    getInput(2)->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ILayerNorm_Op::getAvailableBackends() const {
-    return Registrar<ILayerNorm_Op>::getKeys();
-}
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index 25bb5a5b63d76fba4effa8d8532a029fbc2d8cbc..32248718460a8cbd816cb8c3932df5455a53623e 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -15,32 +15,16 @@
 
 #include "aidge/backend/generic/operator/IdentityImpl.hpp"
 
-
-const std::string Aidge::Identity_Op::Type = "Identity";
+constexpr const char* const Aidge::Identity_Op::Type;
+constexpr const char* const Aidge::Identity_Op::InputsName[];
+constexpr const char* const Aidge::Identity_Op::OutputsName[];
 
 Aidge::Identity_Op::Identity_Op()
-    : OperatorTensor(Type, {InputCategory::Data}, 1)
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1)
 {
-    mImpl = std::make_shared<Identity_OpImpl>(*this);
 }
 
-Aidge::Identity_Op::Identity_Op(const Aidge::Identity_Op& op)
-    : OperatorTensor(op)
-{
-    mImpl = std::make_shared<Identity_OpImpl>(*this, op.backend());
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Identity_Op::clone() const {
-    return std::make_shared<Identity_Op>(*this);
-}
-
-void Aidge::Identity_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Identity_Op::getAvailableBackends() const {
-    return Registrar<Identity_Op>::getKeys();
-}
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Identity(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Identity_Op>(), name);
diff --git a/src/operator/LRN.cpp b/src/operator/LRN.cpp
index 36dde6712c9009d54162753e1076321f9a16688b..09f7f3518dc8d34785859dda1c184be38c9989f4 100644
--- a/src/operator/LRN.cpp
+++ b/src/operator/LRN.cpp
@@ -18,10 +18,12 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::LRN_Op::Type = "LRN";
+constexpr const char* const Aidge::LRN_Op::Type;
+constexpr const char* const Aidge::LRN_Op::InputsName[];
+constexpr const char* const Aidge::LRN_Op::OutputsName[];
 
 Aidge::LRN_Op::LRN_Op(std::int32_t size)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
         attr<Attr::Alpha>(0.0001),
         attr<Attr::Beta>(0.75),
@@ -30,31 +32,12 @@ Aidge::LRN_Op::LRN_Op(std::int32_t size)
 {}
 
 Aidge::LRN_Op::LRN_Op(const Aidge::LRN_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(LRN_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::LRN_Op::clone() const {
-    return std::make_shared<LRN_Op>(*this);
-}
-
-void Aidge::LRN_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<LRN_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::LRN_Op::getAvailableBackends() const {
-    return Registrar<LRN_Op>::getKeys();
-}
+{}
 
-////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::LRN(std::int32_t size, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<LRN_Op>(size), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/LeakyReLU.cpp b/src/operator/LeakyReLU.cpp
index b5e1a9d6acaa6208617857501b770c7c1dcf9f55..8a943de1042929980bf3812ab3ad0b97e1343e9c 100644
--- a/src/operator/LeakyReLU.cpp
+++ b/src/operator/LeakyReLU.cpp
@@ -16,33 +16,17 @@
 
 #include "aidge/data/Tensor.hpp"
 
-const std::string Aidge::LeakyReLU_Op::Type = "LeakyReLU";
+constexpr const char* const Aidge::LeakyReLU_Op::Type;
+constexpr const char* const Aidge::LeakyReLU_Op::InputsName[];
+constexpr const char* const Aidge::LeakyReLU_Op::OutputsName[];
 
 Aidge::LeakyReLU_Op::LeakyReLU_Op(const Aidge::LeakyReLU_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
-    if (op.mImpl){
-        SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::LeakyReLU_Op::clone() const {
-    return std::make_shared<LeakyReLU_Op>(*this);
-}
-
-void Aidge::LeakyReLU_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::LeakyReLU_Op::getAvailableBackends() const {
-    return Registrar<LeakyReLU_Op>::getKeys();
-}
-
-/////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::LeakyReLU(float negativeSlope, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
diff --git a/src/operator/Ln.cpp b/src/operator/Ln.cpp
index 90ae8d8c7dac464665828248c923a1f278dad79b..dac01db0b8b3d9d1110bd56869a8e637ed6442a7 100755
--- a/src/operator/Ln.cpp
+++ b/src/operator/Ln.cpp
@@ -17,33 +17,12 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Ln_Op::Type = "Ln";
+constexpr const char* const Aidge::Ln_Op::Type;
+constexpr const char* const Aidge::Ln_Op::InputsName[];
+constexpr const char* const Aidge::Ln_Op::OutputsName[];
 
-Aidge::Ln_Op::Ln_Op(const Aidge::Ln_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Ln_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Ln_Op::clone() const {
-    return std::make_shared<Ln_Op>(*this);
-}
-
-void Aidge::Ln_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    mImpl = Registrar<Ln_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Ln_Op::getAvailableBackends() const {
-    return Registrar<Ln_Op>::getKeys();
-}
-
-/////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Ln(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Ln_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 8fd2aa068c91dfebd6d1a3a47900c3aa9b0f9585..963c23e3fc5272185e36f9bf9c64b0c720979a1e 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -18,21 +18,9 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::MatMul_Op::Type = "MatMul";
-
-Aidge::MatMul_Op::MatMul_Op(const Aidge::MatMul_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(MatMul_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::MatMul_Op::clone() const {
-    return std::make_shared<MatMul_Op>(*this);
-}
+constexpr const char* const Aidge::MatMul_Op::Type;
+constexpr const char* const Aidge::MatMul_Op::InputsName[];
+constexpr const char* const Aidge::MatMul_Op::OutputsName[];
 
 bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated(false)) {
@@ -92,17 +80,8 @@ bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(MatMul_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::MatMul_Op::getAvailableBackends() const {
-    return Registrar<MatMul_Op>::getKeys();
-}
-
-////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::MatMul(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
index 2ee3613407d0d9c334f0947adec36734d328a8f5..f04a706d9e2c629e93f3fe0369991b1489372eda 100644
--- a/src/operator/MaxPooling.cpp
+++ b/src/operator/MaxPooling.cpp
@@ -19,15 +19,16 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling" + std::to_string(DIM) + "D";
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::MaxPooling_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::MaxPooling_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::MaxPooling_Op<DIM>::OutputsName[];
 
 template <Aidge::DimIdx_t DIM>
 Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
                             const std::array<Aidge::DimSize_t, DIM> &stride_dims,
                             const std::array<Aidge::DimSize_t, DIM> &dilations,
                             bool ceil_mode)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl<MaxPooling_Op<DIM>>(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
     attr<MaxPoolingAttr::KernelDims>(kernel_dims),
     attr<MaxPoolingAttr::StrideDims>(stride_dims),
@@ -37,20 +38,9 @@ Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM>
 
 template <Aidge::DimIdx_t DIM>
 Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const Aidge::MaxPooling_Op<DIM>& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl<MaxPooling_Op<DIM>>(op),
     mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::MaxPooling_Op<DIM>::clone() const {
-    return std::make_shared<MaxPooling_Op<DIM>>(*this);
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::MaxPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -83,22 +73,11 @@ bool Aidge::MaxPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::MaxPooling_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::MaxPooling_Op<DIM>::getAvailableBackends() const {
-    return Registrar<MaxPooling_Op<DIM>>::getKeys();
-}
-
 template class Aidge::MaxPooling_Op<1>;
 template class Aidge::MaxPooling_Op<2>;
 template class Aidge::MaxPooling_Op<3>;
 
-///////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::MaxPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
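
For the DIM-templated operators (MaxPooling above; Pad, Fold and ConvTranspose elsewhere in the patch) the type string previously built at runtime, "MaxPooling" + std::to_string(DIM) + "D", is replaced by a constexpr Type plus templated out-of-line definitions next to the existing explicit instantiations. How the header now produces the per-dimension string is not visible in this hunk; the sketch below fakes it with a ternary over string literals only to stay self-contained.

// Toy code, not the Aidge header: a DIM-templated operator with a constexpr
// Type, its templated out-of-line definition, and explicit instantiations
// mirroring the ones kept in the .cpp above.
#include <cstdint>
#include <iostream>

template <std::uint8_t DIM>
struct MaxPooling_Op {
    // Stand-in for whatever compile-time construction the real header uses.
    static constexpr const char* const Type =
        (DIM == 1) ? "MaxPooling1D" : (DIM == 2) ? "MaxPooling2D" : "MaxPooling3D";
};

// Templated out-of-line definition, needed while the code must still build as
// C++14 and Type is odr-used.
template <std::uint8_t DIM>
constexpr const char* const MaxPooling_Op<DIM>::Type;

// Explicit instantiations for the dimensions that are actually shipped.
template struct MaxPooling_Op<1>;
template struct MaxPooling_Op<2>;
template struct MaxPooling_Op<3>;

int main() {
    std::cout << MaxPooling_Op<2>::Type << '\n';    // prints: MaxPooling2D
    return 0;
}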
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index c3ccc12ac090f0e813f521abac693233115370b3..b3828a5a74a09055fb6a96ccb3a5d1f0459105c0 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -20,11 +20,12 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::Memorize_Op::Type = "Memorize";
+constexpr const char* const Aidge::Memorize_Op::Type;
+constexpr const char* const Aidge::Memorize_Op::InputsName[];
+constexpr const char* const Aidge::Memorize_Op::OutputsName[];
 
 Aidge::Memorize_Op::Memorize_Op(const std::uint32_t endStep)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 2),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 2),
         mAttributes(std::make_shared<Attributes_>(
                     attr<Attr::ScheduleStep>(0),
                     attr<Attr::ForwardStep>(0),
@@ -36,22 +37,12 @@ Aidge::Memorize_Op::Memorize_Op(const std::uint32_t endStep)
 }
 
 Aidge::Memorize_Op::Memorize_Op(const Aidge::Memorize_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
-    } else {
-        mImpl = std::make_shared<Memorize_OpImpl>(*this);
-    }
     mOutputs[1] = mOutputs[0];
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Memorize_Op::clone() const {
-    return std::make_shared<Memorize_Op>(*this);
-}
-
-
 void Aidge::Memorize_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
     ++scheduleStep();
@@ -87,16 +78,6 @@ bool Aidge::Memorize_Op::dimsForwarded() const {
     return forwarded;
 }
 
-void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Memorize_Op>::exists({name})){
-        SET_IMPL_MACRO(Memorize_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Memorize_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
 void Aidge::Memorize_Op::forward() {
     OperatorTensor::forward();
     ++forwardStep();
@@ -107,11 +88,7 @@ void Aidge::Memorize_Op::backward() {
     OperatorTensor::backward();
 }
 
-std::set<std::string> Aidge::Memorize_Op::getAvailableBackends() const {
-    return Registrar<Memorize_Op>::getKeys();
-}
-
-/////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Memorize(const std::uint32_t endStep, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
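
Memorize keeps its scheduling-specific overrides (updateConsummerProducer, forward, backward) and, in the copy constructor above, still aliases its second output onto the first with mOutputs[1] = mOutputs[0]. A tiny standalone illustration of what that aliasing means, using a plain shared_ptr in place of an Aidge tensor:

// After the assignment both output slots refer to the same object, so a write
// through either slot is visible through the other; this is shared ownership,
// not a copy.
#include <cassert>
#include <memory>
#include <vector>

int main() {
    std::vector<std::shared_ptr<int>> mOutputs(2);
    mOutputs[0] = std::make_shared<int>(0);
    mOutputs[1] = mOutputs[0];          // alias, as in the Memorize copy constructor
    *mOutputs[1] = 42;
    assert(*mOutputs[0] == 42);         // visible through the first slot too
    return 0;
}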
diff --git a/src/operator/Mod.cpp b/src/operator/Mod.cpp
index 673c00225da906dbb1b1bda5b57e52482ec86d31..76d21a1569d78a2c4fea670aceb89588c412d673 100644
--- a/src/operator/Mod.cpp
+++ b/src/operator/Mod.cpp
@@ -20,28 +20,20 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::Mod_Op::Type = "Mod";
+constexpr const char* const Aidge::Mod_Op::Type;
+constexpr const char* const Aidge::Mod_Op::InputsName[];
+constexpr const char* const Aidge::Mod_Op::OutputsName[];
 
 Aidge::Mod_Op::Mod_Op()
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
         attr<ModAttr::Fmod>(false)))
 {}
 
 Aidge::Mod_Op::Mod_Op(const Aidge::Mod_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Mod_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Mod_Op::clone() const {
-    return std::make_shared<Mod_Op>(*this);
-}
+{}
 
 bool Aidge::Mod_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -72,17 +64,7 @@ bool Aidge::Mod_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-
-void Aidge::Mod_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Mod_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Mod_Op::getAvailableBackends() const {
-    return Registrar<Mod_Op>::getKeys();
-}
-
-///////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Mod(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Mod_Op>(), name);
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index a637f8331190b07e317706197c6a199cdc491e9a..230894a72859ef832605563b8f0e197c2c05e5b1 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -16,7 +16,9 @@
 #include "aidge/backend/generic/operator/MoveImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 
-const std::string Aidge::Move_Op::Type = "Move";
+constexpr const char* const Aidge::Move_Op::Type;
+constexpr const char* const Aidge::Move_Op::InputsName[];
+constexpr const char* const Aidge::Move_Op::OutputsName[];
 
 Aidge::Move_Op::Move_Op()
     : OperatorTensor(Type, {InputCategory::Data}, 1)
@@ -58,7 +60,7 @@ std::set<std::string> Aidge::Move_Op::getAvailableBackends() const {
     return backendsList;
 }
 
-////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Move(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Move_Op>(), name);
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index 3f163c9d6a572cc488c621a0ec6819ea68143304..1e8e9d2c03e6576e5fa19579aaba11cb3e4c62a0 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -21,21 +21,9 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Mul_Op::Type = "Mul";
-
-Aidge::Mul_Op::Mul_Op(const Aidge::Mul_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(Mul_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Mul_Op::clone() const {
-    return std::make_shared<Mul_Op>(*this);
-}
+constexpr const char* const Aidge::Mul_Op::Type;
+constexpr const char* const Aidge::Mul_Op::InputsName[];
+constexpr const char* const Aidge::Mul_Op::OutputsName[];
 
 bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -66,17 +54,8 @@ bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Mul_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Mul_Op::getAvailableBackends() const {
-    return Registrar<Mul_Op>::getKeys();
-}
-
-///////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Mul(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
index d2cbe039a9f3193b3d7f9730d7d4f9815328c34d..6dafdd887ada056d29068f29b78780019cd7e550 100644
--- a/src/operator/Pad.cpp
+++ b/src/operator/Pad.cpp
@@ -19,13 +19,9 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Pad_Op<DIM>::Type = "Pad" + std::to_string(DIM) + "D";
-
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::Pad_Op<DIM>::clone() const {
-    return std::make_shared<Pad_Op<DIM>>(*this);
-}
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Pad_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Pad_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Pad_Op<DIM>::OutputsName[];
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Pad_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -71,17 +67,6 @@ bool Aidge::Pad_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     return true;
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::Pad_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::Pad_Op<DIM>::getAvailableBackends() const {
-    return Registrar<Pad_Op<DIM>>::getKeys();
-}
-
 template class Aidge::Pad_Op<1>;
 template class Aidge::Pad_Op<2>;
 template class Aidge::Pad_Op<3>;
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 01b45e6d3dd2f62d64f6b048a181bbf6ed4b65f9..33271fbb96e37355e60ee40399c821f9be1a193e 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -22,31 +22,19 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::Pop_Op::Type = "Pop";
+constexpr const char* const Aidge::Pop_Op::Type;
+constexpr const char* const Aidge::Pop_Op::InputsName[];
+constexpr const char* const Aidge::Pop_Op::OutputsName[];
 
 Aidge::Pop_Op::Pop_Op()
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0), attr<PopAttr::BackwardStep>(0)))
-{
-    mImpl = std::make_shared<Pop_OpImpl>(*this);
-}
+{}
 
 Aidge::Pop_Op::Pop_Op(const Aidge::Pop_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
     mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Pop_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Pop_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Pop_Op::clone() const {
-    return std::make_shared<Pop_Op>(*this);
-}
+{}
 
 bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -65,20 +53,6 @@ void Aidge::Pop_Op::updateConsummerProducer() {
     mAttributes->template getAttr<PopAttr::BackwardStep>() = 0;
 }
 
-void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Pop_Op>::exists({name})){
-        SET_IMPL_MACRO(Pop_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Pop_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Pop_Op::getAvailableBackends() const {
-    return Registrar<Pop_Op>::getKeys();
-}
-
 void Aidge::Pop_Op::forward() {
     OperatorTensor::forward();
     ++mAttributes->template getAttr<PopAttr::ForwardStep>();
@@ -90,7 +64,7 @@ void Aidge::Pop_Op::backward() {
     --mAttributes->template getAttr<PopAttr::BackwardStep>();
 }
 
-///////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Pop(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Pop_Op>(), name);
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index ada71d6cc56c6d88ff64bf720595b220b296801d..17410d33a558932d6361ae524f1cec265f0cd882 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -20,7 +20,9 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::Pow_Op::Type = "Pow";
+constexpr const char* const Aidge::Pow_Op::Type;
+constexpr const char* const Aidge::Pow_Op::InputsName[];
+constexpr const char* const Aidge::Pow_Op::OutputsName[];
 
 bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -51,17 +53,8 @@ bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Pow_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Pow_Op::getAvailableBackends() const {
-    return Registrar<Pow_Op>::getKeys();
-}
-
-////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Pow(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 505192661eb4c519feaec4b79d8a54dd523f07c3..db2d6a3ba6a0c2ec3c92057ecaabed8429c0899f 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -23,8 +23,8 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::Producer_Op::Type = "Producer";
+constexpr const char* const Aidge::Producer_Op::Type;
+constexpr const char* const Aidge::Producer_Op::OutputsName[];
 
 template <std::size_t DIM>
 Aidge::Producer_Op::Producer_Op(
@@ -106,7 +106,7 @@ void Aidge::Producer_Op::setOutput(const Aidge::IOIndex_t outputIdx, const std::
     OperatorTensor::setOutput(outputIdx, data);
 }
 
-/////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::Producer(const std::array<Aidge::DimSize_t, DIM> &dims,
diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp
index bda26fa3332ee914325820f47d0babcb622905c8..ed2f8e48dd6fc1dc75b24585ccdc2e95dd9b0f22 100644
--- a/src/operator/ReLU.cpp
+++ b/src/operator/ReLU.cpp
@@ -17,32 +17,11 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ReLU_Op::Type = "ReLU";
+constexpr const char* const Aidge::ReLU_Op::Type;
+constexpr const char* const Aidge::ReLU_Op::InputsName[];
+constexpr const char* const Aidge::ReLU_Op::OutputsName[];
 
-Aidge::ReLU_Op::ReLU_Op(const Aidge::ReLU_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(ReLU_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::ReLU_Op::clone() const {
-    return std::make_shared<ReLU_Op>(*this);
-}
-
-void Aidge::ReLU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(ReLU_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ReLU_Op::getAvailableBackends() const {
-    return Registrar<ReLU_Op>::getKeys();
-}
-
-/////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ReLU(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index ec6e68fbee2a88233d604f53aa633c5f789516d3..7188027cb86827ab5f8a89e68ab3d53186f30cb3 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -25,10 +25,12 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
+constexpr const char* const Aidge::ReduceMean_Op::Type;
+constexpr const char* const Aidge::ReduceMean_Op::InputsName[];
+constexpr const char* const Aidge::ReduceMean_Op::OutputsName[];
 
 Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<Attr::Axes>(axes),
         attr<Attr::KeepDims>(keep_dims),
@@ -36,19 +38,9 @@ Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, bool
 {}
 
 Aidge::ReduceMean_Op::ReduceMean_Op(const Aidge::ReduceMean_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::ReduceMean_Op::clone() const {
-    return std::make_shared<ReduceMean_Op>(*this);
-}
+{}
 
 bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -89,18 +81,9 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(ReduceMean_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ReduceMean_Op::getAvailableBackends() const {
-    return Registrar<ReduceMean_Op>::getKeys();
-}
-
 Aidge::ReduceMean_Op::~ReduceMean_Op() noexcept = default;
 
-////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ReduceMean(const std::vector<std::int32_t> &axes,
                                         bool keep_dims,
@@ -108,4 +91,4 @@ std::shared_ptr<Aidge::Node> Aidge::ReduceMean(const std::vector<std::int32_t> &
                                         const std::string& name) {
     AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
     return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims, noop_with_empty_axes), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/ReduceSum.cpp b/src/operator/ReduceSum.cpp
index 73b6722e15ebc7a32cbb502e83d5779558c1cac7..45330ebf37f0f985ae25f8487c66645af14540de 100644
--- a/src/operator/ReduceSum.cpp
+++ b/src/operator/ReduceSum.cpp
@@ -25,7 +25,9 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ReduceSum_Op::Type = "ReduceSum";
+constexpr const char* const Aidge::ReduceSum_Op::Type;
+constexpr const char* const Aidge::ReduceSum_Op::InputsName[];
+constexpr const char* const Aidge::ReduceSum_Op::OutputsName[];
 
 bool Aidge::ReduceSum_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -65,12 +67,3 @@ bool Aidge::ReduceSum_Op::forwardDims(bool /*allowDataDependency*/) {
     }
     return false;
 }
-
-void Aidge::ReduceSum_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(ReduceSum_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ReduceSum_Op::getAvailableBackends() const {
-    return Registrar<ReduceSum_Op>::getKeys();
-}
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 50768a0980ef092568041b65e28679c6aa18ab35..6f3686fa363439eb2f442143cc88a19460a93e07 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -23,33 +23,22 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-
-const std::string Aidge::Reshape_Op::Type = "Reshape";
+constexpr const char* const Aidge::Reshape_Op::Type;
+constexpr const char* const Aidge::Reshape_Op::InputsName[];
+constexpr const char* const Aidge::Reshape_Op::OutputsName[];
 
 Aidge::Reshape_Op::Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<ReshapeAttr::Shape>(shape),
         attr<ReshapeAttr::AllowZero>(allowzero)))
 {
-    mImpl = std::make_shared<Reshape_OpImpl>(*this);
 }
 
 Aidge::Reshape_Op::Reshape_Op(const Aidge::Reshape_Op& op)
-    : OperatorTensor(op),
-        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Reshape_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Reshape_Op::clone() const {
-    return std::make_shared<Reshape_Op>(*this);
-}
+    : OperatorTensorWithImpl(op),
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+{}
 
 bool Aidge::Reshape_Op::dimsForwarded() const {
     if (getInput(1) && !getInput(1)->undefined()) {
@@ -131,25 +120,11 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
     return false;
 }
 
-void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Reshape_Op>::exists({name})){
-        SET_IMPL_MACRO(Reshape_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Reshape_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Reshape_Op::getAvailableBackends() const {
-    return Registrar<Reshape_Op>::getKeys();
-}
-
-//////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Reshape(const std::vector<std::int64_t>& shape,
                             bool allowzero,
                             const std::string &name)
 {
     return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape, allowzero), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index b2ef56572a5f972cd0f5be6a276780e5f27536de..946b650e622ad05010bfcf7f86b35924234f1d98 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -29,7 +29,9 @@
 
 namespace Aidge {
 
-const std::string Resize_Op::Type = "Resize";
+constexpr const char* const Resize_Op::Type;
+constexpr const char* const Resize_Op::InputsName[];
+constexpr const char* const Resize_Op::OutputsName[];
 
 bool Resize_Op::dimsForwarded() const {
     // in case of ROI add getInput(1) condition
@@ -136,23 +138,6 @@ bool Resize_Op::forwardDims(bool allowDataDependency) {
     return true;
 }
 
-void Resize_Op::setBackend(const std::string &name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(Resize_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-
-    // By default, automatically set backend for all optional inputs: roi, scales and
-    // sizes
-    if (getInput(1)) {
-        getInput(1)->setBackend(name, device);
-    }
-    if (getInput(2)) {
-        getInput(2)->setBackend(name, device);
-    }
-    if (getInput(3)) {
-        getInput(3)->setBackend(name, device);
-    }
-}
-
 std::shared_ptr<Node>
 Resize(std::vector<float> scale,
         std::vector<std::size_t> size,
diff --git a/src/operator/Round.cpp b/src/operator/Round.cpp
index ba4eff9d1e1cf06cc5a4bbda54010aec8c2f2f63..742864c55cb93115750f1aca4dfee672af19f43a 100644
--- a/src/operator/Round.cpp
+++ b/src/operator/Round.cpp
@@ -19,31 +19,11 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Round_Op::Type = "Round";
+constexpr const char* const Aidge::Round_Op::Type;
+constexpr const char* const Aidge::Round_Op::InputsName[];
+constexpr const char* const Aidge::Round_Op::OutputsName[];
 
-Aidge::Round_Op::Round_Op(const Aidge::Round_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Round_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-
-std::shared_ptr<Aidge::Operator> Aidge::Round_Op::clone() const {
-    return std::make_shared<Round_Op>(*this);
-}
-
-void Aidge::Round_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Round_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Round_Op::getAvailableBackends() const {
-    return Registrar<Round_Op>::getKeys();
-}
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Round(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Round_Op>(), name);
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
deleted file mode 100644
index 218d25cbd19db224189fce0aa1ef39ec306648f8..0000000000000000000000000000000000000000
--- a/src/operator/Scaling.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include "aidge/operator/Scaling.hpp"
-
-#include <memory>
-#include <string>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/utils/Types.h"
-
-
-//Caution: This operator is now deprecated and should no longer be used. 
-//It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
-
-const std::string Aidge::Scaling_Op::Type = "Scaling";
-
-Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
-        mAttributes(std::make_shared<Attributes_>(
-        attr<ScalingAttr::ScalingFactor>(scalingFactor),
-        attr<ScalingAttr::QuantizedNbBits>(nbBits),
-        attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
-{
-    Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used.\nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
-} 
-
-Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op)
-    : OperatorTensor(op),
-    mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used. \nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
-    if (op.mImpl){
-        SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Scaling_Op::clone() const {
-    return std::make_shared<Scaling_Op>(*this);
-}
-
-void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Scaling_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Scaling_Op::getAvailableBackends() const {
-    return Registrar<Scaling_Op>::getKeys();
-}
-
-////////////////////////////////////////////////
-
-std::shared_ptr<Aidge::Node> Aidge::Scaling(float scalingFactor,
-                                     std::size_t quantizedNbBits,
-                                     bool isOutputUnsigned,
-                                     const std::string& name)
-{
-    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
-}
\ No newline at end of file
diff --git a/src/operator/Select.cpp b/src/operator/Select.cpp
index 6e686ecc4ea097a480f281cb0e9ff24315334e15..715a65e19b2548c41eafc7c9978bb6f71943f0f2 100644
--- a/src/operator/Select.cpp
+++ b/src/operator/Select.cpp
@@ -21,30 +21,15 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 
-
-const std::string Aidge::Select_Op::Type = "Select";
+constexpr const char* const Aidge::Select_Op::Type;
+constexpr const char* const Aidge::Select_Op::InputsName[];
+constexpr const char* const Aidge::Select_Op::OutputsName[];
 
 Aidge::Select_Op::Select_Op(const Aidge::IOIndex_t nbIn)
-    : OperatorTensor(Type, std::vector<InputCategory>(nbIn + 1, InputCategory::Data), 1)
+    : OperatorTensorWithImpl(Type, std::vector<InputCategory>(nbIn + 1, InputCategory::Data), 1)
 {
     // ctor
     AIDGE_ASSERT(nbIn > 1, "Select operator should have at least two inputs.");
-    mImpl = std::make_shared<Select_OpImpl>(*this);
-}
-
-Aidge::Select_Op::Select_Op(const Select_Op& op)
-    : OperatorTensor(op)
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Select_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Select_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Select_Op::clone() const {
-    return std::make_shared<Select_Op>(*this);
 }
 
 bool Aidge::Select_Op::forwardDims(bool /*allowDataDependency*/) {
@@ -65,20 +50,6 @@ bool Aidge::Select_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::Select_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    if (Registrar<Select_Op>::exists({name})){
-        SET_IMPL_MACRO(Select_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Select_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Select_Op::getAvailableBackends() const {
-    return Registrar<Select_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Select(const Aidge::IOIndex_t nbIn, const std::string& name) {
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index 0927d3a6b8ea4f287677bfdd521c60503521b9a0..7083cab3fa4f17b8a887627d004bbc8c19ecf760 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -21,34 +21,22 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/Log.hpp"
 
-///////////////////////////////////////////////
-
-const std::string Aidge::Shape_Op::Type = "Shape";
+constexpr const char* const Aidge::Shape_Op::Type;
+constexpr const char* const Aidge::Shape_Op::InputsName[];
+constexpr const char* const Aidge::Shape_Op::OutputsName[];
 
 Aidge::Shape_Op::Shape_Op(const std::int64_t start, const std::int64_t end)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<ShapeAttr::Start>(start),
         attr<ShapeAttr::End>(end)))
-{
-    mImpl = std::make_shared<Shape_OpImpl>(*this);
-}
+{}
 
 Aidge::Shape_Op::Shape_Op(const Aidge::Shape_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Shape_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Shape_OpImpl>(*this);
-    }
-}
+{}
 
-std::shared_ptr<Aidge::Operator> Aidge::Shape_Op::clone() const {
-    return std::make_shared<Shape_Op>(*this);
-}
 bool Aidge::Shape_Op::forwardDType(){
     setDataType(NativeType_v<outDType>);
     return true;
@@ -96,16 +84,6 @@ bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
     return true;
 }
 
-void Aidge::Shape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Shape_Op>::exists({name})) {
-        SET_IMPL_MACRO(Shape_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Shape_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
 void Aidge::Shape_Op::setDataType(const Aidge::DataType &datatype) const {
     if (datatype != NativeType_v<outDType>)
         Log::warn("Shape operator output type was forcibly set to {}, as it is the only supported type. "
@@ -115,11 +93,7 @@ void Aidge::Shape_Op::setDataType(const Aidge::DataType &datatype) const {
     getOutput(0)->setDataType(NativeType_v<outDType>);
 }
 
-std::set<std::string> Aidge::Shape_Op::getAvailableBackends() const {
-    return Registrar<Shape_Op>::getKeys();
-}
-
-//////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Shape(const std::int64_t start, const std::int64_t end, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
diff --git a/src/operator/ShiftGELU.cpp b/src/operator/ShiftGELU.cpp
index bd229e6cf58a430922d08cff5301aa16ef636d5e..8d41c93183c5073d597b67ccb0c55c37a3bad72e 100644
--- a/src/operator/ShiftGELU.cpp
+++ b/src/operator/ShiftGELU.cpp
@@ -19,35 +19,14 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ShiftGELU_Op::Type = "ShiftGELU";
+constexpr const char* const Aidge::ShiftGELU_Op::Type;
+constexpr const char* const Aidge::ShiftGELU_Op::InputsName[];
+constexpr const char* const Aidge::ShiftGELU_Op::OutputsName[];
 
-Aidge::ShiftGELU_Op::ShiftGELU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+Aidge::ShiftGELU_Op::ShiftGELU_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
-Aidge::ShiftGELU_Op::ShiftGELU_Op(const Aidge::ShiftGELU_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(ShiftGELU_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::ShiftGELU_Op::clone() const {
-    return std::make_shared<ShiftGELU_Op>(*this);
-}
-
-void Aidge::ShiftGELU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(ShiftGELU_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ShiftGELU_Op::getAvailableBackends() const {
-    return Registrar<ShiftGELU_Op>::getKeys();
-}
-
-///////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ShiftGELU(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<ShiftGELU_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/ShiftMax.cpp b/src/operator/ShiftMax.cpp
index 58d4bf46100ce116ad4a179e972cbef81bc5b5c1..24bd3a403bc9b51c243a192a566a2751105fae64 100644
--- a/src/operator/ShiftMax.cpp
+++ b/src/operator/ShiftMax.cpp
@@ -19,39 +19,14 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::ShiftMax_Op::Type = "ShiftMax";
+constexpr const char* const Aidge::ShiftMax_Op::Type;
+constexpr const char* const Aidge::ShiftMax_Op::InputsName[];
+constexpr const char* const Aidge::ShiftMax_Op::OutputsName[];
 
-Aidge::ShiftMax_Op::ShiftMax_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+Aidge::ShiftMax_Op::ShiftMax_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
-Aidge::ShiftMax_Op::ShiftMax_Op(const Aidge::ShiftMax_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(ShiftMax_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-/**
- * @brief Clone the operator using its copy-constructor.
- * @see Operator::ShiftMax_Op
- */
-std::shared_ptr<Aidge::Operator> Aidge::ShiftMax_Op::clone() const {
-    return std::make_shared<ShiftMax_Op>(*this);
-}
-
-void Aidge::ShiftMax_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(ShiftMax_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::ShiftMax_Op::getAvailableBackends() const {
-    return Registrar<ShiftMax_Op>::getKeys();
-}
-
-/////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::ShiftMax(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<ShiftMax_Op>(), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp
index d97f8c52341dee4e6e0840afa6e023d8a4e3fd52..8cf683b565aa6525336d7c606db812fadca20701 100644
--- a/src/operator/Sigmoid.cpp
+++ b/src/operator/Sigmoid.cpp
@@ -18,35 +18,13 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Sigmoid_Op::Type = "Sigmoid";
+constexpr const char* const Aidge::Sigmoid_Op::Type;
+constexpr const char* const Aidge::Sigmoid_Op::InputsName[];
+constexpr const char* const Aidge::Sigmoid_Op::OutputsName[];
 
-Aidge::Sigmoid_Op::Sigmoid_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+Aidge::Sigmoid_Op::Sigmoid_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
-Aidge::Sigmoid_Op::Sigmoid_Op(const Aidge::Sigmoid_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Sigmoid_Op::clone() const {
-    return std::make_shared<Sigmoid_Op>(*this);
-}
-
-
-void Aidge::Sigmoid_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Sigmoid_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Sigmoid_Op::getAvailableBackends() const {
-    return Registrar<Sigmoid_Op>::getKeys();
-}
-
-///////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Sigmoid(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Sigmoid_Op>(), name);
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 60ec176c7a1d412801d8e0f6da9503b8f9f8ea8d..315a1066c231107d721db9bfaac973483c817aee 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -26,13 +26,15 @@
 #include "aidge/utils/Registrar.hpp"
 
 
-const std::string Aidge::Slice_Op::Type = "Slice";
+constexpr const char* const Aidge::Slice_Op::Type;
+constexpr const char* const Aidge::Slice_Op::InputsName[];
+constexpr const char* const Aidge::Slice_Op::OutputsName[];
 
 Aidge::Slice_Op::Slice_Op(const std::vector<std::int64_t>& starts,
                         const std::vector<std::int64_t>& ends,
                         const std::vector<std::int8_t>& axes,
                         const std::vector<std::int64_t>& steps)
-    : OperatorTensor(Type,
+    : OperatorTensorWithImpl(Type,
         {InputCategory::Data,
             InputCategory::OptionalData,
             InputCategory::OptionalData,
@@ -45,23 +47,11 @@ Aidge::Slice_Op::Slice_Op(const std::vector<std::int64_t>& starts,
         attr<SliceAttr::Axes>(axes),
         attr<SliceAttr::Steps>(steps)))
 {
-    mImpl = std::make_shared<Slice_OpImpl>(*this);
 }
 
 Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op& op)
-    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Slice_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Slice_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Slice_Op::clone() const {
-    return std::make_shared<Slice_Op>(*this);
-}
+    : OperatorTensorWithImpl(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+{}
 
 bool Aidge::Slice_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined())
@@ -213,21 +203,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
     return false;
 }
 
-void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Slice_Op>::exists({name})){
-        SET_IMPL_MACRO(Slice_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Slice_OpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Slice_Op::getAvailableBackends() const {
-    return Registrar<Slice_Op>::getKeys();
-}
-
-////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Slice(const std::vector<std::int64_t>& starts,
                                    const std::vector<std::int64_t>& ends,
diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp
index df8a9e0217ad4c4fa514b89258a6aa4c02ba608b..b43d498bd9fe51a4f2b3bf346e0f31e5c8289f05 100644
--- a/src/operator/Softmax.cpp
+++ b/src/operator/Softmax.cpp
@@ -18,40 +18,23 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Softmax_Op::Type = "Softmax";
+constexpr const char* const Aidge::Softmax_Op::Type;
+constexpr const char* const Aidge::Softmax_Op::InputsName[];
+constexpr const char* const Aidge::Softmax_Op::OutputsName[];
 
 Aidge::Softmax_Op::Softmax_Op(std::int32_t axis)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
         attr<SoftmaxAttr::Axis>(axis)))
 {}
 
 Aidge::Softmax_Op::Softmax_Op(const Aidge::Softmax_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Softmax_Op::clone() const {
-    return std::make_shared<Softmax_Op>(*this);
-}
-
-void Aidge::Softmax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Softmax_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Softmax_Op::getAvailableBackends() const {
-    return Registrar<Softmax_Op>::getKeys();
-}
+{}
 
-////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Softmax(std::int32_t axis, const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index 4bdf01b694b6d5e764c60a0b94f63a877164139b..7aa732140e9a6257daf74798eb2062b4d564c02a 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -22,34 +22,24 @@
 #include "aidge/utils/Types.h"
 
 
-const std::string Aidge::Split_Op::Type = "Split";
+constexpr const char* const Aidge::Split_Op::Type;
+constexpr const char* const Aidge::Split_Op::InputsName[];
+constexpr const char* const Aidge::Split_Op::OutputsName[];
 
 Aidge::Split_Op::Split_Op(std::int8_t axis,
                         Aidge::DimSize_t nbOutputs,
                         const std::vector<Aidge::DimSize_t>& split)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
     mAttributes(std::make_shared<Attributes_>(
         attr<SplitAttr::Axis>(axis),
         attr<SplitAttr::Split>(split)))
 {
-    mImpl = std::make_shared<Split_OpImpl>(*this);
 }
 
 Aidge::Split_Op::Split_Op(const Aidge::Split_Op &op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
     mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Split_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Split_OpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Split_Op::clone() const {
-    return std::make_shared<Split_Op>(*this);
-}
+{}
 
 bool Aidge::Split_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined()))
@@ -121,29 +111,11 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
     return false;
 }
 
-void Aidge::Split_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Split_Op>::exists({name})) {
-        SET_IMPL_MACRO(Split_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Split_OpImpl>(*this);
-    }
-    for (std::size_t i = 0; i < this->nbOutputs(); i++)
-    {
-        mOutputs[i]->setBackend(name, device);
-    }
-
-}
-
-std::set<std::string> Aidge::Split_Op::getAvailableBackends() const {
-    return Registrar<Split_Op>::getKeys();
-}
-
-////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Split(Aidge::DimSize_t nbOutput,
                                    std::int8_t axis,
                                    const std::vector<Aidge::DimSize_t>& split,
                                    const std::string &name) {
     return std::make_shared<Node>(std::make_shared<Split_Op>(axis, nbOutput, split), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp
index bd3286f098cd5c6985d7f33f88b723523ef94765..bec6eb44775762bb68de322d6177b333349cbf2b 100644
--- a/src/operator/Sqrt.cpp
+++ b/src/operator/Sqrt.cpp
@@ -19,33 +19,11 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Sqrt_Op::Type = "Sqrt";
+constexpr const char* const Aidge::Sqrt_Op::Type;
+constexpr const char* const Aidge::Sqrt_Op::InputsName[];
+constexpr const char* const Aidge::Sqrt_Op::OutputsName[];
 
-Aidge::Sqrt_Op::Sqrt_Op(const Aidge::Sqrt_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Sqrt_Op, *this, op.backend());
-    }else{
-        mImpl = nullptr;
-    }
-}
-
-
-std::shared_ptr<Aidge::Operator> Aidge::Sqrt_Op::clone() const {
-    return std::make_shared<Sqrt_Op>(*this);
-}
-
-void Aidge::Sqrt_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Sqrt_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Sqrt_Op::getAvailableBackends() const {
-    return Registrar<Sqrt_Op>::getKeys();
-}
-
-////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Sqrt(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Sqrt_Op>(), name);
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
index 53b8e76ed0b35ad979811929ea5e9dceb68f0b45..b97cd8262441cd656834d95822d77b43e4297c6a 100644
--- a/src/operator/Squeeze.cpp
+++ b/src/operator/Squeeze.cpp
@@ -26,29 +26,24 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-const std::string Squeeze_Op::Type = "Squeeze";
+constexpr const char* const Squeeze_Op::Type;
+constexpr const char* const Squeeze_Op::InputsName[];
+constexpr const char* const Squeeze_Op::OutputsName[];
 
 Squeeze_Op::Squeeze_Op(const std::vector<std::int8_t> &axes)
-    : OperatorTensor(
+    : OperatorTensorWithImpl(
         Type,
         {InputCategory::Data, InputCategory::OptionalData},
         1),
     mAttributes(
         std::make_shared<Attributes_>(attr<SqueezeAttr::Axes>(axes)))
 {
-    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
 }
 
 Squeeze_Op::Squeeze_Op(const Squeeze_Op &op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Squeeze_Op, *this, op.backend());
-    } else {
-        mImpl = std::make_shared<Squeeze_OpImpl>(*this);
-    }
-}
+{}
 
 bool Squeeze_Op::dimsForwarded() const {
   if ((getInput(1) && !getInput(1)->undefined())) {
@@ -136,20 +131,6 @@ bool Squeeze_Op::forwardDims(bool allowDataDependency) {
     return true;
 }
 
-void Squeeze_Op::setBackend(const std::string &name,
-                            Aidge::DeviceIdx_t device) {
-  if (Registrar<Squeeze_Op>::exists({name})) {
-    SET_IMPL_MACRO(Squeeze_Op, *this, name);
-  } else {
-    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
-  }
-  mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Squeeze_Op::getAvailableBackends() const {
-  return Registrar<Squeeze_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Node> Squeeze(const std::vector<std::int8_t> axes,
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
index 9e66fac648052ad79be64e205a567f134f476fc0..893e3f43747554a140d124b810be3d548fa5f3a2 100644
--- a/src/operator/Stack.cpp
+++ b/src/operator/Stack.cpp
@@ -22,30 +22,22 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-
-const std::string StackOp::Type = "Stack";
+constexpr const char* const StackOp::Type;
+constexpr const char* const StackOp::InputsName[];
+constexpr const char* const StackOp::OutputsName[];
 
 StackOp::StackOp(std::uint32_t maxElements)
-    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
       mAttributes(std::make_shared<Attributes_>(
           attr<StackAttr::MaxElements>(maxElements),
           attr<StackAttr::BackwardStep>(0),
           attr<StackAttr::ForwardStep>(0))) {
-    mImpl = std::make_shared<StackOpImpl>(*this);
 }
 
 StackOp::StackOp(const Aidge::StackOp &op)
-    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(StackOp, *this, op.backend());
-    } else {
-        mImpl = std::make_shared<StackOpImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::StackOp::clone() const {
-    return std::make_shared<StackOp>(*this);
-}
+    : OperatorTensorWithImpl(op),
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
+{}
 
 bool Aidge::StackOp::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined()))
@@ -83,19 +75,6 @@ bool Aidge::StackOp::forwardDims(bool allowDataDependency) {
     return false;
 }
 
-void StackOp::setBackend(const std::string &name, DeviceIdx_t device) {
-    if (Registrar<StackOp>::exists({name})) {
-        SET_IMPL_MACRO(StackOp, *this, name);
-    } else {
-        mImpl = std::make_shared<StackOpImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> StackOp::getAvailableBackends() const {
-    return Registrar<StackOp>::getKeys();
-}
-
 void StackOp::forward() {
     OperatorTensor::forward();
     ++forwardStep();
@@ -107,6 +86,8 @@ void StackOp::backward() {
     --backwardStep();
 }
 
+////////////////////////////////////////////////////////////////////////////////
+
 std::shared_ptr<Node> Stack(std::uint32_t maxElements,
                             const std::string &name) {
     return std::make_shared<Node>(std::make_shared<StackOp>(maxElements),
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index ca7348b3b415375c09ac1cfd69ac3d6f6e3488eb..c1c185897c057be06e8dd552652819f7a8897f6f 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -22,21 +22,9 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Sub_Op::Type = "Sub";
-
-Aidge::Sub_Op::Sub_Op(const Aidge::Sub_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Sub_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Sub_Op::clone() const {
-    return std::make_shared<Sub_Op>(*this);
-}
+constexpr const char* const Aidge::Sub_Op::Type;
+constexpr const char* const Aidge::Sub_Op::InputsName[];
+constexpr const char* const Aidge::Sub_Op::OutputsName[];
 
 bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -67,16 +55,7 @@ bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Sub_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Sub_Op::getAvailableBackends() const {
-    return Registrar<Sub_Op>::getKeys();
-}
-
-//////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Sub(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp
index fe295ab71b67e8e62562066b1464ffba6e8ae404..bef62605dd463c0cbf8ac185254c40705d5177ad 100644
--- a/src/operator/Tanh.cpp
+++ b/src/operator/Tanh.cpp
@@ -18,34 +18,13 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Tanh_Op::Type = "Tanh";
+constexpr const char* const Aidge::Tanh_Op::Type;
+constexpr const char* const Aidge::Tanh_Op::InputsName[];
+constexpr const char* const Aidge::Tanh_Op::OutputsName[];
 
-Aidge::Tanh_Op::Tanh_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+Aidge::Tanh_Op::Tanh_Op() : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1) {}
 
-Aidge::Tanh_Op::Tanh_Op(const Aidge::Tanh_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl){
-        SET_IMPL_MACRO(Tanh_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Tanh_Op::clone() const {
-    return std::make_shared<Tanh_Op>(*this);
-}
-
-void Aidge::Tanh_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Tanh_Op>::create(name)(*this);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Tanh_Op::getAvailableBackends() const {
-    return Registrar<Tanh_Op>::getKeys();
-}
-
-////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Tanh(const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Tanh_Op>(), name);
diff --git a/src/operator/TopK.cpp b/src/operator/TopK.cpp
index 4d574784fb9c79e7239a3c01b90dde873804aeb1..e9ac96ce4190af086013f9ef6b916c56736bd59b 100644
--- a/src/operator/TopK.cpp
+++ b/src/operator/TopK.cpp
@@ -22,15 +22,16 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-
-const std::string Aidge::TopK_Op::Type = "TopK";
+constexpr const char* const TopK_Op::Type;
+constexpr const char* const TopK_Op::InputsName[];
+constexpr const char* const TopK_Op::OutputsName[];
 
 TopK_Op::TopK_Op(
     int64_t axis,
     bool largest,
     bool sorted,
     IOIndex_t k)
-    : OperatorTensor(Type,
+    : OperatorTensorWithImpl(Type,
                      {InputCategory::Data,
                       InputCategory::OptionalData},
                      2),
@@ -44,15 +45,9 @@ TopK_Op::TopK_Op(
 }
 
 TopK_Op::TopK_Op(const TopK_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(TopK_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
+{}
 
 bool Aidge::TopK_Op::forwardDims(bool allowDataDependency) {
     if (inputsAssociated()) {
@@ -88,21 +83,11 @@ bool Aidge::TopK_Op::forwardDims(bool allowDataDependency) {
     return false;
 }
 
-void Aidge::TopK_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(TopK_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-    mOutputs[1]->setBackend(name, device);
-}
-
 void Aidge::TopK_Op::setDataType(const DataType& dataType) const {
     mOutputs[0]->setDataType(dataType);
     // mOutputs[1] data type is fixed (Int64)
 }
 
-std::set<std::string> Aidge::TopK_Op::getAvailableBackends() const {
-    return Registrar<TopK_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Node> TopK(const std::string& name) {
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index b0c2c11d618835818cc0aa728cf5c50d697fed7d..78df803d8581449ace1d5ae3473fbbc957c5d22b 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -23,38 +23,20 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-void Aidge::TransposeImpl::forward() {
-    const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
-    op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.outputDimsOrder());
-}
-
-///////////////////////////////////////////////////
-
-const std::string Aidge::Transpose_Op::Type = "Transpose";
+constexpr const char* const Aidge::Transpose_Op::Type;
+constexpr const char* const Aidge::Transpose_Op::InputsName[];
+constexpr const char* const Aidge::Transpose_Op::OutputsName[];
 
 Aidge::Transpose_Op::Transpose_Op(const std::vector<Aidge::DimSize_t> &outputDimsOrder)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<Attr::OutputDimsOrder>(outputDimsOrder)))
-{
-    mImpl = std::make_shared<TransposeImpl>(*this);
-}
+{}
 
 Aidge::Transpose_Op::Transpose_Op(const Aidge::Transpose_Op& op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
     mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<TransposeImpl>(*this);
-    }
-}
-
-std::shared_ptr<Aidge::Operator> Aidge::Transpose_Op::clone() const {
-    return std::make_shared<Transpose_Op>(*this);
-}
+{}
 
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -96,23 +78,9 @@ bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-void Aidge::Transpose_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Transpose_Op>::exists({name})){
-        SET_IMPL_MACRO(Transpose_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<TransposeImpl>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Transpose_Op::getAvailableBackends() const {
-    return Registrar<Transpose_Op>::getKeys();
-}
-
-//////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Transpose(const std::vector<Aidge::DimSize_t> &outputDimsOrder,
                                               const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
index 5fac669b8a7a45b89ee78628641b84df8c642be7..67cd79b90a0f2efec37dd36b0db734874782557f 100644
--- a/src/operator/Unfold.cpp
+++ b/src/operator/Unfold.cpp
@@ -24,40 +24,26 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Unfold_Op<DIM>::Type = "Unfold";
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Unfold_Op<DIM>::Type;
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Unfold_Op<DIM>::InputsName[];
+template <Aidge::DimIdx_t DIM> constexpr const char* const Aidge::Unfold_Op<DIM>::OutputsName[];
 
 template <Aidge::DimIdx_t DIM>
 Aidge::Unfold_Op<DIM>::Unfold_Op(const std::array<Aidge::DimSize_t, DIM> &kernelDims,
                     const std::array<Aidge::DimSize_t, DIM> &strideDims,
                     const std::array<Aidge::DimSize_t, DIM> &dilationDims)
-    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    : OperatorTensorWithImpl<Unfold_Op<DIM>, Unfold_OpImpl<DIM>>(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
         attr<UnfoldAttr::StrideDims>(strideDims),
         attr<UnfoldAttr::DilationDims>(dilationDims),
         attr<UnfoldAttr::KernelDims>(kernelDims)))
-{
-    mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 Aidge::Unfold_Op<DIM>::Unfold_Op(const Aidge::Unfold_Op<DIM> &op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl<Unfold_Op<DIM>, Unfold_OpImpl<DIM>>(op),
         mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
-    }
-    else {
-        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
-    }
-}
-
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::Unfold_Op<DIM>::clone() const {
-    return std::make_shared<Unfold_Op>(*this);
-}
+{}
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Unfold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
@@ -83,25 +69,9 @@ bool Aidge::Unfold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::Unfold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Unfold_Op<DIM>>::exists({name})){
-        SET_IMPL_MACRO(Unfold_Op<DIM>, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
-    }
-    mOutputs[0]->setBackend(name, device);
-}
-
-template <Aidge::DimIdx_t DIM>
-std::set<std::string> Aidge::Unfold_Op<DIM>::getAvailableBackends() const {
-    return Registrar<Unfold_Op<DIM>>::getKeys();
-}
-
 template class Aidge::Unfold_Op<2>;
 
-///////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::Unfold(const std::array<Aidge::DimSize_t, DIM> &kernelDims,
@@ -115,4 +85,4 @@ std::shared_ptr<Aidge::Node> Aidge::Unfold(const std::array<Aidge::DimSize_t, DI
 template std::shared_ptr<Aidge::Node> Aidge::Unfold<2>(const std::array<Aidge::DimSize_t, 2>&,
                                   const std::string&,
                                   const std::array<Aidge::DimSize_t, 2>&,
-                                  const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
+                                  const std::array<Aidge::DimSize_t, 2>&);
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
index b73b416c915b46babf7bfdffd99770c7103fd84c..dcd68fbccd1f44d1c7fc4d5842203ddedcfb3f5b 100644
--- a/src/operator/Unsqueeze.cpp
+++ b/src/operator/Unsqueeze.cpp
@@ -23,28 +23,21 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-const std::string Unsqueeze_Op::Type = "Unsqueeze";
-
+constexpr const char* const Unsqueeze_Op::Type;
+constexpr const char* const Unsqueeze_Op::InputsName[];
+constexpr const char* const Unsqueeze_Op::OutputsName[];
 
 Unsqueeze_Op::Unsqueeze_Op(const std::vector<int8_t> &axes)
-    : OperatorTensor(Type,
+    : OperatorTensorWithImpl(Type,
                     {InputCategory::Data, InputCategory::OptionalData},
                     1),
       mAttributes(std::make_shared<Attributes_>(attr<UnsqueezeAttr::Axes>(axes)))
-{
-    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
-}
+{}
 
 Unsqueeze_Op::Unsqueeze_Op(const Unsqueeze_Op &op)
-    : OperatorTensor(op),
+    : OperatorTensorWithImpl(op),
       mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Unsqueeze_Op, *this, op.backend());
-    } else {
-        mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
-    }
-}
+{}
 
 bool Aidge::Unsqueeze_Op::dimsForwarded() const {
   if ((getInput(1) && !getInput(1)->undefined())) {
@@ -124,20 +117,6 @@ bool Unsqueeze_Op::forwardDims(bool allowDataDependency) {
   return true;
 }
 
-void Unsqueeze_Op::setBackend(const std::string &name,
-                              Aidge::DeviceIdx_t device) {
-  if (Registrar<Unsqueeze_Op>::exists({name})) {
-    SET_IMPL_MACRO(Unsqueeze_Op, *this, name);
-  } else {
-    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
-  }
-  mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::Unsqueeze_Op::getAvailableBackends() const {
-  return Registrar<Unsqueeze_Op>::getKeys();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes,
diff --git a/src/operator/WeightInterleaving.cpp b/src/operator/WeightInterleaving.cpp
index 66af1d51f87c24b5b8d7d9c1f0ab3701f122515d..0852aa8521bd7dc011b9322ca236364a7403e199 100644
--- a/src/operator/WeightInterleaving.cpp
+++ b/src/operator/WeightInterleaving.cpp
@@ -21,29 +21,9 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::WeightInterleaving_Op::Type = "WeightInterleaving";
-
-/**
- * @brief Copy-constructor.
- * @param op WeightInterleaving_Op to copy.
- * @details Copies the operator attributes and its output tensor(s), but not
- * its input tensors. The new operator has no associated input.
- */
-Aidge::WeightInterleaving_Op::WeightInterleaving_Op(const WeightInterleaving_Op& op)
-    : OperatorTensor(op)
-{
-    if (op.mImpl) {
-        SET_IMPL_MACRO(WeightInterleaving_Op, *this, op.backend());
-    } else {
-        mImpl = nullptr;
-    }
-}
-
-
-std::shared_ptr<Aidge::Operator> Aidge::WeightInterleaving_Op::clone() const {
-    return std::make_shared<WeightInterleaving_Op>(*this);
-}
-
+constexpr const char* const Aidge::WeightInterleaving_Op::Type;
+constexpr const char* const Aidge::WeightInterleaving_Op::InputsName[];
+constexpr const char* const Aidge::WeightInterleaving_Op::OutputsName[];
 
 bool Aidge::WeightInterleaving_Op::forwardDims(bool /*allowDataDependency*/) {
     
@@ -92,21 +72,6 @@ bool Aidge::WeightInterleaving_Op::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
-
-void Aidge::WeightInterleaving_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(WeightInterleaving_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
-
-std::set<std::string> Aidge::WeightInterleaving_Op::getAvailableBackends() const {
-    return Registrar<WeightInterleaving_Op>::getKeys();
-}
-
-std::shared_ptr<Aidge::Node> Aidge::WeightInterleaving(const std::string& name) {
-    return std::make_shared<Node>(std::make_shared<WeightInterleaving_Op>(), name);
-}
-
-
 std::size_t Aidge::WeightInterleaving_Op::compactDataSize(std::size_t dataSize, std::uint8_t nbBits) {
     AIDGE_ASSERT(nbBits > 0 && nbBits < 8, "nbBits must be between 1 and 4"); // Ensure valid bit width
 
@@ -118,4 +83,10 @@ std::size_t Aidge::WeightInterleaving_Op::compactDataSize(std::size_t dataSize,
     std::size_t requiredSize = (dataSize + nbSlot - 1) / nbSlot;
 
     return requiredSize;
-}
\ No newline at end of file
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::WeightInterleaving(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<WeightInterleaving_Op>(), name);
+}
diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
index a0b1664980427475def19d8ec226c8f65c52a067..9bc0e855ef76f6a8eaf4eeae1f7c805b865fb43d 100644
--- a/src/recipes/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -83,7 +83,6 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         }
     }
 
-    const std::vector<std::string> sliceInputsNames = Slice_Op::getInputsName();
     // coordinates of the first value of the current output slice
     std::vector<DimSize_t> currentFirstDims = std::vector<DimSize_t>(outTensor->nbDims(), 0);
     for (IOIndex_t i = 0; currentFirstDims[axis] < outTensor->dims()[axis]; currentFirstDims[axis] += outputDims[axis], ++i) {
@@ -107,7 +106,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         starts -> setBackend(backend);
         starts -> resize(std::vector<std::size_t>({inputDimsStart.size()}));
         starts -> getImpl() -> copyFromHost(inputDimsStart.data(), inputDimsStart.size());
-        auto startsNode = Producer(starts, slice->name() + "_" + sliceInputsNames[1]);
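+        // Suffix each producer with the index of the Slice input it feeds
+        // (1: starts, 2: ends, 3: axes, 4: steps).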
+        auto startsNode = Producer(starts, slice->name() + "_1");
         startsNode -> addChild(slice, 0, 1);
 
         // Create Slice's Ends producer node
@@ -120,7 +119,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         ends -> setBackend(backend);
         ends -> resize(std::vector<std::size_t>({inputDimsEnd.size()}));
         ends -> getImpl() -> copyFromHost(inputDimsEnd.data(), inputDimsEnd.size());
-        auto endsNode = Producer(ends, slice->name() + "_" + sliceInputsNames[2]);
+        auto endsNode = Producer(ends, slice->name() + "_2");
         endsNode -> addChild(slice, 0, 2);
 
         // Create Slice's Axes producer node
@@ -131,7 +130,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         axes -> setBackend(backend);
         axes -> resize(std::vector<std::size_t>({usedDims.size()}));
         axes -> getImpl() -> copyFromHost(usedDims.data(), usedDims.size());
-        auto axesNode = Producer(axes, slice->name() + "_" + sliceInputsNames[3]);
+        auto axesNode = Producer(axes, slice->name() + "_3");
         axesNode -> addChild(slice, 0, 3);
 
         // Create Slice's Steps producer node
@@ -141,7 +140,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         steps -> setBackend(backend);
         steps -> resize(std::vector<std::size_t>({inputDimsSteps.size()}));
         steps -> getImpl() -> copyFromHost(inputDimsSteps.data(), inputDimsSteps.size());
-        auto stepsNode = Producer(steps, slice->name() + "_" + sliceInputsNames[4]);
+        auto stepsNode = Producer(steps, slice->name() + "_4");
         stepsNode -> addChild(slice, 0, 4);
 
         // auto slice = Slice(inputDimsStart, inputDimsEnd, usedDims, inputDimsSteps);