diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 3115cedca1f2a3bcc4a1330b96e90669bf7611a2..93cfb44514e39a489ccb75d86fd6e114da5c6162 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -12,15 +12,11 @@
 #ifndef AIDGE_CORE_OPERATOR_ADD_H_
 #define AIDGE_CORE_OPERATOR_ADD_H_
 
-#include <numeric>
-#include <vector>
-#include <cmath>
 #include <memory>
+#include <string>
 #include <vector>
 
-#include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
@@ -44,15 +40,7 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Add_Op(const Add_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Add_Op, *this, op.mOutputs[0]->getImpl()->backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    Add_Op(const Add_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -74,10 +62,7 @@ public:
 
     void computeOutputDims() override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Add_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input_0", "data_input_n"};
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index e427aac72ad3948d0d03f588c930cfccedfb1885..031046500e0c50443a0a1f4e98a6471625f25eb4 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -13,14 +13,18 @@
 #define AIDGE_CORE_OPERATOR_AVGPOOLING_H_
 
 #include <array>
-#include <numeric>
+#include <cmath>    // std::floor
+#include <cstddef>  // std::size_t
+#include <string>
+#include <utility>  // std::pair
 #include <vector>
-#include <cmath>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -60,9 +64,9 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
-            SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
-        }else{
+        if (op.mImpl) {
+            SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend());
+        } else {
             mImpl = nullptr;
         }
     }
@@ -101,8 +105,7 @@ public:
     std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
     computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
                             const std::vector<DimSize_t>& outputDims,
-                            const IOIndex_t outputIdx = 0) const override final
-    {
+                            const IOIndex_t outputIdx = 0) const override final {
         if (outputIdx != 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
         }
@@ -153,8 +156,8 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string AvgPooling_Op<DIM>::Type = "AvgPooling";
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling";
 
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 83ad2dbbb695e42c11cb794c7d5bd4578056d941..51673dd3c8b41c657c1df6e951a2cb3a842308b5 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -55,7 +55,7 @@ public:
           Attributes_(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+            SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend());
         }else{
             mImpl = nullptr;
         }
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 7cc3985674219daf087381049d3a845299b3e250..bbc776a1175a1fc29d08c3872649a6b7aac2f04f 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -39,7 +39,11 @@ public:
     Cast_Op(const Cast_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Cast_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Cast_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -50,12 +54,7 @@ public:
         return std::make_shared<Cast_Op>(*this);
     }
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        if (Registrar<Cast_Op>::exists({name})) {
-            mImpl = Registrar<Cast_Op>::create({name})(*this);
-        }
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     void forward() override;
 
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 450c40bd210e0a4be891e436f03330a984e221be..611ff6bd53b1f16f87f73dd951d0645b9765262e 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -12,16 +12,15 @@
 #ifndef AIDGE_CORE_OPERATOR_CONCAT_H_
 #define AIDGE_CORE_OPERATOR_CONCAT_H_
 
-#include <numeric>
-#include <vector>
-#include <cmath>
 #include <memory>
+#include <stdexcept>
+#include <string>
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
@@ -56,7 +56,7 @@ public:
           Attributes_(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(Concat_Op, *this, op.mOutputs[0]->getImpl()->backend());
+            SET_IMPL_MACRO(Concat_Op, *this, op.backend());
         }else{
             mImpl = nullptr;
         }
@@ -70,51 +70,9 @@ public:
         return std::make_shared<Concat_Op>(*this);
     }
 
-    // Data operator[](const char* inputName) override final {
-    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
-    //         (strcmp(inputName, "weight") ? mInputs[1] :
-    //         (strcmp(inputName, "bias") ? mInputs[2] :
-    //         nullptr));
-    //     assert((in!=nullptr) && "No such parameter");
-    //     return *in;
-    // }
-
+    void computeOutputDims() override final;
 
-    void computeOutputDims() override final {
-        // Every input is non-empty with the same number of dimensions
-        bool associated = (getInput(0) != nullptr);
-        associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input
-        auto outputDims =  getInput(0)->dims();
-        const auto firstInputNbDims = getInput(0) -> nbDims();
-        for (IOIndex_t i = 1; i < nbInputs(); ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-
-            if (getInput(i)->nbDims() == firstInputNbDims) {
-                for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
-                    if (dim == getAttr<ConcatAttr::Axis>()) {
-                        outputDims[dim] += getInput(i)->dims()[dim];
-                    }
-                    else {
-                        associated &= (getInput(i)->dims()[dim] == outputDims[dim]);
-                    }
-                }
-            }
-            else {
-                associated = false;
-                break;
-            }
-        }
-        if (associated) {
-            getOutput(0)->resize(outputDims);
-        }
-    }
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Concat_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_0", "data_input_n"};
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 82cd5df8e24457bd9f5e07c89826904c7d2283ad..892663c849b1f3f07565d3beaecbb63ca441fc62 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -13,17 +13,20 @@
 #define AIDGE_CORE_OPERATOR_CONV_H_
 
 #include <array>
-#include <cmath>
-#include <cstddef>
-#include <numeric>
+#include <cmath>    // std::floor
+#include <cstddef>  // std::size_t
+#include <string>
+#include <utility>  // std::pair
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -65,9 +68,9 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
-        }else{
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
+        } else {
             mImpl = nullptr;
         }
     }
@@ -122,8 +125,10 @@ public:
         }
     }
 
-
-std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
+    computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
+                          const std::vector<DimSize_t>& outputDims,
+                          const IOIndex_t outputIdx = 0) const override {
         if (outputIdx != 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
         }
@@ -177,6 +182,6 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> co
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
         SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 7fa9124d4c750cee53d9c4a402a2fa6196ac8158..2620ecaaf90e4c547c588ec06fb22fe2c7183c51 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -13,14 +13,17 @@
 #define AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_
 
 #include <array>
-#include <cmath>
-#include <numeric>
+#include <cmath>    // std::floor
+#include <cstddef>  // std::size_t
+#include <string>
+#include <utility>  // std::pair
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -68,7 +71,7 @@ public:
           Attributes_(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+            SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
         }else{
             mImpl = nullptr;
         }
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index be654a3c015e5810892c1e23f08cc1f4b83b2d93..49410db044518dc3ca2cc33285d570197d83b10a 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -12,14 +12,13 @@
 #ifndef AIDGE_CORE_OPERATOR_DIV_H_
 #define AIDGE_CORE_OPERATOR_DIV_H_
 
-#include <cassert>
 #include <memory>
+#include <string>
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
@@ -40,9 +39,9 @@ public:
     Div_Op(const Div_Op& op)
         : OperatorTensor(op)
     {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Div_Op, *this, op.mOutputs[0]->getImpl()->backend());
-        }else{
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Div_Op, *this, op.backend());
+        } else {
             mImpl = nullptr;
         }
     }
@@ -57,11 +56,7 @@ public:
 
     void computeOutputDims() override final;
 
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Div_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_1", "data_input_2"};
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index 5a92b5dc45b6a090be0d9306dbfc21b1c0ae6edb..5ec10522e889bb1188b2304940fd892c0928b414 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -12,16 +12,14 @@
 #ifndef AIDGE_CORE_OPERATOR_ERF_H_
 #define AIDGE_CORE_OPERATOR_ERF_H_
 
-#include <cassert>
 #include <memory>
+#include <string>
 #include <vector>
 
-#include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -40,9 +38,9 @@ public:
     Erf_Op(const Erf_Op& op)
         : OperatorTensor(op)
     {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Erf_Op, *this, op.mOutputs[0]->getImpl()->backend());
-        }else{
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Erf_Op, *this, op.backend());
+        } else {
             mImpl = nullptr;
         }
     }
@@ -55,10 +53,7 @@ public:
         return std::make_shared<Erf_Op>(*this);
     }
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Erf_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index c111e38b00e69c8d0aecd9df0023f07a47a3865d..39b28c125c917f07c2cf238988e68075adeceb8e 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -13,13 +13,10 @@
 #define AIDGE_CORE_OPERATOR_FC_H_
 
 #include <array>
-#include <cmath>
-#include <numeric>
 #include <memory>
 #include <vector>
 
 #include "aidge/utils/Types.h"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
@@ -58,7 +55,7 @@ public:
           Attributes_(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(FC_Op, *this, op.mOutputs[0]->getImpl()->backend());
+            SET_IMPL_MACRO(FC_Op, *this, op.backend());
         }else{
             mImpl = nullptr;
         }
@@ -68,46 +65,15 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::FC_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<Operator> clone() const override final {
         return std::make_shared<FC_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        assert(data->type() == Tensor::Type && "input data must be of Tensor type");
-        // TODO: FIXME: check this, because data dims may not be initialized at this point...
-        //if (inputIdx == 2) {
-        //    assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0));
-        //    assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
-        //}
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-        if (inputIdx == 0 && getInput(0)->nbDims() == 1)
-            mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
-    }
+    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
-    void computeOutputDims() override final {
-        bool associated = true;
-        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
-            // <batch, OutChannels>
-            mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()});
-        }
-    }
+    void computeOutputDims() override final;
 
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(FC_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-
-        // By default, automatically set backend for weight and bias inputs
-        getInput(1)->setBackend(name, device);
-        getInput(2)->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 142f6582a3afbc85ccd951fcfeff2a924a35e718..b7d18e6443404730bbcb73cf7e6da97b8b3e6a7c 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -12,16 +12,14 @@
 #ifndef AIDGE_CORE_OPERATOR_GATHER_H_
 #define AIDGE_CORE_OPERATOR_GATHER_H_
 
-#include <cassert>
+#include <cstdint>  // std::int64_t
 #include <memory>
+#include <string>
 #include <vector>
 
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
@@ -59,8 +57,8 @@ public:
           Attributes_(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(Gather_Op, *this, op.mOutputs[0]->getImpl()->backend());
-        }else{
+            SET_IMPL_MACRO(Gather_Op, *this, op.backend());
+        } else {
             mImpl = nullptr;
         }
     }
@@ -75,10 +73,7 @@ public:
 
     void computeOutputDims() override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Gather_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 20b0cdc4aa8a42043c37851ef110427a561e5e1d..e7d60285b4d45826f1d73635d54f4532b4fb1598 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -15,8 +15,6 @@
 #include <memory>
 #include <vector>
 #include <string>
-#include <cassert>
-#include <cstring>
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
@@ -38,8 +36,8 @@ private:
 public:
     GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
         : OperatorTensor(type, nbData, nbParam, nbOut)
-    {        
-        mImpl = std::make_shared<OperatorImpl>(*this);
+    {
+        mImpl = std::make_shared<OperatorImpl>(*this, "");
     }
 
     /**
@@ -49,9 +47,11 @@ public:
     GenericOperator_Op(const GenericOperator_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = std::make_shared<OperatorImpl>(*this);
+        mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
     }
 
+    ~GenericOperator_Op() = default;
+
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::GenericOperator_Op
@@ -60,50 +60,20 @@ public:
         return std::make_shared<GenericOperator_Op>(*this);
     }
 
+public:
+    void computeOutputDims() override final;
+
+    bool outputDimsForwarded() const override final;
+
+    void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); }
+    void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); }
+
     // Helper functions that can be used with setComputeOutputDims():
     static const ComputeDimsFunc Identity;
     static const ComputeDimsFunc InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs);
-
     inline void setComputeOutputDims(ComputeDimsFunc func) {
         mComputeOutputDims = func;
     }
-
-
-    void computeOutputDims() override final {
-        if (mComputeOutputDims) {
-            std::vector<std::vector<size_t>> inputsDims(nbInputs(), std::vector<size_t>());
-            for (std::size_t i = 0; i < nbInputs(); ++i) {
-                if (getInput(i)) {
-                    inputsDims[i] = getInput(i)->dims();
-                }
-            }
-
-            const auto& outputsDims = mComputeOutputDims(inputsDims);
-            assert(outputsDims.size() == nbOutputs() && "The provided ComputeDimsFunc function returns the wrong number of outputs");
-            for (std::size_t i = 0; i < nbOutputs(); ++i) {
-                mOutputs[i]->resize(outputsDims[i]);
-            }
-        }
-        else {
-            assert(false && "Cannot compute output dim of a GenericOperator");
-        }
-    }
-
-    bool outputDimsForwarded() const override final {
-        if (mComputeOutputDims) {
-            return !(mOutputs[0]->empty());
-        }
-        else {
-            assert(false && "GenericOperator cannot forward dims");
-            return false;
-        }
-    }
-
-
-    ~GenericOperator_Op() = default;
-
-    void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); }
-    void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); }
 };
 
 /**
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index c2e6eaff77971c3dcf350a02bc5089d08b5c8488..27432bc5bb251003e9e93261593e12c2fa704f3d 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -40,9 +40,9 @@ public:
     static const std::string Type;
 
     Identity_Op()
-            : OperatorTensor(Type, 1, 0, 1)
+        : OperatorTensor(Type, 1, 0, 1)
     {
-        mImpl = std::make_shared<OperatorImpl>(*this);
+        mImpl = std::make_shared<OperatorImpl>(*this, "");
     }
 
     /**
@@ -52,7 +52,7 @@ public:
     Identity_Op(const Identity_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = std::make_shared<OperatorImpl>(*this);
+        mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
     }
 
     /**
@@ -65,11 +65,16 @@ public:
 
     void computeOutputDims() override final {} // Do nothing
 
+    /**
+     * @brief Check if output dimensions have been computed.
+     * @note Since Identity has no output Tensor, this function checks if its
+     * only input's dimensions have been computed.
+     *
+     * @return true Input has dimensions.
+     * @return false Input has no dimensions or is a nullptr.
+     */
     bool outputDimsForwarded() const override final {
-        if (mInputs[0])
-            return !mInputs[0]->empty();
-        else
-            return false;
+        return mInputs[0] ? !mInputs[0]->empty() : false;
     }
 
 
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index c48b85b4a7af71fde0f8136732597e098c966839..83a7c30fce7e0f68576f367d4b0bfe48edf4b3b6 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -55,8 +55,8 @@ public:
           Attributes_(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(LeakyReLU_Op, *this, op.mOutputs[0]->getImpl()->backend());
-        }else{
+            SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
+        } else {
             mImpl = nullptr;
         }
     }
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 596aa634693941d8e3a23ac955281cfd131e56ef..43bd8b1654206df15cd869cf2d37a216fcc4a733 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -17,7 +17,6 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -39,7 +38,11 @@ public:
      */
     MatMul_Op(const MatMul_Op& op) : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(MatMul_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -64,10 +67,7 @@ public:
     void computeOutputDims() override final;
 
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final {
-        SET_IMPL_MACRO(MatMul_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input1", "data_input2"};
@@ -82,4 +82,4 @@ inline std::shared_ptr<Node> MatMul(const std::string& name = "") {
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */
+#endif /* AIDGE_CORE_OPERATOR_MATMUL_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 06ac30158f80a946b9310a93c8f81cc3ee975c84..5b09aa02cd0665172a9ae69549d8d9311e10d024 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -13,16 +13,20 @@
 #define AIDGE_CORE_OPERATOR_MAXPOOLING_H_
 
 #include <array>
-#include <numeric>
+#include <cmath>       // std::ceil, std::floor
+#include <cstddef>     // std::size_t
+#include <functional>
+#include <memory>
+#include <stdexcept>   // std::runtime_error
 #include <vector>
-#include <cmath>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -64,9 +68,9 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
-            SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
-        }else{
+        if (op.mImpl) {
+            SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
+        } else {
             mImpl = nullptr;
         }
     }
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 8991ccb44eb4926f375ff102858f4683e1bea4d8..73433aaca51d07fc3f01682e47cc19433c5c86bf 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -54,7 +54,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Memorize_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Memorize_Op>::create(op.backend())(*this) : nullptr;
         mOutputs[1] = mOutputs[0];
     }
 
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 7f36eca2c4586f61f72e0d842d2d576450cd1596..4d719b6cb755bb2ddff96905f2e5b6bc24844e37 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -12,10 +12,18 @@
 #ifndef AIDGE_CORE_OPERATOR_METAOPERATOR_H_
 #define AIDGE_CORE_OPERATOR_METAOPERATOR_H_
 
-#include "aidge/operator/OperatorTensor.hpp"
+#include <array>
+#include <memory>
+#include <string>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
 
 namespace Aidge {
 class MetaOperator_Op : public OperatorTensor,
@@ -28,7 +36,7 @@ public:
     std::weak_ptr<Node> mUpperNode;
 
    public:
-    MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph);
+    MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 75304078829475b1488640dc39aeee8b64f1c3e5..cc9fba59431356a132330e453288f2f6e7141178 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -19,7 +19,6 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
@@ -43,7 +42,11 @@ public:
     Mul_Op(const Mul_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Mul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Mul_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -56,10 +59,7 @@ public:
 
     void computeOutputDims() override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Mul_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_1", "data_input_2"};
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 396c60e46127ee9312745a92f9112dbc0742a584..17c8204c1fec4a54e8194bf2db1dc6e5a616fd23 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -81,7 +81,7 @@ public:
     virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
 
     /**
-     * @brief Set the specified input by performing a deep copy of the given data.
+     * @brief Set the specified input value by performing a deep copy of the given data.
      * The pointer itself is not changed, thus keeping the current connections.
      * @param inputIdx Index of the input to set.
      * @param data Data to copy.
@@ -90,7 +90,7 @@ public:
     virtual void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) = 0;
     virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0;
         /**
-     * @brief Set the specified output by performing a deep copy of the given data.
+     * @brief Set the specified output value by performing a deep copy of the given data.
      * The pointer itself is not changed, thus keeping the current connections.
      * @param inputIdx Index of the input to set.
      */
@@ -110,6 +110,9 @@ public:
 ///////////////////////////////////////////////////////
 //        IMPLEMENTATION
 ///////////////////////////////////////////////////////
+    std::string backend() const noexcept {
+        return mImpl ? mImpl->backend() : "";
+    }
 
     virtual void setBackend(const std::string& name, DeviceIdx_t device = 0) = 0;
     virtual void setDataType(const DataType& dataType) const = 0;
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 504a416488651d43126a60981cd8afe0f95821f2..adf45c2d8311112fa145097ee98f46d120bd41ff 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -17,12 +17,12 @@
 #include <vector>
 
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 
+class Tensor;
 class OperatorTensor : public Operator {
     /* TODO: Add an attribute specifying the type of Data used by the Operator.
      * The same way ``Type`` attribute specifies the type of Operator. Hence this
@@ -41,26 +41,9 @@ public:
     OperatorTensor() = delete;
 
     OperatorTensor(const std::string& type, const IOIndex_t nbData, const IOIndex_t nbParam,
-                   const IOIndex_t nbOut)
-        : Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor),
-          mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)),
-          mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) {
-        for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) {
-            mOutputs[i] = std::make_shared<Tensor>();
-            mOutputs[i]->setDataType(DataType::Float32);
-        }
-    }
+                   const IOIndex_t nbOut);
 
-    OperatorTensor(const OperatorTensor& other)
-        : Operator(other),
-          mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)),
-          mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) {
-        for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
-            mOutputs[i] = std::make_shared<Tensor>();
-            // mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i)));
-            // datatype already copied
-        }
-    }
+    OperatorTensor(const OperatorTensor& other);
 
     ~OperatorTensor();
 
@@ -76,17 +59,13 @@ public:
     void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
     void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final;
     const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const;
-    inline std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        return std::static_pointer_cast<Data>(getInput(inputIdx));
-    }
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final;
 
     // output management
     void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override;
     void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override;
     virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const;
-    inline std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final {
-        return std::static_pointer_cast<Data>(getOutput(outputIdx));
-    }
+    std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final;
     ///////////////////////////////////////////////////
 
     ///////////////////////////////////////////////////
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index cb4ba871a55b9dfd1c835c05949c3c18966b7f5a..9109ccaeb8bc648fe74510216fad93299740b9bf 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -12,17 +12,16 @@
 #ifndef AIDGE_CORE_OPERATOR_POP_H_
 #define AIDGE_CORE_OPERATOR_POP_H_
 
-#include <cassert>
 #include <memory>
+#include <string>
 #include <vector>
 
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/utils/Types.h"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
 
 namespace Aidge {
 enum class PopAttr { ForwardStep };
@@ -40,9 +39,7 @@ public:
     Pop_Op()
         : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<PopAttr::ForwardStep>(0))
-    {
-        
-    }
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -52,7 +49,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Pop_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Pop_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -63,10 +64,7 @@ public:
         return std::make_shared<Pop_Op>(*this);
     }
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Pop_Op>::create({name})(*this);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
     void computeOutputDims() override final;
     void updateConsummerProducer() override;
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index ec4eebf9ddba475310ba292dd5923ba50933545d..aadbf92c4ba02aa69665a9994afb93fa5461a402 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -19,8 +19,6 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
@@ -41,7 +39,7 @@ public:
         : OperatorTensor(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(Pow_Op, *this, op.mOutputs[0]->getImpl()->backend());
+            SET_IMPL_MACRO(Pow_Op, *this, op.backend());
         }else{
             mImpl = nullptr;
         }
@@ -58,15 +56,12 @@ public:
     void computeOutputDims() override final;
 
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Pow_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
-    static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName() {
         return {"data_input_1", "data_input_2"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index c9b1f6e4aa5d82006d4bed880151ac1a22a4882b..66c66d90b4ed465d31ed20dd41245fed7a71d58e 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -12,7 +12,9 @@
 #ifndef AIDGE_CORE_OPERATOR_PRODUCER_H_
 #define AIDGE_CORE_OPERATOR_PRODUCER_H_
 
+#include <cstddef>
 #include <array>
+#include <memory>
 #include <vector>
 
 #include "aidge/utils/Types.h"
@@ -42,41 +44,40 @@ public:
     Producer_Op(const std::array<DimSize_t, DIM>& dims,
                 bool constant = false)
         : OperatorTensor(Type, 0, 0, 1),
-        Attributes_(attr<ProdAttr::Constant>(constant))
+          Attributes_(attr<ProdAttr::Constant>(constant))
     {
         mOutputs[0]->resize(dims);
-        mImpl = std::make_shared<OperatorImpl>(*this);
+        // mImpl = std::make_shared<OperatorImpl>(*this, "");
+        mImpl = nullptr;
     }
 
-    Producer_Op(const std::shared_ptr<Tensor> tensor, bool constant = false)
-        : OperatorTensor(Type, 0, 0, 1),
-        Attributes_(attr<ProdAttr::Constant>(constant))
-    {
-        mOutputs[0] = tensor; // copy the pointer of the Tensor
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+    /**
+     * @brief Construct a new Producer_Op object from a Tensor.
+     *
+     * @param tensor Tensor to set in the Producer.
+     * @param constant Whether the Producer should be considered constant.
+     */
+    Producer_Op(const std::shared_ptr<Tensor> tensor, bool constant = false);
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
      * @param op OperatorTensor to copy.
      */
-    Producer_Op(const Producer_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
-            mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i)));
-        }
-        if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
-            SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
-        }else{
-            mImpl = std::make_shared<OperatorImpl>(*this);
-        }
-    }
+    Producer_Op(const Producer_Op& op);
+
+public:
+    /**
+     * @brief Conversion operator from Producer to Tensor.
+     *
+     * @return std::shared_ptr<Tensor>
+     */
+    operator std::shared_ptr<Tensor>() const { return mOutputs[0]; }
 
+public:
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Producer_Op
+     * @see Operator::Producer_Op(const Producer_Op&)
      */
     std::shared_ptr<Operator> clone() const override {
         return std::make_shared<Producer_Op>(*this);
@@ -86,17 +87,14 @@ public:
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
     }
 
-    void computeOutputDims() override final {}
+    void computeOutputDims() noexcept override final {}
 
-    bool outputDimsForwarded() const override final {return true;}
+    inline bool outputDimsForwarded() const noexcept override final { return true; }
 
 
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Producer_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
         return {};
@@ -105,7 +103,6 @@ public:
         return {"data_output"};
     }
 
-public:
     void forward() override final {
         fmt::print("Basic Producer forward() function.\n");
     }
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 5b8f5c4b819f9a2f8cf518bdc50c445fbce38102..963de31c49f48784e92434b2b563d6c008e2d4fd 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -16,11 +16,11 @@
 #include <memory>
 #include <vector>
 
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -40,7 +40,7 @@ public:
         : OperatorTensor(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(ReLU_Op, *this, op.mOutputs[0]->getImpl()->backend());
+            SET_IMPL_MACRO(ReLU_Op, *this, op.backend());
         }else{
             mImpl = nullptr;
         }
@@ -55,10 +55,7 @@ public:
     }
 
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(ReLU_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 09f1d58359b265af58fd78ef8de54dd1944b5cf1..609f5be5f28efee8ebd266b6756b5bf172e45bd5 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -12,17 +12,18 @@
 #ifndef AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
 #define AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
 
-#include <algorithm>  // std::for_each
+#include <algorithm>  // std::for_each, std::sort
 #include <array>
-#include <cmath>
 #include <cstdint>    // std::int32_t
-#include <numeric>
+#include <memory>
+#include <stdexcept>  // std::runtime_error
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -58,7 +59,7 @@ class ReduceMean_Op : public OperatorTensor,
           Attributes_(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+            SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, op.backend());
         }else{
             mImpl = nullptr;
         }
@@ -95,10 +96,8 @@ class ReduceMean_Op : public OperatorTensor,
                     outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
             }
 
-            if(outDims.size()>0)
-                mOutputs[0]->resize(outDims);
-            else
-                mOutputs[0]->resize({1});
+            mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
+
         }
     }
 
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 8914bbc9a9f3748276ead32aba8cb023ba14b1b7..060029bb87ea142728056b3817b8162d566cb458 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -12,7 +12,6 @@
 #ifndef AIDGE_CORE_OPERATOR_RESHAPE_H_
 #define AIDGE_CORE_OPERATOR_RESHAPE_H_
 
-#include <cassert>
 #include <memory>
 #include <vector>
 
@@ -54,8 +53,8 @@ public:
           Attributes_(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(Reshape_Op, *this, op.mOutputs[0]->getImpl()->backend());
-        }else{
+            SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
+        } else {
             mImpl = nullptr;
         }
     }
@@ -70,10 +69,7 @@ public:
 
     void computeOutputDims() override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Reshape_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 29ce0527a9b8b15c7b45c0b0241a83957abb5565..8f54ab217631ac69a4e16555f8e58f550ab0156c 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -9,18 +9,17 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_Scaling_H__
-#define __AIDGE_CORE_OPERATOR_Scaling_H__
+#ifndef AIDGE_CORE_OPERATOR_SCALING_H_
+#define AIDGE_CORE_OPERATOR_SCALING_H_
 
 #include <vector>
 #include <memory>
 
-#include "aidge/utils/StaticAttributes.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -56,7 +55,7 @@ public:
           Attributes_(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(Scaling_Op, *this, op.mOutputs[0]->getImpl()->backend());
+            SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
         } else {
             mImpl = nullptr;
         }
@@ -70,10 +69,7 @@ public:
         return std::make_shared<Scaling_Op>(*this);
     }
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Scaling_Op>::create(name)(*this);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
@@ -99,4 +95,4 @@ const char* const EnumStrings<Aidge::ScalingAttr>::data[]
     = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
+#endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index ab97bf3211edb53d65a90d16dba5d0c66dfa33da..bea9fc45eaa7f17f71963106b5bd3e1340a48a92 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -39,7 +39,11 @@ public:
     Sigmoid_Op(const Sigmoid_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Sigmoid_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -51,10 +55,7 @@ public:
     }
 
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Sigmoid_Op>::create(name)(*this);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 363c3c2b4ec397fdd62dc3260b63a0cd6d6c0081..f68aa17f480038d8ff7850577c438cfdc6704d59 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -56,7 +56,7 @@ public:
           Attributes_(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(Slice_Op, *this, op.mOutputs[0]->getImpl()->backend());
+            SET_IMPL_MACRO(Slice_Op, *this, op.backend());
         }else{
             mImpl = nullptr;
         }
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 943f69a588ebfedf28ec5ebb3a782e7510fa710a..d48dbc2b60e46eb5c074b8adae065383e29b1769 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -12,14 +12,10 @@
 #ifndef AIDGE_CORE_OPERATOR_SOFTMAX_H_
 #define AIDGE_CORE_OPERATOR_SOFTMAX_H_
 
-#include <cassert>
 #include <memory>
 #include <vector>
 
-
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
@@ -56,7 +52,7 @@ public:
           Attributes_(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(Softmax_Op, *this, op.mOutputs[0]->getImpl()->backend());
+            SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
         }else{
             mImpl = nullptr;
         }
@@ -70,10 +66,7 @@ public:
         return std::make_shared<Softmax_Op>(*this);
     }
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Softmax_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index dd3fa541b9fd5177ddd3b9e8bcd781c0ea3a1867..f5ffa431192d73a703c1ce973cb485dadb31420d 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -12,16 +12,13 @@
 #ifndef AIDGE_CORE_OPERATOR_SQRT_H_
 #define AIDGE_CORE_OPERATOR_SQRT_H_
 
-#include <cassert>
 #include <memory>
 #include <vector>
 
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -46,7 +43,7 @@ public:
         : OperatorTensor(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(Sqrt_Op, *this, op.mOutputs[0]->getImpl()->backend());
+            SET_IMPL_MACRO(Sqrt_Op, *this, op.backend());
         }else{
             mImpl = nullptr;
         }
@@ -60,10 +57,7 @@ public:
         return std::make_shared<Sqrt_Op>(*this);
     }
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Sqrt_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 5683a9be5ea2278d92fe7da081f0c4a80ff9500d..fbcebcc9f62c23e9c60b5dff6f0d41c10d8b8717 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -12,16 +12,13 @@
 #ifndef AIDGE_CORE_OPERATOR_SUB_H_
 #define AIDGE_CORE_OPERATOR_SUB_H_
 
-#include <cassert>
 #include <memory>
 #include <vector>
 
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -46,8 +43,8 @@ public:
         : OperatorTensor(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(Sub_Op, *this, op.mOutputs[0]->getImpl()->backend());
-        }else{
+            SET_IMPL_MACRO(Sub_Op, *this, op.backend());
+        } else {
             mImpl = nullptr;
         }
     }
@@ -63,10 +60,7 @@ public:
     void computeOutputDims() override final;
 
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Sub_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_1", "data_input_2"};
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index ce0dc12a06d242d215c07dc6593bb7e2cb2c3c8a..3fd5377d30cfff864743dcab2da9e690e26e5263 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -12,15 +12,13 @@
 #ifndef AIDGE_CORE_OPERATOR_TANH_H_
 #define AIDGE_CORE_OPERATOR_TANH_H_
 
-#include <cassert>
 #include <memory>
 #include <vector>
 
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -39,7 +37,11 @@ public:
     Tanh_Op(const Tanh_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Tanh_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Tanh_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -51,10 +53,7 @@ public:
     }
 
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Tanh_Op>::create(name)(*this);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index b040fc907dd5ac1f40a8a1885d27364785ba9188..1beb5781b9262669cd2acb6ce4ef3aae85843573 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -57,7 +57,7 @@ class Transpose_Op : public OperatorTensor,
           Attributes_(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(Transpose_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+            SET_IMPL_MACRO(Transpose_Op<DIM>, *this, op.backend());
         }else{
             mImpl = nullptr;
         }
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index a54302d06059d43336800d81e4d18744b6243785..85bc4b7aef53e8064a8f31815a42689013880812 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -14,12 +14,24 @@
 #include <string>
 #include <vector>
 
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
 
 const std::string Aidge::Add_Op::Type = "Add";
 
+Aidge::Add_Op::Add_Op(const Add_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Add_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
 void Aidge::Add_Op::computeOutputDims() {
     // check inputs have been associated
     bool associated = (nbInputs() > 0); // do not compute anything if no input
@@ -59,3 +71,8 @@ void Aidge::Add_Op::computeOutputDims() {
         mOutputs[0]->resize(outDims);
     }
 }
+
+void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(Add_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index f09d8eb83c6a6dae6416ffebcc01b22fb479a862..7dfb4d3bf6bf6dbb99e288a486df2ed56b904470 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -24,3 +24,8 @@ void Aidge::Cast_Op::forward() {
 
     runHooks();
 }
+
+void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Cast_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index eafcd126480df6da2c0127bdbb896d3ce98d0e0a..7df5b6dbf6122da44aed280da0d717232ba42fef 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -9,8 +9,49 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/Concat.hpp"
+
 #include <string>
+#include <vector>
 
-#include "aidge/operator/Concat.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Concat_Op::Type = "Concat";
+
+void Aidge::Concat_Op::computeOutputDims() {
+    // Every input is non-empty with the same number of dimensions
+    if (getInput(0) == nullptr) { AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type()); }
+    bool associated = !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input
+    auto outputDims =  getInput(0)->dims();
+    const auto firstInputNbDims = getInput(0) -> nbDims();
+    for (IOIndex_t i = 1; i < nbInputs(); ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        }
+
+        if (getInput(i)->nbDims() == firstInputNbDims) {
+            for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
+                if (dim == getAttr<ConcatAttr::Axis>()) {
+                    outputDims[dim] += getInput(i)->dims()[dim];
+                }
+                else {
+                    associated &= (getInput(i)->dims()[dim] == outputDims[dim]);
+                }
+            }
+        }
+        else {
+            associated = false;
+            break;
+        }
+    }
+    if (associated) {
+        getOutput(0)->resize(outputDims);
+    }
+}
 
-const std::string Aidge::Concat_Op::Type = "Concat";
\ No newline at end of file
+void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(Concat_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 6b55338f4ab7ac9131231fcced21869274c1bd47..5ffe5f08dbcbfe42c406846990c432a7fbd325e0 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -14,6 +14,7 @@
 #include <string>
 #include <vector>
 
+#include "aidge/data/Tensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Div.hpp"
 #include "aidge/utils/Types.h"
@@ -50,4 +51,10 @@ void Aidge::Div_Op::computeOutputDims() {
         }
         mOutputs[0]->resize(outDims);
     }
-}
\ No newline at end of file
+}
+
+
+void Aidge::Div_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Div_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp
index 387af4edf417f8c7ac6ee9b8b2b7069179ad59cb..81c87f10b10210c2af203a05df53e3330bb33b72 100644
--- a/src/operator/Erf.cpp
+++ b/src/operator/Erf.cpp
@@ -9,8 +9,17 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/Erf.hpp"
+
 #include <string>
 
-#include "aidge/operator/Erf.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Erf_Op::Type = "Erf";
 
-const std::string Aidge::Erf_Op::Type = "Erf";
\ No newline at end of file
+void Aidge::Erf_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Erf_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 32114f5bf9e0d160db9fdc2d1971481be0b4e703..9865d64f6a0b87be96244bc4b39c91b605f02b6f 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -9,8 +9,52 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/FC.hpp"
+
+#include <memory>
 #include <string>
+#include <vector>
 
-#include "aidge/operator/FC.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::FC_Op::Type = "FC";
+
+void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
+    AIDGE_ASSERT(inputIdx < 3, "Operators {} supports only {} inputs", type(), nbInputs());
+    AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
+    // TODO: FIXME: check this, because data dims may not be initialized at this point...
+    //if (inputIdx == 2) {
+    //    assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0));
+    //    assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
+    //}
+    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    if (inputIdx == 0 && getInput(0)->nbDims() == 1)
+        mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
+}
+
+void Aidge::FC_Op::computeOutputDims() {
+    bool associated = true;
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        }
+        associated &= !(getInput(i)->empty());
+    }
+    if (associated) {
+        // <batch, OutChannels>
+        mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()});
+    }
+}
+
+void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(FC_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
 
-const std::string Aidge::FC_Op::Type = "FC";
\ No newline at end of file
+    // By default, automatically set backend for weight and bias inputs
+    getInput(1)->setBackend(name, device);
+    getInput(2)->setBackend(name, device);
+}
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index b5f9d738a0280b3bacdb2ce201c8303b2b4d0a1f..259e6513994970eb7e677f44c981888388825fae 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -9,15 +9,18 @@
  *
  ********************************************************************************/
 
-#include <cstddef>
-#include <cstdint>
+#include "aidge/operator/Gather.hpp"
+
+#include <cstddef>  // std::size_t
+#include <cstdint>  // std::int64_t
 #include <string>
 #include <vector>
 
-#include "aidge/operator/Gather.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
+
 const std::string Aidge::Gather_Op::Type = "Gather";
 
 void Aidge::Gather_Op::computeOutputDims() {
@@ -44,4 +47,9 @@ void Aidge::Gather_Op::computeOutputDims() {
 
         mOutputs[0]->resize(outDims);
     }
-}
\ No newline at end of file
+}
+
+void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Gather_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index 5556f4ff5c87d1adc23f5bff1aaf90c230de06cc..3eae49b69ce639529d49dd1c0d241f12ece5d98b 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -9,13 +9,48 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/GenericOperator.hpp"
+
+#include <cstddef>  // std::size_t
 #include <vector>
 
-#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Identity
-    = [](const std::vector<std::vector<size_t>>& inputsDims) { return inputsDims; };
+    = [](const std::vector<std::vector<std::size_t>>& inputsDims) { return inputsDims; };
 
 const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs) {
-    return [nbOutputs, inputIdx](const std::vector<std::vector<size_t>>& inputsDims) { return std::vector<std::vector<size_t>>(nbOutputs, inputsDims[inputIdx]); };
+    return [nbOutputs, inputIdx](const std::vector<std::vector<std::size_t>>& inputsDims) { return std::vector<std::vector<std::size_t>>(nbOutputs, inputsDims[inputIdx]); };
 }
+
+void Aidge::GenericOperator_Op::computeOutputDims() {
+    if (mComputeOutputDims) {
+        std::vector<std::vector<std::size_t>> inputsDims(nbInputs(), std::vector<std::size_t>());
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            if (getInput(i)) {
+                inputsDims[i] = getInput(i)->dims();
+            }
+        }
+
+        const auto& outputsDims = mComputeOutputDims(inputsDims);
+        AIDGE_ASSERT((outputsDims.size() == nbOutputs()), "The provided ComputeDimsFunc function returns the wrong number of outputs");
+        for (std::size_t i = 0; i < nbOutputs(); ++i) {
+            mOutputs[i]->resize(outputsDims[i]);
+        }
+    }
+    else {
+        AIDGE_ASSERT(false, "Cannot compute output dim of a GenericOperator");
+    }
+}
+
+bool Aidge::GenericOperator_Op::outputDimsForwarded() const {
+    if (mComputeOutputDims) {
+        return !(mOutputs[0]->empty());
+    }
+    else {
+        AIDGE_ASSERT(false, "GenericOperator cannot forward dims");
+        return false;
+    }
+}
\ No newline at end of file
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index f48c7ca81d6abd1d5150f54eb7d98bf109307d33..56899875338d487294163aa018e0d98b5f7a5269 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -13,6 +13,7 @@
 #include <string>
 #include <vector>
 
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
@@ -70,3 +71,8 @@ void Aidge::MatMul_Op::computeOutputDims() {
         mOutputs[0]->resize(outDims);
     }
 }
+
+void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(MatMul_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 883185021b395b42e5c47ef0461ebc0614f14456..45e7556265d1af4e95e50be4cf60e8067ded332f 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -10,9 +10,16 @@
  ********************************************************************************/
 
 #include "aidge/operator/MetaOperator.hpp"
+
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
-Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph)
+Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph)
     : OperatorTensor(type, graph->dataInputs().size(), (graph->getOrderedInputs().size() - graph->dataInputs().size()), graph->getOrderedOutputs().size()),
         mGraph(graph)
 {
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index d4a594e95b2695b496fc28b8e8a7fcf3442e9253..89bef9e0edcf6731dfbaf9ebf48ebddf5b71e815 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -10,14 +10,16 @@
  ********************************************************************************/
 
 #include <cstddef>    // std::size_t
+#include <memory>
 #include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
 
 #include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Mul.hpp"
-#include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
 
 const std::string Aidge::Mul_Op::Type = "Mul";
 
@@ -53,4 +55,9 @@ void Aidge::Mul_Op::computeOutputDims() {
     else if (!getInput(0)->empty() && !getInput(1)->empty()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible input dimensions for Operator Mul: {} and {}", getInput(0)->dims(), getInput(1)->dims());
     }
-}
\ No newline at end of file
+}
+
+void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Mul_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index c0ada265410f9bc46aab3b43fae270f1e74dd5eb..33f93d8e677ed93266944c4918226a8e092fb674 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -19,6 +19,32 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 
+Aidge::OperatorTensor::OperatorTensor(const std::string& type,
+                                                            const IOIndex_t nbData,
+                                                            const IOIndex_t nbParam,
+                                                            const IOIndex_t nbOut)
+: Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor),
+        mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)),
+        mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) {
+    for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) {
+        mOutputs[i] = std::make_shared<Tensor>();
+        mOutputs[i]->setDataType(DataType::Float32);
+    }
+}
+
+
+Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other)
+    : Operator(other),
+        mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)),
+        mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) {
+    for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
+        mOutputs[i] = std::make_shared<Tensor>();
+        // mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i)));
+        // datatype already copied
+    }
+}
+
+
 void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs());
     AIDGE_ASSERT(data->type() == Tensor::Type, "Input data must be of Tensor type");
@@ -45,6 +71,9 @@ void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, std::share
     }
 }
 
+std::shared_ptr<Aidge::Data> Aidge::OperatorTensor::getRawInput(const Aidge::IOIndex_t inputIdx) const {
+    return std::static_pointer_cast<Data>(getInput(inputIdx));
+}
 const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs());
     return mInputs[inputIdx];
@@ -53,13 +82,23 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidg
 void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
     AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs());
-    *mOutputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data);
+    const auto& data_tensor = std::dynamic_pointer_cast<Tensor>(data);
+    // if (mImpl)
+    //     AIDGE_ASSERT(data_tensor->getImpl()->backend() == backend(), "Data parameter and Operator have different backends: {} and {}", data_tensor->getImpl()->backend(), backend());
+    *mOutputs[outputIdx] = *data_tensor;
 }
 
 void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) {
     AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
     AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs());
-    *mOutputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
+    auto&& data_tensor = std::move(std::dynamic_pointer_cast<Tensor>(data));
+    // if (mImpl)
+    //     AIDGE_ASSERT(data_tensor->getImpl()->backend() == backend(), "Data parameter and Operator have different backends: {} and {}", data_tensor->getImpl()->backend(), backend());
+    *mOutputs[outputIdx] = std::move(*data_tensor);
+}
+
+std::shared_ptr<Aidge::Data> Aidge::OperatorTensor::getRawOutput(const Aidge::IOIndex_t outputIdx) const {
+    return std::static_pointer_cast<Data>(getOutput(outputIdx));
 }
 
 const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aidge::IOIndex_t outputIdx) const {
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 3dd65eb4d34266f6e419bdc86362b8da4a55fdf0..06999e301ce0968b2d9979e47f412c02e59de3ad 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -9,9 +9,17 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/Pop.hpp"
+
+#include <memory>
 #include <string>
 
-#include "aidge/operator/Pop.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
 
 const std::string Aidge::Pop_Op::Type = "Pop";
 
@@ -36,3 +44,8 @@ void Aidge::Pop_Op::forward() {
     Operator::forward();
     ++this->template getAttr<PopAttr::ForwardStep>();
 }
+
+void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Pop_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 5e29eae0c0f42e7d566a933e9409766026369dad..6b16117d6387c5de4f0d81e20b89568dde97a5b2 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -50,4 +50,9 @@ void Aidge::Pow_Op::computeOutputDims() {
         }
         mOutputs[0]->resize(outDims);
     }
+}
+
+void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Pow_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
 }
\ No newline at end of file
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 7bccbe763b90f2697997a889b30b610e4b531334..4a63b207ca417df83f50c0b94ea988ea8739048e 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -9,8 +9,61 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/Producer.hpp"
+
+#include <cstddef>
+#include <array>
+#include <memory>
 #include <string>
 
-#include "aidge/operator/Producer.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
 
 const std::string Aidge::Producer_Op::Type = "Producer";
+
+
+Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant)
+    : OperatorTensor(Type, 0, 0, 1),
+      Attributes_(attr<ProdAttr::Constant>(constant))
+{
+    mOutputs[0] = tensor; // copy the pointer of the Tensor
+    mImpl = (tensor->hasImpl()) ?
+                std::make_shared<OperatorImpl>(*this, tensor->getImpl()->backend()) :
+                nullptr;
+}
+
+/**
+ * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+ * but not its input tensors (the new operator has no input associated).
+ * @param op OperatorTensor to copy.
+ */
+Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
+    : OperatorTensor(op),
+      Attributes_(op)
+{
+    mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0)));
+    if (mOutputs[0]->hasImpl()) {
+        if (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
+            setImpl(Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this));
+        }
+        else  {
+            mImpl = std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend());
+        }
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Producer_Op>::exists(name)) {
+        setImpl(Registrar<Producer_Op>::create(name)(*this));
+    } else {
+        mImpl = std::make_shared<OperatorImpl>(*this, name);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file
diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp
index 0f7874acfe7d865ea8c56d4bca02b51864480df6..7b945a7d62ab0ef7f73a25f6f74430e725d17b48 100644
--- a/src/operator/ReLU.cpp
+++ b/src/operator/ReLU.cpp
@@ -9,8 +9,17 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/ReLU.hpp"
+
+#include <memory>
 #include <string>
 
-#include "aidge/operator/ReLU.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ReLU_Op::Type = "ReLU";
 
-const std::string Aidge::ReLU_Op::Type = "ReLU";
\ No newline at end of file
+void Aidge::ReLU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(ReLU_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 30b060cd2a58d7995a7447bd9b85b9bc0026a7f7..79cfc0659849248bac791ba5b1db25096824e928 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -9,14 +9,18 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/Reshape.hpp"
+
 #include <cstddef>    // std::size_t
 #include <cstdint>    // std::int64_t
+#include <memory>
 #include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
 
-#include "aidge/operator/Reshape.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 const std::string Aidge::Reshape_Op::Type = "Reshape";
@@ -55,4 +59,9 @@ void Aidge::Reshape_Op::computeOutputDims() {
 
         mOutputs[0]->resize(outDims);
     }
+}
+
+void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Reshape_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
 }
\ No newline at end of file
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
index 4c121e1268c1e1a62f793f38c6d816e7c6b48c25..8b0d6f9db698e36d232dec38fd8cdd0fad5f8c59 100644
--- a/src/operator/Scaling.cpp
+++ b/src/operator/Scaling.cpp
@@ -9,8 +9,18 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/Scaling.hpp"
+
+#include <memory>
 #include <string>
 
-#include "aidge/operator/Scaling.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Scaling_Op::Type = "Scaling";
 
-const std::string Aidge::Scaling_Op::Type = "Scaling";
\ No newline at end of file
+void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    mImpl = Registrar<Scaling_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file
diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp
index 48ed5f8286712c94bcf87f3234e70080652ab141..a6edcf823695f95253d6c56e45975480909679d3 100644
--- a/src/operator/Sigmoid.cpp
+++ b/src/operator/Sigmoid.cpp
@@ -9,8 +9,18 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/Sigmoid.hpp"
+
+#include <memory>
 #include <string>
 
-#include "aidge/operator/Sigmoid.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Sigmoid_Op::Type = "Sigmoid";
 
-const std::string Aidge::Sigmoid_Op::Type = "Sigmoid";
\ No newline at end of file
+void Aidge::Sigmoid_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    mImpl = Registrar<Sigmoid_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file
diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp
index e88ff4bb4ec6e2cb1357d578c2d07cc4edcb59f7..612c61b0f66b97eb4630214538a22154a67b80d8 100644
--- a/src/operator/Softmax.cpp
+++ b/src/operator/Softmax.cpp
@@ -9,8 +9,18 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/Softmax.hpp"
+
+#include <memory>
 #include <string>
 
-#include "aidge/operator/Softmax.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Softmax_Op::Type = "Softmax";
 
-const std::string Aidge::Softmax_Op::Type = "Softmax";
\ No newline at end of file
+void Aidge::Softmax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    mImpl = Registrar<Softmax_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file
diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp
index dbcaba42619762f8fd00bb2f6e0aa0de11d92960..d8ac8b8b0bf28110bd52493d7833f64e9e80fc6a 100644
--- a/src/operator/Sqrt.cpp
+++ b/src/operator/Sqrt.cpp
@@ -9,8 +9,18 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/Sqrt.hpp"
+
+#include <memory>
 #include <string>
 
-#include "aidge/operator/Sqrt.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Sqrt_Op::Type = "Sqrt";
 
-const std::string Aidge::Sqrt_Op::Type = "Sqrt";
\ No newline at end of file
+void Aidge::Sqrt_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    mImpl = Registrar<Sqrt_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index 9d933bf6c97348842fae8f405d3e709e68d56916..0c12e6a1fdb7f3b1056e19bf694996d0061b5b04 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -9,15 +9,18 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/Sub.hpp"
+
 #include <cstddef>    // std::size_t
 #include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
 
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/Sub.hpp"
-#include "aidge/utils/Types.h"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
 
 const std::string Aidge::Sub_Op::Type = "Sub";
 
@@ -50,4 +53,9 @@ void Aidge::Sub_Op::computeOutputDims() {
         }
         mOutputs[0]->resize(outDims);
     }
-}
\ No newline at end of file
+}
+
+void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Sub_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp
index de55a6d6c69df5706b945ef9f56027f7a09ce8d7..c113ee6f2da52f40a66a8df04ca33ec4b85f3387 100644
--- a/src/operator/Tanh.cpp
+++ b/src/operator/Tanh.cpp
@@ -9,8 +9,18 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/Tanh.hpp"
+
+#include <memory>
 #include <string>
 
-#include "aidge/operator/Tanh.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Tanh_Op::Type = "Tanh";
 
-const std::string Aidge::Tanh_Op::Type = "Tanh";
\ No newline at end of file
+void Aidge::Tanh_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    mImpl = Registrar<Tanh_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file