From cbcd268cef29247d5fea09c66ea142bc8c17f02a Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Mon, 20 Nov 2023 10:57:05 +0000
Subject: [PATCH] Uniformize operators and apply new class OperatorTensor
 induced changes in every operator

- Change parent class from Operator to OperatorTensor
- Remove shared, non-custom functions from operators
- Uniformize operators behaviour:
    - inputs are set to nullptr at initialization by default
    - parameters whose size can be computed at initialization are computed then (FC, ConvDepthWise)
    - Many more checks in functions with AIDGE_THROW_OR_ABORT()
---
 include/aidge/operator/Add.hpp             |  96 ++-------------
 include/aidge/operator/AvgPooling.hpp      |  34 +++---
 include/aidge/operator/BatchNorm.hpp       | 113 +++++-------------
 include/aidge/operator/Concat.hpp          | 122 +++++--------------
 include/aidge/operator/Conv.hpp            |  86 +++++++-------
 include/aidge/operator/ConvDepthWise.hpp   | 127 +++++++-------------
 include/aidge/operator/Div.hpp             |  87 ++------------
 include/aidge/operator/FC.hpp              | 117 +++++--------------
 include/aidge/operator/GenericOperator.hpp | 108 +++--------------
 include/aidge/operator/LeakyReLU.hpp       |  86 ++------------
 include/aidge/operator/MatMul.hpp          | 102 ++++------------
 include/aidge/operator/MaxPooling.hpp      |  97 ++++------------
 include/aidge/operator/MetaOperator.hpp    |  60 ++--------
 include/aidge/operator/Mul.hpp             |  98 +++-------------
 include/aidge/operator/Pad.hpp             | 101 ++++------------
 include/aidge/operator/Pow.hpp             |  89 ++------------
 include/aidge/operator/Producer.hpp        |  76 +++---------
 include/aidge/operator/ReLU.hpp            |  82 ++-----------
 include/aidge/operator/Scaling.hpp         | 110 +++---------------
 include/aidge/operator/Slice.hpp           | 129 ++++++---------------
 include/aidge/operator/Softmax.hpp         |  80 ++-----------
 include/aidge/operator/Sqrt.hpp            |  75 ++----------
 include/aidge/operator/Sub.hpp             |  84 ++------------
 23 files changed, 445 insertions(+), 1714 deletions(-)

diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index b5e37f9bc..0c2854029 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -19,29 +19,25 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
 
-class Add_Op : public Operator,
+class Add_Op : public OperatorTensor,
     public Registrable<Add_Op, std::string, std::unique_ptr<OperatorImpl>(const Add_Op&)> {
-private:
-    // FIXME: change accessibility
-    std::vector<std::shared_ptr<Tensor>> mInputs;
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Add";
 
     Add_Op(const IOIndex_t nbIn)
-        : Operator(Type),
-          mInputs(std::vector<std::shared_ptr<Tensor>>(nbIn, std::make_shared<Tensor>()))
+        : OperatorTensor(Type, nbIn, 0, 1)
     {
-        assert(nbIn > 0 && "Add should have at least one input");
-        setDatatype(DataType::Float32);
+        if (nbIn == 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
+        }
     }
 
     /**
@@ -49,14 +45,9 @@ public:
      * @param op Operator to copy.
      */
     Add_Op(const Add_Op& op)
-        : Operator(Type),
-          mInputs(std::vector<std::shared_ptr<Tensor>>(op.nbInputs())),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        assert(op.nbInputs() > 0 && "Add should have at least one input");
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Add_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Add_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -76,88 +67,25 @@ public:
     //     return *in;
     // }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Add operator.");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            const auto expectedDims =  mInputs[0]->dims();
-            std::size_t nonEmptyInputTensor = 1;
-            for (; nonEmptyInputTensor < nbInputs() && (!mInputs[nonEmptyInputTensor]->empty()); ++nonEmptyInputTensor) {
-                assert(expectedDims == mInputs[nonEmptyInputTensor]->dims());
-            }
-            if (nonEmptyInputTensor == nbInputs()) {
-                mOutput->resize(expectedDims);
-            }
-        }
-    }
-
-    bool outputDimsForwarded() const override final {
-        std::size_t forwarded = 0;
-        for (; forwarded < nbInputs() && (!mInputs[forwarded]->empty()); ++forwarded) {}
-        return ((forwarded==nbInputs()) && !(mOutput->empty()));
-    }
 
     // void checkDims() const override final {
     //     assert(outputDimsForwarded());
     //     for (const auto& in : mInputs) {
-    //         assert(in->dims() == mOutput->dims());
+    //         assert(in->dims() == mOutputs[0]->dims());
     //     }
     // }
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Add operator.");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "Add Operators has only 1 outputs");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Add operator.");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Add_Op>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
         for (std::size_t i = 0; i < nbInputs(); ++i) {
-            mInputs[i]->setBackend(name);
+            getInput(i)->setBackend(name);
         }
     }
 
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
-
-        // FIXME: temporary workaround
-        for (std::size_t i = 0; i < nbInputs(); ++i) {
-            mInputs[i]->setDatatype(datatype);
-        }
-    }
-
-    inline IOIndex_t nbInputs() const noexcept override final { return mInputs.size(); }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return mInputs.size(); }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-
     static const std::vector<std::string> getInputsName(){
         return {"data_input_0", "data_input_n"};
     }
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 994239bc1..603fa94ce 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -50,22 +50,17 @@ public:
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
-                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {
-        setDataType(DataType::Float32);
-    }
+                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     AvgPooling_Op(const AvgPooling_Op<DIM>& op)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDataType(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -78,18 +73,23 @@ public:
 
 
     void computeOutputDims() override final {
-        if (!*mInputs[0]->empty()) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
+        // check inputs have been associated
+        if (!getInput(0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        if (!(getInput(0)->empty())) {
+            std::array<DimSize_t, DIM + 2> outputDims;
+            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->dims<DIM+2>());
+            outputDims[0] = inputDims[0];
+            outputDims[1] = inputDims[1];
 
             for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                            std::floor(static_cast<float>(*mInputs[0]->dims()[dim+2] -
+                                            std::floor(static_cast<float>(inputDims[dim+2] -
                                                                     this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
                                             static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
             }
-            outputDims[1] = *mInputs[0]->dims()[1];
-            outputDims[0] = *mInputs[0]->dims()[0];
-            mOutputs[0]->resize(outputDims);
+            getOutput(0)->resize(outputDims);
         }
     }
 
@@ -132,10 +132,10 @@ public:
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setBackend(name);
+        getInput(0)->setBackend(name);
     }
 
     static const std::vector<std::string> getInputsName(){
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index da7360c8b..09a9bb9ef 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -19,27 +19,20 @@
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class BatchNormAttr { Epsilon, Momentum };
 
+enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
-class BatchNorm_Op : public Operator,
+class BatchNorm_Op : public OperatorTensor,
                 public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
                 public StaticAttributes<BatchNormAttr, float, float> {
 public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 5> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>(), std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
-   public:
     static constexpr const char *Type = "BatchNorm";
 
     BatchNorm_Op() = delete;
@@ -49,25 +42,19 @@ public:
     using attr = typename Attributes_::template attr<e>;
 
     constexpr BatchNorm_Op(float epsilon, float momentum)
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 4, 1),
           Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
-                           attr<BatchNormAttr::Momentum>(momentum)),
-          mOutput(std::make_shared<Tensor>()) {
-        setDatatype(DataType::Float32);
-    }
+                           attr<BatchNormAttr::Momentum>(momentum)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     BatchNorm_Op(const BatchNorm_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -87,83 +74,41 @@ public:
     //     return *in;
     // }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 5 && "operators supports only 5 inputs");
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) {
-                if(mInputs[i]->size() != mInputs[0]->dims()[1]) {
-                    mInputs[i]->resize(std::array<DimSize_t, 1>({mInputs[0]->dims()[1]}));
+        // check inputs have been associated
+        bool associated = true;
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            const DimSize_t nbChannels =  getInput(0)->dims()[1];
+            for (std::size_t i = nbData(); i < nbInputs(); ++i) {
+                if(getInput(i)->size() != nbChannels) {
+                    // /!\ Input size should be handled BEFORE calling this function
+                    // This should raise an error
+                    getInput(i)->resize(std::array<DimSize_t, 1>({getInput(0)->dims()[1]}));
                 }
             }
-            mOutput->resize(mInputs[0]->dims());
+            mOutputs[0]->resize(getInput(0)->dims());
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 5 && "operators supports only 5 inputs");
-        return *(mInputs[inputIdx].get()); }
-
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 5 && "BatchNorm Operators supports only 5 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "BatchNorm Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 5 && "operators supports only 5 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string &name) override {
         mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[1]->setBackend(name);
-        mInputs[2]->setBackend(name);
-        mInputs[3]->setBackend(name);
-        mInputs[4]->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[1]->setDatatype(datatype);
-        mInputs[2]->setDatatype(datatype);
-        mInputs[3]->setDatatype(datatype);
-        mInputs[4]->setDatatype(datatype);
+        getInput(1)->setBackend(name);
+        getInput(2)->setBackend(name);
+        getInput(3)->setBackend(name);
+        getInput(4)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 5; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-    static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName() {
         return {"data_input", "scale", "shift", "mean", "variance"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
@@ -187,4 +132,4 @@ template <>
 const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
 }
 
 #endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 31b99370d..7f97eee00 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -19,7 +19,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
@@ -28,14 +28,9 @@
 namespace Aidge {
 enum class ConcatAttr { Axis };
 
-class Concat_Op : public Operator,
+class Concat_Op : public OperatorTensor,
     public Registrable<Concat_Op, std::string, std::unique_ptr<OperatorImpl>(const Concat_Op&)>,
     public StaticAttributes<ConcatAttr, DimSize_t> {
-private:
-    // FIXME: change accessibility
-    std::vector<std::shared_ptr<Tensor>> mInputs;
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Concat";
 
@@ -44,12 +39,12 @@ public:
     using attr = typename Attributes_::template attr<e>;
 
     Concat_Op(const IOIndex_t nbIn, const DimSize_t axis)
-        : Operator(Type),
-          mInputs(std::vector<std::shared_ptr<Tensor>>(nbIn, std::make_shared<Tensor>())),
+        : OperatorTensor(Type, nbIn, 0, 1),
           Attributes_(attr<ConcatAttr::Axis>(axis))
     {
-        assert(nbIn > 0 && "Concat should have at least one input");
-        setDatatype(DataType::Float32);
+        if (nbIn == 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Concat operator should have at least one input.");
+        }
     }
 
     /**
@@ -57,15 +52,10 @@ public:
      * @param op Operator to copy.
      */
     Concat_Op(const Concat_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mInputs(std::vector<std::shared_ptr<Tensor>>(op.nbInputs(), std::make_shared<Tensor>())),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        assert(op.nbInputs() > 0 && "Concat should have at least one input");
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Concat_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Concat_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -85,90 +75,32 @@ public:
     //     return *in;
     // }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Concat operator.");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        bool computable = !(mInputs[0]->empty()) && (getAttr<ConcatAttr::Axis>() < mInputs[0]->nbDims());
-        for (const auto& input : mInputs) {
-            computable &= !(input->empty());
-            computable &= (input->nbDims() == mInputs[0]->nbDims());
-        }
         // Every input is non-empty with the same number of dimensions
-        if (computable) {
-            auto outputDims =  mInputs[0]->dims();
-
-            for (std::size_t i = 1; i < nbInputs(); ++i) {
-                outputDims[getAttr<ConcatAttr::Axis>()] += mInputs[i]->dims()[getAttr<ConcatAttr::Axis>()];
+        bool associated = (getInput(0) != nullptr);
+        associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input
+        auto outputDims =  getInput(0)->dims();
+        const auto firstInputNbDims = getInput(0) -> nbDims();
+        for (IOIndex_t i = 1; i < nbInputs(); ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= (getInput(i)->nbDims() == firstInputNbDims);
+            for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
+                if (dim == getAttr<ConcatAttr::Axis>()) {
+                    outputDims[dim] += getInput(i)->dims()[dim];
+                }
+                else {
+                    associated &= (getInput(i)->dims()[dim] == outputDims[dim]);
+                }
             }
-            mOutput->resize(outputDims);
-        }
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-    // void checkDims() const override final {
-    //     assert(outputDimsForwarded());
-    //     for (const auto& in : mInputs) {
-    //         assert(in->dims() == mOutput->dims());
-    //     }
-    // }
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Concat operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Concat operator.");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "Concat Operators has only 1 outputs");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < nbInputs() && "wrong inputIdx for Concat operator.");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
-    void setBackend(const std::string& name) override {
-        mImpl = Registrar<Concat_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        for (std::size_t i = 0; i < nbInputs(); ++i) {
-            mInputs[i]->setBackend(name);
         }
-    }
-
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
-
-        // FIXME: temporary workaround
-        for (std::size_t i = 0; i < nbInputs(); ++i) {
-            mInputs[i]->setDatatype(datatype);
+        if (associated) {
+            getOutput(0)->resize(outputDims);
         }
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return mInputs.size(); }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return mInputs.size(); }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-
     static const std::vector<std::string> getInputsName(){
         return {"data_input_0", "data_input_n"};
     }
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 5bce05413..6c6e64db1 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -45,32 +45,27 @@ public:
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr Conv_Op(DimSize_t in_channels,
-                      DimSize_t out_channels,
-                      const std::array<DimSize_t, DIM> &kernel_dims,
-                      const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+    constexpr Conv_Op(DimSize_t inChannels,
+                      DimSize_t outChannels,
+                      const std::array<DimSize_t, DIM> &kernelDims,
+                      const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, 1, 2, 1),
-          Attributes_(attr<ConvAttr::StrideDims>(stride_dims),
-                      attr<ConvAttr::DilationDims>(dilation_dims),
-                      attr<ConvAttr::InChannels>(in_channels),
-                      attr<ConvAttr::OutChannels>(out_channels),
-                      attr<ConvAttr::KernelDims>(kernel_dims)) {
-        setDataType(DataType::Float32);
-    }
+          Attributes_(attr<ConvAttr::StrideDims>(strideDims),
+                      attr<ConvAttr::DilationDims>(dilationDims),
+                      attr<ConvAttr::InChannels>(inChannels),
+                      attr<ConvAttr::OutChannels>(outChannels),
+                      attr<ConvAttr::KernelDims>(kernelDims)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Conv_Op(const Conv_Op<DIM>& op)
-        : OperatorTensor(Type, 1, 2, 1),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDataType(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -95,8 +90,17 @@ public:
     // }
 
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
+        // check inputs have been associated
+        bool associated = true;
+        for (IOIndex_t i = 0; i < 3; ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            std::array<DimSize_t, DIM + 2> outputDims{};
+            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->dims<DIM+2>());
 
             for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
                 const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
@@ -104,13 +108,13 @@ public:
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
+                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
                               static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
             }
 
             outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
-            outputDims[0] = mInputs[0]->dims()[0];
-            mOutput->resize(outputDims);
+            outputDims[0] = inputDims[0];
+            mOutputs[0]->resize(outputDims);
         }
     }
 
@@ -167,11 +171,11 @@ public:
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[1]->setBackend(name);
-        mInputs[2]->setBackend(name);
+        getInput(1)->setBackend(name);
+        getInput(2)->setBackend(name);
     }
 
     static const std::vector<std::string> getInputsName(){
@@ -183,32 +187,32 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Conv(DimSize_t in_channels,
-                                  DimSize_t out_channels,
-                                  const std::array<DimSize_t, DIM> &kernel_dims,
+inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
+                                  DimSize_t outChannels,
+                                  const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
-                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+                                  const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims), name);
     // addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
-    addProducer(conv, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
-    addProducer(conv, 2, std::array<DimSize_t, 1>({out_channels}), "b");
+    addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
+    addProducer(conv, 2, std::array<DimSize_t, 1>({outChannels}), "b");
     return conv;
 }
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> Conv(
-    DimSize_t in_channels,
-    DimSize_t out_channels,
-    DimSize_t const (&kernel_dims)[DIM],
+    DimSize_t inChannels,
+    DimSize_t outChannels,
+    DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    return Conv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, dilation_dims);
+    return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims);
 }
 }  // namespace Aidge
 
@@ -223,4 +227,4 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
 };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index f58f435ac..c2b043118 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -19,7 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,20 +29,14 @@ namespace Aidge {
 enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
 
 template <DimIdx_t DIM>
-class ConvDepthWise_Op : public Operator,
+class ConvDepthWise_Op : public OperatorTensor,
                 public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
                 public StaticAttributes<ConvDepthWiseAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        DimSize_t,
                                        std::array<DimSize_t, DIM>> {
-   public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
-   public:
+public:
     static constexpr const char *Type = "ConvDepthWise";
 
     ConvDepthWise_Op() = delete;
@@ -58,26 +52,21 @@ class ConvDepthWise_Op : public Operator,
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
                       attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
                       attr<ConvDepthWiseAttr::Channels>(0),
-                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {
-        setDatatype(DataType::Float32);
-    }
+                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -88,16 +77,20 @@ class ConvDepthWise_Op : public Operator,
         return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
+        // check inputs have been associated
+        // TODO : add a check of inputs dimensions ?
+        bool associated = true;
+        for (IOIndex_t i = 0; i < 3; ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
+            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->dims<DIM+2>());
 
             for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
                 const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
@@ -105,10 +98,10 @@ class ConvDepthWise_Op : public Operator,
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
+                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
                               static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
             }
-            this->template getAttr<ConvDepthWiseAttr::Channels>() = mInputs[0]->dims()[1];
+            this->template getAttr<ConvDepthWiseAttr::Channels>() = inputDims[1];
             // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
             // if (mInputs[1]->empty()) {
             //     mInputs[1]->resize(weightDims);
@@ -116,14 +109,12 @@ class ConvDepthWise_Op : public Operator,
             // if (mInputs[2]->empty()) {
             //     mInputs[2]->resize({mInputs[0]->dims()[1]});
             // }
-            outputDims[1] = mInputs[0]->dims()[1];
-            outputDims[0] = mInputs[0]->dims()[0];
-            mOutput->resize(outputDims);
+            outputDims[1] = inputDims[1];
+            outputDims[0] = inputDims[0];
+            mOutputs[0]->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
     // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
     //     if (outputIdx != 0) {
     //         AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
@@ -160,57 +151,15 @@ class ConvDepthWise_Op : public Operator,
     //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
     // }
 
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "ConvDepthWise Operators supports only 3 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "ConvDepthWise Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
-
     void setBackend(const std::string &name) override {
         mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[1]->setBackend(name);
-        mInputs[2]->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-        mInputs[2]->setDatatype(datatype);
+        getInput(1)->setBackend(name);
+        getInput(2)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
@@ -220,27 +169,29 @@ class ConvDepthWise_Op : public Operator,
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
+inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
+                                           const std::array<DimSize_t, DIM> &kernelDims,
                                            const std::string& name = "",
-                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                           const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+                                           const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                                           const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), name);
-    addProducer(convDW, 1, std::array<DimSize_t,0>({}), "w");
-    addProducer(convDW, 2, std::array<DimSize_t,0>({}), "b");
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+    addProducer(convDW, 1, append(nbChannels, append(1, kernelDims)), "w");
+    addProducer(convDW, 2, std::array<DimSize_t, 1>({nbChannels}), "b");
     return convDW;
 }
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> ConvDepthWise(
-    DimSize_t const (&kernel_dims)[DIM],
+    const DimSize_t nbChannels,
+    DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    return ConvDepthWise(to_array(kernel_dims), name, stride_dims, dilation_dims);
+    return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims);
 }
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 4213f979c..b4acd79e4 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -17,42 +17,31 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
 
-class Div_Op : public Operator,
+class Div_Op : public OperatorTensor,
     public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
     static constexpr const char* Type = "Div";
 
-    Div_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Div_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Div_Op(const Div_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,73 +52,15 @@ public:
         return std::make_shared<Div_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty())
-            mOutput->resize(mInputs[0]->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Div Operator has 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Div Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Div_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index b949527c5..4cece292c 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -21,7 +21,7 @@
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,16 +29,11 @@
 namespace Aidge {
 enum class FCAttr { OutChannels, NoBias };
 
-class FC_Op : public Operator,
+class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
                                  std::unique_ptr<OperatorImpl>(const FC_Op &)>,
               public StaticAttributes<FCAttr, DimSize_t, bool> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "FC";
 
@@ -48,26 +43,21 @@ public:
     template <FCAttr e> using attr = typename Attributes_::template attr<e>;
 
     FC_Op(DimSize_t out_channels, bool noBias)
-            : Operator(Type),
-            Attributes_(
-                attr<FCAttr::OutChannels>(out_channels),
-                attr<FCAttr::NoBias>(noBias))
-    {
-        setDatatype(DataType::Float32);
-    }
+    : OperatorTensor(Type, 1, 2, 1),
+      Attributes_(
+        attr<FCAttr::OutChannels>(out_channels),
+        attr<FCAttr::NoBias>(noBias))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     FC_Op(const FC_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -78,7 +68,7 @@ public:
         return std::make_shared<FC_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         if (inputIdx == 2) {
@@ -86,78 +76,35 @@ public:
             assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
         }
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-        if (inputIdx == 0 && mInputs[0]->nbDims() == 1)
-            mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, mInputs[inputIdx]->size()}));
+        if (inputIdx == 0 && getInput(0)->nbDims() == 1)
+            mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, getInput(inputIdx)->size()}));
     }
 
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            // <in_features**, out_channels>
-            std::array<DimSize_t, 2> weightDims = {this->template getAttr<FCAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
-            // <out_channels, batch>
-            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<FCAttr::OutChannels>()};
-
-            mInputs[1]->resize(weightDims);
-            mOutput->resize(outputDims);
+        bool associated = true;
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            // <batch, OutChannels>
+            mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()});
         }
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(const IOIndex_t /*inputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "FC Operators supports only 3 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "FC Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
     }
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<FC_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-        mInputs[2]->setBackend(name);
-    }
-
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-        mInputs[2]->setDatatype(datatype);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
+        getInput(2)->setBackend(name);
     }
 
-
-    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
@@ -166,11 +113,11 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") {
+inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
-    addProducer(fc, 1, std::array<DimSize_t, 2>({out_channels, 1}), "w");
-    addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({out_channels})), "b"); // already sets bias dims
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(outChannels, noBias), name);
+    addProducer(fc, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
+    addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({outChannels})), "b"); // already sets bias dims
     return fc;
 }
 } // namespace Aidge
@@ -181,4 +128,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 55ccbf151..505c53449 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -19,7 +19,7 @@
 #include <cstring>
 
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -27,50 +27,26 @@
 
 namespace Aidge {
 class GenericOperator_Op
-    : public Operator,
+    : public OperatorTensor,
       public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
       public DynamicAttributes {
-   private:
+private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
-    IOIndex_t mNbDataIn;
-    IOIndex_t mNbIn;
-    IOIndex_t mNbOut;
-    std::vector<std::shared_ptr<Tensor>> mInputs;
-    std::vector<std::shared_ptr<Tensor>> mOutputs;
     ComputeDimsFunc mComputeOutputDims;
 
-   public:
-    GenericOperator_Op(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut)
-        : Operator(type), mNbDataIn(nbDataIn), mNbIn(nbIn), mNbOut(nbOut)
-    {
-        mInputs = std::vector<std::shared_ptr<Tensor>>(nbIn);
-        for (std::size_t i = 0; i < nbIn; ++i) {
-            mInputs[i] = std::make_shared<Tensor>();
-        }
-        mOutputs = std::vector<std::shared_ptr<Tensor>>(nbOut);
-        for (std::size_t i = 0; i < nbOut; ++i) {
-            mOutputs[i] = std::make_shared<Tensor>();
-        }
-    }
+public:
+    GenericOperator_Op(const char *type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
+        : OperatorTensor(type, nbData, nbParam, nbOut)
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     GenericOperator_Op(const GenericOperator_Op& op)
-        : Operator(op.type().c_str()), mNbDataIn(op.mNbDataIn), mNbIn(op.mNbIn), mNbOut(op.mNbOut)
-    {
-        // cpy-ctor
-        mInputs = std::vector<std::shared_ptr<Tensor>>(mNbIn);
-        for (std::size_t i = 0; i < mNbIn; ++i) {
-            mInputs[i] = std::make_shared<Tensor>();
-        }
-        mOutputs = std::vector<std::shared_ptr<Tensor>>(mNbOut);
-        for (std::size_t i = 0; i < mNbOut; ++i) {
-            mOutputs[i] = std::make_shared<Tensor>(*op.mOutputs[i]);
-        }
-    }
+        : OperatorTensor(op)
+    {}
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -87,28 +63,19 @@ class GenericOperator_Op
         mComputeOutputDims = func;
     }
 
-    // Override Virtual Opertor methods
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < mNbIn && "operators supports only x inputs");
-
-        if (strcmp(data->type(), Tensor::Type) == 0) {
-            // TODO: associate input only if of type Tensor, otherwise do nothing
-            mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-        }
-    }
 
     void computeOutputDims() override final {
         if (mComputeOutputDims) {
-            std::vector<std::vector<size_t>> inputsDims(mNbIn, std::vector<size_t>());
-            for (std::size_t i = 0; i < mNbIn; ++i) {
-                if (mInputs[i]) {
-                    inputsDims[i] = mInputs[i]->dims();
+            std::vector<std::vector<size_t>> inputsDims(nbInputs(), std::vector<size_t>());
+            for (std::size_t i = 0; i < nbInputs(); ++i) {
+                if (getInput(i)) {
+                    inputsDims[i] = getInput(i)->dims();
                 }
             }
 
             const auto& outputsDims = mComputeOutputDims(inputsDims);
-            assert(outputsDims.size() == mNbOut && "The provided ComputeDimsFunc function returns the wrong number of outputs");
-            for (std::size_t i = 0; i < mNbOut; ++i) {
+            assert(outputsDims.size() == nbOutputs() && "The provided ComputeDimsFunc function returns the wrong number of outputs");
+            for (std::size_t i = 0; i < nbOutputs(); ++i) {
                 mOutputs[i]->resize(outputsDims[i]);
             }
         }
@@ -127,47 +94,11 @@ class GenericOperator_Op
         }
     }
 
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
-        printf("Info: using getRawInput() on a GenericOperator.\n");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
-        printf("Info: using input() on a GenericOperator.\n");
-        return *mInputs[inputIdx];
-    }
-
-
-    std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
-        printf("Info: using getInput() on a GenericOperator.\n");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
-        printf("Info: using getOutput() on a GenericOperator.\n");
-        return mOutputs[outputIdx];
-    }
-
-
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
-        printf("Info: using getRawOutput() on a GenericOperator.\n");
-        return std::static_pointer_cast<Data>(mOutputs[outputIdx]);
-    }
-
-    Tensor& output(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
-        printf("Info: using output() on a GenericOperator.\n");
-        return *mOutputs[outputIdx];
-    }
 
     ~GenericOperator_Op() = default;
 
     void setBackend(const std::string & /*name*/) override { printf("setBackend: not available yet.\n"); }
-    void setDatatype(const DataType & /*datatype*/) override { printf("setDatatype: not available yet.\n"); }
+    void setDataType(const DataType& /*datatype*/) const override { printf("setDataType: not available yet.\n"); }
     void forward() override final {
         if(mImpl){
             mImpl->forward();
@@ -182,9 +113,6 @@ class GenericOperator_Op
             printf("backward: No implementation is linked.\n");
         }
     }
-    inline IOIndex_t nbInputs() const noexcept override final { return mNbIn; };
-    inline IOIndex_t nbDataInputs() const noexcept override final { return mNbDataIn; };
-    inline IOIndex_t nbOutputs() const noexcept override final { return mNbOut; };
 };
 
 /**
@@ -197,9 +125,9 @@ class GenericOperator_Op
  * @param name (optional) name of the Operator.
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
  */
-inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut,
+inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
                                              const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbDataIn, nbIn, nbOut), name);
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
 }
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index bcdcbc7ca..800c8c61d 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -17,7 +17,7 @@
 
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -29,14 +29,9 @@ enum class LeakyReLUAttr {
     NegativeSlope
 };
 
-class LeakyReLU_Op : public Operator,
+class LeakyReLU_Op : public OperatorTensor,
     public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
     public StaticAttributes<LeakyReLUAttr, float> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "LeakyReLU";
 
@@ -46,25 +41,20 @@ public:
     template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
 
     LeakyReLU_Op(float negativeSlope)
-            : Operator(Type),
-            Attributes_(
-                attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
-    {
-        setDatatype(DataType::Float32);
-    }
+        : OperatorTensor(Type, 1, 0, 1),
+          Attributes_(
+            attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     LeakyReLU_Op(const LeakyReLU_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -75,69 +65,17 @@ public:
         return std::make_shared<LeakyReLU_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "LeakyReLU Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "LeakyReLU Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-        static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
     static const std::vector<std::string> getOutputsName(){
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index eed1ec045..23c12d458 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -21,7 +21,7 @@
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,15 +29,11 @@
 namespace Aidge {
 enum class MatMulAttr { OutChannels };
 
-class MatMul_Op : public Operator,
+class MatMul_Op : public OperatorTensor,
               public Registrable<MatMul_Op,
                                  std::string,
                                  std::unique_ptr<OperatorImpl>(const MatMul_Op &)>,
               public StaticAttributes<MatMulAttr, DimSize_t> {
-public:
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "MatMul";
 
@@ -47,25 +43,20 @@ public:
     template <MatMulAttr e> using attr = typename Attributes_::template attr<e>;
 
     MatMul_Op(DimSize_t out_channels)
-            : Operator(Type),
+            : OperatorTensor(Type, 1, 1, 1),
             Attributes_(
                 attr<MatMulAttr::OutChannels>(out_channels))
-    {
-        setDatatype(DataType::Float32);
-    }
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     MatMul_Op(const MatMul_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -76,78 +67,31 @@ public:
         return std::make_shared<MatMul_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operators supports only 2 inputs");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            // <in_features**, out_channels>
-            std::array<DimSize_t, 2> weightDims = {this->template getAttr<MatMulAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
-            // <out_channels, batch>
-            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()};
-
-            mInputs[1]->resize(weightDims);
-            mOutput->resize(outputDims);
+        bool associated = true;
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            // <batch, OutChannels>
+            mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()});
         }
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operators supports only 2 inputs");
-        return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "MatMul Operators has 2 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "MatMul Operators has 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operators supports only 2 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
     }
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<MatMul_Op>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-    }
-
-
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight"};
     }
@@ -156,10 +100,10 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> MatMul(DimSize_t out_channels, const std::string& name = "") {
+inline std::shared_ptr<Node> MatMul(DimSize_t inChannels, DimSize_t outChannels, const std::string& name = "") {
     // FIXME: properly handle default w initialization in every cases
-    auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(out_channels), name);
-    addProducer(matmul, 1, std::array<DimSize_t, 2>({out_channels, 1}), "w");
+    auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(outChannels), name);
+    addProducer(matmul, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
     return matmul;
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index bcf47f13c..45aec34c1 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -19,7 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,17 +29,12 @@ namespace Aidge {
 enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
-class MaxPooling_Op : public Operator,
+class MaxPooling_Op : public OperatorTensor,
                 public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
                 public StaticAttributes<MaxPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        bool> {
-private:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char *Type = "MaxPooling";
 
@@ -55,26 +50,21 @@ public:
     constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                             bool ceil_mode = false)
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
                       attr<MaxPoolingAttr::KernelDims>(kernel_dims),
-                      attr<MaxPoolingAttr::CeilMode>(ceil_mode)),
-          mOutput(std::make_shared<Tensor>()) {
-        setDatatype(DataType::Float32);
-    }
+                      attr<MaxPoolingAttr::CeilMode>(ceil_mode))
+        {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     MaxPooling_Op(const MaxPooling_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -85,17 +75,14 @@ public:
         return std::make_shared<MaxPooling_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 1 && "operators supports only 3 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInput->empty()) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
+        if (!getInput(0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        if (!(getInput(0)->empty())) {
+            std::array<DimSize_t, DIM + 2> outputDims{};
+            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->dims<DIM+2>());
 
             std::function<float(float)> roundingFunction;
             if (this->template getAttr<MaxPoolingAttr::CeilMode>()) {
@@ -106,69 +93,25 @@ public:
 
             for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                            roundingFunction(static_cast<float>(mInput->dims()[dim+2] -
+                                            roundingFunction(static_cast<float>(inputDims[dim+2] -
                                                                     this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
                                             static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
             }
-            outputDims[1] = mInput->dims()[1];
-            outputDims[0] = mInput->dims()[0];
-            mOutput->resize(outputDims);
+            outputDims[1] = inputDims[1];
+            outputDims[0] = inputDims[0];
+            mOutputs[0]->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return *(mInput.get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "MaxPooling Operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "MaxPooling Operators has only 1 outputs");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 72058dfcb..5775dd24d 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -12,26 +12,24 @@
 #ifndef AIDGE_CORE_OPERATOR_METAOPERATOR_H_
 #define AIDGE_CORE_OPERATOR_METAOPERATOR_H_
 
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 
 namespace Aidge {
-class MetaOperator_Op : public Operator,
+class MetaOperator_Op : public OperatorTensor,
                 public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)> {
 public:
-    std::vector<std::shared_ptr<Tensor>> mInputs;
-    std::vector<std::shared_ptr<Tensor>> mOutputs; // These are shared with micro-graph outputs tensors
-
+    // outputs shared with micro-graph output Tensors
     // Micro-graph handling:
     std::shared_ptr<GraphView> mGraph; // Meta operator micro-graph
     std::shared_ptr<SequentialScheduler> mScheduler;
     // Need to store an ordored list of input/output operators for the micro-graph,
     // because input/output nodes in a GraphView are unordered.
     // TODO: refactor GraphView to handle ordered input/output?
-    std::vector<std::pair<std::shared_ptr<Operator>, IOIndex_t>> mInputOps;
-    std::vector<std::pair<std::shared_ptr<Operator>, IOIndex_t>> mOutputOps;
+    std::vector<std::pair<std::shared_ptr<OperatorTensor>, IOIndex_t>> mInputOps;
+    std::vector<std::pair<std::shared_ptr<OperatorTensor>, IOIndex_t>> mOutputOps;
 
    public:
     MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph,
@@ -43,11 +41,9 @@ public:
      * @param op Operator to copy.
      */
     MetaOperator_Op(const MetaOperator_Op& op)
-        : Operator(op.type().c_str()),
+        : OperatorTensor(op),
           mGraph(op.mGraph->clone())
-    {
-        // cpy-ctor
-    }
+    {}
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -65,7 +61,7 @@ public:
         return mScheduler;
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
 
         const auto& inputOp = mInputOps[inputIdx];
@@ -86,38 +82,6 @@ public:
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutputs[0]->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < mInputs.size() && "inputIdx out of range");
-        return *(mInputs[inputIdx].get());
-    }
-
-    inline Tensor& output(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
-        return *(mOutputs[outputIdx].get());
-    }
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < mInputs.size() && "inputIdx out of range");
-        return mInputs[inputIdx];
-    }
-
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
-        return mOutputs[outputIdx];
-    }
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < mInputs.size() && "inputIdx out of range");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
-        return std::static_pointer_cast<Data>(mOutputs[outputIdx]);
-    }
 
     void setBackend(const std::string &name) override {
         if (Registrar<MetaOperator_Op>::exists({name, type()})) {
@@ -131,17 +95,13 @@ public:
         mGraph->setBackend(name);
     }
 
-    void setDatatype(const DataType &datatype) override {
+    void setDataType(const DataType &datatype) const override {
         // The micro-graph should always be set to the right data type, since it
         // shares input/output tensors.
         // Input/output tensors data type are updated here.
-        mGraph->setDatatype(datatype);
+        mGraph->setDataType(datatype);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return mGraph->inputs().size(); }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return mGraph->dataInputs().size(); }
-    inline IOIndex_t nbOutputs() const noexcept override final { return mGraph->outputs().size(); }
-
     NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
     NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override;
     NbElts_t getNbProducedData(IOIndex_t outputIdx) const override;
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 4ea79fe52..f1537f5b2 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -12,47 +12,38 @@
 #ifndef AIDGE_CORE_OPERATOR_MUL_H_
 #define AIDGE_CORE_OPERATOR_MUL_H_
 
-#include <cassert>
 #include <memory>
+#include <string>
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 
-class Mul_Op : public Operator,
+/**
+ * @brief Tensor element-wise multiplication.
+ */
+class Mul_Op : public OperatorTensor,
     public Registrable<Mul_Op, std::string, std::unique_ptr<OperatorImpl>(const Mul_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Mul";
 
-    Mul_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Mul_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Mul_Op(const Mul_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,73 +54,16 @@ public:
         return std::make_shared<Mul_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty())
-            mOutput->resize(mInputs[0]->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Mul Operator has 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Mul Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Mul_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -141,6 +75,6 @@ public:
 inline std::shared_ptr<Node> Mul(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
 }
-}
+} // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
+#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index cbebb16e1..3fa6fa097 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -19,7 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -30,17 +30,12 @@ enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
 enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 
 template <DimIdx_t DIM>
-class Pad_Op : public Operator,
+class Pad_Op : public OperatorTensor,
                 public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
                 public StaticAttributes<PadAttr,
                                        std::array<DimSize_t, 2*DIM>,
                                        PadBorderType,
                                        double> {
-private:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char *Type = "Pad";
 
@@ -56,25 +51,19 @@ public:
     constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                      const PadBorderType &borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
                            attr<PadAttr::BorderType>(borderType),
-                           attr<PadAttr::BorderValue>(borderValue)) {
-        setDatatype(DataType::Float32);
-    }
+                           attr<PadAttr::BorderValue>(borderValue)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Pad_Op(const Pad_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
-    {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-    }
+        : OperatorTensor(op),
+          Attributes_(op)
+    {}
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -84,82 +73,38 @@ public:
         return std::make_shared<Pad_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 1 && "operators supports only 3 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInput->empty()) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
+        bool associated = true;
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            std::array<DimSize_t, DIM + 2> outputDims{};
+            const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->dims<DIM+2>();
 
             for (std::size_t dim = 0; dim < DIM; ++dim) {
                 outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
-                                    + mInput->dims()[dim+2]
+                                    + inputDims[dim+2]
                                     + this->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
             }
-            outputDims[1] = mInput->dims()[1];
-            outputDims[0] = mInput->dims()[0];
-            mOutput->resize(outputDims);
+            outputDims[1] = inputDims[1];
+            outputDims[0] = inputDims[0];
+            mOutputs[0]->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return *(mInput.get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "Pad Operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "Pad Operators has only 1 outputs");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string &name) override {
         mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index 732cf36b4..0ab73441f 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -17,7 +17,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,33 +26,21 @@
 
 namespace Aidge {
 
-class Pow_Op : public Operator,
+class Pow_Op : public OperatorTensor,
     public Registrable<Pow_Op, std::string, std::unique_ptr<OperatorImpl>(const Pow_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Pow";
 
-    Pow_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Pow_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Pow_Op(const Pow_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,73 +51,16 @@ public:
         return std::make_shared<Pow_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty())
-            mOutput->resize(mInputs[0]->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Pow Operator has 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Pow Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Pow_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -141,6 +72,6 @@ public:
 inline std::shared_ptr<Node> Pow(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
 }
-}
+} // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
+#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index d747b3406..0b63e5a32 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -18,49 +18,40 @@
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
 
 class Producer_Op
-    : public Operator,
+    : public OperatorTensor,
       public Registrable<Producer_Op, std::string, std::unique_ptr<OperatorImpl>(
                                           const Producer_Op &)> {
-private:
-    std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Producer";
 
     template <std::size_t DIM>
     Producer_Op(const std::array<DimSize_t, DIM>& dims)
-        : Operator(Type)
+        : OperatorTensor(Type, 0, 0, 1)
     {
-        //ctor
-        setDatatype(DataType::Float32);
-        mOutput->resize(dims);
+        mOutputs[0]->resize(dims);
     }
 
     Producer_Op(const std::shared_ptr<Tensor> tensor)
-        : Operator(Type),
-          mOutput(tensor)
+        : OperatorTensor(Type, 0, 0, 1)
     {
-        setDatatype(tensor->dataType());
+        mOutputs[0] = tensor; // share ownership of the provided Tensor (no deep copy)
     }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @param op OperatorTensor to copy.
      */
     Producer_Op(const Producer_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -71,8 +62,8 @@ public:
         return std::make_shared<Producer_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final {
-        assert(false && "Producer operator takes no input");
+    void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
     }
 
     /**
@@ -81,8 +72,8 @@ public:
      *
      * @param newOutput Tensor containing the values to copy
      */
-    void setOutputTensor(const Tensor& newOutput) {
-        *mOutput = newOutput;
+    void setOutput(const std::shared_ptr<Tensor>& newOutput) {
+        mOutputs[0] = newOutput;
     }
 
     void computeOutputDims() override final {}
@@ -90,48 +81,13 @@ public:
     bool outputDimsForwarded() const override final {return true;}
 
 
-    [[noreturn]] inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final {
-      assert(false);
-      exit(-1);
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t /*inputIdx*/) const override final {
-      assert(false && "Producer Operator has no input");
-      return nullptr;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-      assert((outputIdx == 0) && "Producer Operator has only 1 output");
-      (void) outputIdx; // avoid unused warning
-      return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t /*inputIdx*/) const override final {
-        assert(false && "Producer operator takes no input");
-        return nullptr;
-    }
-
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-    inline const std::vector<DimSize_t> dims() const noexcept { return mOutput->dims(); }
+    inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Producer_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 0; };
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 0; };
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; };
     static const std::vector<std::string> getInputsName(){
         return {};
     }
@@ -181,4 +137,4 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, Dim
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
+#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 52f13f1c5..3444c25fc 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -17,42 +17,29 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 
-class ReLU_Op : public Operator,
+class ReLU_Op : public OperatorTensor,
     public Registrable<ReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const ReLU_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "ReLU";
 
-    ReLU_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    ReLU_Op() : OperatorTensor(Type, 1, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     ReLU_Op(const ReLU_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,68 +50,15 @@ public:
         return std::make_shared<ReLU_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "ReLU Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "ReLU Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<ReLU_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -138,4 +72,4 @@ inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
 }
 }
 
-#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
+#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 40bc397fa..fd6d6bcfc 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -15,14 +15,11 @@
 #include <vector>
 #include <memory>
 
-
-
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
@@ -31,14 +28,9 @@ enum class ScalingAttr {
     scalingFactor, quantizedNbBits, isOutputUnsigned
 };
 
-class Scaling_Op : public Operator,
+class Scaling_Op : public OperatorTensor,
     public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
     public StaticAttributes<ScalingAttr, float, size_t, bool> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Scaling";
 
@@ -48,27 +40,22 @@ public:
     template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
 
     Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
-            : Operator(Type),
-            Attributes_(
-                attr<ScalingAttr::scalingFactor>(scalingFactor),
-                attr<ScalingAttr::quantizedNbBits>(nbBits),
-                attr<ScalingAttr::isOutputUnsigned>(isOutputUnsigned)) {
-
-            setDatatype(DataType::Float32);
-        }
+        : OperatorTensor(Type, 1, 0, 1),
+          Attributes_(
+            attr<ScalingAttr::scalingFactor>(scalingFactor),
+            attr<ScalingAttr::quantizedNbBits>(nbBits),
+            attr<ScalingAttr::isOutputUnsigned>(isOutputUnsigned))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Scaling_Op(const Scaling_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -79,79 +66,17 @@ public:
         return std::make_shared<Scaling_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        (void) inputIdx; //avoid unused warning
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return *(mInput.get());
-    }
-    inline Tensor& output(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return *(mOutput.get());
-    }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning;
-        return mOutput;
-    }
-
-
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Scaling_Op>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
         // FIXME: temporary workaround
-        mInput->setBackend(name);
+        mInputs[0]->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
 
-        // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
-    }
-
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-    static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
@@ -164,8 +89,7 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri
 inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, std::size_t quantizedNbBits=8, bool isOutputUnsigned=true, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
 }
-
-}
+} // namespace Aidge
 
 namespace {
 template <>
@@ -173,4 +97,4 @@ const char* const EnumStrings<Aidge::ScalingAttr>::data[]
     = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
+#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
\ No newline at end of file
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 6b1077c8d..f12afafab 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -16,10 +16,9 @@
 #include <vector>
 
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
@@ -29,14 +28,9 @@ enum class SliceAttr { Beginning, SliceDims };
 
 template <DimIdx_t DIM>
 class Slice_Op
-    : public Operator,
+    : public OperatorTensor,
       public Registrable<Slice_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op<DIM> &)>,
       public StaticAttributes<SliceAttr, std::size_t, std::array<DimSize_t, DIM>> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char *Type = "Slice";
 
@@ -47,12 +41,10 @@ public:
     using attr = typename Attributes_::template attr<e>;
 
     Slice_Op(std::size_t beginningPos, std::array<DimSize_t, DIM> sliceDims)
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<SliceAttr::Beginning>(beginningPos),
                       attr<SliceAttr::SliceDims>(sliceDims))
-    {
-        setDatatype(DataType::Float32);
-    }
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
@@ -60,13 +52,10 @@ public:
      * @param op Operator to copy.
      */
     Slice_Op(const Slice_Op &op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Slice_Op<DIM>>::create(mOutput->getImpl()->backend())(*this)
+        mImpl = op.mImpl ? Registrar<Slice_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this)
                          : nullptr;
     }
 
@@ -77,91 +66,49 @@ public:
      */
     std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void)inputIdx;  // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
     void computeOutputDims() override final {
-        if (!mInput->empty()) {
-            // Check input dimensions is compatible with slice dimensions
-            if (mInput->nbDims() != DIM) {
-                printf("Error: input and slice dimensions are not the same size.\n");
-                exit(-1);
-            }
-            std::array<DimSize_t, DIM> outputDims;
-
-            // Check that the sliced Tensor is actually part of the input Tensor
-            // For a 5*5 tensor ('x') and a 3*3 slice kernel ('o'):
-            // xxxxx               xxxxx
-            // xxxxx               xxxxx
-            // xxooo  --> ok       xxxoo --> out of bound
-            // xxooo               xxxoo
-            // xxooo               xxxoo
-            std::vector<std::size_t> beginningCoords = mInput->getCoord(this->template getAttr<SliceAttr::Beginning>());
-            for (std::size_t i = 0; i < DIM; ++i) {
-                if (beginningCoords[i] + this->template getAttr<SliceAttr::SliceDims>()[i] > mInput->dims()[i]) {
-                    printf("ROI of Slice operator out of bounds");
-                    exit(-1);
-                } else {
-                    outputDims[i] = this->template getAttr<SliceAttr::SliceDims>()[i];
-                }
+        if (!getInput(0) || (getInput(0)->empty())) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        // Check that the input dimensions are compatible with the slice dimensions
+        if (getInput(0)->nbDims() != DIM) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Error: input and slice dimensions are not the same size.");
+        }
+        std::array<DimSize_t, DIM> outputDims;
+        const std::array<DimSize_t, DIM> inputDims = getInput(0)->dims<DIM>();
+
+        // Check that the sliced Tensor is actually part of the input Tensor
+        // For a 5*5 tensor ('x') and a 3*3 slice kernel ('o'):
+        // xxxxx               xxxxx
+        // xxxxx               xxxxx
+        // xxooo  --> ok       xxxoo --> out of bound
+        // xxooo               xxxoo
+        // xxooo               xxxoo
+        std::vector<std::size_t> beginningCoords = getInput(0)->getCoord(this->template getAttr<SliceAttr::Beginning>());
+        for (std::size_t i = 0; i < DIM; ++i) {
+            if (beginningCoords[i] + this->template getAttr<SliceAttr::SliceDims>()[i] > inputDims[i]) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds");
+            } else {
+                outputDims[i] = this->template getAttr<SliceAttr::SliceDims>()[i];
             }
-
-            mOutput->resize(outputDims);
         }
-    }
-
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-    inline Tensor &input(const IOIndex_t /*inputIdx*/) const override final {
-        return *(mInput.get());
-    }
-    inline Tensor &output(const IOIndex_t /*outputIdx*/) const override final {
-        return *(mOutput.get());
-    }
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Slice Operator has only 1 input");
-        (void)inputIdx;  // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Slice Operator has only 1 output");
-        (void)outputIdx;  // avoid unused warning
-        return mOutput;
-    }
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void)inputIdx;  // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void)outputIdx;  // avoid unused warning
-        return mOutput;
+        mOutputs[0]->resize(outputDims);
     }
 
     void setBackend(const std::string &name) {
         mImpl = Registrar<Slice_Op>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setBackend(name);
+        getInput(0)->setBackend(name);
     }
-    void setDatatype(const DataType &datatype) {
-        mOutput->setDatatype(datatype);
 
-        // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
     }
-
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
 template <std::size_t DIM>
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index ba6132a5e..cc19cb821 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -17,7 +17,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,33 +26,21 @@
 
 namespace Aidge {
 
-class Softmax_Op : public Operator,
+class Softmax_Op : public OperatorTensor,
     public Registrable<Softmax_Op, std::string, std::unique_ptr<OperatorImpl>(const Softmax_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Softmax";
 
-    Softmax_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Softmax_Op() : OperatorTensor(Type, 1, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Softmax_Op(const Softmax_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Softmax_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Softmax_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,68 +51,14 @@ public:
         return std::make_shared<Softmax_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Softmax Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Softmax Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Softmax_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 90b2ae6a8..a4069b59b 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -17,7 +17,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,7 +26,7 @@
 
 namespace Aidge {
 
-class Sqrt_Op : public Operator,
+class Sqrt_Op : public OperatorTensor,
     public Registrable<Sqrt_Op, std::string, std::unique_ptr<OperatorImpl>(const Sqrt_Op&)> {
 public:
     // FIXME: change accessibility
@@ -36,23 +36,16 @@ public:
 public:
     static constexpr const char* Type = "Sqrt";
 
-    Sqrt_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Sqrt_Op() : OperatorTensor(Type, 1, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Sqrt_Op(const Sqrt_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,68 +56,14 @@ public:
         return std::make_shared<Sqrt_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Sqrt Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Sqrt Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Sqrt_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 451cba08f..3a826bd0f 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -17,7 +17,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,7 +26,7 @@
 
 namespace Aidge {
 
-class Sub_Op : public Operator,
+class Sub_Op : public OperatorTensor,
     public Registrable<Sub_Op, std::string, std::unique_ptr<OperatorImpl>(const Sub_Op&)> {
 public:
     // FIXME: change accessibility
@@ -36,23 +36,16 @@ public:
 public:
     static constexpr const char* Type = "Sub";
 
-    Sub_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Sub_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Sub_Op(const Sub_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Sub_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Sub_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,73 +56,16 @@ public:
         return std::make_shared<Sub_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty())
-            mOutput->resize(mInputs[0]->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Sub Operator has 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Sub Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Sub_Op>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
 
-        // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-    }
-
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -141,6 +77,6 @@ public:
 inline std::shared_ptr<Node> Sub(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
 }
-}
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
-- 
GitLab