diff --git a/aidge_core/export/node_export.py b/aidge_core/export/node_export.py
index 980cb05a5814b7476d64757353e393ad6130218b..bea61551d6b4363d234fba4df6138ccef3154331 100644
--- a/aidge_core/export/node_export.py
+++ b/aidge_core/export/node_export.py
@@ -37,15 +37,15 @@ class ExportNode(ABC):
         for idx, parent_node in enumerate(self.node.get_parents()):
             self.inputs.append(parent_node)
             if parent_node is not None:
-                self.inputs_dims.append(self.operator.input(idx).dims())
+                self.inputs_dims.append(self.operator.get_input(idx).dims())
             else:
                 self.inputs_dims.append(None)
 
         for idx, child_node in enumerate(self.node.get_children()):
             self.outputs.append(child_node)
-        
+
         # Dirty hot fix, change it quickly
-        self.outputs_dims.append(self.operator.output(0).dims())
+        self.outputs_dims.append(self.operator.get_output(0).dims())
 
     @abstractmethod
     def export(self, export_folder:str, list_configs:list):
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index 7bd1e730a973810db89aa786b52fa05c53c43590..825ca6100382116443699a00bcff27b9bbca028a 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -16,14 +16,14 @@ class test_operator_binding(unittest.TestCase):
-    Can be remove in later stage of the developpement.
+    Can be removed at a later stage of the development.
     """
     def setUp(self):
-        self.generic_operator = aidge_core.GenericOperator("FakeConv", 1, 1, 1).get_operator()
+        self.generic_operator = aidge_core.GenericOperator("FakeConv", 1, 0, 1).get_operator()
 
     def tearDown(self):
         pass
 
     def test_default_name(self):
         op_type = "Conv"
-        gop = aidge_core.GenericOperator(op_type, 1, 1, 1, "FictiveName")
+        gop = aidge_core.GenericOperator(op_type, 1, 0, 1, "FictiveName")
         # check node name is not operator type
         self.assertNotEqual(gop.name(), "Conv")
         # check node name is not default
@@ -95,12 +95,12 @@ class test_operator_binding(unittest.TestCase):
     def test_compute_output_dims(self):
         in_dims=[25, 25]
         input = aidge_core.Producer(in_dims, name="In")
-        genOp = aidge_core.GenericOperator("genOp", 1, 1, 1, name="genOp")
+        genOp = aidge_core.GenericOperator("genOp", 1, 0, 1, name="genOp")
         _ = aidge_core.sequential([input, genOp])
-        self.assertListEqual(genOp.get_operator().output(0).dims(), [])
+        self.assertListEqual(genOp.get_operator().get_output(0).dims(), [])
         genOp.get_operator().set_compute_output_dims(lambda x:x)
         genOp.get_operator().compute_output_dims()
-        self.assertListEqual(genOp.get_operator().output(0).dims(), in_dims)
+        self.assertListEqual(genOp.get_operator().get_output(0).dims(), in_dims)
 
     def test_set_impl(self):
 
@@ -116,7 +116,7 @@ class test_operator_binding(unittest.TestCase):
                 """
                 self.idx += 1
 
-        generic_node = aidge_core.GenericOperator("Relu", 1, 1, 1, name="myReLu")
+        generic_node = aidge_core.GenericOperator("Relu", 1, 0, 1, name="myReLu")
         generic_op = generic_node.get_operator()
         customImpl = PythonCustomImpl(generic_op)
 
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index 566650713c36236c19763f466ee906970466c02e..620beb160fb3494f156c1a4b512d386447081154 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -32,15 +32,17 @@ class test_attributes(unittest.TestCase):
         self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
 
     def test_fc(self):
+        in_channels = 4
         out_channels = 8
         nb_bias = True
-        fc_op = aidge_core.FC(out_channels, nb_bias).get_operator()
+        fc_op = aidge_core.FC(in_channels, out_channels, nb_bias).get_operator()
         self.assertEqual(fc_op.get_attr("OutChannels"), out_channels)
         self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
 
     def test_matmul(self):
+        in_channels = 4
         out_channels = 8
-        matmul_op = aidge_core.MatMul(out_channels).get_operator()
+        matmul_op = aidge_core.MatMul(in_channels, out_channels).get_operator()
         self.assertEqual(matmul_op.get_attr("OutChannels"), out_channels)
 
     def test_producer_1D(self):
diff --git a/aidge_core/unit_tests/test_recipies.py b/aidge_core/unit_tests/test_recipies.py
index 353b5131038bc11bd5279300fe7e4da8bb3f5664..235d6f683dea763a1432d27a73cee71cb82c107e 100644
--- a/aidge_core/unit_tests/test_recipies.py
+++ b/aidge_core/unit_tests/test_recipies.py
@@ -34,8 +34,8 @@ class test_recipies(unittest.TestCase):
 
     def test_remove_flatten(self):
         graph_view = aidge_core.sequential([
-            aidge_core.GenericOperator("Flatten", 1, 1, 1, name="Flatten0"),
-            aidge_core.FC(50, name='0')
+            aidge_core.GenericOperator("Flatten", 1, 0, 1, name="Flatten0"),
+            aidge_core.FC(10, 50, name='0')
         ])
         old_nodes = graph_view.get_nodes()
         aidge_core.remove_flatten(graph_view)
@@ -45,10 +45,10 @@ class test_recipies(unittest.TestCase):
         self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()]))
 
     def test_fuse_matmul_add(self):
-        matmul0 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul0")
-        add0 = aidge_core.Add(name="Add0")
-        matmul1 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul1")
-        add1 = aidge_core.Add(name="Add1")
+        matmul0 = aidge_core.MatMul(1, 1, name="MatMul0")
+        add0 = aidge_core.Add(2, name="Add0")
+        matmul1 = aidge_core.MatMul(1, 1, name="MatMul1")
+        add1 = aidge_core.Add(2, name="Add1")
 
         graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
 
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 82aecf163d477072d9290574350daa47d506bca4..cc0979b07b07c2b95515eda09fda68a9ec4ac63e 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -14,8 +14,10 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/backend/TensorImpl.hpp"
+
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
+
 #include "aidge/graph/Connector.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
@@ -25,6 +27,7 @@
 #include "aidge/graphmatching/SeqStm.hpp"
 #include "aidge/graphmatching/StmFactory.hpp"
 #include "aidge/graphmatching/Utile.hpp"
+
 #include "aidge/operator/Add.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/BatchNorm.hpp"
@@ -55,10 +58,12 @@
 #include "aidge/operator/Sub.hpp"
 #include "aidge/operator/Transpose.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
+
+#include "aidge/recipies/Recipies.hpp"
+
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
-#include "aidge/utils/Recipies.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 //#include "aidge/utilsParsing/AstNode.hpp"
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 58c434bccc7c8dd39a93c46ecf74c38d7d834d1a..f8c3a48f7d5169dfee2cdceff37465f61bbb546c 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -299,7 +299,7 @@ class Tensor : public Data,
      */
     Tensor &operator=(const Tensor &t) {
         resize(t.dims());
-        setDatatype(t.dataType());
+        setDataType(t.dataType());
         if (t.hasImpl()) {
             setBackend(t.mImpl->backend());
             mImpl->copy(t.mImpl->rawPtr(), size());
@@ -362,7 +362,7 @@ class Tensor : public Data,
      * if the Tensor has already been initialized.
      * @param dt DataType.
      */
-    void setDatatype(const DataType dt) {
+    void setDataType(const DataType dt) {
         if (mImpl && (dataType() != dt)) {
             // get ptr before changing Tensor backend or the type difference will trigger a warning
             const void *data = mImpl->rawPtr();
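
This rename (setDatatype() -> setDataType()) drives most of the call-site changes below. A minimal sketch of the new spelling, assuming a default-constructed Tensor (backend setup omitted):

    Aidge::Tensor t;
    t.resize({2, 3});                          // 2x3 tensor, no data allocated yet
    t.setDataType(Aidge::DataType::Float32);   // renamed from setDatatype()
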
diff --git a/include/aidge/graph/Connector.hpp b/include/aidge/graph/Connector.hpp
index f123cbb34ff61874498b1c328e8760404b06d66d..599ca7d6defd729b6e6536dcc95f326d345701d9 100644
--- a/include/aidge/graph/Connector.hpp
+++ b/include/aidge/graph/Connector.hpp
@@ -83,4 +83,4 @@ class Connector {
 std::shared_ptr<GraphView> generateGraph(std::vector<Connector> ctors);
 }  // namespace Aidge
 
-#endif /* AIDGE_CORE_GRAPH_CONNECTOR_H_ */
+#endif /* AIDGE_CORE_GRAPH_CONNECTOR_H_ */
\ No newline at end of file
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 4dea1ed974650ba9ae10c60c720733aa1581b055..6b23cda0d86a77487af7d63b3e7a0dfeae57bb37 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -162,6 +162,21 @@ public:
     std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs(
             std::string nodeName) const;
 
+    /**
+     * @brief Assert that the DataType, Backend, data format and dimensions along the GraphView are coherent.
+     * If not, apply the required transformations.
+     * @details Sets the GraphView ready for computation in four steps:
+     * 1 - Assert each input Tensor's DataType is compatible with its Operator's DataType.
+     * If not, insert a conversion Operator.
+     * 2 - Assert each input Tensor's Backend is compatible with its Operator's Backend.
+     * If not, add a Transmitter Operator.
+     * 3 - Assert the data format (NCHW, NHWC, ...) of each Operator's input Tensor is
+     * compatible with the selected kernel.
+     * If not, add a Transpose Operator.
+     * 4 - Propagate Tensor dimensions through the consecutive Operators.
+     */
+    void compile(const std::string& backend, const Aidge::DataType datatype);
+
     /**
      * @brief Compute dimensions of input/output Tensors for each Operator of the
      * GraphView object's Nodes.
@@ -171,7 +186,7 @@ public:
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string &backend);
-    /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
-    void setDatatype(const DataType &datatype);
+    /** @brief Set the same data type for each Operator of the GraphView object's Nodes. */
+    void setDataType(const DataType &datatype);
 
 ///////////////////////////////////////////////////////
 //        TOPOLOGY
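
compile() is new here. A usage sketch, assuming a GraphView g has already been assembled (graph construction is outside this hunk):

    std::shared_ptr<Aidge::GraphView> g = /* ... assemble Nodes ... */;
    // one entry point instead of separate backend/data-type/dimension calls;
    // per the docstring, incompatibilities are fixed by inserting conversion,
    // Transmitter or Transpose Operators before dimensions are propagated
    g->compile("cpu", Aidge::DataType::Float32);
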
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index de9f178347a228796d56d1653adddfed76ea7c5b..118d925e1e5b7c4fcd0c353236998ff831f7e42d 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -163,13 +163,6 @@ public:
     return std::pair<NodePtr, IOIndex_t>(mParents[inID], mIdOutParents[inID]);
   }
 
-  /**
-   * @brief Set fix value for the specified input by creating a Producer wrapping the given Tensor.
-   *
-   * @param idx Input index.
-   * @param tensor Constant Tensor to add as parent for specified index.
-   */
-  void setInput(const IOIndex_t idx, const std::shared_ptr<Tensor> tensor);
 
   /**
    * @brief Get the lowest index in the InputData Parent list equal to the
@@ -178,9 +171,9 @@ public:
    */
   inline IOIndex_t getFirstFreeDataInput() const {
     IOIndex_t i = 0;
-    for (; (i < nbDataInputs()) && (input(i).second != gk_IODefaultIndex); ++i) {}
-    // assert((i<nbDataInputs()) && "No free data input for Node");
-    return (i < nbDataInputs()) ? i : gk_IODefaultIndex;
+    for (; (i < nbData()) && (input(i).second != gk_IODefaultIndex); ++i) {}
+    // assert((i<nbData()) && "No free data input for Node");
+    return (i < nbData()) ? i : gk_IODefaultIndex;
   }
 
 
@@ -214,8 +207,8 @@ public:
    * @details [data, data, weight, bias] => 2
    * @return IOIndex_t
    */
-  inline IOIndex_t nbDataInputs() const noexcept {
-    return getOperator()->nbDataInputs();
+  inline IOIndex_t nbData() const noexcept {
+    return getOperator()->nbData();
   }
 
   /**
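
The nbDataInputs() -> nbData() rename is mechanical but touches every caller; migrated code looks like this sketch, for any NodePtr node:

    const Aidge::IOIndex_t nData  = node->nbData();    // was node->nbDataInputs()
    const Aidge::IOIndex_t nTotal = node->nbInputs();  // data inputs + weight/bias inputs
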
diff --git a/include/aidge/graphRegex/GraphParser.hpp b/include/aidge/graphRegex/GraphParser.hpp
index 29ee8c7b294eae2b8d8196de1702cb7e194cfa84..cfe25c22709a3516b4f55ba774a616e3b94a055c 100644
--- a/include/aidge/graphRegex/GraphParser.hpp
+++ b/include/aidge/graphRegex/GraphParser.hpp
@@ -21,8 +21,6 @@ class GraphParser{
      */
     GraphParser(const std::string gRegexExpressions);
 
-    virtual ~GraphParser() = default;
-
     /**
      * @brief AST graph creation function
      * @return The AST tree
@@ -31,7 +29,7 @@ class GraphParser{
 
 
     /**
-     * @brief get the query that be use in the parsing 
+     * @brief Get the query used in the parsing
      * @return query
      */
     const std::string getQuery();
diff --git a/include/aidge/graphRegex/matchFsm/MatchResult.hpp b/include/aidge/graphRegex/matchFsm/MatchResult.hpp
index 29b9abb616a80899b9c2ad8d5e01e5f00e674757..4f7f9bf1dd9b0612e71a1f7894bfc382713c0ad0 100644
--- a/include/aidge/graphRegex/matchFsm/MatchResult.hpp
+++ b/include/aidge/graphRegex/matchFsm/MatchResult.hpp
@@ -1,10 +1,12 @@
 #ifndef AIDGE_CORE_MATCH_RESULT_H_
 #define AIDGE_CORE_MATCH_RESULT_H_
 
+#include <cstddef>
+#include <map>
 #include <memory>
+#include <set>
+#include <string>
 #include <vector>
-#include <map>
-
 
 #include "aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp"
 #include "aidge/graph/Node.hpp"
@@ -12,23 +14,25 @@
 namespace Aidge{
 
 /**
- * @brief contained the result of one match and the associate key , the query and the start node 
+ * @brief Contains the result of one match with its associated key, the query and the start node
 */
 
 class MatchSolution{
 private:
-    std::map<std::string,std::set<NodePtr>> mSolution;
+    std::map<std::string, std::set<NodePtr>> mSolution;
     const std::string mQueryFrom;
     const std::vector<NodePtr> mStartNode;
 
 public:
     MatchSolution(std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence,const std::string query,const std::vector<NodePtr> startNode);
-    const std::set<NodePtr> & at(const std::string key);
-    const std::set<NodePtr>  getAll();
+    inline const std::set<NodePtr>& at(const std::string key) {
+        return mSolution[key];
+    }
+    const std::set<NodePtr> getAll();
     bool areCompatible(std::shared_ptr<MatchSolution> solution);
 
-    const std::string& getQuery(){ return mQueryFrom ;}
-    const std::vector<NodePtr>& getStartNode(){ return mStartNode ;}
+    inline const std::string& getQuery() const noexcept { return mQueryFrom; }
+    inline const std::vector<NodePtr>& getStartNode() const noexcept { return mStartNode; }
 
 };
 
@@ -59,15 +63,15 @@ public:
     MatchResult(std::vector<std::shared_ptr<FsmRunTimeContext>> allValid, std::size_t nbSubStm,
      const std::string& query,const std::vector<NodePtr>& startNodes);
 
-    virtual ~MatchResult() = default;
-
     /**
-     * @brief get the set of the node match for une expression
-     * @return the set of node of the graph that corresponding to an expression
+     * @brief Get the set of nodes matching an expression
+     * @return the set of nodes of the graph corresponding to an expression
     */
     std::shared_ptr<MatchSolution> getBiggerSolution(void);
 
-    std::vector<std::shared_ptr<MatchSolution>> getSolutions(void);
+    inline std::vector<std::shared_ptr<MatchSolution>> getSolutions(void) const noexcept {
+        return mSolve;
+    }
 
 private:
 
@@ -75,7 +79,6 @@ private:
- * @brief recurrent function use to init mSolve in the constructor
+ * @brief Recursive function used to initialize mSolve in the constructor
  *
  **/
-
 void _generateCombination( std::size_t idxSubStm, std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence,const std::string& query,const std::vector<NodePtr>& startNodes);
 
 };
diff --git a/include/aidge/nodeTester/ConditionalParser.hpp b/include/aidge/nodeTester/ConditionalParser.hpp
index a99f5374182f57c0adca3b4d44691ff4e37de44d..c21eca0407b77808287138fd39e33c00d241fb70 100644
--- a/include/aidge/nodeTester/ConditionalParser.hpp
+++ b/include/aidge/nodeTester/ConditionalParser.hpp
@@ -38,7 +38,6 @@ class ConditionalParser{
      */
     ConditionalParser(const std::string ConditionalExpressions);
 
-    virtual ~ConditionalParser() = default;
     /**
      * @brief AST graph creation function
      * @return The AST tree
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 65c7e8ce0e47bd470e2a1499a682ed2f2c8c2dbc..0c285402929ab7b071d732180891de1b738dc4a8 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -16,52 +16,38 @@
 #include <vector>
 #include <cmath>
 #include <memory>
-#include <array>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
 
-template <std::size_t NUM>
-class Add_Op : public Operator,
-    public Registrable<Add_Op<NUM>, std::string, std::unique_ptr<OperatorImpl>(const Add_Op<NUM>&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, NUM> mInputs;
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
+class Add_Op : public OperatorTensor,
+    public Registrable<Add_Op, std::string, std::unique_ptr<OperatorImpl>(const Add_Op&)> {
 public:
     static constexpr const char* Type = "Add";
 
-    constexpr Add_Op()
-            : Operator(Type)
+    Add_Op(const IOIndex_t nbIn)
+        : OperatorTensor(Type, nbIn, 0, 1)
     {
-        assert(NUM > 0 && "Add should have at least one input");
-        for (std::size_t i = 0; i<NUM; ++i) {
-            mInputs[i] = std::make_shared<Tensor>();
+        if (nbIn == 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
         }
-        setDatatype(DataType::Float32);
     }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Add_Op(const Add_Op<NUM>& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    Add_Op(const Add_Op& op)
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        assert(NUM > 0 && "Add should have at least one input");
-        for (std::size_t i = 0; i<NUM; ++i) {
-            mInputs[i] = std::make_shared<Tensor>();
-        }
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Add_Op<NUM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Add_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -81,88 +67,26 @@ public:
     //     return *in;
     // }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            const auto expectedDims =  mInputs[0]->dims();
-            std::size_t nonEmptyInputTensor = 1;
-            for (; nonEmptyInputTensor<NUM && (!mInputs[nonEmptyInputTensor]->empty()); ++nonEmptyInputTensor) {
-                assert(expectedDims == mInputs[nonEmptyInputTensor]->dims());
-            }
-            if (nonEmptyInputTensor == NUM) {
-                mOutput->resize(expectedDims);
-            }
-        }
-    }
-
-    bool outputDimsForwarded() const override final {
-        std::size_t forwarded = 0;
-        for (; forwarded < NUM && (!mInputs[forwarded]->empty()); ++forwarded) {}
-        return ((forwarded==NUM) && !(mOutput->empty()));
-    }
 
     // void checkDims() const override final {
     //     assert(outputDimsForwarded());
     //     for (const auto& in : mInputs) {
-    //         assert(in->dims() == mOutput->dims());
+    //         assert(in->dims() == mOutputs[0]->dims());
     //     }
     // }
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "Add Operators has only 1 outputs");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
 
 
     void setBackend(const std::string& name) override {
-        mImpl = Registrar<Add_Op<NUM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        for (std::size_t i = 0; i < NUM; ++i) {
-            mInputs[i]->setBackend(name);
-        }
-    }
-
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mImpl = Registrar<Add_Op>::create(name)(*this);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        for (std::size_t i = 0; i < NUM; ++i) {
-            mInputs[i]->setDatatype(datatype);
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            getInput(i)->setBackend(name);
         }
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return NUM; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return NUM; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-        static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName(){
         return {"data_input_0", "data_input_n"};
     }
     static const std::vector<std::string> getOutputsName(){
@@ -170,9 +94,8 @@ public:
     }
 };
 
-template <std::size_t NUM>
-inline std::shared_ptr<Node> Add(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Add_Op<NUM>>(), name);
+inline std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
 }
 }
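
Add_Op loses its compile-time NUM template parameter: the arity becomes a runtime constructor argument. Call sites change as sketched below (old spelling shown for contrast):

    // before this patch: arity fixed at compile time
    //   auto add = Aidge::Add<3>("add0");
    // after: arity chosen at run time
    auto add = Aidge::Add(3, "add0");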
 
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index dfcd0d5b3b4d892f201485e85710d42cd5b71dba..f0f9f6c54ed1953ed31b713ce19edc7a8e594d4a 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -19,7 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,15 +29,11 @@ namespace Aidge {
 enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
-class AvgPooling_Op : public Operator,
+class AvgPooling_Op : public OperatorTensor,
                 public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
                 public StaticAttributes<AvgPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>> {
-private:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
     static constexpr const char *Type = "AvgPooling";
@@ -52,24 +48,19 @@ public:
 
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
-                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {
-        setDatatype(DataType::Float32);
-    }
+                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     AvgPooling_Op(const AvgPooling_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -80,83 +71,73 @@ public:
         return std::make_shared<AvgPooling_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 1 && "operators supports only 3 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInput->empty()) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
+        // check inputs have been associated
+        if (!getInput(0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        if (!(getInput(0)->empty())) {
+            std::array<DimSize_t, DIM + 2> outputDims;
+            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+            outputDims[0] = inputDims[0];
+            outputDims[1] = inputDims[1];
 
             for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                            std::floor(static_cast<float>(mInput->dims()[dim+2] -
+                                            std::floor(static_cast<float>(inputDims[dim+2] -
                                                                     this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
                                             static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
             }
-            outputDims[1] = mInput->dims()[1];
-            outputDims[0] = mInput->dims()[0];
-            mOutput->resize(outputDims);
+            getOutput(0)->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return *(mInput.get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "AvgPooling Operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "AvgPooling Operators has only 1 outputs");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
 
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
+    // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+    //     if (outputIdx != 0) {
+    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "AvgPooling_Op Operator has got only one output Tensor.");
+    //     }
+    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+    //         // Offset
+    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
+    //         std::vector<DimSize_t> inputIdxDims = outputIdxDims;
+
+    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+    //             }
+    //         }
+
+    //         // padding is not a parameter of AvgPooling_Op. It is handled in Pad_Op Operator
+    //         // Width
+    //         std::vector<DimSize_t> inputDims;
+    //         inputDims.push_back(outputDims[0]); // same batch value
+    //         inputDims.push_back(outputDims[1]); // same channel value
+
+    //         for (DimIdx_t i = 0; i < DIM; ++i) {
+    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+    //                         * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
+    //                         + 1
+    //                         + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
+    //             inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
+    //         }
+    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
+    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInput->getIdx(inputIdxDims), inputDims));
+    //         return res;
+    //     }
+    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    // }
 
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -190,4 +171,4 @@ const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
                                                           "KernelDims"};
 }
 
-#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
+#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
\ No newline at end of file
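
For reference, the rewritten computeOutputDims() applies the usual pooling formula per spatial dimension: out = 1 + floor((in - kernel) / stride). For example, a 1x1x32x32 NCHW input with kernel 3 and stride 2 gives 1 + floor((32 - 3) / 2) = 15, i.e. a 1x1x15x15 output; the batch and channel dimensions (outputDims[0] and outputDims[1]) pass through unchanged.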
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index da7360c8ba3816cdfe1d2d00f80b08808a80f961..09a9bb9efac81431673ef3449f717fbcb9af5108 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -19,27 +19,20 @@
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class BatchNormAttr { Epsilon, Momentum };
 
+enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
-class BatchNorm_Op : public Operator,
+class BatchNorm_Op : public OperatorTensor,
                 public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
                 public StaticAttributes<BatchNormAttr, float, float> {
 public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 5> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>(), std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
-   public:
     static constexpr const char *Type = "BatchNorm";
 
     BatchNorm_Op() = delete;
@@ -49,25 +42,19 @@ public:
     using attr = typename Attributes_::template attr<e>;
 
     constexpr BatchNorm_Op(float epsilon, float momentum)
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 4, 1),
           Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
-                           attr<BatchNormAttr::Momentum>(momentum)),
-          mOutput(std::make_shared<Tensor>()) {
-        setDatatype(DataType::Float32);
-    }
+                           attr<BatchNormAttr::Momentum>(momentum)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     BatchNorm_Op(const BatchNorm_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -87,83 +74,41 @@ public:
     //     return *in;
     // }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 5 && "operators supports only 5 inputs");
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) {
-                if(mInputs[i]->size() != mInputs[0]->dims()[1]) {
-                    mInputs[i]->resize(std::array<DimSize_t, 1>({mInputs[0]->dims()[1]}));
+        // check inputs have been associated
+        bool associated = true;
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            // guard against inputs that were never associated (null shared_ptr)
+            associated &= getInput(i) ? !(getInput(i)->empty()) : false;
+        }
+        if (associated) {
+            const DimSize_t nbChannels =  getInput(0)->dims()[1];
+            for (std::size_t i = nbData(); i < nbInputs(); ++i) {
+                if(getInput(i)->size() != nbChannels) {
+                    // /!\ Input size should be handled BEFORE calling this function
+                    // This should raise an error
+                    getInput(i)->resize(std::array<DimSize_t, 1>({getInput(0)->dims()[1]}));
                 }
             }
-            mOutput->resize(mInputs[0]->dims());
+            mOutputs[0]->resize(getInput(0)->dims());
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 5 && "operators supports only 5 inputs");
-        return *(mInputs[inputIdx].get()); }
-
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 5 && "BatchNorm Operators supports only 5 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "BatchNorm Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 5 && "operators supports only 5 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string &name) override {
         mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[1]->setBackend(name);
-        mInputs[2]->setBackend(name);
-        mInputs[3]->setBackend(name);
-        mInputs[4]->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[1]->setDatatype(datatype);
-        mInputs[2]->setDatatype(datatype);
-        mInputs[3]->setDatatype(datatype);
-        mInputs[4]->setDatatype(datatype);
+        getInput(1)->setBackend(name);
+        getInput(2)->setBackend(name);
+        getInput(3)->setBackend(name);
+        getInput(4)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 5; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-    static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName() {
         return {"data_input", "scale", "shift", "mean", "variance"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
@@ -187,4 +132,4 @@ template <>
 const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
 }
 
-#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
+#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
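
With OperatorTensor(Type, 1, 4, 1), BatchNorm now declares one data input plus four parameter inputs, matching getInputsName(). Expected shapes for an NCHW data input, following the resizing rule in computeOutputDims() above:

    // input 0 (data):                        {N, C, H, W}
    // inputs 1-4 (scale, shift, mean, var):  {C} each
    // output 0:                              {N, C, H, W}, same dims as input 0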
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 13543c67b1d7b632692961786ef4e951d7758100..8d9114c17b4a5692f04d90ceec725858ecace0a7 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -13,51 +13,39 @@
 #define AIDGE_CORE_OPERATOR_CONCAT_H_
 
 #include <cassert>
+#include <cmath>
+#include <numeric>
 #include <memory>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 enum class ConcatAttr { Axis };
 
-class Concat_Op : public Operator,
-                public Registrable<Concat_Op,
-                                   std::string,
-                                   std::unique_ptr<OperatorImpl>(const Concat_Op&)>,
-                public StaticAttributes<ConcatAttr, int> {
-public:
-    // FIXME: change accessibility    
-    IOIndex_t mNbIn;
-    std::vector<std::shared_ptr<Tensor>> mInputs;
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
+class Concat_Op : public OperatorTensor,
+    public Registrable<Concat_Op, std::string, std::unique_ptr<OperatorImpl>(const Concat_Op&)>,
+    public StaticAttributes<ConcatAttr, DimSize_t> {
 public:
     static constexpr const char* Type = "Concat";
 
-    Concat_Op() = delete;
+    using Attributes_ = StaticAttributes<ConcatAttr, DimSize_t>;
+    template <ConcatAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
-    
-    using Attributes_ = StaticAttributes<ConcatAttr, int>;
-    template <ConcatAttr e> using attr = typename Attributes_::template attr<e>;
-    Concat_Op(int axis, IOIndex_t nbIn)
-            : Operator(Type),
-              mNbIn(nbIn),
-              Attributes_(attr<ConcatAttr::Axis>(axis))
+    Concat_Op(const IOIndex_t nbIn, const DimSize_t axis)
+        : OperatorTensor(Type, nbIn, 0, 1),
+          Attributes_(attr<ConcatAttr::Axis>(axis))
     {
-        mInputs = std::vector<std::shared_ptr<Tensor>>(nbIn);
-        for (std::size_t i = 0; i < nbIn; ++i) {
-            mInputs[i] = std::make_shared<Tensor>();
+        if (nbIn == 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Concat operator should have at least one input.");
         }
-        setDatatype(DataType::Float32);
     }
 
     /**
@@ -65,18 +53,10 @@ public:
      * @param op Operator to copy.
      */
     Concat_Op(const Concat_Op& op)
-        : Operator(Type),
-          mNbIn(op.mNbIn),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        mImpl = op.mImpl ? Registrar<Concat_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
-        mInputs = std::vector<std::shared_ptr<Tensor>>(op.mNbIn);
-        for (std::size_t i = 0; i < op.mNbIn; ++i) {
-            mInputs[i] = std::make_shared<Tensor>();
-        }
-        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Concat_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -87,11 +67,6 @@ public:
         return std::make_shared<Concat_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < mNbIn && "index out of bound");
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInputs.empty() && !mInputs[0]->empty())
+        if (nbInputs() > 0 && getInput(0) && !getInput(0)->empty())
@@ -99,7 +74,7 @@ public:
             Concat_Op::Attrs attr = getStaticAttributes();
             const int& axis = static_cast<const int&>(std::get<0>(attr));
             std::size_t dimOnAxis = 0;
-            for(std::size_t i=0; i<mNbIn; ++i)
+            for (std::size_t i = 0; i < nbInputs(); ++i)
             {
-                dimOnAxis += mInputs[i]->dims()[axis];
+                dimOnAxis += getInput(i)->dims()[axis];
             }
@@ -110,65 +85,21 @@ public:
                 else
-                    outputDims.push_back(mInputs[0]->dims()[i]);
+                    outputDims.push_back(getInput(0)->dims()[i]);
             }
-            mOutput->resize(outputDims);
+            mOutputs[0]->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < mNbIn) && "input index out of range for this instance of Concat operator");
-        return *mInputs[inputIdx];
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < mNbIn) && "input index out of range for this instance of Concat operator");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Concat operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < mNbIn) && "input index out of range for this instance of Concat operator");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "Concat operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Concat_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        for (std::size_t i = 0; i < mNbIn; ++i) {
-            mInputs[i]->setBackend(name);
-        }
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        for (std::size_t i = 0; i < mNbIn; ++i) {
-            mInputs[i]->setDatatype(datatype);
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            getInput(i)->setBackend(name);
         }
-
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return mNbIn; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return mNbIn; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input_0", "data_input_n"};
     }
@@ -177,8 +108,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Concat(int axis, IOIndex_t nbIn, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Concat_Op>(axis, nbIn), name);
+inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const DimSize_t axis, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
 }
 } // namespace Aidge
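
Note that the factory argument order flips from Concat(axis, nbIn) to Concat(nbIn, axis). Existing call sites must swap their arguments; the compiler cannot catch a missed swap since both parameters are integral:

    // before: auto cat = Aidge::Concat(/*axis=*/1, /*nbIn=*/2, "cat0");
    auto cat = Aidge::Concat(/*nbIn=*/2, /*axis=*/1, "cat0");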
 
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index b1e3e34b0eff681632d90cb8314ebd8c96722eec..4f0fb1ea2717c1fdf4443c450000ec3a56bb9b5b 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -14,12 +14,13 @@
 
 #include <array>
 #include <cmath>
+#include <cstddef>
 #include <numeric>
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,17 +30,12 @@ namespace Aidge {
 enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
 
 template <DimIdx_t DIM>
-class Conv_Op : public Operator,
+class Conv_Op : public OperatorTensor,
                 public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
                 public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
                                        DimSize_t, std::array<DimSize_t, DIM>> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
-   public:
+public:
     static constexpr const char *Type = "Conv";
 
     Conv_Op() = delete;
@@ -49,32 +45,27 @@ public:
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr Conv_Op(DimSize_t in_channels,
-                      DimSize_t out_channels,
-                      const std::array<DimSize_t, DIM> &kernel_dims,
-                      const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
-        : Operator(Type),
-          Attributes_(attr<ConvAttr::StrideDims>(stride_dims),
-                      attr<ConvAttr::DilationDims>(dilation_dims),
-                      attr<ConvAttr::InChannels>(in_channels),
-                      attr<ConvAttr::OutChannels>(out_channels),
-                      attr<ConvAttr::KernelDims>(kernel_dims)) {
-        setDatatype(DataType::Float32);
-    }
+    constexpr Conv_Op(DimSize_t inChannels,
+                      DimSize_t outChannels,
+                      const std::array<DimSize_t, DIM> &kernelDims,
+                      const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, 1, 2, 1),
+          Attributes_(attr<ConvAttr::StrideDims>(strideDims),
+                      attr<ConvAttr::DilationDims>(dilationDims),
+                      attr<ConvAttr::InChannels>(inChannels),
+                      attr<ConvAttr::OutChannels>(outChannels),
+                      attr<ConvAttr::KernelDims>(kernelDims)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Conv_Op(const Conv_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -98,16 +89,18 @@ public:
 
     // }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
+        // check inputs have been associated
+        bool associated = true;
+        for (IOIndex_t i = 0; i < 3; ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            std::array<DimSize_t, DIM + 2> outputDims{};
+            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
             for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
                 const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
@@ -115,68 +108,76 @@ public:
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
+                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
                               static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
             }
 
             outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
-            outputDims[0] = mInputs[0]->dims()[0];
-            mOutput->resize(outputDims);
+            outputDims[0] = inputDims[0];
+            mOutputs[0]->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "Conv Operators supports only 3 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Conv Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
 
+    // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+    //     if (outputIdx != 0) {
+    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
+    //     }
+    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+    //         // Offset
+    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
+    //         auto inputIdxDims = outputIdxDims; // batch idx is the same
+    //         inputIdxDims[1] = 0; // each channel is used so start with the first one
+
+    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+    //             }
+    //         }
+
+    //         // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
+    //         // Input
+    //         // same batch value, every input channel is used
+    //         std::vector<DimSize_t> inputDims{outputDims[0], mInputs[0]->dims()[1]};
+    //         for (DimIdx_t i = 0; i < DIM; ++i) {
+    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+    //                         * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+    //                         + 1
+    //                         + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+    //                         * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+    //             inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
+    //         }
+
+    //         // Weight
+    //         // same output value, every input channel is used
+    //         std::vector<DimSize_t> weightDims{outputDims[0], mInputs[0]->dims()[1]};
+    //         weightDims.insert(weightDims.end(), this->template getAttr<ConvAttr::KernelDims>()[0], this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(DIM)]);
+    //         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+    //         weightIdxDims[0] = outputIdxDims[1];
+
+    //         // Bias
+    //         const std::vector<DimSize_t> biasDims{outputDims[0]};
+    //         const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
+
+    //         // Result
+    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
+    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
+    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[1]->getIdx(weightIdxDims), weightDims));
+    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[2]->getIdx(biasIdxDims), biasDims));
+    //         return res;
+    //     }
+    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    // }
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[1]->setBackend(name);
-        mInputs[2]->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-        mInputs[2]->setDatatype(datatype);
+        getInput(1)->setBackend(name);
+        getInput(2)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
@@ -186,32 +187,32 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Conv(DimSize_t in_channels,
-                                  DimSize_t out_channels,
-                                  const std::array<DimSize_t, DIM> &kernel_dims,
+inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
+                                  DimSize_t outChannels,
+                                  const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
-                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+                                  const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
     // FIXME: properly handle default w&b initialization in every case
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims), name);
     // addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
-    addProducer(conv, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
-    addProducer(conv, 2, std::array<DimSize_t, 1>({out_channels}), "b");
+    addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
+    addProducer(conv, 2, std::array<DimSize_t, 1>({outChannels}), "b");
     return conv;
 }
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> Conv(
-    DimSize_t in_channels,
-    DimSize_t out_channels,
-    DimSize_t const (&kernel_dims)[DIM],
+    DimSize_t inChannels,
+    DimSize_t outChannels,
+    DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    return Conv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, dilation_dims);
+    return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims);
 }
 }  // namespace Aidge
 
@@ -226,4 +227,4 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
 };
 }
 
-#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
+#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
\ No newline at end of file
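
The Conv migration above is caller-visible mostly through the camelCase parameter rename; the factory signature is otherwise unchanged. A minimal usage sketch (the channel counts and kernel size are illustrative values, not taken from this patch):

```cpp
#include <memory>
#include "aidge/operator/Conv.hpp"

void convSketch() {
    // DIM is deduced from the C-style kernel array: here DIM == 2.
    std::shared_ptr<Aidge::Node> conv =
        Aidge::Conv(3, 32, {5, 5}, "conv1"); // strideDims/dilationDims default to 1

    // Shape rule from computeOutputDims() above, per spatial dimension:
    //   kernelExtent = dilation * (kernel - 1) + 1 = 1 * (5 - 1) + 1 = 5
    //   out = 1 + floor((in - kernelExtent) / stride) = 1 + floor((32 - 5) / 1) = 28
    // so an {N, 3, 32, 32} input yields an {N, 32, 28, 28} output, now read
    // through getOutput(0) instead of the removed output(0) accessor.
}
```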
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 4caec2032a3c61529d452ae855f00c1da411af10..ca6401e0ed3ac888f12858853f0d8f494c226041 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -19,7 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,20 +29,14 @@ namespace Aidge {
 enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
 
 template <DimIdx_t DIM>
-class ConvDepthWise_Op : public Operator,
+class ConvDepthWise_Op : public OperatorTensor,
                 public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
                 public StaticAttributes<ConvDepthWiseAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        DimSize_t,
                                        std::array<DimSize_t, DIM>> {
-   public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
-   public:
+public:
     static constexpr const char *Type = "ConvDepthWise";
 
     ConvDepthWise_Op() = delete;
@@ -55,29 +49,25 @@ class ConvDepthWise_Op : public Operator,
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+    constexpr ConvDepthWise_Op(const DimSize_t nbChannels,
+                               const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
                       attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-                      attr<ConvDepthWiseAttr::Channels>(0),
-                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {
-        setDatatype(DataType::Float32);
-    }
+                      attr<ConvDepthWiseAttr::Channels>(nbChannels),
+                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -88,16 +78,20 @@ class ConvDepthWise_Op : public Operator,
         return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
+        // check inputs have been associated
+        // TODO: add a check of input dimensions?
+        bool associated = true;
+        for (IOIndex_t i = 0; i < 3; ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
+            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
             for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
                 const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
@@ -105,10 +99,9 @@ class ConvDepthWise_Op : public Operator,
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
+                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
                               static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
             }
-            this->template getAttr<ConvDepthWiseAttr::Channels>() = mInputs[0]->dims()[1];
             // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
             // if (mInputs[1]->empty()) {
             //     mInputs[1]->resize(weightDims);
@@ -116,66 +109,57 @@ class ConvDepthWise_Op : public Operator,
             // if (mInputs[2]->empty()) {
             //     mInputs[2]->resize({mInputs[0]->dims()[1]});
             // }
-            outputDims[1] = mInputs[0]->dims()[1];
-            outputDims[0] = mInputs[0]->dims()[0];
-            mOutput->resize(outputDims);
+            outputDims[1] = inputDims[1];
+            outputDims[0] = inputDims[0];
+            mOutputs[0]->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "ConvDepthWise Operators supports only 3 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "ConvDepthWise Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
+    // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+    //     if (outputIdx != 0) {
+    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
+    //     }
+    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+    //         // Offset
+    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
+    //         auto inputIdxDims = outputIdxDims; // batch idx is the same
+
+    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+    //             }
+    //         }
+
+    //         // padding is not a parameter of ConvDepthWise_Op. It is handled in Pad_Op Operator
+    //         // Input
+    //         std::vector<DimSize_t> inputDims;
+    //         inputDims.push_back(outputDims[0]); // same batch value
+    //         inputDims.push_back(outputDims[1]); // same channel value
+
+    //         for (DimIdx_t i = 0; i < DIM; ++i) {
+    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+    //                         * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+    //                         + 1
+    //                         + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+    //                         * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+    //             inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
+    //         }
+    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
+    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
+    //         return res;
+    //     }
+    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    // }
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[1]->setBackend(name);
-        mInputs[2]->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-        mInputs[2]->setDatatype(datatype);
+        getInput(1)->setBackend(name);
+        getInput(2)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
@@ -185,27 +169,29 @@ class ConvDepthWise_Op : public Operator,
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
+inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
+                                           const std::array<DimSize_t, DIM> &kernelDims,
                                            const std::string& name = "",
-                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                           const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+                                           const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                                           const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
     // FIXME: properly handle default w&b initialization in every case
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), name);
-    addProducer(convDW, 1, std::array<DimSize_t,0>({}), "w");
-    addProducer(convDW, 2, std::array<DimSize_t,0>({}), "b");
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name);
+    addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
+    addProducer(convDW, 2, std::array<DimSize_t, 1>({nbChannels}), "b");
     return convDW;
 }
 
 // helper with C-style array instead of std::array for kernelDims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> ConvDepthWise(
-    DimSize_t const (&kernel_dims)[DIM],
+    const DimSize_t nbChannels,
+    DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    return ConvDepthWise(to_array(kernel_dims), name, stride_dims, dilation_dims);
+    return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims);
 }
 }  // namespace Aidge
 
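Unlike Conv, the ConvDepthWise change breaks existing call sites: the channel count is no longer inferred from the input at computeOutputDims() time (the attribute assignment removed above) but is a mandatory first argument, which lets the weight and bias Producers be created with concrete dims. A before/after sketch (the value 32 is illustrative):

```cpp
#include "aidge/operator/ConvDepthWise.hpp"

void convDWSketch() {
    // Before this patch: Aidge::ConvDepthWise({3, 3}, "dw1");
    // with weight/bias Producers created empty and resized later.
    auto dw = Aidge::ConvDepthWise(32, {3, 3}, "dw1");
    // Now the weight Producer is {32, 1, 3, 3} and the bias Producer {32}
    // from the start, matching the addProducer() calls above.
}
```
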
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 4213f979cf9d675f523a228095edc5606f9412ee..ba76c0bdecfaf86644a3336a1076064b96b36046 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -17,42 +17,30 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 
-class Div_Op : public Operator,
+class Div_Op : public OperatorTensor,
     public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
     static constexpr const char* Type = "Div";
 
-    Div_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Div_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Div_Op(const Div_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,73 +51,18 @@ public:
         return std::make_shared<Div_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty())
-            mOutput->resize(mInputs[0]->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Div Operator has 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Div Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
+    void computeOutputDims() override final;
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Div_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
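
Div keeps only the declaration of computeOutputDims() in the header; the definition moves to a source file that is not part of the hunks shown here. A plausible reconstruction, assuming it follows the same guard-then-resize pattern as the other operators in this patch (a sketch, not the actual body):

```cpp
#include "aidge/operator/Div.hpp"
#include "aidge/utils/ErrorHandling.hpp"

// Hypothetical definition, for illustration only.
void Aidge::Div_Op::computeOutputDims() {
    // check inputs have been associated
    if (!getInput(0) || !getInput(1)) {
        AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
    }
    if (!getInput(0)->empty()) {
        // Same-dims rule that the removed inline version implemented; a real
        // Div may additionally broadcast, which this sketch does not attempt.
        mOutputs[0]->resize(getInput(0)->dims());
    }
}
```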
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index 1fa961c73ec244a14932611edc4881c113077c21..a0db65d45f6e1522a946f2ce6d5cb9d3b36d2dfa 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -17,7 +17,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,33 +26,21 @@
 
 namespace Aidge {
 
-class Erf_Op : public Operator,
+class Erf_Op : public OperatorTensor,
     public Registrable<Erf_Op, std::string, std::unique_ptr<OperatorImpl>(const Erf_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Erf";
 
-    Erf_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Erf_Op() : OperatorTensor(Type, 1, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Erf_Op(const Erf_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Erf_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Erf_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,68 +51,14 @@ public:
         return std::make_shared<Erf_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "Erf operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Erf operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Erf operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "Erf operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "Erf operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Erf_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
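
Erf is the smallest example of the new base-constructor convention: the three integers handed to OperatorTensor replace the removed nbInputs()/nbDataInputs()/nbOutputs() overrides and read as (data inputs, parameter inputs such as weights and bias, outputs). Collected from the hunks in this patch:

```cpp
// (nbData, nbParam, nbOut) as passed to the OperatorTensor base:
//   Erf_Op()              : OperatorTensor(Type, 1, 0, 1)  // x    -> erf(x)
//   Div_Op()              : OperatorTensor(Type, 2, 0, 1)  // x, y -> x / y
//   Gather_Op(axis)       : OperatorTensor(Type, 2, 0, 1)  // data + indexes
//   FC_Op(...)            : OperatorTensor(Type, 1, 2, 1)  // data + weight, bias
//   ConvDepthWise_Op(...) : OperatorTensor(Type, 1, 2, 1)  // data + weight, bias
//   Identity_Op()         : OperatorTensor(Type, 1, 0, 0)  // outputs alias inputs
```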
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index b949527c51b9330077dd3bd8f8b4bf1f1b9d719c..4cece292cb322c0a58f96380eb0f0083771d3c19 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -21,7 +21,7 @@
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,16 +29,11 @@
 namespace Aidge {
 enum class FCAttr { OutChannels, NoBias };
 
-class FC_Op : public Operator,
+class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
                                  std::unique_ptr<OperatorImpl>(const FC_Op &)>,
               public StaticAttributes<FCAttr, DimSize_t, bool> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "FC";
 
@@ -48,26 +43,21 @@ public:
     template <FCAttr e> using attr = typename Attributes_::template attr<e>;
 
     FC_Op(DimSize_t out_channels, bool noBias)
-            : Operator(Type),
-            Attributes_(
-                attr<FCAttr::OutChannels>(out_channels),
-                attr<FCAttr::NoBias>(noBias))
-    {
-        setDatatype(DataType::Float32);
-    }
+    : OperatorTensor(Type, 1, 2, 1),
+      Attributes_(
+        attr<FCAttr::OutChannels>(out_channels),
+        attr<FCAttr::NoBias>(noBias))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     FC_Op(const FC_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -78,7 +68,7 @@ public:
         return std::make_shared<FC_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         if (inputIdx == 2) {
@@ -86,78 +76,35 @@ public:
             assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
         }
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-        if (inputIdx == 0 && mInputs[0]->nbDims() == 1)
-            mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, mInputs[inputIdx]->size()}));
+        if (inputIdx == 0 && getInput(0)->nbDims() == 1)
+            mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, getInput(inputIdx)->size()}));
     }
 
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            // <in_features**, out_channels>
-            std::array<DimSize_t, 2> weightDims = {this->template getAttr<FCAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
-            // <out_channels, batch>
-            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<FCAttr::OutChannels>()};
-
-            mInputs[1]->resize(weightDims);
-            mOutput->resize(outputDims);
+        bool associated = true;
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            // <batch, OutChannels>
+            mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()});
         }
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(const IOIndex_t /*inputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "FC Operators supports only 3 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "FC Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
     }
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<FC_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-        mInputs[2]->setBackend(name);
-    }
-
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-        mInputs[2]->setDatatype(datatype);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
+        getInput(2)->setBackend(name);
     }
 
-
-    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
@@ -166,11 +113,11 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") {
+inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every case
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
-    addProducer(fc, 1, std::array<DimSize_t, 2>({out_channels, 1}), "w");
-    addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({out_channels})), "b"); // already sets bias dims
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(outChannels, noBias), name);
+    addProducer(fc, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
+    addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({outChannels})), "b"); // already sets bias dims
     return fc;
 }
 } // namespace Aidge
@@ -181,4 +128,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
 
-#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
+#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
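
The FC factory is likewise caller-breaking: inChannels becomes an explicit first argument so the weight Producer is born with its final {outChannels, inChannels} shape instead of the old {out_channels, 1} placeholder that computeOutputDims() used to resize. Migration sketch (64 and 10 are illustrative values):

```cpp
#include "aidge/operator/FC.hpp"

void fcSketch() {
    // Before: auto fc = Aidge::FC(10);
    auto fc = Aidge::FC(64, 10, /*noBias=*/false, "fc1");
    // weight Producer: {10, 64}; bias Producer: {10}
}
```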
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 3a2346d3fcc02f5cfae4cb888ff8440b92ccbcb4..6579331ca6555b17158820b51a6e03a8d2cf425d 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -20,7 +20,7 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
@@ -29,16 +29,11 @@
 namespace Aidge {
 enum class GatherAttr { Axis };
 
-class Gather_Op : public Operator,
+class Gather_Op : public OperatorTensor,
                 public Registrable<Gather_Op,
                                    std::string,
                                    std::unique_ptr<OperatorImpl>(const Gather_Op&)>,
                 public StaticAttributes<GatherAttr, int> {
-public:
-    // FIXME: change accessibility    
-    IOIndex_t mNbIn;
-    std::array<std::shared_ptr<Tensor>, 2> mInputs= {std::make_shared<Tensor>(),std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
     static constexpr const char* Type = "Gather";
@@ -49,25 +44,20 @@ public:
     using Attributes_ = StaticAttributes<GatherAttr, int>;
     template <GatherAttr e> using attr = typename Attributes_::template attr<e>;
     Gather_Op(int axis)
-            : Operator(Type),
+            : OperatorTensor(Type, 2, 0, 1),
             Attributes_(
                 attr<GatherAttr::Axis>(axis))
-    {
-        setDatatype(DataType::Float32);
-    }
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Gather_Op(const Gather_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Gather_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Gather_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -78,12 +68,6 @@ public:
         return std::make_shared<Gather_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "Gather operator supports only 2 inputs");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
     void computeOutputDims() override final {
         if (!mInputs.empty() && !mInputs[0]->empty() && mInputs[1]->nbDims()==2)
         {
@@ -92,63 +76,19 @@ public:
             int axisIdx = this->template getAttr<GatherAttr::Axis>()>=0?this->template getAttr<GatherAttr::Axis>():this->template getAttr<GatherAttr::Axis>()+outDims.size();
             outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
             outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx), indexesDims.begin(),indexesDims.end());
-            mOutput->resize(outDims);
+            mOutputs[0]->resize(outDims);
         }
     }
 
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Gather operator supports only 2 inputs");
-        return *mInputs[inputIdx];
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Gather operator supports only 2 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Gather Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Gather operator supports only 2 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "Gather operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Gather_Op>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(DataType::Int32);
-
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "indexes"};
     }
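
The retained computeOutputDims() above encodes Gather's shape rule: normalise a negative axis, erase the gathered dimension, and splice the indexes dims in its place. A stand-alone restatement with plain std::vector so the rule can be sanity-checked outside Aidge (function name and values are illustrative):

```cpp
#include <cstddef>
#include <vector>

std::vector<std::size_t> gatherOutDims(std::vector<std::size_t> dataDims,
                                       const std::vector<std::size_t>& indexesDims,
                                       int axis) {
    // negative axes count from the back, as in the operator above
    const std::ptrdiff_t axisIdx =
        axis >= 0 ? axis : axis + static_cast<std::ptrdiff_t>(dataDims.size());
    dataDims.erase(dataDims.begin() + axisIdx);
    dataDims.insert(dataDims.begin() + axisIdx,
                    indexesDims.begin(), indexesDims.end());
    return dataDims;
}
// e.g. gatherOutDims({4, 5, 6}, {2, 3}, -2) == {4, 2, 3, 6}
```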
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 55ccbf1516fa79663d57e1e44bc4017bc5c8b843..505c5344990453c8f4ab84fa3893e75b216d7a54 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -19,7 +19,7 @@
 #include <cstring>
 
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -27,50 +27,26 @@
 
 namespace Aidge {
 class GenericOperator_Op
-    : public Operator,
+    : public OperatorTensor,
       public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
       public DynamicAttributes {
-   private:
+private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
-    IOIndex_t mNbDataIn;
-    IOIndex_t mNbIn;
-    IOIndex_t mNbOut;
-    std::vector<std::shared_ptr<Tensor>> mInputs;
-    std::vector<std::shared_ptr<Tensor>> mOutputs;
     ComputeDimsFunc mComputeOutputDims;
 
-   public:
-    GenericOperator_Op(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut)
-        : Operator(type), mNbDataIn(nbDataIn), mNbIn(nbIn), mNbOut(nbOut)
-    {
-        mInputs = std::vector<std::shared_ptr<Tensor>>(nbIn);
-        for (std::size_t i = 0; i < nbIn; ++i) {
-            mInputs[i] = std::make_shared<Tensor>();
-        }
-        mOutputs = std::vector<std::shared_ptr<Tensor>>(nbOut);
-        for (std::size_t i = 0; i < nbOut; ++i) {
-            mOutputs[i] = std::make_shared<Tensor>();
-        }
-    }
+public:
+    GenericOperator_Op(const char *type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
+        : OperatorTensor(type, nbData, nbParam, nbOut)
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     GenericOperator_Op(const GenericOperator_Op& op)
-        : Operator(op.type().c_str()), mNbDataIn(op.mNbDataIn), mNbIn(op.mNbIn), mNbOut(op.mNbOut)
-    {
-        // cpy-ctor
-        mInputs = std::vector<std::shared_ptr<Tensor>>(mNbIn);
-        for (std::size_t i = 0; i < mNbIn; ++i) {
-            mInputs[i] = std::make_shared<Tensor>();
-        }
-        mOutputs = std::vector<std::shared_ptr<Tensor>>(mNbOut);
-        for (std::size_t i = 0; i < mNbOut; ++i) {
-            mOutputs[i] = std::make_shared<Tensor>(*op.mOutputs[i]);
-        }
-    }
+        : OperatorTensor(op)
+    {}
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -87,28 +63,19 @@ class GenericOperator_Op
         mComputeOutputDims = func;
     }
 
-    // Override Virtual Opertor methods
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < mNbIn && "operators supports only x inputs");
-
-        if (strcmp(data->type(), Tensor::Type) == 0) {
-            // TODO: associate input only if of type Tensor, otherwise do nothing
-            mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-        }
-    }
 
     void computeOutputDims() override final {
         if (mComputeOutputDims) {
-            std::vector<std::vector<size_t>> inputsDims(mNbIn, std::vector<size_t>());
-            for (std::size_t i = 0; i < mNbIn; ++i) {
-                if (mInputs[i]) {
-                    inputsDims[i] = mInputs[i]->dims();
+            std::vector<std::vector<size_t>> inputsDims(nbInputs(), std::vector<size_t>());
+            for (std::size_t i = 0; i < nbInputs(); ++i) {
+                if (getInput(i)) {
+                    inputsDims[i] = getInput(i)->dims();
                 }
             }
 
             const auto& outputsDims = mComputeOutputDims(inputsDims);
-            assert(outputsDims.size() == mNbOut && "The provided ComputeDimsFunc function returns the wrong number of outputs");
-            for (std::size_t i = 0; i < mNbOut; ++i) {
+            assert(outputsDims.size() == nbOutputs() && "The provided ComputeDimsFunc function returns the wrong number of outputs");
+            for (std::size_t i = 0; i < nbOutputs(); ++i) {
                 mOutputs[i]->resize(outputsDims[i]);
             }
         }
@@ -127,47 +94,11 @@ class GenericOperator_Op
         }
     }
 
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
-        printf("Info: using getRawInput() on a GenericOperator.\n");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
-        printf("Info: using input() on a GenericOperator.\n");
-        return *mInputs[inputIdx];
-    }
-
-
-    std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
-        printf("Info: using getInput() on a GenericOperator.\n");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
-        printf("Info: using getOutput() on a GenericOperator.\n");
-        return mOutputs[outputIdx];
-    }
-
-
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
-        printf("Info: using getRawOutput() on a GenericOperator.\n");
-        return std::static_pointer_cast<Data>(mOutputs[outputIdx]);
-    }
-
-    Tensor& output(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
-        printf("Info: using output() on a GenericOperator.\n");
-        return *mOutputs[outputIdx];
-    }
 
     ~GenericOperator_Op() = default;
 
     void setBackend(const std::string & /*name*/) override { printf("setBackend: not available yet.\n"); }
-    void setDatatype(const DataType & /*datatype*/) override { printf("setDatatype: not available yet.\n"); }
+    void setDataType(const DataType& /*datatype*/) const override { printf("setDataType: not available yet.\n"); }
     void forward() override final {
         if(mImpl){
             mImpl->forward();
@@ -182,9 +113,6 @@ class GenericOperator_Op
             printf("backward: No implementation is linked.\n");
         }
     }
-    inline IOIndex_t nbInputs() const noexcept override final { return mNbIn; };
-    inline IOIndex_t nbDataInputs() const noexcept override final { return mNbDataIn; };
-    inline IOIndex_t nbOutputs() const noexcept override final { return mNbOut; };
 };
 
 /**
@@ -197,9 +125,9 @@ class GenericOperator_Op
  * @param name (optional) name of the Operator.
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
  */
-inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut,
+inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
                                              const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbDataIn, nbIn, nbOut), name);
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
 }
 }  // namespace Aidge
 
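With the I/O bookkeeping absorbed by OperatorTensor, the only GenericOperator-specific machinery left is the pluggable shape function. A usage sketch on the C++ side (the "MyOp" type and the identity shape rule are illustrative; Node::getOperator() is assumed for reaching the wrapped operator):

```cpp
#include <cstddef>
#include <memory>
#include <vector>
#include "aidge/operator/GenericOperator.hpp"

void genericSketch() {
    // (type, nbData, nbParam, nbOut): same convention as the typed operators.
    auto node = Aidge::GenericOperator("MyOp", 1, 0, 1, "myOp");
    auto op = std::dynamic_pointer_cast<Aidge::GenericOperator_Op>(node->getOperator());
    // Identity shape rule: the single output copies the single input's dims.
    op->setComputeOutputDims([](const std::vector<std::vector<std::size_t>>& inputsDims) {
        return std::vector<std::vector<std::size_t>>{inputsDims[0]};
    });
}
```
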
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 003990ecc4f438b8b2cdbd14aaaa690549f9e673..c5cd9bb62e0097c9a0e646caaf14cddd73bf512d 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -17,12 +17,13 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
 
@@ -33,19 +34,14 @@ namespace Aidge {
  * Hence we need to update this class to remove the use of Impl.
  *
  */
-class Identity_Op : public Operator,
+class Identity_Op : public OperatorTensor,
     public Registrable<Identity_Op, std::string, std::unique_ptr<OperatorImpl>(const Identity_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Identity";
 
     Identity_Op()
-            : Operator(Type)
+            : OperatorTensor(Type, 1, 0, 0)
     {
-        setDatatype(DataType::Float32);
         mImpl = std::make_shared<OperatorImpl>(*this);
     }
 
@@ -54,10 +50,8 @@ public:
      * @param op Operator to copy.
      */
     Identity_Op(const Identity_Op& op)
-        : Operator(Type)
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mInput->dataType());
         mImpl = std::make_shared<OperatorImpl>(*this);
     }
 
@@ -69,64 +63,53 @@ public:
         return std::make_shared<Identity_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
     void computeOutputDims() override final {} // Do nothing
 
     bool outputDimsForwarded() const override final {
-        return true;
+        if (mInputs[0])
+            return !mInputs[0]->empty();
+        else
+            return false;
     }
 
 
-    void forward() override { runHooks(); }
-
-    void backward() override { }
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    // output = input
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mInput.get()); }
+    void forward() override final { runHooks(); }
 
+    void backward() override final { }
 
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "ReLU Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
+    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override final {
+        if (strcmp(data->type(), "Tensor") != 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as outputs", type().c_str());
+        }
+        if (outputIdx >= nbInputs()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbInputs());
+        }
+        *mInputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data);
     }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Identity Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mInput;
-    }
-
 
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
+    void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override final {
+        if (strcmp(data->type(), "Tensor") != 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
+        }
+        if (outputIdx >= nbInputs()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbInputs());
+        }
+        *mInputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
     }
 
-
-
-    void setBackend(const std::string& name) override {
+    const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const override final {
+        if (outputIdx >= nbInputs()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbInputs());
+        }
+        return mInputs[outputIdx];
+    }
+    void setBackend(const std::string& name) override final {
         // setBackend does nothing: an Identity node has no backend, it just passes the same Tensor through
     }
-    void setDatatype(const DataType& datatype) override {
+    void setDataType(const DataType& dataType) const override final {
         // setDataType does nothing: an Identity node has no data type of its own, it just passes the same Tensor through
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
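
Note: with this refactor, Identity_Op's getOutput() returns the input slot itself. A minimal
usage sketch (hedged: identitySketch and its wiring are illustrative, not part of this patch):

    #include <cassert>
    #include <memory>
    #include "aidge/operator/Identity.hpp"

    void identitySketch(const std::shared_ptr<Aidge::Tensor>& someTensor) {
        Aidge::Identity_Op idty;              // assumed default-constructible
        assert(!idty.outputDimsForwarded());  // false until an input holds data
        idty.associateInput(0, someTensor);   // shares the pointer, no copy
        // getOutput(0) now returns the very Tensor that was passed in.
    }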
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index bcdcbc7cabd8eda46a7c0c4930f317e562fb46a0..800c8c61d876b6f33cce1af3365179b7eb14b68d 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -17,7 +17,7 @@
 
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -29,14 +29,9 @@ enum class LeakyReLUAttr {
     NegativeSlope
 };
 
-class LeakyReLU_Op : public Operator,
+class LeakyReLU_Op : public OperatorTensor,
     public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
     public StaticAttributes<LeakyReLUAttr, float> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "LeakyReLU";
 
@@ -46,25 +41,20 @@ public:
     template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
 
     LeakyReLU_Op(float negativeSlope)
-            : Operator(Type),
-            Attributes_(
-                attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
-    {
-        setDatatype(DataType::Float32);
-    }
+        : OperatorTensor(Type, 1, 0, 1),
+          Attributes_(
+            attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     LeakyReLU_Op(const LeakyReLU_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -75,69 +65,17 @@ public:
         return std::make_shared<LeakyReLU_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "LeakyReLU Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "LeakyReLU Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-        static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
     static const std::vector<std::string> getOutputsName(){
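
The OperatorTensor(Type, 1, 0, 1) base call above encodes (nbData, nbParam, nbOut). As a
hedged illustration of the convention (MyUnary_Op is invented for this sketch):

    #include <memory>
    #include <string>
    #include "aidge/operator/OperatorTensor.hpp"

    // Illustrative only: a unary, parameter-free operator under the new scheme.
    class MyUnary_Op : public Aidge::OperatorTensor {
    public:
        static constexpr const char* Type = "MyUnary";
        MyUnary_Op() : OperatorTensor(Type, 1, 0, 1) {}           // 1 data input, 0 params, 1 output
        MyUnary_Op(const MyUnary_Op& op) : OperatorTensor(op) {}  // copies outputs, not inputs
        std::shared_ptr<Aidge::Operator> clone() const override {
            return std::make_shared<MyUnary_Op>(*this);
        }
        void setBackend(const std::string& /*name*/) override {}  // backend wiring omitted
    };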
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index eed1ec04535aa5896aa3d01a27d8023d37a42183..23c12d45802e25f29891c48164acfb2d3ad137ac 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -21,7 +21,7 @@
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,15 +29,11 @@
 namespace Aidge {
 enum class MatMulAttr { OutChannels };
 
-class MatMul_Op : public Operator,
+class MatMul_Op : public OperatorTensor,
               public Registrable<MatMul_Op,
                                  std::string,
                                  std::unique_ptr<OperatorImpl>(const MatMul_Op &)>,
               public StaticAttributes<MatMulAttr, DimSize_t> {
-public:
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "MatMul";
 
@@ -47,25 +43,20 @@ public:
     template <MatMulAttr e> using attr = typename Attributes_::template attr<e>;
 
     MatMul_Op(DimSize_t out_channels)
-            : Operator(Type),
+            : OperatorTensor(Type, 1, 1, 1),
             Attributes_(
                 attr<MatMulAttr::OutChannels>(out_channels))
-    {
-        setDatatype(DataType::Float32);
-    }
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     MatMul_Op(const MatMul_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -76,78 +67,31 @@ public:
         return std::make_shared<MatMul_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operators supports only 2 inputs");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            // <in_features**, out_channels>
-            std::array<DimSize_t, 2> weightDims = {this->template getAttr<MatMulAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
-            // <out_channels, batch>
-            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()};
-
-            mInputs[1]->resize(weightDims);
-            mOutput->resize(outputDims);
+        bool associated = true;
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            // <batch, OutChannels>
+            mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()});
         }
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operators supports only 2 inputs");
-        return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "MatMul Operators has 2 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "MatMul Operators has 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operators supports only 2 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
     }
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<MatMul_Op>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-    }
-
-
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight"};
     }
@@ -156,10 +100,10 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> MatMul(DimSize_t out_channels, const std::string& name = "") {
+inline std::shared_ptr<Node> MatMul(DimSize_t inChannels, DimSize_t outChannels, const std::string& name = "") {
     // FIXME: properly handle default w initialization in every case
-    auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(out_channels), name);
-    addProducer(matmul, 1, std::array<DimSize_t, 2>({out_channels, 1}), "w");
+    auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(outChannels), name);
+    addProducer(matmul, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
     return matmul;
 }
 } // namespace Aidge
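
A hedged call-site sketch for the widened factory signature (channel counts illustrative):

    // The weight producer "w" is now sized {outChannels, inChannels}
    // instead of the former placeholder {out_channels, 1}.
    auto fc = Aidge::MatMul(64, 10, "classifier");  // inChannels = 64, outChannels = 10
    // computeOutputDims() will later resize the output to {batch, 10}.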
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index bcf47f13cc34132f668ea1ffcb2c91ed6f06f44d..ad50a27a94a2217c94445fb556c84ec7f121c6b9 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -19,7 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,17 +29,12 @@ namespace Aidge {
 enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
-class MaxPooling_Op : public Operator,
+class MaxPooling_Op : public OperatorTensor,
                 public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
                 public StaticAttributes<MaxPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        bool> {
-private:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char *Type = "MaxPooling";
 
@@ -55,26 +50,21 @@ public:
     constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                             bool ceil_mode = false)
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
                       attr<MaxPoolingAttr::KernelDims>(kernel_dims),
-                      attr<MaxPoolingAttr::CeilMode>(ceil_mode)),
-          mOutput(std::make_shared<Tensor>()) {
-        setDatatype(DataType::Float32);
-    }
+                      attr<MaxPoolingAttr::CeilMode>(ceil_mode))
+        {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     MaxPooling_Op(const MaxPooling_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -85,17 +75,14 @@ public:
         return std::make_shared<MaxPooling_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 1 && "operators supports only 3 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInput->empty()) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
+        if (!getInput(0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        if (!(getInput(0)->empty())) {
+            std::array<DimSize_t, DIM + 2> outputDims{};
+            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
             std::function<float(float)> roundingFunction;
             if (this->template getAttr<MaxPoolingAttr::CeilMode>()) {
@@ -106,69 +93,25 @@ public:
 
             for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                            roundingFunction(static_cast<float>(mInput->dims()[dim+2] -
+                                            roundingFunction(static_cast<float>(inputDims[dim+2] -
                                                                     this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
                                             static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
             }
-            outputDims[1] = mInput->dims()[1];
-            outputDims[0] = mInput->dims()[0];
-            mOutput->resize(outputDims);
+            outputDims[1] = inputDims[1];
+            outputDims[0] = inputDims[0];
+            mOutputs[0]->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return *(mInput.get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "MaxPooling Operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "MaxPooling Operators has only 1 outputs");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
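
The spatial rule implemented in computeOutputDims() above, extracted as a hedged standalone
sketch (pooledDim is an invented helper name):

    #include <cmath>
    #include <cstddef>

    // out = 1 + round((in - kernel) / stride), where round is ceil or floor
    // depending on MaxPoolingAttr::CeilMode.
    std::size_t pooledDim(std::size_t in, std::size_t kernel, std::size_t stride, bool ceilMode) {
        const float r = static_cast<float>(in - kernel) / static_cast<float>(stride);
        return 1 + static_cast<std::size_t>(ceilMode ? std::ceil(r) : std::floor(r));
    }
    // pooledDim(32, 3, 2, false) == 15;  pooledDim(32, 3, 2, true) == 16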
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index baa7a486ced74375792cf1ebd3b2f7056168f027..4c8feb46c3e3db33bd380302e3e0683f1b8734f5 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -12,26 +12,24 @@
 #ifndef AIDGE_CORE_OPERATOR_METAOPERATOR_H_
 #define AIDGE_CORE_OPERATOR_METAOPERATOR_H_
 
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 
 namespace Aidge {
-class MetaOperator_Op : public Operator,
+class MetaOperator_Op : public OperatorTensor,
                 public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)> {
 public:
-    std::vector<std::shared_ptr<Tensor>> mInputs;
-    std::vector<std::shared_ptr<Tensor>> mOutputs; // These are shared with micro-graph outputs tensors
-
+    // Output Tensors are shared with the micro-graph's output Tensors.
     // Micro-graph handling:
     std::shared_ptr<GraphView> mGraph; // Meta operator micro-graph
     std::shared_ptr<SequentialScheduler> mScheduler;
     // Need to store an ordered list of input/output operators for the micro-graph,
     // because input/output nodes in a GraphView are unordered.
     // TODO: refactor GraphView to handle ordered input/output?
-    std::vector<std::pair<std::shared_ptr<Operator>, IOIndex_t>> mInputOps;
-    std::vector<std::pair<std::shared_ptr<Operator>, IOIndex_t>> mOutputOps;
+    std::vector<std::pair<std::shared_ptr<OperatorTensor>, IOIndex_t>> mInputOps;
+    std::vector<std::pair<std::shared_ptr<OperatorTensor>, IOIndex_t>> mOutputOps;
 
    public:
     MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph,
@@ -43,11 +41,9 @@ public:
      * @param op Operator to copy.
      */
     MetaOperator_Op(const MetaOperator_Op& op)
-        : Operator(op.type().c_str()),
+        : OperatorTensor(op),
           mGraph(op.mGraph->clone())
-    {
-        // cpy-ctor
-    }
+    {}
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -65,7 +61,7 @@ public:
         return mScheduler;
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
 
         const auto& inputOp = mInputOps[inputIdx];
@@ -80,38 +76,6 @@ public:
         mGraph->forwardDims();
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutputs[0]->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < mInputs.size() && "inputIdx out of range");
-        return *(mInputs[inputIdx].get());
-    }
-
-    inline Tensor& output(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
-        return *(mOutputs[outputIdx].get());
-    }
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < mInputs.size() && "inputIdx out of range");
-        return mInputs[inputIdx];
-    }
-
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
-        return mOutputs[outputIdx];
-    }
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < mInputs.size() && "inputIdx out of range");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
-        return std::static_pointer_cast<Data>(mOutputs[outputIdx]);
-    }
 
     void setBackend(const std::string &name) override {
         if (Registrar<MetaOperator_Op>::exists({name, type()})) {
@@ -125,17 +89,13 @@ public:
         mGraph->setBackend(name);
     }
 
-    void setDatatype(const DataType &datatype) override {
+    void setDataType(const DataType &datatype) const override {
         // The micro-graph should always be set to the right data type, since it
         // shares input/output tensors.
     // Input/output tensor data types are updated here.
-        mGraph->setDatatype(datatype);
+        mGraph->setDataType(datatype);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return mGraph->inputs().size(); }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return mGraph->dataInputs().size(); }
-    inline IOIndex_t nbOutputs() const noexcept override final { return mGraph->outputs().size(); }
-
     NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
     NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override;
     NbElts_t getNbProducedData(IOIndex_t outputIdx) const override;
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 73feb134837787ae8d0d280dd723182c9d21438b..9ec6cdb928cdfa433b04ea23c69344133a3c7064 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -56,7 +56,8 @@ inline std::shared_ptr<Node> PaddedConv(
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
+inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
@@ -64,7 +65,7 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
     // Need to specify the ordered list of input operators
     const std::vector<NodePtr> orderedInputNodes = {pad, conv};
 
@@ -77,13 +78,14 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> PaddedConvDepthWise(
+    const DimSize_t nb_channels,
     DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
 {
-    return PaddedConvDepthWise(to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
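
A hedged example of the updated PaddedConvDepthWise call site (all values illustrative):

    // nb_channels must now be passed explicitly as the first argument.
    auto dw = Aidge::PaddedConvDepthWise<2>(
        32,                                     // nb_channels
        std::array<Aidge::DimSize_t, 2>{3, 3},  // kernel_dims
        "dw3x3");                               // stride/padding/dilation keep their defaults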
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 4ea79fe52622b22f8ea8fbd9191d50d45e26acac..5b9ab4eb8c3924133f32ddfeb0a5f05963381771 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -12,47 +12,38 @@
 #ifndef AIDGE_CORE_OPERATOR_MUL_H_
 #define AIDGE_CORE_OPERATOR_MUL_H_
 
-#include <cassert>
 #include <memory>
+#include <string>
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 
-class Mul_Op : public Operator,
+/**
+ * @brief Tensor element-wise multiplication.
+ */
+class Mul_Op : public OperatorTensor,
     public Registrable<Mul_Op, std::string, std::unique_ptr<OperatorImpl>(const Mul_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Mul";
 
-    Mul_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Mul_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Mul_Op(const Mul_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,73 +54,17 @@ public:
         return std::make_shared<Mul_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty())
-            mOutput->resize(mInputs[0]->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Mul Operator has 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Mul Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
+    void computeOutputDims() override final;
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Mul_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -141,6 +76,6 @@ public:
 inline std::shared_ptr<Node> Mul(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
 }
-}
+} // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
+#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
\ No newline at end of file
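
A hedged wiring note for the slimmed-down Mul_Op (names illustrative):

    // Mul is declared with (2, 0, 1): two data inputs, no parameters, one output.
    auto mul = Aidge::Mul("scale");
    // Both inputs must be associated before the out-of-line computeOutputDims()
    // can size the output; setBackend() still forwards the backend to both inputs.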
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 903b6362adf3db0c867dc419086e0cb6ddaa65c7..b0f8435bd0126cf3fba9f956a432017585a4d873 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -15,50 +15,91 @@
 #include <memory>
 #include <string>
 #include <vector>
+#include <utility>
+#include <cstddef>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Data.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/hook/Hook.hpp"
 
 namespace Aidge {
 
+enum class OperatorType {
+    Data,
+    Tensor
+};
+
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
-  std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
-  std::map<std::string, std::shared_ptr<Hook>> mHooks;
+    std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
+    std::map<std::string, std::shared_ptr<Hook>> mHooks;
 
 private:
-  std::string mType;
+    std::string mType;
+    const OperatorType mOperatorType;
+    const IOIndex_t mNbData;
+    const IOIndex_t mNbParam;
+    const IOIndex_t mNbOut;
 
 public:
-  Operator() = delete;
-  Operator(const char* type) : mType(type) {}
-  virtual std::shared_ptr<Operator> clone() const = 0;
-  virtual ~Operator();
-
-  Operator(const Operator& op):
-    std::enable_shared_from_this<Operator>()
-  {
-    mType = op.mType;
-    mImpl = nullptr;
-    // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
-    // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
-    // Hooks are not copied.
-  }
+    Operator() = delete;
+    Operator(const char* type, const IOIndex_t nbData, const IOIndex_t nbParam, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
+    : mType(type),
+      mOperatorType(operatorType),
+      mNbData(nbData),
+      mNbParam(nbParam),
+      mNbOut(nbOut)
+    {
+        // ctor
+    }
+
+    Operator(const Operator& op):
+        std::enable_shared_from_this<Operator>(),
+        mOperatorType(op.mOperatorType),
+        mNbData(op.mNbData),
+        mNbParam(op.mNbParam),
+        mNbOut(op.mNbOut)
+    {
+        mType = op.mType;
+        mImpl = nullptr;
+        // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
+        // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
+        // Hooks are not copied.
+    }
+
+    virtual ~Operator() noexcept;
 
 public:
+    virtual std::shared_ptr<Operator> clone() const = 0;
 
-    virtual void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) = 0;
-    virtual void computeOutputDims() = 0;
-    virtual bool outputDimsForwarded() const = 0;
+    virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
+    /**
+     * @brief For a given output feature area, compute the associated receptive
+     * field for each data input.
+     * @param firstIdx First index of the output feature.
+     * @param outputDims Size of output feature.
+     * @param outputIdx Index of the output. Default 0.
+     * @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
+     */
+    // virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
+
+    /**
+     * @brief Set the specified input by performing a deep copy of the given data.
+     * The pointer itself is not changed, thus keeping the current connections.
+     * @param inputIdx Index of the input to set.
+     */
+    virtual void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
+    virtual void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) = 0;
     virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0;
-    virtual std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const = 0;
-    virtual Tensor& input(const IOIndex_t /*inputIdx*/) const = 0;
+    /**
+     * @brief Set the specified output by performing a deep copy of the given data.
+     * The pointer itself is not changed, thus keeping the current connections.
+     * @param outputIdx Index of the output to set.
+     */
+    virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) = 0;
+    virtual void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) = 0;
     virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
-    virtual std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const = 0;
-    virtual Tensor& output(const IOIndex_t /*outputIdx*/) const = 0;
 
     std::shared_ptr<Hook> getHook(std::string hookName) {
         return mHooks[hookName];
@@ -74,7 +115,7 @@ public:
 ///////////////////////////////////////////////////////
 
     virtual void setBackend(const std::string& name) = 0;
-    virtual void setDatatype(const DataType& datatype) = 0;
+    virtual void setDataType(const DataType& dataType) const = 0;
 
     /**
      * @brief Set the a new OperatorImpl to the Operator
@@ -117,14 +158,20 @@ public:
 //        INNER
 ///////////////////////////////////////////////////////
 
-    std::string type() const {
+    inline std::string type() const noexcept {
         return mType;
     }
 
-    virtual IOIndex_t nbInputs() const noexcept = 0;
-    virtual IOIndex_t nbDataInputs() const noexcept = 0;
-    virtual IOIndex_t nbOutputs() const noexcept = 0;
-      static const std::vector<std::string> getInputsName(){
+    inline OperatorType operatorType() const noexcept {
+        return mOperatorType;
+    }
+
+    inline IOIndex_t nbInputs() const noexcept { return mNbData + mNbParam; }
+    inline IOIndex_t nbData() const noexcept { return mNbData; }
+    inline IOIndex_t nbParam() const noexcept { return mNbParam; }
+    inline IOIndex_t nbOutputs() const noexcept { return mNbOut; }
+
+    static const std::vector<std::string> getInputsName(){
         return {};
     }
     static const std::vector<std::string> getOutputsName(){
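
The fixed per-category counts can be sanity-checked against any operator declared with the new
base constructor; a hedged sketch using MatMul_Op's (1, 1, 1) declaration (checkCounts is an
invented helper):

    #include <cassert>
    #include "aidge/operator/MatMul.hpp"

    void checkCounts() {
        Aidge::MatMul_Op op(10);      // base call: OperatorTensor(Type, 1, 1, 1)
        assert(op.nbData() == 1);     // "data_input"
        assert(op.nbParam() == 1);    // "weight"
        assert(op.nbInputs() == 2);   // nbData + nbParam
        assert(op.nbOutputs() == 1);
    }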
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a55d7ac2842f948d923f9e1e54d2ffed1fd0f954
--- /dev/null
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -0,0 +1,101 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_
+#define AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class OperatorTensor : public Operator {
+    /* TODO: Add an attribute specifying the type of Data used by the Operator,
+     * the same way the ``Type`` attribute specifies the type of Operator. This
+     * attribute could then be checked in the forwardDims function to assert that
+     * the Operators being used work with Tensors, and cast them to OperatorTensor
+     * instead of Operator.
+     */
+    /* TODO: Maybe change the type attribute of the Data object to an enum instead
+     * of an array of char. Faster comparisons.
+     */
+protected:
+    std::vector<std::shared_ptr<Tensor>> mInputs;
+    std::vector<std::shared_ptr<Tensor>> mOutputs;
+
+public:
+    OperatorTensor() = delete;
+
+    OperatorTensor(const char* type, const IOIndex_t nbData, const IOIndex_t nbParam,
+                   const IOIndex_t nbOut)
+        : Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor),
+          mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)),
+          mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) {
+        for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) {
+            mOutputs[i] = std::make_shared<Tensor>();
+            mOutputs[i]->setDataType(DataType::Float32);
+        }
+    }
+
+    OperatorTensor(const OperatorTensor& other)
+        : Operator(other),
+          mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)),
+          mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) {
+        for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
+            mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i)));
+            // datatype already copied
+        }
+    }
+
+    ~OperatorTensor();
+
+public:
+    ///////////////////////////////////////////////////
+    virtual void associateInput(const IOIndex_t inputIdx,
+                                const std::shared_ptr<Data>& data) override;
+    ///////////////////////////////////////////////////
+
+    ///////////////////////////////////////////////////
+    // Tensor access
+    // input management
+    void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
+    void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final;
+    const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const;
+    inline std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        return std::static_pointer_cast<Data>(getInput(inputIdx));
+    }
+
+    // output management
+    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override;
+    void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override;
+    virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const;
+    inline std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final {
+        return std::static_pointer_cast<Data>(getOutput(outputIdx));
+    }
+    ///////////////////////////////////////////////////
+
+    ///////////////////////////////////////////////////
+    // Tensor dimensions
+    virtual void computeOutputDims();
+    virtual bool outputDimsForwarded() const;
+    ///////////////////////////////////////////////////
+
+    virtual void setDataType(const DataType& dataType) const override;
+};
+}  // namespace Aidge
+
+#endif  // AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_
\ No newline at end of file
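
The header separates connecting a Tensor from copying one; a hedged sketch of the difference
(the operator choice and function name are illustrative):

    #include <memory>
    #include "aidge/operator/MatMul.hpp"

    void wireInputs(const std::shared_ptr<Aidge::Tensor>& a,
                    const std::shared_ptr<Aidge::Tensor>& b) {
        Aidge::MatMul_Op op(10);
        op.associateInput(0, a);  // connection: input 0 now aliases a
        op.setInput(0, b);        // deep copy of *b into the slot; the pointer (a) is kept
    }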
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index cbebb16e1e24501b0ea371fb45211047f6e2b5e7..279b8b3d2c173d18c65c17e50385954a88fde77e 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -19,7 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -30,17 +30,12 @@ enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
 enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 
 template <DimIdx_t DIM>
-class Pad_Op : public Operator,
+class Pad_Op : public OperatorTensor,
                 public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
                 public StaticAttributes<PadAttr,
                                        std::array<DimSize_t, 2*DIM>,
                                        PadBorderType,
                                        double> {
-private:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char *Type = "Pad";
 
@@ -56,25 +51,19 @@ public:
     constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                      const PadBorderType &borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
                            attr<PadAttr::BorderType>(borderType),
-                           attr<PadAttr::BorderValue>(borderValue)) {
-        setDatatype(DataType::Float32);
-    }
+                           attr<PadAttr::BorderValue>(borderValue)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Pad_Op(const Pad_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
-    {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-    }
+        : OperatorTensor(op),
+          Attributes_(op)
+    {}
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -84,82 +73,38 @@ public:
         return std::make_shared<Pad_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 1 && "operators supports only 3 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInput->empty()) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
+        bool associated = true;
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            std::array<DimSize_t, DIM + 2> outputDims{};
+            const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
 
             for (std::size_t dim = 0; dim < DIM; ++dim) {
                 outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
-                                    + mInput->dims()[dim+2]
+                                    + inputDims[dim+2]
                                     + this->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
             }
-            outputDims[1] = mInput->dims()[1];
-            outputDims[0] = mInput->dims()[0];
-            mOutput->resize(outputDims);
+            outputDims[1] = inputDims[1];
+            outputDims[0] = inputDims[0];
+            mOutputs[0]->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return *(mInput.get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "Pad Operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "Pad Operators has only 1 outputs");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string &name) override {
         mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
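
For reference, the resize rule above on a hedged example: an input of dims {1, 3, 32, 32}
(N, C, H, W) with beginEndTuples {1, 1, 1, 1} yields an output of {1, 3, 34, 34}; batch and
channel dims pass through unchanged, and each spatial dim grows by its begin plus end border.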
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index 732cf36b4ef7e7640648c542191acd02d0875a4f..0b0ae82f012eace8b5a2d5eb362a359386495b79 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -17,7 +17,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,33 +26,21 @@
 
 namespace Aidge {
 
-class Pow_Op : public Operator,
+class Pow_Op : public OperatorTensor,
     public Registrable<Pow_Op, std::string, std::unique_ptr<OperatorImpl>(const Pow_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Pow";
 
-    Pow_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Pow_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Pow_Op(const Pow_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,73 +51,18 @@ public:
         return std::make_shared<Pow_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty())
-            mOutput->resize(mInputs[0]->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Pow Operator has 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Pow Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
+    void computeOutputDims() override final;
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Pow_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -141,6 +74,6 @@ public:
 inline std::shared_ptr<Node> Pow(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
 }
-}
+} // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
+#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
\ No newline at end of file
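
Note on the hunk above: computeOutputDims() for Pow is now only declared in the header, so its definition presumably moves to a source file (not part of this hunk), possibly to introduce broadcasting for binary operators. A minimal sketch of such an out-of-line definition, assuming it simply keeps the behaviour of the inline version removed above (resize the output to the first input's dims, no broadcasting):

    #include "aidge/operator/Pow.hpp"

    void Aidge::Pow_Op::computeOutputDims() {
        // Mirror of the removed inline logic: forward the dims of the first
        // input once they are known; broadcasting, if any, would go here.
        if (!getInput(0)->empty()) {
            mOutputs[0]->resize(getInput(0)->dims());
        }
    }
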
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index d747b340618cc7e321f2cfc2ed9169798e5d77e9..fb6a20403adc1ee5cddb5869fd9d39ef59fb776e 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -18,49 +18,40 @@
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
 
 class Producer_Op
-    : public Operator,
+    : public OperatorTensor,
       public Registrable<Producer_Op, std::string, std::unique_ptr<OperatorImpl>(
                                           const Producer_Op &)> {
-private:
-    std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Producer";
 
     template <std::size_t DIM>
     Producer_Op(const std::array<DimSize_t, DIM>& dims)
-        : Operator(Type)
+        : OperatorTensor(Type, 0, 0, 1)
     {
-        //ctor
-        setDatatype(DataType::Float32);
-        mOutput->resize(dims);
+        mOutputs[0]->resize(dims);
     }
 
     Producer_Op(const std::shared_ptr<Tensor> tensor)
-        : Operator(Type),
-          mOutput(tensor)
+        : OperatorTensor(Type, 0, 0, 1)
     {
-        setDatatype(tensor->dataType());
+        mOutputs[0] = tensor; // copy the pointer of the Tensor
     }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @param op OperatorTensor to copy.
      */
     Producer_Op(const Producer_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -71,18 +62,8 @@ public:
         return std::make_shared<Producer_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final {
-        assert(false && "Producer operator takes no input");
-    }
-
-    /**
-     * @brief Set the Output Tensor of the Producer operator.
-     * This method will create a copy of the Tensor.
-     *
-     * @param newOutput Tensor containing the values to copy
-     */
-    void setOutputTensor(const Tensor& newOutput) {
-        *mOutput = newOutput;
+    void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
     }
 
     void computeOutputDims() override final {}
@@ -90,48 +71,13 @@ public:
     bool outputDimsForwarded() const override final {return true;}
 
 
-    [[noreturn]] inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final {
-      assert(false);
-      exit(-1);
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t /*inputIdx*/) const override final {
-      assert(false && "Producer Operator has no input");
-      return nullptr;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-      assert((outputIdx == 0) && "Producer Operator has only 1 output");
-      (void) outputIdx; // avoid unused warning
-      return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t /*inputIdx*/) const override final {
-        assert(false && "Producer operator takes no input");
-        return nullptr;
-    }
-
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-    inline const std::vector<DimSize_t> dims() const noexcept { return mOutput->dims(); }
+    inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Producer_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 0; };
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 0; };
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; };
     static const std::vector<std::string> getInputsName(){
         return {};
     }
@@ -181,4 +127,4 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, Dim
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
+#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
\ No newline at end of file
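
The Tensor-pointer constructor above now stores the given pointer instead of deep-copying it (see the "copy the pointer of the Tensor" comment), so a Producer built this way aliases its source tensor. A minimal sketch, assuming aidge_core is linked and these header paths:

    #include <memory>
    #include <vector>
    #include "aidge/data/Tensor.hpp"
    #include "aidge/operator/Producer.hpp"

    int main() {
        auto t = std::make_shared<Aidge::Tensor>();
        t->resize({2, 3});

        Aidge::Producer_Op prod(t);   // shares the pointer, no copy
        t->resize({4, 5});            // mutation is visible through the operator

        return prod.dims() == std::vector<Aidge::DimSize_t>({4, 5}) ? 0 : 1;
    }
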
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 52f13f1c5ce1d0b7a0d4ccaa4d7fe9927bcc3e53..3444c25fc2e1572e78a1377b3273580f494ac8f9 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -17,42 +17,29 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 
-class ReLU_Op : public Operator,
+class ReLU_Op : public OperatorTensor,
     public Registrable<ReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const ReLU_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "ReLU";
 
-    ReLU_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    ReLU_Op() : OperatorTensor(Type, 1, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     ReLU_Op(const ReLU_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,68 +50,15 @@ public:
         return std::make_shared<ReLU_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "ReLU Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "ReLU Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<ReLU_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -138,4 +72,4 @@ inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
 }
 }
 
-#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
+#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
\ No newline at end of file
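
Across these hunks the new constructors all follow the same OperatorTensor(Type, ..., ..., ...) pattern; judging from the values used in this patch (ReLU 1/0/1, Producer 0/0/1, Slice 4/0/1), the three integers appear to be the number of data inputs, parameter inputs, and outputs. The removed associateInput()/getInput()/getOutput() boilerplate is inherited from OperatorTensor instead. A minimal sketch exercising that inherited plumbing, assuming the base-class computeOutputDims() forwards the input dims like the inline version removed here:

    #include <memory>
    #include "aidge/data/Tensor.hpp"
    #include "aidge/operator/ReLU.hpp"

    int main() {
        auto op = std::make_shared<Aidge::ReLU_Op>();

        auto in = std::make_shared<Aidge::Tensor>();
        in->resize({1, 8});

        // associateInput()/getInput()/getOutput() now come from OperatorTensor.
        op->associateInput(0, in);
        op->computeOutputDims();   // assumed default: output dims = input dims

        return op->getOutput(0)->dims() == in->dims() ? 0 : 1;
    }
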
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 70440da2d083691a2d2b94342dec05d7a24750dd..df1b46482558cc803455ce33ee18d8ec67f413da 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -19,7 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,13 +29,9 @@ namespace Aidge {
 enum class ReduceMeanAttr { Axes, KeepDims };
 
 template <DimIdx_t DIM>
-class ReduceMean_Op : public Operator,
+class ReduceMean_Op : public OperatorTensor,
                 public Registrable<ReduceMean_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
                 public StaticAttributes<ReduceMeanAttr, std::array<DimSize_t, DIM>, DimSize_t> {
-   public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
    public:
     static constexpr const char *Type = "ReduceMean";
@@ -47,24 +43,19 @@ class ReduceMean_Op : public Operator,
     using attr = typename Attributes_::template attr<e>;
 
     constexpr ReduceMean_Op(const std::array<DimSize_t, DIM> &axes, DimSize_t keep_dims)
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<ReduceMeanAttr::Axes>(axes),
-                      attr<ReduceMeanAttr::KeepDims>(keep_dims)) {
-        setDatatype(DataType::Float32);
-    }
+                      attr<ReduceMeanAttr::KeepDims>(keep_dims)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     ReduceMean_Op(const ReduceMean_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<ReduceMean_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<ReduceMean_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -75,16 +66,10 @@ class ReduceMean_Op : public Operator,
         return std::make_shared<ReduceMean_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "ReduceMean operator supports only 1 input");
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
     void computeOutputDims() override final {
-        if (!mInput->empty()) {
+        if (!getInput(0)->empty()) {
             std::vector<DimSize_t> outDims;
-            for(std::size_t d=0; d<mInput->dims().size(); ++d)
+            for(std::size_t d=0; d<getInput(0)->dims().size(); ++d)
             {
                 bool reducedDim =  false;
                 for(std::size_t i=0; i<DIM; ++i)
@@ -101,62 +86,20 @@ class ReduceMean_Op : public Operator,
                         outDims.push_back(1);
                 }
                 else
-                    outDims.push_back(mInput->dims()[d]);
+                    outDims.push_back(getInput(0)->dims()[d]);
             }
-            mOutput->resize(outDims);
+            mOutputs[0]->resize(outDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "ReduceMean operators supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "ReduceMean operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "ReduceMean operators supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "ReduceMean operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
-
     void setBackend(const std::string &name) override {
         mImpl = Registrar<ReduceMean_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
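
The computeOutputDims() kept above implements the usual reduction rule: a reduced axis is dropped, or kept with extent 1 when KeepDims is set, and every other axis passes through. A standalone illustration of that rule (no Aidge dependency):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    std::vector<std::size_t> reducedDims(const std::vector<std::size_t>& in,
                                         const std::vector<std::size_t>& axes,
                                         bool keepDims) {
        std::vector<std::size_t> out;
        for (std::size_t d = 0; d < in.size(); ++d) {
            bool reduced = false;
            for (std::size_t a : axes)
                reduced = reduced || (a == d);
            if (reduced) {
                if (keepDims) out.push_back(1);   // keep the axis with extent 1
            } else {
                out.push_back(in[d]);             // non-reduced axes pass through
            }
        }
        return out;
    }

    int main() {
        for (auto d : reducedDims({2, 3, 4}, {1}, true))  std::cout << d << ' ';  // 2 1 4
        std::cout << '\n';
        for (auto d : reducedDims({2, 3, 4}, {1}, false)) std::cout << d << ' ';  // 2 4
        std::cout << '\n';
    }
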
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index bc60fee91e8ff543daa091dcef1eed754affae2e..81cc7cd195d54cd4d251fbdcf8840ad7fb4b1631 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -17,7 +17,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,33 +26,22 @@
 
 namespace Aidge {
 
-class Reshape_Op : public Operator,
+class Reshape_Op : public OperatorTensor,
     public Registrable<Reshape_Op, std::string, std::unique_ptr<OperatorImpl>(const Reshape_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
     static constexpr const char* Type = "Reshape";
 
-    Reshape_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Reshape_Op() : OperatorTensor(Type, 2, 0, 1) {} // TODO: check whether the shape should instead be a parameter input, i.e. (1, 1, 1)
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Reshape_Op(const Reshape_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Reshape_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Reshape_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,12 +52,6 @@ public:
         return std::make_shared<Reshape_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "Reshape operator supports only 2 inputs");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
     void computeOutputDims() override final {
         if (!mInputs[0]->empty() && !mInputs[1]->empty())
         {
@@ -78,65 +61,20 @@ public:
             {
                 outDims.push_back(shapeElem[i]);
             }
-            mOutput->resize(outDims);
+            mOutputs[0]->resize(outDims);
         }
     }
 
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Reshape operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Reshape operator has 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Reshape operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "Reshape operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "Reshape operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Reshape_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(DataType::Int32);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "output_shape"};
     }
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 43dd7beb10b49c3695e6c55fac0449a34565dd7f..fd6d6bcfccc36829671538e1f2e31b13644e3938 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -15,14 +15,11 @@
 #include <vector>
 #include <memory>
 
-
-
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
@@ -31,14 +28,9 @@ enum class ScalingAttr {
     scalingFactor, quantizedNbBits, isOutputUnsigned
 };
 
-class Scaling_Op : public Operator,
+class Scaling_Op : public OperatorTensor,
     public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
     public StaticAttributes<ScalingAttr, float, size_t, bool> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Scaling";
 
@@ -48,27 +40,22 @@ public:
     template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
 
     Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
-            : Operator(Type),
-            Attributes_(
-                attr<ScalingAttr::scalingFactor>(scalingFactor),
-                attr<ScalingAttr::quantizedNbBits>(nbBits),
-                attr<ScalingAttr::isOutputUnsigned>(isOutputUnsigned)) {
-            
-            setDatatype(DataType::Float32);
-        }
+        : OperatorTensor(Type, 1, 0, 1),
+          Attributes_(
+            attr<ScalingAttr::scalingFactor>(scalingFactor),
+            attr<ScalingAttr::quantizedNbBits>(nbBits),
+            attr<ScalingAttr::isOutputUnsigned>(isOutputUnsigned))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Scaling_Op(const Scaling_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -79,79 +66,17 @@ public:
         return std::make_shared<Scaling_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        (void) inputIdx; //avoid unused warning
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return *(mInput.get());
-    }
-    inline Tensor& output(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return *(mOutput.get());
-    }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning;
-        return mOutput;
-    }
-
-
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Scaling_Op>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
         // FIXME: temporary workaround
-        mInput->setBackend(name);
+        getInput(0)->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
 
-        // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
-    }
-
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-    static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
@@ -164,8 +89,7 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri
 inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, std::size_t quantizedNbBits=8, bool isOutputUnsigned=true, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
 }
-
-}
+} // namespace Aidge
 
 namespace {
 template <>
@@ -173,4 +97,4 @@ const char* const EnumStrings<Aidge::ScalingAttr>::data[]
     = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
+#endif /* __AIDGE_CORE_OPERATOR_SCALING_H__ */
\ No newline at end of file
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 4a99045e200487b7114b81b1625607e1a787554b..d1e000723886dca735618dcc82e9670d075d9521 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -18,6 +18,7 @@
 
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,51 +27,28 @@
 
 namespace Aidge {
 
-class Slice_Op : public Operator,
+class Slice_Op : public OperatorTensor,
     public Registrable<Slice_Op, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 4> mInputs = {std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
     static constexpr const char* Type = "Slice";
 
-    Slice_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Slice_Op() : OperatorTensor(Type, 4, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Slice_Op(const Slice_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    Slice_Op(const Slice_Op& op) : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Slice_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Slice_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Slice_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Slice_Op>(*this);
-    }
-
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 4 && "Slice operator supports only 4 inputs");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
+    std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
 
     void computeOutputDims() override final {
         if (!mInputs[0]->empty() && !mInputs[1]->empty() && !mInputs[2]->empty()&& !mInputs[3]->empty())
@@ -85,67 +63,23 @@ public:
                 std::size_t axis = axes[i]>=0?axes[i]:axes[i]+mInputs[0]->nbDims();
                 outDims[axis] = ends[i] - starts[i] + 1;
             }
-            mOutput->resize(outDims);
+            mOutputs[0]->resize(outDims);
         }
     }
 
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 4 && "wrong inputIdx for Slice operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 4) && "Slice operator has 4 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Slice operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 4 && "Slice operator supports only 4 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "Slice operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Slice_Op>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-        mInputs[2]->setBackend(name);
-        mInputs[3]->setBackend(name);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
+        getInput(2)->setBackend(name);
+        getInput(3)->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
 
-        // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(DataType::Int32);
-        mInputs[2]->setDatatype(DataType::Int32);
-        mInputs[3]->setDatatype(DataType::Int32);
-    }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 4; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 4; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "starts", "ends", "axes"};
     }
@@ -159,4 +93,4 @@ inline std::shared_ptr<Node> Slice(const std::string& name = "") {
 }
 }
 
-#endif /* AIDGE_CORE_OPERATOR_SLICE_H_ */
+#endif /* AIDGE_CORE_OPERATOR_SLICE_H_ */
\ No newline at end of file
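
The computeOutputDims() kept above treats the ends input as inclusive (output extent = end - start + 1) and resolves negative axes from the last dimension. A standalone illustration of that arithmetic (no Aidge dependency):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
        const std::vector<long> inDims = {8, 16, 32};
        const std::vector<long> starts = {2}, ends = {5}, axes = {-1};

        std::vector<long> outDims = inDims;
        for (std::size_t i = 0; i < axes.size(); ++i) {
            // Negative axes count from the back, as in the hunk above.
            const long axis = axes[i] >= 0
                ? axes[i]
                : axes[i] + static_cast<long>(inDims.size());
            outDims[axis] = ends[i] - starts[i] + 1;   // inclusive bounds
        }
        std::printf("%ld %ld %ld\n", outDims[0], outDims[1], outDims[2]);  // 8 16 4
    }
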
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index db078a6f1677c5dfc09035d384eeb304324cebcb..c3461f25aa7840ef0192d0df80b8e0b3a6a3b384 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -16,11 +16,12 @@
 #include <memory>
 #include <vector>
 
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
@@ -29,15 +30,11 @@
 namespace Aidge {
 enum class SoftmaxAttr { AxisIdx };
 
-class Softmax_Op : public Operator,
+class Softmax_Op : public OperatorTensor,
                 public Registrable<Softmax_Op,
                                    std::string,
                                    std::unique_ptr<OperatorImpl>(const Softmax_Op&)>,
                 public StaticAttributes<SoftmaxAttr, int> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
     static constexpr const char* Type = "Softmax";
@@ -47,25 +44,18 @@ public:
     using Attributes_ = StaticAttributes<SoftmaxAttr, int>;
     template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
     Softmax_Op(int axis)
-            : Operator(Type),
-            Attributes_(
-                attr<SoftmaxAttr::AxisIdx>(axis))
-    {
-        setDatatype(DataType::Float32);
-    }
+            : OperatorTensor(Type, 1, 0, 1),
+              Attributes_(attr<SoftmaxAttr::AxisIdx>(axis)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Softmax_Op(const Softmax_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Softmax_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Softmax_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -76,67 +66,14 @@ public:
         return std::make_shared<Softmax_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "Softmax operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Softmax operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Softmax operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "Softmax operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "Softmax operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Softmax_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 90b2ae6a8ae1311aef14e4eba4d3563a28a3d18e..a4069b59bbe7e7586d02b71a39d811d9bf972b77 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -17,7 +17,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,7 +26,7 @@
 
 namespace Aidge {
 
-class Sqrt_Op : public Operator,
+class Sqrt_Op : public OperatorTensor,
     public Registrable<Sqrt_Op, std::string, std::unique_ptr<OperatorImpl>(const Sqrt_Op&)> {
 public:
     // FIXME: change accessibility
@@ -36,23 +36,16 @@ public:
 public:
     static constexpr const char* Type = "Sqrt";
 
-    Sqrt_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Sqrt_Op() : OperatorTensor(Type, 1, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Sqrt_Op(const Sqrt_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,68 +56,14 @@ public:
         return std::make_shared<Sqrt_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Sqrt Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Sqrt Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Sqrt_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 451cba08f58e7a580576531ce2a97c92fb9be3ae..becf98926d2da777c6551e8ed2fbd7b5fcf50017 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -17,7 +17,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,7 +26,7 @@
 
 namespace Aidge {
 
-class Sub_Op : public Operator,
+class Sub_Op : public OperatorTensor,
     public Registrable<Sub_Op, std::string, std::unique_ptr<OperatorImpl>(const Sub_Op&)> {
 public:
     // FIXME: change accessibility
@@ -36,23 +36,16 @@ public:
 public:
     static constexpr const char* Type = "Sub";
 
-    Sub_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Sub_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Sub_Op(const Sub_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Sub_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Sub_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,73 +56,18 @@ public:
         return std::make_shared<Sub_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty())
-            mOutput->resize(mInputs[0]->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Sub Operator has 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Sub Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
+    void computeOutputDims() override final;
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Sub_Op>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
 
-        // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-    }
-
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -141,6 +79,6 @@ public:
 inline std::shared_ptr<Node> Sub(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
 }
-}
+} // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
+#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index b19ec6473b1fb11ee01451e24ecb31553ff6011b..6248dcfc558510a1e08db840dc9bc88284ba8f99 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -19,7 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,14 +29,10 @@ namespace Aidge {
 enum class TransposeAttr { OutputDimsOrder };
 
 template <DimIdx_t DIM>
-class Transpose_Op : public Operator,
+class Transpose_Op : public OperatorTensor,
                 public Registrable<Transpose_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
                 public StaticAttributes<TransposeAttr,
                                        std::array<DimSize_t, DIM>> {
-   public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
    public:
     static constexpr const char *Type = "Transpose";
@@ -49,23 +45,18 @@ class Transpose_Op : public Operator,
     using attr = typename Attributes_::template attr<e>;
 
     constexpr Transpose_Op(const std::array<DimSize_t, DIM> &output_dims_order)
-        : Operator(Type),
-          Attributes_(attr<TransposeAttr::OutputDimsOrder>(output_dims_order)) {
-        setDatatype(DataType::Float32);
-    }
+        : OperatorTensor(Type, 1, 0, 1),
+          Attributes_(attr<TransposeAttr::OutputDimsOrder>(output_dims_order)) { }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Transpose_Op(const Transpose_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Transpose_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Transpose_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -76,74 +67,26 @@ class Transpose_Op : public Operator,
         return std::make_shared<Transpose_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "Transpose operator supports only 1 input");
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
     void computeOutputDims() override final {
-        if (!mInput->empty()) {
+        printf("************** nbIn %d \n", this->nbInputs());
+        if (!getInput(0)->empty()) {
             auto attr = (this)->getStaticAttributes();
             const std::array<DimSize_t, DIM>& outDimsOrder = static_cast<const std::array<DimSize_t, DIM>&>(std::get<0>(attr));
             std::vector<DimSize_t> outputDims;
             for (std::size_t i = 0; i < DIM; ++i) {
-                outputDims.push_back(mInput->dims()[outDimsOrder[i]]);
+                outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
             }
-            mOutput->resize(outputDims);
+            mOutputs[0]->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0)  && "Transpose operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Transpose operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Transpose operator supports only 1 input");
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Transpose operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
-
     void setBackend(const std::string &name) override {
         mImpl = Registrar<Transpose_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
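
The computeOutputDims() kept above applies the OutputDimsOrder attribute as a permutation: output dim i takes the input extent at position order[i]. A standalone illustration (no Aidge dependency):

    #include <array>
    #include <cstddef>
    #include <cstdio>

    int main() {
        const std::array<std::size_t, 3> inDims = {2, 3, 4};
        const std::array<std::size_t, 3> order  = {2, 0, 1};  // OutputDimsOrder

        std::array<std::size_t, 3> outDims{};
        for (std::size_t i = 0; i < order.size(); ++i)
            outDims[i] = inDims[order[i]];   // dim i comes from input axis order[i]

        std::printf("%zu %zu %zu\n", outDims[0], outDims[1], outDims[2]);  // 4 2 3
    }
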
diff --git a/include/aidge/utils/Recipies.hpp b/include/aidge/recipies/Recipies.hpp
similarity index 85%
rename from include/aidge/utils/Recipies.hpp
rename to include/aidge/recipies/Recipies.hpp
index 197e959d01156b840e5a86489c056deb06a37d4d..5a7df30bb1d2cbf95ecec32b3ba4eceae9580529 100644
--- a/include/aidge/utils/Recipies.hpp
+++ b/include/aidge/recipies/Recipies.hpp
@@ -20,7 +20,7 @@
 #include "aidge/graphRegex/matchFsm/MatchResult.hpp"
 
 
-namespace Aidge{
+namespace Aidge {
 
 // FUSE MATMUL + ADD -> FC
 
@@ -102,6 +102,11 @@ void fuseBatchNorm(std::shared_ptr<MatchSolution> solution);
  */
 void fuseBatchNorm(std::shared_ptr<GraphView> graphView);
 
-}
+// std::set<std::shared_ptr<Node>> getHorizontalTiling(const std::shared_ptr<Node>& node, const DimIdx_t axis, const std::size_t nbSlices);
+// void horizontalTiling(std::shared_ptr<Node> node, DimIdx_t dim, std::size_t nbSlices);
+// std::set<std::shared_ptr<Node>> getHorizontalTiling(std::set<std::shared_ptr<Node>> setOfNodes, DimIdx_t dim, std::size_t nbSlices);
+// void horizontalTiling(std::set<std::shared_ptr<Node>> setOfNodes, DimIdx_t dim, std::size_t nbSlices);
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index b67f69ae7afc2c22f3b424812ec994b10974b668..50ed0895e82bb468dee57264534f0ec3a486a815 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -22,8 +22,8 @@
 
 namespace Aidge {
 /**
- * @brief This class is designed to handle static attributes (i.e. known at compile-time) 
- * with named accessors, with minimal overhead (the name strings are not stored in each object 
+ * @brief This class is designed to handle static attributes (i.e. known at compile-time)
+ * with named accessors, with minimal overhead (the name strings are not stored in each object
  * instance and it remains possible to access attributes without overhead at compile-time).
 */
 template <class ATTRS_ENUM, class ...T>
@@ -97,6 +97,17 @@ public:
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"%s\" not found", name);
     }
 
+    template <typename R>
+    const R& getAttr(const char* name) const {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (strcmp(EnumStrings<ATTRS_ENUM>::data[i], name) == 0) {
+                return getAttr<R>(i);
+            }
+        }
+
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"%s\" not found", name);
+    }
+
     template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
     typename std::enable_if<(SIZE > 0), R&>::type getAttr(std::size_t i) {
         if (i == SIZE-1) {
@@ -117,6 +128,26 @@ public:
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
     }
 
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    typename std::enable_if<(SIZE > 0), const R&>::type getAttr(std::size_t i) const {
+        if (i == SIZE-1) {
+            if (std::is_same<R, typename std::tuple_element<SIZE-1,std::tuple<T...>>::type>::value) {
+                return reinterpret_cast<const R&>(std::get<SIZE-1>(mAttrs));
+            }
+            else {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "wrong type for attribute with index %lu", i);
+            }
+        }
+        else {
+            return getAttr<R, SIZE-1>(i);
+        }
+    }
+
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    [[noreturn]] typename std::enable_if<(SIZE == 0), const R&>::type getAttr(std::size_t /*i*/) const {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
+    }
+
     template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
     constexpr typename std::enable_if<(SIZE > 0), const std::type_info&>::type getAttrType(std::size_t i) const {
         if (i == SIZE-1) {
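
Note: these const overloads mirror the existing non-const getAttr pair, so attributes become readable through const references. A minimal sketch; StaticAttrs and the "NegativeSlope" attribute name are illustrative placeholders:

    // Works on a const object, which previously required the non-const getAttr.
    template <class StaticAttrs>
    float negativeSlope(const StaticAttrs& attrs) {
        // string lookup resolves through EnumStrings<ATTRS_ENUM>::data, as above
        return attrs.template getAttr<float>("NegativeSlope");
    }
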
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 31470e0eb2c50b5386b64498f89419801b133d3a..babc534bdc23e87e17e21312d18b51b04baee7ca 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -35,7 +35,7 @@ void addCtor(py::class_<Tensor,
         /* Request a buffer descriptor from Python */
         py::buffer_info info = b.request();
         Tensor* newTensor = new Tensor();
-        newTensor->setDatatype(NativeType<T>::type);
+        newTensor->setDataType(NativeType<T>::type);
         const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
         newTensor->resize(dims);
         // TODO : Find a better way to choose backend
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index cea1f27035a1138bedaa004070a8420505dd0127..61392470adaeb7db8812a3063edc5f8eee1d3083 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -98,7 +98,7 @@ void init_GraphView(py::module& m) {
           .def("get_node", &GraphView::getNode, py::arg("node_name"))
           .def("forward_dims", &GraphView::forwardDims)
           .def("__call__", &GraphView::operator(), py::arg("connectors"))
-          .def("set_datatype", &GraphView::setDatatype, py::arg("datatype"))
+          .def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
           .def("set_backend", &GraphView::setBackend, py::arg("backend"))
           //   .def("__getitem__", [](Tensor& b, size_t idx)-> py::object {
           //      // TODO : Should return error if backend not compatible with get
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 29e9a7b663c851f157e1951d695b5f5cb2c0e8ee..aa5c21372730536662106a035307d885fa011107 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -123,7 +123,7 @@ void init_Node(py::module& m) {
     :rtype: int
     )mydelimiter")
 
-    .def("get_nb_datainputs", &Node::nbDataInputs,
+    .def("get_nb_data", &Node::nbData,
     R"mydelimiter(
     Number of data inputs.
 
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 0b2323c5cfb660415ec3ae009beaa7aa78afca0b..74ec11c28e746856fe767f16a4380651271d8fe4 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -13,21 +13,21 @@
 
 #include "aidge/operator/Add.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
-template <std::size_t NUM> void declare_Add(py::module &m) {
-  py::class_<Add_Op<NUM>, std::shared_ptr<Add_Op<NUM>>, Operator>(m, "AddOp", py::multiple_inheritance())
-  .def("get_inputs_name", &Add_Op<NUM>::getInputsName)
-  .def("get_outputs_name", &Add_Op<NUM>::getOutputsName);
+void declare_Add(py::module &m) {
+  py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
+  .def("get_inputs_name", &Add_Op::getInputsName)
+  .def("get_outputs_name", &Add_Op::getOutputsName);
 
-  m.def("Add", &Add<NUM>, py::arg("name") = "");
+  m.def("Add", &Add, py::arg("nbIn"), py::arg("name") = "");
 }
 
 void init_Add(py::module &m) {
-  declare_Add<2>(m);
+  declare_Add(m);
 }
 } // namespace Aidge
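
Note: Add_Op is no longer templated on its arity; the input count is a runtime argument, as the py::arg("nbIn") binding above shows. The equivalent C++ call:

    #include "aidge/operator/Add.hpp"

    auto add = Aidge::Add(2, "add1");   // was: Add<2>("add1")
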
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index fe67fcb7a26f6ea1f05577b47444df5cb271110a..f87cd5dd66f44535ff895f73b160fc5988e1009a 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -18,7 +18,7 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/AvgPooling.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
-  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Operator, Attributes>(
+  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor, Attributes>(
     m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index cabaa2edd7053718160fa5013492d1914ee4cf16..ff0b9e0dfcb0d1c5e5567a938b1ca74faf242bed 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -13,7 +13,7 @@
 #include <string>
 
 #include "aidge/operator/BatchNorm.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
@@ -21,7 +21,7 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, Attributes>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor, Attributes>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
     .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 9e587f0f01c0b7e4a168e30f5c5519aa8d9a8371..701afa3e1280c9f0ba4eb09292e3fd2a5e8060c4 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -13,13 +13,13 @@
 #include <string>
 
 #include "aidge/operator/Concat.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Concat(py::module& m) {
-    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, Operator, Attributes>(m, "ConcatOp", py::multiple_inheritance())
+    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, OperatorTensor, Attributes>(m, "ConcatOp", py::multiple_inheritance())
     .def("get_inputs_name", &Concat_Op::getInputsName)
     .def("get_outputs_name", &Concat_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index f4f7946c6ecc180f83e4bf58eee16102752f0c6e..71231b8218ac6af28c97ec29039301bc25b2d195 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -18,14 +18,14 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Conv.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
-  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Operator, Attributes>(
+  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor, Attributes>(
     m, ("ConvOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<DimSize_t,
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 4745ef345264763f1a890d566235be072c8e50d8..15f2c1c8acb4a1b59cfb0f35ebb78cb611647d3b 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -18,7 +18,7 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 
@@ -26,19 +26,22 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
-  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Operator, Attributes>(
+  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor, Attributes>(
     m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
-  .def(py::init<const std::array<DimSize_t, DIM> &,
+  .def(py::init<const DimSize_t,
+                const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &>(),
+        py::arg("nb_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
         py::arg("dilation_dims"))
   .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName);
 
-  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
+                                                                  const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
                                                                   const std::vector<DimSize_t> &dilation_dims) {
@@ -46,8 +49,9 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
 
-        return ConvDepthWise<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
-    }, py::arg("kernel_dims"),
+        return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+    }, py::arg("nb_channels"),
+       py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
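
Note: ConvDepthWise now takes its channel count explicitly instead of deducing it from the input tensor; the same change is applied to PaddedConvDepthWise later in this patch. A sketch of the updated factory call, with the signature inferred from the binding lambda above:

    #include "aidge/operator/ConvDepthWise.hpp"

    // nb_channels first, then kernel dims; stride and dilation keep their defaults of 1.
    auto dw = Aidge::ConvDepthWise<2>(64, {3, 3}, "dw1");
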
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index 3492bf244952ba6ed0d77cb16de758e61fb26383..6d14510f34349c001289096a7fc9b08681a25bc8 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Div.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Div(py::module& m) {
-    py::class_<Div_Op, std::shared_ptr<Div_Op>, Operator>(m, "DivOp", py::multiple_inheritance())
+    py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance())
     .def("get_inputs_name", &Div_Op::getInputsName)
     .def("get_outputs_name", &Div_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index 3c1c7b18be9f6b38f8c23188f15f8e005227dd98..806867f61c3580543c184d529edc2856ee8d7a6c 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Erf.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Erf(py::module& m) {
-    py::class_<Erf_Op, std::shared_ptr<Erf_Op>, Operator>(m, "ErfOp", py::multiple_inheritance())
+    py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance())
     .def("get_inputs_name", &Erf_Op::getInputsName)
     .def("get_outputs_name", &Erf_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index c6a1c70000e3e6d604a6652716667efa1c18e956..606b9ae948847f98d5a1129c08db21e073311879 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -13,18 +13,18 @@
 
 #include "aidge/operator/FC.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, Attributes>(m, "FCOp", py::multiple_inheritance())
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor, Attributes>(m, "FCOp", py::multiple_inheritance())
   .def("get_inputs_name", &FC_Op::getInputsName)
   .def("get_outputs_name", &FC_Op::getOutputsName);
 
-  m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
+  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
 
 void init_FC(py::module &m) {
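
Note: FC now receives in_channels up front, so its weight Producer can be sized at construction rather than at first dimension propagation. The C++ call matching the updated py::arg list:

    #include "aidge/operator/FC.hpp"

    auto fc = Aidge::FC(128, 10, /*nobias=*/false, "classifier");
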
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 0fc329d2759c2878730b16de1be696e1a457b4d0..f9768e38fbdceef4a15cc74430bc2205bb32cb6a 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -13,13 +13,13 @@
 #include <string>
 
 #include "aidge/operator/Gather.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Gather(py::module& m) {
-    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, Operator, Attributes>(m, "GatherOp", py::multiple_inheritance())
+    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor, Attributes>(m, "GatherOp", py::multiple_inheritance())
     .def("get_inputs_name", &Gather_Op::getInputsName)
     .def("get_outputs_name", &Gather_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 241fc7f4a003f53de15a42859b078c54cc98b63a..154fdfa64f279d8d6bb40ea7077acdb4c0fd51b9 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -16,18 +16,18 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/GenericOperator.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 namespace py = pybind11;
 namespace Aidge {
 
 void init_GenericOperator(py::module& m) {
-    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, Operator, DynamicAttributes>(m, "GenericOperatorOp",
+    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, OperatorTensor, DynamicAttributes>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
     .def_readonly_static("identity", &GenericOperator_Op::Identity)
     .def("compute_output_dims", &GenericOperator_Op::computeOutputDims)
     .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function"));
 
-    m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nb_data_in"), py::arg("nb_in"), py::arg("nb_out"),
+    m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"),
           py::arg("name") = "");
 }
 }  // namespace Aidge
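
Note: the GenericOperator arity arguments are reinterpreted as (nb_data, nb_param, nb_out), separating data inputs from parameter inputs. The C++ call matching the binding above:

    #include "aidge/operator/GenericOperator.hpp"

    // One data input, no parameter inputs, one output.
    auto fake = Aidge::GenericOperator("FakeConv", 1, 0, 1, "fake_conv");
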
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index af7689f0e64dd4ca8f798dcb34ea968972ace464..07300633ad1fb8163d4456afd744c4eb5d7b0ed1 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/LeakyReLU.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, Attributes>(m, "LeakyReLUOp", py::multiple_inheritance())
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor, Attributes>(m, "LeakyReLUOp", py::multiple_inheritance())
     .def("get_inputs_name", &LeakyReLU_Op::getInputsName)
     .def("get_outputs_name", &LeakyReLU_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index fdb51b24a87ce358c1e7808873ebc569ca2227c8..242bf2c451723677e1b9063edfc3098d4159e5a4 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -13,18 +13,18 @@
 
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void declare_MatMul(py::module &m) {
-  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Operator, Attributes>(m, "MatMulOp", py::multiple_inheritance())
+  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor, Attributes>(m, "MatMulOp", py::multiple_inheritance())
   .def("get_inputs_name", &MatMul_Op::getInputsName)
   .def("get_outputs_name", &MatMul_Op::getOutputsName);
 
-  m.def("MatMul", &MatMul, py::arg("out_channels"), py::arg("name") = "");
+  m.def("MatMul", &MatMul, py::arg("in_channels"), py::arg("out_channels"), py::arg("name") = "");
 }
 
 void init_MatMul(py::module &m) {
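
Note: MatMul gains an explicit in_channels argument, following the same convention as FC above:

    #include "aidge/operator/MatMul.hpp"

    auto mm = Aidge::MatMul(128, 10, "proj");   // in_channels, out_channels, name
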
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 907e8cfaa6cde2451677b72beab38bd9a3938735..0ee3d9df80d7ea7b7be2b8d5c456d5d739506882 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -18,7 +18,7 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/MaxPooling.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
-  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, Attributes>(
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor, Attributes>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index 3fc0f89c0e2d02052031df357f2d36e4b67b6b41..6df5a43f64bf8335108ccd99a1588a1367955b77 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -18,7 +18,6 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/MetaOperatorDefs.hpp"
-#include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
@@ -49,7 +48,8 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
 }
 
 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
-  m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+  m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
+                                                         const std::vector<DimSize_t>& kernel_dims,
                                                          const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
@@ -60,8 +60,9 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
 
-        return PaddedConvDepthWise<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
-    }, py::arg("kernel_dims"),
+        return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+    }, py::arg("nb_channels"),
+       py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 2627c99005b009769e8fbb97b1f5d79e2424c997..21f510d98728fbe5401288a366294241b5f10a3f 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Mul.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Mul(py::module& m) {
-    py::class_<Mul_Op, std::shared_ptr<Mul_Op>, Operator>(m, "MulOp", py::multiple_inheritance())
+    py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance())
     .def("get_inputs_name", &Mul_Op::getInputsName)
     .def("get_outputs_name", &Mul_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index b786a27dd04d218da94c148a8087a4b89f8ed6aa..f9482eda2f93b5492cfcc89175da69d140f23df8 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -12,20 +12,23 @@
 #include <pybind11/pybind11.h>
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Types.h"
 #include <pybind11/stl.h>
 
 namespace py = pybind11;
 namespace Aidge {
 void init_Operator(py::module& m){
     py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
-    .def("output", &Operator::output, py::arg("outputIdx"))
-    .def("input", &Operator::input, py::arg("inputIdx"))
+    .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput), py::arg("outputIdx"), py::arg("data"))
+    .def("get_raw_output", &Operator::getRawOutput, py::arg("outputIdx"))
+    .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
+    .def("get_raw_input", &Operator::getRawInput, py::arg("inputIdx"))
     .def("nb_inputs", &Operator::nbInputs)
-    .def("nb_data_inputs", &Operator::nbDataInputs)
+    .def("nb_data", &Operator::nbData)
+    .def("nb_param", &Operator::nbParam)
     .def("nb_outputs", &Operator::nbOutputs)
-    .def("output_dims_forwarded", &Operator::outputDimsForwarded)
     .def("associate_input", &Operator::associateInput, py::arg("inputIdx"), py::arg("data"))
-    .def("set_datatype", &Operator::setDatatype, py::arg("datatype"))
+    .def("set_datatype", &Operator::setDataType, py::arg("dataType"))
     .def("set_backend", &Operator::setBackend, py::arg("name"))
     .def("forward", &Operator::forward)
     // py::keep_alive forbids Python from garbage collecting the implementation while the Operator is not garbage collected!
@@ -34,4 +37,4 @@ void init_Operator(py::module& m){
     .def("add_hook", &Operator::addHook)
     ;
 }
-}
+}
\ No newline at end of file
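
Note: the base Operator binding now exposes only the untyped Data accessors; Tensor-typed access moves to the OperatorTensor binding added in the next file. Roughly the same split in C++, sketched with the casts this patch uses elsewhere:

    #include <memory>
    #include "aidge/data/Tensor.hpp"
    #include "aidge/operator/OperatorTensor.hpp"

    void inspect(const std::shared_ptr<Aidge::Operator>& op) {
        std::shared_ptr<Aidge::Data> raw = op->getRawOutput(0);       // untyped, on Operator
        if (auto opT = std::dynamic_pointer_cast<Aidge::OperatorTensor>(op)) {
            std::shared_ptr<Aidge::Tensor> out = opT->getOutput(0);   // typed, on OperatorTensor
            (void)out;
        }
        (void)raw;
    }
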
diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ce34dea158e6df1466db415b2539962c2113d42b
--- /dev/null
+++ b/python_binding/operator/pybind_OperatorTensor.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Operator.hpp"
+#include <pybind11/stl.h>
+
+namespace py = pybind11;
+namespace Aidge {
+void init_OperatorTensor(py::module& m){
+    py::class_<OperatorTensor, std::shared_ptr<OperatorTensor>, Operator>(m, "OperatorTensor")
+    .def("get_output", &OperatorTensor::getOutput, py::arg("outputIdx"))
+    .def("get_input", &OperatorTensor::getInput, py::arg("inputIdx"))
+    .def("output_dims_forwarded", &OperatorTensor::outputDimsForwarded)
+    ;
+}
+}
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index 22866c5460381b6f494948c7410bcd67e7e46edb..09d1e4ad2ad6413901c28bc9d9fe16995483da05 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Pow.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Pow(py::module& m) {
-    py::class_<Pow_Op, std::shared_ptr<Pow_Op>, Operator>(m, "PowOp", py::multiple_inheritance())
+    py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance())
     .def("get_inputs_name", &Pow_Op::getInputsName)
     .def("get_outputs_name", &Pow_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 107b7ba00e4077d9f7c215257bf7fd46629481c1..3dae24b620fe99098205d7d5f23591780f1e9cb7 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -14,7 +14,7 @@
 
 #include "aidge/utils/Types.h"
 // #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/data/Tensor.hpp"
 
@@ -30,12 +30,11 @@ void declare_Producer(py::module &m) {
 
 
 void init_Producer(py::module &m) {
-    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, Operator>(
+    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, OperatorTensor>(
         m,
         "ProducerOp",
         py::multiple_inheritance())
     .def("dims", &Producer_Op::dims)
-    .def("set_output_tensor", &Producer_Op::setOutputTensor)
     .def("get_inputs_name", &Producer_Op::getInputsName)
     .def("get_outputs_name", &Producer_Op::getOutputsName);
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index dbcb483e8089373bc8599c2d09fed00049e2a2ac..24ae96649a87ff9acc996715d3cd00a97c393578 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/ReLU.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_ReLU(py::module& m) {
-    py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, Operator>(m, "ReLUOp", py::multiple_inheritance())
+    py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance())
     .def("get_inputs_name", &ReLU_Op::getInputsName)
     .def("get_outputs_name", &ReLU_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 1df907734e95fba7baa24694766de1062fb35333..1d0bcf3239f760ab1867c898d3d131b1dfe318a3 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -16,15 +16,15 @@
 #include <array>
 
 #include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/ReduceMean.hpp"
-#include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
-  py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Operator, Attributes>(
+  py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, OperatorTensor, Attributes>(
     m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName)
     .def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName)
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index f81b34aae0acea6a58f38945fafefd0eb3be009f..35c26c09d405a8e5fec71abd8517cbf4e5bebc4b 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Reshape.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Reshape(py::module& m) {
-    py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, Operator>(m, "ReshapeOp", py::multiple_inheritance())
+    py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
     .def("get_inputs_name", &Reshape_Op::getInputsName)
     .def("get_outputs_name", &Reshape_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index fb5d883c4612d38d6fe7561991ca089b45bd1366..78eba60a0329384de2ffb87623629b4606e7dbed 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Slice.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Slice(py::module& m) {
-    py::class_<Slice_Op, std::shared_ptr<Slice_Op>, Operator>(m, "SliceOp", py::multiple_inheritance())
+    py::class_<Slice_Op, std::shared_ptr<Slice_Op>, OperatorTensor>(m, "SliceOp", py::multiple_inheritance())
     .def("get_inputs_name", &Slice_Op::getInputsName)
     .def("get_outputs_name", &Slice_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 38aaa4dba443e0691bcddeae6f619bb505963163..04e92d39971a731931397e943aba6e296a81a14d 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -13,13 +13,13 @@
 #include <string>
 
 #include "aidge/operator/Softmax.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Softmax(py::module& m) {
-    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Operator, Attributes>(m, "SoftmaxOp", py::multiple_inheritance())
+    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor, Attributes>(m, "SoftmaxOp", py::multiple_inheritance())
     .def("get_inputs_name", &Softmax_Op::getInputsName)
     .def("get_outputs_name", &Softmax_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index b70171814662c861f19b3048b018260170d37491..98d65242e8ff199992bbfc740192ae25e6d7b738 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Sqrt.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Sqrt(py::module& m) {
-    py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, Operator>(m, "SqrtOp", py::multiple_inheritance())
+    py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance())
     .def("get_inputs_name", &Sqrt_Op::getInputsName)
     .def("get_outputs_name", &Sqrt_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index 10c95939646a6b605f23c42618bfbdd00ceb6e2e..dce1ab6cb27cc7da02e6c817a6bc49ec64bcf364 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Sub.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Sub(py::module& m) {
-    py::class_<Sub_Op, std::shared_ptr<Sub_Op>, Operator>(m, "SubOp", py::multiple_inheritance())
+    py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance())
     .def("get_inputs_name", &Sub_Op::getInputsName)
     .def("get_outputs_name", &Sub_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index 2d9ce1796252974cea1535de86d5050c6af283b8..e92e9c2aaafe2d20220da053a2b9d799fbe8466d 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -18,7 +18,7 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Transpose.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 
@@ -27,7 +27,7 @@ namespace Aidge {
 
 template <DimIdx_t DIM> 
 void declare_Transpose(py::module &m) {
-  py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Operator, Attributes>(
+  py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, OperatorTensor, Attributes>(
     m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
   .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
   .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName);
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 518da520043f09ec057f6f4673332733af8e92bd..aa79bb621b6660d4c85f84f642e6d17618f77964 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -19,6 +19,7 @@ void init_Tensor(py::module&);
 void init_OperatorImpl(py::module&);
 void init_Attributes(py::module&);
 void init_Operator(py::module&);
+void init_OperatorTensor(py::module&);
 
 void init_Add(py::module&);
 void init_AvgPooling(py::module&);
@@ -73,6 +74,7 @@ void init_Aidge(py::module& m){
     init_OperatorImpl(m);
     init_Attributes(m);
     init_Operator(m);
+    init_OperatorTensor(m);
     init_Add(m);
     init_AvgPooling(m);
     init_BatchNorm(m);
diff --git a/python_binding/recipies/pybind_Recipies.cpp b/python_binding/recipies/pybind_Recipies.cpp
index 0bc89e7d428181dac0fe45e935f59433cca70b89..c8b1916f2a174f05aad58717764130cbe8209239 100644
--- a/python_binding/recipies/pybind_Recipies.cpp
+++ b/python_binding/recipies/pybind_Recipies.cpp
@@ -14,7 +14,7 @@
 
 #include <string>
 
-#include "aidge/utils/Recipies.hpp"
+#include "aidge/recipies/Recipies.hpp"
 
 namespace py = pybind11;
 
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index 166754cc9fe9774d922ef523ab35f569673701fd..b76bf33367221add6273e02590d6ec315cfa4544 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -25,25 +25,25 @@ Aidge::OperatorImpl::OperatorImpl(const Operator& op):
 }
 
 Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    assert(mOp.getInput(inputIdx) && "requires valid input");
+    assert(mOp.getRawInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensor by default
-    return std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->size();
+    return std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->size();
 }
 
 Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
-    assert(mOp.getInput(inputIdx) && "requires valid input");
+    assert(mOp.getRawInput(inputIdx) && "requires valid input");
 
     // Protect the whole tensor by default
-    return std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->size();
+    return std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->size();
 }
 
 Aidge::NbElts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                          const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    assert(mOp.getOutput(outputIdx) && "requires valid output");
+    assert(mOp.getRawOutput(outputIdx) && "requires valid output");
 
     // Requires the whole tensor by default, regardless of available data on inputs
-    return std::static_pointer_cast<Tensor>(mOp.getOutput(outputIdx))->size();
+    return std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx))->size();
 }
 
 Aidge::NbElts_t Aidge::OperatorImpl::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 367c9f10dffc0116ffa6cdcfd1841015441af8a1..96466cd1a4b81dae3eec120360055bdf0f8c5844 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -13,10 +13,12 @@
 #include <cassert>
 #include <iterator>
 #include <utility>
+#include <numeric>
 
 #include "aidge/utils/Types.h"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
 ///////////////////////////////////////////////////////
@@ -28,7 +30,7 @@ Aidge::Connector Aidge::GraphView::operator()(
   // TODO: allow for multiple inputNodes?
   assert((inputNodes().size() == 1U) && "Too many input Nodes for the GraphView, undefined behaviour");
   std::shared_ptr<Node> inNode = *inputNodes().begin();
-  assert((ctors.size() == static_cast<std::size_t>(inNode->nbDataInputs())) && "Wrong number of arguments.\n");
+  assert((ctors.size() == static_cast<std::size_t>(inNode->nbData())) && "Wrong number of arguments.\n");
   for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inNode->inputs()) {
     assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n");
     (void)input; // avoid unused warning
@@ -107,20 +109,19 @@ void Aidge::GraphView::save(std::string path, bool verbose) const {
 ///////////////////////////////////////////////////////
 
 Aidge::IOIndex_t Aidge::GraphView::getNbDataInputs() const {
-  IOIndex_t nbDataInput = 0;
-  // assert(outputNodes().size() == static_cast<std::size_t>(1));
-  for (const std::shared_ptr<Node> &inNode : inputNodes()) {
-    nbDataInput += inNode->nbDataInputs();
-  }
-  return nbDataInput;
+    return std::accumulate(mInputNodes.cbegin(), mInputNodes.cend(), 0,
+                            [](IOIndex_t sumData, const std::shared_ptr<Node>& inNode) {
+                                return sumData + inNode->nbData();
+                            }
+                        );
 }
 
 Aidge::IOIndex_t Aidge::GraphView::getNbFreeDataInputs() const {
-  IOIndex_t nbIn = 0;
-  for (const std::shared_ptr<Node>& inputNode : mInputNodes) {
-    nbIn += inputNode->getNbFreeDataInputs();
-  }
-  return nbIn;
+  return std::accumulate(mInputNodes.cbegin(), mInputNodes.cend(), 0,
+                            [](IOIndex_t sumData, const std::shared_ptr<Node>& inNode) {
+                                return sumData + inNode->getNbFreeDataInputs();
+                            }
+                        );
 }
 
 
@@ -165,6 +166,19 @@ Aidge::GraphView::inputs(std::string name) const {
   return mNodeRegistry.at(name)->inputs();
 }
 
+void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType datatype) {
+    // Backend
+    // TODO: add Backend attribute to Operator
+    setBackend(backend);
+    // Data type
+    // TODO: manage Datatype attribute in OperatorImpl
+    setDataType(datatype);
+    // Data Format
+    // TODO: check actual parent output data format and the needed one. Add a Transpose Operator if necessary
+    // Forward dimensions
+    forwardDims();
+}
+
 void Aidge::GraphView::forwardDims() {
     // setInputs
     // Link every tensor to the right pointer
@@ -195,41 +209,46 @@ void Aidge::GraphView::forwardDims() {
 }
 
 void Aidge::GraphView::_forwardDims(std::set<std::shared_ptr<Node>> listNodes) {
-  // TODO: support multi-inputs/outputs
-  std::set<std::shared_ptr<Node>> nextList = std::set<std::shared_ptr<Node>>();
-  for (std::shared_ptr<Node> nodePtr : listNodes) {
-    if (!nodePtr->getOperator()->outputDimsForwarded()) {
-      nodePtr->getOperator()->computeOutputDims();
-    }
-    if (!nodePtr->getOperator()->outputDimsForwarded()) {
-      nextList.insert(nodePtr);
-    } else {
-      std::set<std::shared_ptr<Node>> children = nodePtr->getChildren();
-      nextList.insert(children.begin(), children.end());
+    // TODO: support multi-inputs/outputs
+    std::set<std::shared_ptr<Node>> nextList = std::set<std::shared_ptr<Node>>();
+    for (std::shared_ptr<Node> nodePtr : listNodes) {
+        if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
+            const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
+            if (!op->outputDimsForwarded()) {
+                op->computeOutputDims();
+            }
+            if (!op->outputDimsForwarded()) { // try to compute output dimensions again later
+                nextList.insert(nodePtr);
+            } else { // compute output dimensions of children
+                std::set<std::shared_ptr<Node>> children = nodePtr->getChildren();
+                nextList.insert(children.begin(), children.end());
+            }
+        }
     }
-  }
-  if (nextList.empty()) {
-    for (std::shared_ptr<Node> nodePtr : getNodes()) {
-      if (!nodePtr->getOperator()->outputDimsForwarded()) {
-        nextList.insert(nodePtr);
-      }
+    if (nextList.empty()) {
+        for (std::shared_ptr<Node> nodePtr : getNodes()) {
+            if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
+                if (!std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator())->outputDimsForwarded()) {
+                    nextList.insert(nodePtr);
+                }
+            }
+        }
+    }
+    if (!nextList.empty()) {
+        _forwardDims(nextList);
     }
-  }
-  if (!nextList.empty()) {
-    _forwardDims(nextList);
-  }
 }
 
 void Aidge::GraphView::setBackend(const std::string &backend) {
-  for (auto node : getNodes()) {
-    node->getOperator()->setBackend(backend);
-  }
+    for (auto node : getNodes()) {
+        node->getOperator()->setBackend(backend);
+    }
 }
 
-void Aidge::GraphView::setDatatype(const DataType &datatype) {
-  for (auto node : getNodes()) {
-    node->getOperator()->setDatatype(datatype);
-  }
+void Aidge::GraphView::setDataType(const Aidge::DataType &datatype) {
+    for (auto node : getNodes()) {
+        node->getOperator()->setDataType(datatype);
+    }
 }
 
 void Aidge::GraphView::updateOutputNodes() {
@@ -315,7 +334,7 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
     mNodeRegistry.insert(std::make_pair(node->name(), node));
   // add learnable parameters to the graph
   if (includeLearnableParam) {
-    for (IOIndex_t i = node->nbDataInputs(); i < node->nbInputs(); ++i) {
+    for (IOIndex_t i = node->nbData(); i < node->nbInputs(); ++i) {
       std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
       if (parentNode) {
           parentNode->addView(shared_from_this());
@@ -475,7 +494,7 @@ void Aidge::GraphView::remove(std::shared_ptr<Node> nodePtr, bool includeLearnab
   // same for learnable params
 
   if (includeLearnableParam) {
-    for (IOIndex_t i = nodePtr->nbDataInputs(); i < nodePtr->nbInputs(); ++i) {
+    for (IOIndex_t i = nodePtr->nbData(); i < nodePtr->nbInputs(); ++i) {
       auto inputI = nodePtr->input(i);
       bool removeNode = true;
       for (const auto& parentOutput : inputI.first->outputs()) {
@@ -757,7 +776,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
     for (auto parent : oldToNewNode.first->inputs()) {
       while (oldToNewNodes[parent.first] == nullptr) {
         // Find next valid parent in line, going backward in the graph
-        assert(parent.first->nbDataInputs() <= 1 && "deleted nodes in GraphView::clone() cannot have multiple data inputs");
+        assert(parent.first->nbData() <= 1 && "deleted nodes in GraphView::clone() cannot have multiple data inputs");
         const auto& parents = parent.first->inputs();
 
         if (!parents.empty() && parents[0].first != nullptr // a valid parent exists
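
Note: the new GraphView::compile bundles the backend, data-type and dimension-forwarding steps that callers previously chained by hand. A hedged usage sketch, assuming GraphView.hpp transitively provides the DataType enum:

    #include <memory>
    #include "aidge/graph/GraphView.hpp"

    void setup(const std::shared_ptr<Aidge::GraphView>& graphView) {
        // Equivalent to setBackend("cpu"); setDataType(...); forwardDims(); in sequence.
        graphView->compile("cpu", Aidge::DataType::Float32);
    }
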
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index e6a53c871f5312c68f40dc5c9a2777729470298b..5a7b05e469daab10a4abd468177a3ad137096f63 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -15,6 +15,7 @@
 #include "aidge/operator/Producer.hpp"
 #include <memory>
 #include <vector>
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
 Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
@@ -34,7 +35,7 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
 ///////////////////////////////////////////////////////
 
 Aidge::Connector Aidge::Node::operator()(const std::vector<Connector> &ctors) {
-    assert((ctors.size() == nbDataInputs()) && "Wrong number of arguments.\n");
+    assert((ctors.size() == nbData()) && "Wrong number of arguments.\n");
     for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inputs()) {
         assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n");
         (void) input; // avoid unused warning
@@ -94,8 +95,8 @@ Aidge::IOIndex_t Aidge::Node::getNbFreeDataInputs() const {
 std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>
 Aidge::Node::dataInputs() const {
     std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
-            std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbDataInputs());
-    for (std::size_t i = 0; i < static_cast<std::size_t>(nbDataInputs()); ++i) {
+            std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbData());
+    for (std::size_t i = 0; i < static_cast<std::size_t>(nbData()); ++i) {
         res[i] = std::pair<std::shared_ptr<Node>, IOIndex_t>(mParents[i], mIdOutParents[i]);
     }
     return res;
@@ -111,18 +112,18 @@ std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> Aidge::No
     return res;
 }
 
-void Aidge::Node::setInput(const Aidge::IOIndex_t idx, const std::shared_ptr<Aidge::Tensor> tensor) {
-    assert(((idx != gk_IODefaultIndex) && (idx < nbInputs())) && "Parent index out of bound.");
-    if (mParents[idx] != nullptr) {
-        mParents[idx]->removeChild(shared_from_this(), mIdOutParents[idx]);
-        removeParent(idx);
-    }
-    std::shared_ptr<Node> newConstantNode = Producer(tensor);
-    newConstantNode->addChild(shared_from_this(), 0, idx);
-    for (auto& graphPtr : views()) {
-        graphPtr->add(newConstantNode);
-    }
-}
+// void Aidge::Node::setInput(const Aidge::IOIndex_t idx, const std::shared_ptr<Aidge::Tensor> tensor) {
+//     assert(((idx != gk_IODefaultIndex) && (idx < nbInputs())) && "Parent index out of bound.");
+//     if (mParents[idx] != nullptr) {
+//         mParents[idx]->removeChild(shared_from_this(), mIdOutParents[idx]);
+//         removeParent(idx);
+//     }
+//     std::shared_ptr<Node> newConstantNode = Producer(tensor);
+//     newConstantNode->addChild(shared_from_this(), 0, idx);
+//     for (auto& graphPtr : views()) {
+//         graphPtr->add(newConstantNode);
+//     }
+// }
 
 std::vector<std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>>
 Aidge::Node::outputs() const {
@@ -295,7 +296,7 @@ bool Aidge::Node::removeChild(const std::shared_ptr<Aidge::Node> nodePtr, const
 
 void Aidge::Node::resetConnections(bool includeLearnableParam) {
     // remove every parents reference to it
-    IOIndex_t nbRemovedInputs = includeLearnableParam ? nbInputs() : nbDataInputs();
+    IOIndex_t nbRemovedInputs = includeLearnableParam ? nbInputs() : nbData();
     for (IOIndex_t i = 0; i < nbRemovedInputs; ++i) {
         std::pair<std::shared_ptr<Node>, IOIndex_t> parent = input(i);
         if (parent.first) {
@@ -367,7 +368,7 @@ std::set<Aidge::NodePtr> Aidge::Node::getNodeDelta(int delta,std::set<Aidge::Nod
             }
         }
     }
-    
+
     return out;
 }
 /////////////////////////////////////////////////////////////////////////////////////////////
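
Note: after the nbDataInputs -> nbData rename, parameter inputs occupy the index range [nbData(), nbInputs()), the loop shape GraphView::add and GraphView::remove use above. A small sketch:

    #include <memory>
    #include "aidge/graph/Node.hpp"

    void visitParams(const std::shared_ptr<Aidge::Node>& node) {
        for (Aidge::IOIndex_t i = node->nbData(); i < node->nbInputs(); ++i) {
            std::shared_ptr<Aidge::Node> parent = node->getParent(i);
            (void)parent;   // typically the Producer holding the learnable parameter
        }
    }
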
diff --git a/src/graphRegex/matchFsm/MatchResult.cpp b/src/graphRegex/matchFsm/MatchResult.cpp
index c871b3d0e22f3fa1f28b7bcea46ee8b9f61a3178..08be00dea66c66a46dbbf2b225efd0df3f332188 100644
--- a/src/graphRegex/matchFsm/MatchResult.cpp
+++ b/src/graphRegex/matchFsm/MatchResult.cpp
@@ -1,90 +1,87 @@
+#include <algorithm> // std::set_intersection, std::sort, std::includes
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
 #include "aidge/graphRegex/matchFsm/MatchResult.hpp"
 
-using namespace Aidge; 
-
-    MatchSolution::MatchSolution(std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence,const std::string query,const std::vector<NodePtr> startNode):mQueryFrom(query),mStartNode(startNode){
-            //reformat the solution
-            for (const auto& context : precedence) {
-                for (const auto& pair : context->getValid()) {
-                    
-                    if(mSolution.find(pair.first->getKey()) == mSolution.end()){
-                        mSolution[pair.first->getKey()] = pair.second;
-                    }else{
-                         mSolution[pair.first->getKey()].insert(pair.second.begin(), pair.second.end());
-                    }
+Aidge::MatchSolution::MatchSolution(std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence,const std::string query,const std::vector<NodePtr> startNode):mQueryFrom(query),mStartNode(startNode){
+        //reformat the solution
+        for (const auto& context : precedence) {
+            for (const auto& pair : context->getValid()) {
+
+                if(mSolution.find(pair.first->getKey()) == mSolution.end()){
+                    mSolution[pair.first->getKey()] = pair.second;
+                }else{
+                        mSolution[pair.first->getKey()].insert(pair.second.begin(), pair.second.end());
                 }
             }
-    }
-
-
-    const std::set<NodePtr> & MatchSolution::at(const std::string key){
-
-        return mSolution[key];
+        }
+}
 
-    }
+const std::set<Aidge::NodePtr> Aidge::MatchSolution::getAll(){
 
-    const std::set<NodePtr> MatchSolution::getAll(){
+        // Create a unique set to store all the elements
+        std::set<NodePtr> uniqueSet;
 
-            // Create a unique set to store all the elements
-            std::set<NodePtr> uniqueSet;
+        // Iterate through the map and insert elements from each set into the unique set
+        for (const auto& pair : mSolution) {
+            const std::set<NodePtr>& nodeSet = pair.second;
 
-            // Iterate through the map and insert elements from each set into the unique set
-            for (const auto& pair : mSolution) {
-                const std::set<NodePtr>& nodeSet = pair.second;
-                
-                // Insert elements from the current set into the unique set
-                uniqueSet.insert(nodeSet.begin(), nodeSet.end());
-            }
-            
-            return uniqueSet;
+            // Insert elements from the current set into the unique set
+            uniqueSet.insert(nodeSet.begin(), nodeSet.end());
+        }
 
-    }
+        return uniqueSet;
+}
 
-    bool MatchSolution::areCompatible(std::shared_ptr<MatchSolution> solution){
-        std::set<NodePtr> set1 = solution->getAll();
-        std::set<NodePtr> set2 = getAll();
-        std::set<NodePtr> intersection ;
-        std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(), std::inserter(intersection, intersection.begin()));
-        if (intersection.empty()) {
-            return true;
-        }
-        return false;
-    }
+bool Aidge::MatchSolution::areCompatible(std::shared_ptr<Aidge::MatchSolution> solution){
+    std::set<NodePtr> set1 = solution->getAll();
+    std::set<NodePtr> set2 = getAll();
+    std::set<NodePtr> intersection ;
+    std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(), std::inserter(intersection, intersection.begin()));
+    return intersection.empty();
+}
 
-    
 
 ////////////////////////////////
 //
 ////////////////////////////////
-MatchResult::MatchResult(std::vector<std::shared_ptr<FsmRunTimeContext>> allValid, std::size_t nbSubStm,
-const std::string& query,const std::vector<NodePtr>& startNodes):mIdToRunTime(nbSubStm),mNbSubStm(nbSubStm){
-        mAllValid = allValid;
-
-        //mIdToRunTimm
-        for (const auto& contextPtr : allValid) {
-            mIdToRunTime[contextPtr->getSubStmId()].push_back(contextPtr);
-        }
+Aidge::MatchResult::MatchResult(std::vector<std::shared_ptr<Aidge::FsmRunTimeContext>> allValid,
+                                std::size_t nbSubStm,
+                                const std::string& query,
+                                const std::vector<Aidge::NodePtr>& startNodes)
+        : mIdToRunTime(nbSubStm),
+          mNbSubStm(nbSubStm)
+{
+    mAllValid = allValid;
+
+    // fill mIdToRunTime
+    for (const auto& contextPtr : allValid) {
+        mIdToRunTime[contextPtr->getSubStmId()].push_back(contextPtr);
+    }
 
-        std::vector<std::shared_ptr<FsmRunTimeContext>> precedence;
-        //make all solution possible 
-        _generateCombination(0,precedence,query,startNodes);
-        //sort by solution number of elements
-        std::sort(mSolve.begin(), mSolve.end(), [](std::shared_ptr<MatchSolution>& set1, std::shared_ptr<MatchSolution>& set2) {
+    std::vector<std::shared_ptr<FsmRunTimeContext>> precedence;
+    // generate all the possible solutions
+    _generateCombination(0,precedence,query,startNodes);
+    // sort the solutions by number of elements
+    std::sort(mSolve.begin(), mSolve.end(), [](std::shared_ptr<MatchSolution>& set1, std::shared_ptr<MatchSolution>& set2) {
         return set1->getAll().size() < set2->getAll().size();
-        });
-
-
+    });
 }
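+
+// _generateCombination() explores, one sub-STM at a time, every combination
+// of valid FsmRunTimeContext: e.g. with two sub-STMs whose contexts are
+// {a, b} and {c}, it tries (a, c) then (b, c) and keeps only the
+// combinations whose rejected nodes are all validated by another context.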
 
-void MatchResult::_generateCombination( std::size_t idxSubStm, std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence,
-const std::string& query,const std::vector<NodePtr>& startNodes){
-
+void Aidge::MatchResult::_generateCombination(std::size_t idxSubStm,
+                                        std::vector<std::shared_ptr<Aidge::FsmRunTimeContext>>& precedence,
+                                        const std::string& query,
+                                        const std::vector<Aidge::NodePtr>& startNodes)
+{
     // end of the recursion: idxSubStm has reached the number of sub-STMs
     if (idxSubStm == mNbSubStm)
     {
-        //precedence contain a list of FSM compatible, we just need to 
-        //check if all the nodes have been validated by at least one context 
-        
+        //precedence contains a list of compatible FSM contexts; we just need to
+        //check that every node has been validated by at least one context
+
         //1) gather all the compute-graph nodes that are valid in any of the FsmRunTimeContext
         std::set<NodePtr> validNode;
         std::set<NodePtr> rejectNode;
@@ -94,10 +91,10 @@ const std::string& query,const std::vector<NodePtr>& startNodes){
             std::set<NodePtr> tmpR =  contextPtr->getRejectedNodes();
             rejectNode.insert(tmpR.begin(),tmpR.end());
         }
-        // 2) all  RejectedNodes need to be valid by an others stm 
+        // 2) every rejected node must be validated by another sub-STM;
         // if that is not the case, the match is not valid
         if(std::includes(validNode.begin(), validNode.end(), rejectNode.begin(), rejectNode.end())){
-            //we can save the solution 
+            //we can save the solution
             mSolve.push_back(std::make_shared<MatchSolution>(precedence,query,startNodes));
         }
         precedence.pop_back();
@@ -105,12 +102,12 @@ const std::string& query,const std::vector<NodePtr>& startNodes){
     }
 
 
-    for (const auto& contextPtrOneFsm : mIdToRunTime[idxSubStm]) 
+    for (const auto& contextPtrOneFsm : mIdToRunTime[idxSubStm])
     {
         if(idxSubStm == 0){
             precedence.push_back(contextPtrOneFsm);
             _generateCombination(idxSubStm+1,precedence,query,startNodes);
-            
+
         }else{
             //test if the new context is compatible with all the contexts already in precedence
             //
@@ -137,16 +134,6 @@ const std::string& query,const std::vector<NodePtr>& startNodes){
 
 }
 
-std::shared_ptr<MatchSolution> MatchResult::getBiggerSolution(void){
-
-    if(mSolve.empty()){
-        return nullptr;
-    }else{
-        return mSolve[0];
-    }
-    
-}
-
-std::vector<std::shared_ptr<MatchSolution>> MatchResult::getSolutions(void){
-    return mSolve;
+std::shared_ptr<Aidge::MatchSolution> Aidge::MatchResult::getBiggerSolution(void){
+    return mSolve.empty() ? nullptr : mSolve[0];
 }
\ No newline at end of file
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..273eac2e8fa9623e617d1be204ac2ae46d8da02d
--- /dev/null
+++ b/src/operator/Div.cpp
@@ -0,0 +1,35 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstddef>
+#include <vector>
+#include <utility>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Div.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+void Aidge::Div_Op::computeOutputDims() {
+    // check inputs have been associated
+    if (!getInput(0) || !getInput(1)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
+    }
+
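+    // Cases accepted below, e.g. with getInput(0) of dims {4, 3}:
+    // getInput(1) of size 1 (division by a single value), of dims {4, 3}
+    // (element-wise division), or of dims {3} (a 1-D tensor matching the
+    // last dimension). Other broadcast patterns leave the output unresized.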
+    if ((!getInput(0)->empty()) &&
+        ((getInput(1)->size() == 1) || // div by a single value
+        (getInput(1)->size() == getInput(0)->size()) || // div elem-wise
+        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // div by a Tensor with one dimension of output size
+    {
+        mOutputs[0]->resize(getInput(0)->dims());
+    }
+}
\ No newline at end of file
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 23a98152a2b155b5e059c25e616eee47040c0aed..bbc921d3c7b334223b2a92a8fbfee1ffae9c10e1 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -15,14 +15,9 @@
 Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph,
     std::vector<NodePtr> inputNodes,
     std::vector<NodePtr> outputNodes)
-    : Operator(type),
+    : OperatorTensor(type, graph->dataInputs().size(), (graph->inputs().size() - graph->dataInputs().size()), graph->outputs().size()),
         mGraph(graph)
 {
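+    // The OperatorTensor I/O counts are deduced from the micro-graph:
+    // dataInputs() gives the data inputs, the remaining graph inputs are
+    // parameter inputs, and every graph output becomes a meta-operator output.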
-    mInputs = std::vector<std::shared_ptr<Tensor>>(mGraph->inputs().size());
-    for (std::size_t i = 0; i < mInputs.size(); ++i) {
-        mInputs[i] = std::make_shared<Tensor>();
-    }
-
     // Fill inputsNodes and outputsNodes when there is no ambiguity
     if (inputNodes.empty()) {
         AIDGE_ASSERT(mGraph->inputNodes().size() == 1, "need to specify internal nodes input mapping");
@@ -49,7 +44,7 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<
                 // The input is not connected inside the micro-graph
                 // (no connection to this input or connection outside the micro-graph)
                 // => it is therefore an input for the meta-operator
-                mInputOps.push_back(std::make_pair(inputNode->getOperator(), inputIdx));
+                mInputOps.push_back(std::make_pair(std::dynamic_pointer_cast<OperatorTensor>(inputNode->getOperator()), inputIdx));
             }
 
             ++inputIdx;
@@ -63,14 +58,13 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<
             outputNode->outputs();
 
         for (size_t outputIdx = 0; outputIdx < outputNodeoutputs.size(); ++outputIdx) {
-            mOutputOps.push_back(std::make_pair(outputNode->getOperator(), outputIdx));
+            mOutputOps.push_back(std::make_pair(std::dynamic_pointer_cast<OperatorTensor>(outputNode->getOperator()), outputIdx));
         }
     }
 
 
     AIDGE_INTERNAL_ASSERT(mInputOps.size() == mGraph->inputs().size());
     AIDGE_INTERNAL_ASSERT(mOutputOps.size() == mGraph->outputs().size());
-    mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->outputs().size());
     // Associate outputs to micro-graph outputs for custom implementation
     for (size_t outputIdx = 0; outputIdx < mOutputOps.size(); ++outputIdx) {
         const auto& outputOp = mOutputOps[outputIdx];
@@ -118,6 +112,7 @@ void Aidge::MetaOperator_Op::updateConsummerProducer() {
             mScheduler = std::make_shared<SequentialScheduler>(mGraph);
         }
 
+
         // TODO: check that generateScheduling() can be called multiple time to iteratively update the schedule.
         // It could be a good idea to unify updateConsummerProducer() and generateScheduling() into a "updateScheduling()"
         mScheduler->generateScheduling();
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2e3e77288bf1e0613f0aa572e3c50e94599a902f
--- /dev/null
+++ b/src/operator/Mul.cpp
@@ -0,0 +1,35 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstddef>
+#include <vector>
+#include <utility>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+void Aidge::Mul_Op::computeOutputDims() {
+    // check inputs have been associated
+    if (!getInput(0) || !getInput(1)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
+    }
+
+    if ((!getInput(0)->empty()) &&
+        ((getInput(1)->size() == 1) || // mul by a single value
+        (getInput(1)->size() == getInput(0)->size()) || // mul elem-wise
+        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // mul by a Tensor with one dimension of output size
+    {
+        mOutputs[0]->resize(getInput(0)->dims());
+    }
+}
\ No newline at end of file
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index 09a17a428e1de91c0318f710e6f097573cf529a6..eb94db87df250767967348c3adfed8a1e35b4c5f 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -10,10 +10,14 @@
  ********************************************************************************/
 
 #include <cassert>
+#include <cstddef>
+#include <vector>
+#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 // constexpr Aidge::Operator::Operator(const char* type)
 //     : mType(type)
@@ -21,12 +25,35 @@
 // 	// ctor
 // }
 
-Aidge::Operator::~Operator() = default;
+Aidge::Operator::~Operator() noexcept = default;
 
 ///////////////////////////////////////////////////////
 //        IMPLEMENTATION
 ///////////////////////////////////////////////////////
 
+// std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::Operator::computeReceptiveField(
+//         const std::size_t firstIdx, const std::vector<Aidge::DimSize_t>& outputDims, const Aidge::IOIndex_t outputIdx) const
+// {
+//     static_cast<void>(outputIdx);
+//     if (outputIdx >= nbOutputs()) {
+//         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range.");
+//     }
+//     if (nbInputs() != nbDataInputs()) {
+//         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overrided function.");
+//     }
+//     if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
+//         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+//     }
+//     const auto outputIdxDims = getOutput(0)->getCoord(firstIdx);
+//     for (DimIdx_t i = 0; i < outputDims.size(); ++i) {
+//         if (((outputDims[i] + outputIdxDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) {
+//             AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+//         }
+//     }
+//     // return the same Tensor description as given in function parameter for each data input
+//     return std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>>(nbDataInputs(),std::pair<std::size_t, std::vector<Aidge::DimSize_t>>(firstIdx, outputDims));
+// }
+
 Aidge::NbElts_t Aidge::Operator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     return mImpl->getNbRequiredData(inputIdx);
 }
@@ -48,8 +75,12 @@ void Aidge::Operator::runHooks() const {
     }
 }
 void Aidge::Operator::forward() {
-    mImpl->forward();
-    runHooks();
+    if (mImpl) {
+        mImpl->forward();
+        runHooks();
+    } else {
+        printf("forward: No implementation is linked.\n");
+    }
 }
 
 void Aidge::Operator::backward() { mImpl->backward(); }
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1d16e9064010269174501d3c824c705c36971641
--- /dev/null
+++ b/src/operator/OperatorTensor.cpp
@@ -0,0 +1,135 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstring>
+#include <memory>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+
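+// OperatorTensor implements the Tensor-specific I/O handling shared by
+// operators: input association, copy/move input and output setters, a
+// default computeOutputDims() that forwards identical input dims, and
+// data-type propagation to inputs and outputs.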
+void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
+    if (inputIdx >= nbInputs()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu inputs", type().c_str(), nbInputs());
+    }
+    if (strcmp((data)->type(), Tensor::Type) != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input data must be of Tensor type");
+    }
+    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+}
+
+void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
+    if (strcmp(data->type(), "Tensor") != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
+    }
+    if (getInput(inputIdx)) {
+        *mInputs[inputIdx] = *std::dynamic_pointer_cast<Tensor>(data);
+    } else {
+        mInputs[inputIdx] = std::make_shared<Tensor>(*std::dynamic_pointer_cast<Tensor>(data));
+    }
+}
+
+Aidge::OperatorTensor::~OperatorTensor() = default;
+
+void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, std::shared_ptr<Aidge::Data>&& data) {
+    if (strcmp(data->type(), "Tensor") != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
+    }
+    if (getInput(inputIdx)) {
+        *mInputs[inputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
+    } else {
+        mInputs[inputIdx] = std::make_shared<Tensor>(std::move(*std::dynamic_pointer_cast<Tensor>(data)));
+    }
+}
+
+const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidge::IOIndex_t inputIdx) const {
+    if (inputIdx >= nbInputs()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu inputs", type().c_str(), nbInputs());
+    }
+    return mInputs[inputIdx];
+}
+
+void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) {
+    if (strcmp(data->type(), "Tensor") != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
+    }
+    if (outputIdx >= nbOutputs()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbOutputs());
+    }
+    *mOutputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data);
+}
+
+void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) {
+    if (strcmp(data->type(), "Tensor") != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
+    }
+    if (outputIdx >= nbOutputs()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbOutputs());
+    }
+    *mOutputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
+}
+
+const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aidge::IOIndex_t outputIdx) const {
+    if (outputIdx >= nbOutputs()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbOutputs());
+    }
+    return mOutputs[outputIdx];
+}
+
+
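+// Default dimension propagation: every input must share the same dims and
+// the first output is resized to them (e.g. two inputs of dims {8, 16} give
+// an output of dims {8, 16}). Operators with other semantics, such as
+// Div_Op or Mul_Op, override this method.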
+void Aidge::OperatorTensor::computeOutputDims() {
+    // check inputs have been associated
+    bool associated = (nbInputs() > 0); // do not compute anything if no input
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        associated &= !(getInput(i)->empty());
+    }
+    if (associated) {
+        const auto expectedDims =  getInput(0)->dims();
+        for (std::size_t i = 1; i < nbInputs(); ++i) {
+            if (expectedDims != getInput(i)->dims()) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator's inputs should have the same dimensions");
+            }
+        }
+        mOutputs[0]->resize(expectedDims);
+    }
+}
+
+bool Aidge::OperatorTensor::outputDimsForwarded() const {
+    bool forwarded = true;
+    // check both inputs and outputs have been filled
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
+    }
+    for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
+        forwarded &= !(getOutput(i)->empty());
+    }
+    return forwarded;
+}
+
+void Aidge::OperatorTensor::setDataType(const DataType& dataType) const {
+    for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
+        getOutput(i)->setDataType(dataType);
+    }
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not set");
+        }
+        else {
+            getInput(i)->setDataType(dataType);
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c213a47a4a590026c07625aeb532d303ca8dbced
--- /dev/null
+++ b/src/operator/Pow.cpp
@@ -0,0 +1,35 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstddef>
+#include <vector>
+#include <utility>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Pow.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+void Aidge::Pow_Op::computeOutputDims() {
+    // check inputs have been associated
+    if (!getInput(0) || !getInput(1)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
+    }
+
+    if ((!getInput(0)->empty()) &&
+        ((getInput(1)->size() == 1) || // pow by a single value
+        (getInput(1)->size() == getInput(0)->size()) || // pow elem-wise
+        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // pow by a Tensor with one dimension of output size
+    {
+        mOutputs[0]->resize(getInput(0)->dims());
+    }
+}
\ No newline at end of file
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8175f1b7ae5bb5eccd36267c1d739f764bd3c236
--- /dev/null
+++ b/src/operator/Sub.cpp
@@ -0,0 +1,35 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstddef>
+#include <vector>
+#include <utility>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Sub.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+void Aidge::Sub_Op::computeOutputDims() {
+    // check inputs have been associated
+    if (!getInput(0) || !getInput(1)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
+    }
+
+    if ((!getInput(0)->empty()) &&
+        ((getInput(1)->size() == 1) || // sub by a single value
+        (getInput(1)->size() == getInput(0)->size()) || // sub elem-wise
+        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // sub by a Tensor with one dimension of output size
+    {
+        mOutputs[0]->resize(getInput(0)->dims());
+    }
+}
\ No newline at end of file
diff --git a/src/recipies/FuseBatchNorm.cpp b/src/recipies/FuseBatchNorm.cpp
index 6e345a6474821230f95900cc20cba501feabd1d9..ffb4599d83ba922ce5991460810f5d248806617c 100644
--- a/src/recipies/FuseBatchNorm.cpp
+++ b/src/recipies/FuseBatchNorm.cpp
@@ -12,11 +12,11 @@
 #include <cassert>
 #include <memory>
 #include <string>
+#include <cmath>
+
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/Conv.hpp"
-
-#include "aidge/utils/Recipies.hpp"
+#include "aidge/recipies/Recipies.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Producer.hpp"
@@ -26,70 +26,68 @@
 //Graph Regex
 #include "aidge/graphRegex/GraphRegex.hpp"
 
-using namespace Aidge;
-
-void Aidge::fuseBatchNorm(std::shared_ptr<Node> conv,std::shared_ptr<Node> batchnorm){
+void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode, std::shared_ptr<Aidge::Node> batchnormNode) {
 
+    // TODO: Find a way to remove the template
+    // A feature map with 2 dimensions is assumed
+    const std::shared_ptr<BatchNorm_Op<2>> batchOp = std::static_pointer_cast<BatchNorm_Op<2>>(batchnormNode->getOperator());
+    const std::shared_ptr<Conv_Op<2>> convOp = std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
 
+    const std::shared_ptr<Tensor> scale  = batchOp->getInput(1);
+    const std::shared_ptr<Tensor> shift  = batchOp->getInput(2);
+    const std::shared_ptr<Tensor> b_mean = batchOp->getInput(3);
+    const std::shared_ptr<Tensor> b_var  = batchOp->getInput(4);
 
-    std::shared_ptr<Tensor> scale  = batchnorm->input(1).first->getOperator()->getOutput(batchnorm->input(1).second);
-    std::shared_ptr<Tensor> shift  = batchnorm->input(2).first->getOperator()->getOutput(batchnorm->input(2).second);
-    std::shared_ptr<Tensor> b_mean = batchnorm->input(3).first->getOperator()->getOutput(batchnorm->input(3).second);
-    std::shared_ptr<Tensor> b_var  = batchnorm->input(4).first->getOperator()->getOutput(batchnorm->input(4).second);
+    const float epsilon = batchOp->getAttr<float>("Epsilon");
+    const DimSize_t convNbOutChannels = convOp->getAttr<DimSize_t>("OutChannels");
+    const DimSize_t channelsSize = convOp->getAttr<DimSize_t>("InChannels");
+    const std::array<DimSize_t, 2> kernelDims = convOp->getAttr<std::array<DimSize_t, 2>>("KernelDims");
 
 
-    // TODO : Find a way to remove the template
-    const float epsilon = std::static_pointer_cast<BatchNorm_Op<2>>(batchnorm->getOperator())->getAttr<float>("Epsilon");
-    DimSize_t convOutDims = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("OutChannels");
-
-
-    assert(scale->size()  == convOutDims);
-    assert(shift->size()  == convOutDims);
-    assert(b_mean->size() == convOutDims);
-    assert(b_var->size()  == convOutDims);
+    assert(scale->size()  == convNbOutChannels);
+    assert(shift->size()  == convNbOutChannels);
+    assert(b_mean->size() == convNbOutChannels);
+    assert(b_var->size()  == convNbOutChannels);
     assert(epsilon > 0.0);
     // TODO : no no_bias attribute ?
+
+
     float meanVariance = 0.0;
     unsigned int count = 0;
 
-    for (std::size_t output = 0; output < convOutDims; ++output) {
-        // TODO : get suppose datatype is float ..
-        if (b_var->get<float>(output) > 1.0e-12) {
-            meanVariance += b_var->get<float>(output);
+    for (std::size_t outChId = 0; outChId < convNbOutChannels; ++outChId) {
+        // TODO: get() assumes dataType is float...
+        if (b_var->get<float>(outChId) > 1.0e-12) {
+            meanVariance += b_var->get<float>(outChId);
             ++count;
         }
         else {
-            printf("Zero-variance: %s [%lu]\n", conv->name().c_str(), output);
+            printf("Zero-variance: %s [%lu]\n", convNode->name().c_str(), outChId);
         }
     }
     if (count > 0)
         meanVariance /= count;
     else {
-        printf("variance < 1e-12 for all outputs! Is the network correctly trained?\n");
+        printf("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
     }
 
-    const DimSize_t channelsSize = std::dynamic_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("InChannels");
-
-    // TODO : suppose we have Conv2D ...
-    const std::array<DimSize_t, 2> kernelDims = std::dynamic_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<std::array<DimSize_t, 2>>("KernelDims");
-
-    std::shared_ptr<Tensor> weight  = conv->input(1).first->getOperator()->getOutput(conv->input(1).second);
-    std::shared_ptr<Tensor> bias  = conv->input(2).first->getOperator()->getOutput(conv->input(2).second);
+    std::shared_ptr<Tensor> weight = convOp->getInput(1);
+    std::shared_ptr<Tensor> bias = convOp->getInput(2);
 
-    for (std::size_t output = 0; output < convOutDims; ++output) {
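+    // Folding applied below, per output channel:
+    //   bn(y) = scale * (y - mean) / sqrt(var + eps) + shift
+    // so, with factor = scale / sqrt(var + eps), the fused Conv uses
+    //   weight' = weight * factor  and  bias' = shift + (bias - mean) * factor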
+    for (std::size_t outChId = 0; outChId < convNbOutChannels; ++outChId) {
         // Corrected for zero-variance issue:
         // "A Quantization-Friendly Separable Convolution for MobileNets"
         // https://arxiv.org/pdf/1803.08607.pdf
         // to help post-training quantization
-        const float factor = scale->get<float>(output)
-            / std::sqrt(epsilon + ((b_var->get<float>(output) > 1.0e-12 || count == 0)
-                        ? b_var->get<float>(output) : meanVariance));
+        const float factor = scale->get<float>(outChId)
+            / std::sqrt(epsilon + ((b_var->get<float>(outChId) > 1.0e-12 || count == 0)
+                        ? b_var->get<float>(outChId) : meanVariance));
         // Weights adjustments
         for (std::size_t channel = 0; channel < channelsSize; ++channel) {
             // TODO : Suppose kerneldims = 2
             for(std::size_t k0 = 0; k0 < kernelDims[0]; ++ k0){
                 for(std::size_t k1 = 0; k1 < kernelDims[1]; ++ k1){
-                    std::vector<DimSize_t> currentIdx = {output, channel, k0, k1};
+                    std::vector<DimSize_t> currentIdx = {outChId, channel, k0, k1};
                     // TODO : suppose weights are float
                     float weightValue = weight->get<float>(currentIdx);
                     weight->set<float>(currentIdx, weightValue*factor); // update; check that this does update the Conv weights
@@ -98,25 +96,25 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Node> conv,std::shared_ptr<Node> batch
         }
 
         // TODO : check if noBias==true is set, then set biasValue to 0
-        float biasValue = bias->get<float>(output);
+        float biasValue = bias->get<float>(outChId);
 
-        biasValue = shift->get<float>(output) + (biasValue - b_mean->get<float>(output)) * factor;
+        biasValue = shift->get<float>(outChId) + (biasValue - b_mean->get<float>(outChId)) * factor;
 
-        bias->set<float>(output, biasValue);
+        bias->set<float>(outChId, biasValue);
 
     }
 
     GraphView::replace(std::set<std::shared_ptr<Node>>({
-        batchnorm,
-        batchnorm->input(1).first,
-        batchnorm->input(2).first,
-        batchnorm->input(3).first,
-        batchnorm->input(4).first
+        batchnormNode,
+        batchnormNode->input(1).first,
+        batchnormNode->input(2).first,
+        batchnormNode->input(3).first,
+        batchnormNode->input(4).first
         }), {});
 
 }
 
-void Aidge::fuseBatchNorm(std::shared_ptr<MatchSolution> solution){
+void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::MatchSolution> solution) {
 
     assert(solution->at("BatchNorm").size() == 1 && "Wrong number of nodes BatchNorm to replace\n");
     assert(solution->at("OP").size() == 1 && "Wrong number of nodes OP to replace\n");
@@ -129,7 +127,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<MatchSolution> solution){
 
 }
 
-void Aidge::fuseBatchNorm(std::shared_ptr<GraphView> graphView){
+void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::GraphView> graphView) {
 
 
     std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
@@ -143,5 +141,4 @@ void Aidge::fuseBatchNorm(std::shared_ptr<GraphView> graphView){
         fuseBatchNorm(solution);
 
     }
-
-}
+}
\ No newline at end of file
diff --git a/src/recipies/FuseMulAdd.cpp b/src/recipies/FuseMulAdd.cpp
index df0fb5eff2febc93edee1719939dfcfde1bc210a..d37f4749635b2bf76d10f7f8de3a44e254c56347 100644
--- a/src/recipies/FuseMulAdd.cpp
+++ b/src/recipies/FuseMulAdd.cpp
@@ -15,34 +15,34 @@
 #include <string>
 
 #include "aidge/operator/FC.hpp"
-#include "aidge/utils/Recipies.hpp"
+#include "aidge/recipies/Recipies.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/operator/MatMul.hpp"
 
 //Graph Regex
 #include "aidge/graphRegex/GraphRegex.hpp"
 
-using namespace Aidge;
 
-void Aidge::fuseMulAdd(std::shared_ptr<Node> matmul,std::shared_ptr<Node> add){//std::set<std::shared_ptr<Node>> nodes){
+void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<Aidge::Node> addNode) { //std::set<std::shared_ptr<Node>> nodes){
     // Fuse MatMul & Add into FC
     // Inputs: old nodes (pointers to MatMul & Add)
 
-    assert((matmul->type() == "MatMul" && add->type() == "Add") && "Wrong type for the nodes to replace");
+    assert((matmulNode->type() == "MatMul" && addNode->type() == "Add") && "Wrong type for the nodes to replace");
 
 
     // Step 1 : Create FC
     // Fetch the output dimension through the bias size
-    std::shared_ptr<Node> bias = (add->getParent(1)) ? add->getParent(1)->cloneSharedOperators() : nullptr;
+    std::shared_ptr<Node> bias = (addNode->getParent(1)) ? addNode->getParent(1)->cloneSharedOperators() : nullptr;
 
-    if (!(matmul->getParent(1))) {
+    if (!(matmulNode->getParent(1))) {
         AIDGE_INTERNAL_ASSERT("No weight detected to produce the fuseMulAdd recipe.");
     }
-    std::shared_ptr<Node> weight = matmul->getParent(1)->cloneSharedOperators();
-    DimSize_t outSize = weight->getOperator()->output(0).dims<2>()[1];
+    std::shared_ptr<Node> weight = matmulNode->getParent(1)->cloneSharedOperators();
+    const DimSize_t outSize = std::dynamic_pointer_cast<MatMul_Op>(matmulNode->getOperator())->getAttr<DimSize_t>("OutChannels");
 
     // Instantiate FC
     //std::shared_ptr<Node> fc = FC(dim[0], false, "Fc");
@@ -61,25 +61,25 @@ void Aidge::fuseMulAdd(std::shared_ptr<Node> matmul,std::shared_ptr<Node> add){/
         // Case 2 : If not all nodes are in a graph view : only delete the nodes from the graphview
         // Maybe create a central mechanism to automatically update all graph views, rather than having each node remember its GraphView membership?
     auto newNodes = std::set<std::shared_ptr<Node>>({fc, weight, fc->getParent(2)});
-    GraphView::replace({matmul, add, add->getParent(1), matmul->getParent(1)}, newNodes);
+    GraphView::replace({matmulNode, addNode, addNode->getParent(1), matmulNode->getParent(1)}, newNodes);
 
 }
 
 
-void Aidge::fuseMulAdd(std::shared_ptr<MatchSolution> solution){
+void Aidge::fuseMulAdd(std::shared_ptr<Aidge::MatchSolution> solution){
 
     assert(solution->at("MatMul").size() == 1 && "Wrong number of nodes MatMul to replace\n");
     assert(solution->at("Add").size() == 1 && "Wrong number of nodes Add to replace\n");
 
-    for (const auto& matmul : solution->at("MatMul")) {
-        for (const auto& add : solution->at("Add")) {
-            fuseMulAdd(matmul,add);
+    for (const auto& matmulNode : solution->at("MatMul")) {
+        for (const auto& addNode : solution->at("Add")) {
+            fuseMulAdd(matmulNode,addNode);
         }
     }
 }
 
 
-void Aidge::fuseMulAdd(std::shared_ptr<GraphView> graphView){
+void Aidge::fuseMulAdd(std::shared_ptr<Aidge::GraphView> graphView){
 
 
     std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
@@ -90,10 +90,8 @@ void Aidge::fuseMulAdd(std::shared_ptr<GraphView> graphView){
     for (const auto& solution : regex->match(graphView)) {
 
         fuseMulAdd(solution);
-        
 
 
-    }
-
 
-}
+    }
+}
\ No newline at end of file
diff --git a/src/recipies/LabelGraph.cpp b/src/recipies/LabelGraph.cpp
index 369336f7981198f962d8ab949309005be9ac5eb9..6966bb81d000b62d904f800233048fa58998c6fb 100644
--- a/src/recipies/LabelGraph.cpp
+++ b/src/recipies/LabelGraph.cpp
@@ -22,7 +22,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == Conv_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<Conv_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<ConvAttr::KernelDims>(), op->getAttr<ConvAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvAttr::KernelDims>(), op->template getAttr<ConvAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -30,7 +30,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == ConvDepthWise_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<ConvDepthWise_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<ConvDepthWiseAttr::KernelDims>(), op->getAttr<ConvDepthWiseAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvDepthWiseAttr::KernelDims>(), op->template getAttr<ConvDepthWiseAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -38,7 +38,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == AvgPooling_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<AvgPooling_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<AvgPoolingAttr::KernelDims>(), op->getAttr<AvgPoolingAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<AvgPoolingAttr::KernelDims>(), op->template getAttr<AvgPoolingAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
diff --git a/src/recipies/RemoveDropout.cpp b/src/recipies/RemoveDropout.cpp
index c1b3da4a54601a73a2d29deb7aceec8f893040e0..a159f3b85079a54dc140b1bdaf2d3d9fd21528be 100644
--- a/src/recipies/RemoveDropout.cpp
+++ b/src/recipies/RemoveDropout.cpp
@@ -13,7 +13,7 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
-#include "aidge/utils/Recipies.hpp"
+#include "aidge/recipies/Recipies.hpp"
 
 //Graph Regex
 #include "aidge/graphRegex/GraphRegex.hpp"
diff --git a/src/recipies/RemoveFlatten.cpp b/src/recipies/RemoveFlatten.cpp
index 0dc8d856f88f1fbf7d530338072aa5b34007caaf..d571b53023b7665c25aedc869628045b3b13d509 100644
--- a/src/recipies/RemoveFlatten.cpp
+++ b/src/recipies/RemoveFlatten.cpp
@@ -13,7 +13,7 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
-#include "aidge/utils/Recipies.hpp"
+#include "aidge/recipies/Recipies.hpp"
 
 
 //Graph Regex
diff --git a/unit_tests/graph/Test_Connector.cpp b/unit_tests/graph/Test_Connector.cpp
index ef70521d0552f87a9f293ea03ef99bcfed7c13f2..a7cee610e0014dc024271a008ed964fa67d367ea 100644
--- a/unit_tests/graph/Test_Connector.cpp
+++ b/unit_tests/graph/Test_Connector.cpp
@@ -26,19 +26,19 @@ TEST_CASE("[core/graph] Connector(Constructor)") {
         REQUIRE(x.node() == nullptr);
     }
     SECTION("0 output") {
-        std::shared_ptr<Node> node = GenericOperator("Producer",1,1,0);
+        std::shared_ptr<Node> node = GenericOperator("Producer", 1, 0, 0);
         Connector x = Connector(node);
         REQUIRE(x.index() == gk_IODefaultIndex);
         REQUIRE(x.node() == node);
     }
     SECTION("1 output") {
-        std::shared_ptr<Node> node = GenericOperator("ReLU",1,1,1);
+        std::shared_ptr<Node> node = GenericOperator("ReLU", 1, 0, 1);
         Connector x = Connector(node);
         REQUIRE(x.index() == 0);
         REQUIRE(x.node() == node);
     }
     SECTION("Several outputs") {
-        std::shared_ptr<Node> node = GenericOperator("Split",1,1,2);
+        std::shared_ptr<Node> node = GenericOperator("Split", 1, 0, 2);
         Connector x = Connector(node);
         REQUIRE(x.index() == gk_IODefaultIndex);
         REQUIRE(x.node() == node);
@@ -47,30 +47,30 @@ TEST_CASE("[core/graph] Connector(Constructor)") {
 
 TEST_CASE("Connector connections Node", "[Connector]") {
     SECTION("0 input / 0 output") {
-        std::shared_ptr<Node> fic = GenericOperator("Display",0,0,0);
+        std::shared_ptr<Node> fic = GenericOperator("Display", 0, 0, 0);
         Connector x;
         x = (*fic)({});
         REQUIRE(x.node() == fic);
     }
     SECTION("1 input / 0 output") {
-        std::shared_ptr<Node> fic = GenericOperator("Loss",1,1,0);
+        std::shared_ptr<Node> fic = GenericOperator("Loss", 1, 0, 0);
         Connector x;
         x = (*fic)({x});
         REQUIRE(x.node() == fic);
     }
     SECTION("0 input / 1 output") { // Producers
-        std::shared_ptr<Node> fic = GenericOperator("Producer",0,0,1);
+        std::shared_ptr<Node> fic = GenericOperator("Producer", 0, 0, 1);
         Connector x = (*fic)({});
         REQUIRE(x.node() == fic);
     }
     SECTION("1 input / 1 output") {
-        std::shared_ptr<Node> fic = GenericOperator("Conv",1,1,1);
+        std::shared_ptr<Node> fic = GenericOperator("Conv", 1, 0, 1);
         Connector x(GenericOperator("Producer",0,0,1));
         x = (*fic)({x});
         REQUIRE(x.node() ==fic);
     }
     SECTION("2+ inputs / 1 output") { // ElemWise
-        std::shared_ptr<Node> fic = GenericOperator("fictive",3,3,1);
+        std::shared_ptr<Node> fic = GenericOperator("fictive", 3, 0, 1);
         Connector x1(GenericOperator("fictive",0,0,1));
         Connector x2(GenericOperator("fictive",0,0,1));
         Connector x3(GenericOperator("fictive",0,0,1));
@@ -78,9 +78,9 @@ TEST_CASE("Connector connections Node", "[Connector]") {
         REQUIRE(x.node() ==fic);
     }
     SECTION("1 input / 2+ outputs") { // Slice
-        std::shared_ptr<Node> fic = GenericOperator("fictive",1,1,3);
+        std::shared_ptr<Node> fic = GenericOperator("fictive", 1, 0, 3);
 
-        Connector x(GenericOperator("fictive2",0,0,1));
+        Connector x(GenericOperator("fictive2", 0, 0, 1));
         Connector y;
         REQUIRE_NOTHROW(y = (*fic)({x}));
         REQUIRE(y[0].node() == fic);
@@ -91,16 +91,16 @@ TEST_CASE("Connector connections Node", "[Connector]") {
 
 TEST_CASE("GraphGeneration from Connector", "[GraphView]") {
 
-    auto node01 = GenericOperator("Conv",0,0,1,"g_conv1");
-    auto node02 = GenericOperator("ReLU",1,1,1,"g_relu");
-    auto node03 = GenericOperator("g_maxpool1", 1,1,1);
-    auto node04 = GenericOperator("g_conv2_par1",1,1,1);
-    auto node05 = GenericOperator("g_relu2_par1", 1,1,1);
-    auto node06 = GenericOperator("g_conv2_par2", 1,1,1);
-    auto node07 = GenericOperator("g_relu2_par2", 1,1,1);
-    auto node08 = GenericOperator("g_concat", 2,2,1);
-    auto node09 = GenericOperator("g_conv3", 1, 1,1);
-    auto node10 = GenericOperator("g_matmul1", 2,2,1);
+    auto node01 = GenericOperator("Conv", 0, 0, 1,"g_conv1");
+    auto node02 = GenericOperator("ReLU", 1, 0, 1,"g_relu");
+    auto node03 = GenericOperator("g_maxpool1", 1, 0, 1);
+    auto node04 = GenericOperator("g_conv2_par1", 1, 0, 1);
+    auto node05 = GenericOperator("g_relu2_par1", 1, 0, 1);
+    auto node06 = GenericOperator("g_conv2_par2", 1, 0, 1);
+    auto node07 = GenericOperator("g_relu2_par2", 1, 0, 1);
+    auto node08 = GenericOperator("g_concat", 2, 0, 1);
+    auto node09 = GenericOperator("g_conv3", 1, 0, 1);
+    auto node10 = GenericOperator("g_matmul1", 2, 0, 1);
     Connector a = (*node01)({});
     Connector x = (*node02)({a});
     x = (*node03)({x});
@@ -118,38 +118,38 @@ TEST_CASE("GraphGeneration from Connector", "[GraphView]") {
 TEST_CASE("Connector connection GraphView", "[Connector]") {
     SECTION("1 input") {
         Connector x = Connector();
-        auto prod = GenericOperator("Producer",0,0,1);
+        auto prod = GenericOperator("Producer", 0, 0, 1);
         auto g = Residual({
-            GenericOperator("g_conv1", 1,1,1),
-            GenericOperator("g_relu", 1,1,1),
-            GenericOperator("g_maxpool1", 1,1,1),
+            GenericOperator("g_conv1", 1, 0, 1),
+            GenericOperator("g_relu", 1, 0, 1),
+            GenericOperator("g_maxpool1", 1, 0, 1),
             Parallel({
-                Sequential({GenericOperator("g_conv2_par1",1,1,1), GenericOperator("g_relu2_par1", 1,1,1)}),
-                Sequential({GenericOperator("g_conv2_par2", 1,1,1), GenericOperator("g_relu2_par2", 1,1,1)})
+                Sequential({GenericOperator("g_conv2_par1", 1, 0, 1), GenericOperator("g_relu2_par1", 1, 0, 1)}),
+                Sequential({GenericOperator("g_conv2_par2", 1, 0, 1), GenericOperator("g_relu2_par2", 1, 0, 1)})
             }),
-            GenericOperator("g_concat", 2,2,1),
-            GenericOperator("g_conv3", 1, 1,1),
-            GenericOperator("g_matmul1", 2,2,1)
+            GenericOperator("g_concat", 2, 0, 1),
+            GenericOperator("g_conv3", 1, 0, 1),
+            GenericOperator("g_matmul1", 2, 0, 1)
         });
         x = (*prod)({});
         x = (*g)({x});
         std::shared_ptr<GraphView> g2 = generateGraph({x});
         std::shared_ptr<GraphView> g3 = g;
         g3->add(prod);
-        REQUIRE(*g3== *g2);
+        REQUIRE(*g3 == *g2);
     }
     SECTION("2+ inputs") {
-        Connector x = (*GenericOperator("Producer",0,0,1))({});
-        Connector y = (*GenericOperator("Producer",0,0,1))({});
-        Connector z = (*GenericOperator("Producer",0,0,1))({});
-        auto g = Sequential({GenericOperator("ElemWise", 3,3,1),
+        Connector x = (*GenericOperator("Producer", 0, 0, 1))({});
+        Connector y = (*GenericOperator("Producer", 0, 0, 1))({});
+        Connector z = (*GenericOperator("Producer", 0, 0, 1))({});
+        auto g = Sequential({GenericOperator("ElemWise", 3, 0, 1),
             Parallel({
-                Sequential({GenericOperator("g_conv2_par1",1,1,1), GenericOperator("g_relu2_par1", 1,1,1)}),
-                Sequential({GenericOperator("g_conv2_par2", 1,1,1), GenericOperator("g_relu2_par2", 1,1,1)}),
-                Sequential({GenericOperator("g_conv2_par3", 1,1,1), GenericOperator("g_relu2_par3", 1,1,1)})
+                Sequential({GenericOperator("g_conv2_par1", 1, 0, 1), GenericOperator("g_relu2_par1", 1, 0, 1)}),
+                Sequential({GenericOperator("g_conv2_par2", 1, 0, 1), GenericOperator("g_relu2_par2", 1, 0, 1)}),
+                Sequential({GenericOperator("g_conv2_par3", 1, 0, 1), GenericOperator("g_relu2_par3", 1, 0, 1)})
             }),
-            GenericOperator("g_concat", 3,3,1),
-            GenericOperator("g_conv3", 1, 1,1)
+            GenericOperator("g_concat", 3, 0, 1),
+            GenericOperator("g_conv3", 1, 0, 1)
         });
 
         x = (*g)({x, y, z});
@@ -162,12 +162,12 @@ TEST_CASE("Connector connection GraphView", "[Connector]") {
 TEST_CASE("Connector Mini-graph", "[Connector]") {
     Connector x = Connector();
     Connector y = Connector();
-    x = (*GenericOperator("Producer",0,0,1))({});
-    y = (*GenericOperator("Producer",0,0,1))({});
+    x = (*GenericOperator("Producer", 0, 0, 1))({});
+    y = (*GenericOperator("Producer", 0, 0, 1))({});
     for (int i = 0; i<5; ++i) {
-        x = (*GenericOperator("Conv",1,1,1))({x});
+        x = (*GenericOperator("Conv", 1, 0, 1))({x});
     }
-    y = (*GenericOperator("ElemWise",2,2,1))({y, x});
+    y = (*GenericOperator("ElemWise", 2, 0, 1))({y, x});
     std::shared_ptr<GraphView> g = generateGraph({y});
     g->save("TestGraph");
 }
@@ -180,16 +180,16 @@ TEST_CASE("Structural descrition - Sequential", "[GraphView]") {
     //     REQUIRE(g1->outputNodes() == std::set<std::shared_ptr<Node>>());
     // }
     SECTION("1-element Sequence") {
-        std::shared_ptr<Node> fic = GenericOperator("node1", 1,1,1);
+        std::shared_ptr<Node> fic = GenericOperator("node1", 1, 0, 1);
         std::shared_ptr<GraphView> g2 = Sequential({fic});
         REQUIRE(g2->getNodes() == std::set<std::shared_ptr<Node>>({fic}));
         REQUIRE(g2->inputNodes() == std::set<std::shared_ptr<Node>>({fic}));
         REQUIRE(g2->outputNodes() == std::set<std::shared_ptr<Node>>({fic}));
     }
     SECTION("several-elements simple Sequence") {
-        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1,1,1);
+        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1, 0, 1);
         std::shared_ptr<GraphView> g2 = Sequential({fic1, fic2, fic3});
         REQUIRE(g2->getNodes() == std::set<std::shared_ptr<Node>>({fic1, fic2, fic3}));
         REQUIRE(g2->inputNodes() == std::set<std::shared_ptr<Node>>({fic1}));
@@ -206,37 +206,37 @@ TEST_CASE("Structural description - Parallel", "[GraphView]") {
     //     REQUIRE(g1->outputNodes() == std::set<std::shared_ptr<Node>>());
     // }
     SECTION("1-element Parallel") {
-        std::shared_ptr<Node> fic = GenericOperator("node1", 1,1,1);
+        std::shared_ptr<Node> fic = GenericOperator("node1", 1, 0, 1);
         std::shared_ptr<GraphView> g2 = Parallel({fic});
         REQUIRE(g2->getNodes() == std::set<std::shared_ptr<Node>>({fic}));
         REQUIRE(g2->inputNodes() == std::set<std::shared_ptr<Node>>({fic}));
         REQUIRE(g2->outputNodes() == std::set<std::shared_ptr<Node>>({fic}));
     }
     SECTION("several-elements simple Parallel") {
-        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1,1,1);
+        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1, 0, 1);
         std::shared_ptr<GraphView> g2 = Parallel({fic1, fic2, fic3});
         REQUIRE(g2->getNodes() == std::set<std::shared_ptr<Node>>({fic1, fic2, fic3}));
         REQUIRE(g2->inputNodes() == std::set<std::shared_ptr<Node>>({fic1, fic2, fic3}));
         REQUIRE(g2->outputNodes() == std::set<std::shared_ptr<Node>>({fic1, fic2, fic3}));
     }
     SECTION("1 Graph in Parallel") {
-        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1,1,1);
+        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1, 0, 1);
         std::shared_ptr<GraphView> g2 = Parallel({Sequential({fic1, fic2, fic3})});
         REQUIRE(g2->getNodes() == std::set<std::shared_ptr<Node>>({fic1, fic2, fic3}));
         REQUIRE(g2->inputNodes() == std::set<std::shared_ptr<Node>>({fic1}));
         REQUIRE(g2->outputNodes() == std::set<std::shared_ptr<Node>>({fic3}));
     }
     SECTION("several Sequential in Parallel") {
-        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic4 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic5 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic6 = GenericOperator("node1", 1,1,1);
+        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic4 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic5 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic6 = GenericOperator("node1", 1, 0, 1);
         std::shared_ptr<GraphView> g2 = Parallel({Sequential({fic1, fic2, fic3}),Sequential({fic4, fic5, fic6})});
         REQUIRE(g2->getNodes() == std::set<std::shared_ptr<Node>>({fic1, fic2, fic3, fic4, fic5, fic6}));
         REQUIRE(g2->inputNodes() == std::set<std::shared_ptr<Node>>({fic1, fic4}));
@@ -245,13 +245,13 @@ TEST_CASE("Structural description - Parallel", "[GraphView]") {
 }
 
 TEST_CASE("Strucutral Description - Complex Graph", "[GraphView]") {
-    std::shared_ptr<Node> firstLayer = GenericOperator("first", 1,1,1);
+    std::shared_ptr<Node> firstLayer = GenericOperator("first", 1, 0, 1);
     auto g = Sequential({firstLayer,
-                    GenericOperator("l2",1,1,1),
-                    Parallel({Sequential({GenericOperator("conv1",1,1,1), GenericOperator("relu1",1,1,1)}),
-                            Sequential({GenericOperator("conv2",1,1,1), GenericOperator("relu2",1,1,1)})}),
-                    GenericOperator("concat",2,2,1),
-                    GenericOperator("lastLayer",1,1,1)});
+                    GenericOperator("l2", 1, 0, 1),
+                    Parallel({Sequential({GenericOperator("conv1",1, 0, 1), GenericOperator("relu1", 1, 0, 1)}),
+                            Sequential({GenericOperator("conv2", 1, 0, 1), GenericOperator("relu2", 1, 0, 1)})}),
+                    GenericOperator("concat", 2, 0, 1),
+                    GenericOperator("lastLayer", 1, 0, 1)});
     REQUIRE(g->getNodes().size() == 8U);
     REQUIRE(g->inputNodes() == std::set<std::shared_ptr<Node>>({firstLayer}));
 }
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index dbba1a7d698641d0858f6c3d2f15c4c7ff610261..bb726bd4d92b5674d0e19ea3138e165e1329959a 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -40,13 +40,13 @@ TEST_CASE("[core/graph] GraphView(add)") {
         g->add(GOp1);
         std::shared_ptr<Node> GOp2 = GenericOperator("Fictive", 0, 0, 1, "Gop2");
         g->add(GOp2);
-        std::shared_ptr<Node> GOp3 = GenericOperator("Fictive", 1, 1, 0, "Gop3");
+        std::shared_ptr<Node> GOp3 = GenericOperator("Fictive", 1, 0, 0, "Gop3");
         g->add(GOp3);
         std::shared_ptr<Node> GOp4 = GenericOperator("Fictive", 0, 1, 0, "Gop4");
         g->add(GOp4);
-        std::shared_ptr<Node> GOp5 = GenericOperator("Fictive", 1, 1, 1, "Gop5");
+        std::shared_ptr<Node> GOp5 = GenericOperator("Fictive", 1, 0, 1, "Gop5");
         g->add(GOp5);
-        std::shared_ptr<Node> GOp6 = GenericOperator("Fictive", 1, 2, 1, "Gop6");
+        std::shared_ptr<Node> GOp6 = GenericOperator("Fictive", 1, 1, 1, "Gop6");
         g->add(GOp6);
     }
 
@@ -75,11 +75,11 @@ TEST_CASE("[core/graph] GraphView(add)") {
     SECTION("another GraphView") {
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph-1");
         std::shared_ptr<GraphView> g2 = std::make_shared<GraphView>("TestGraph-2");
-        auto conv = GenericOperator("Conv", 1, 1, 1, "c");
-        auto conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
-        auto conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
-        auto conv3 = GenericOperator("Conv", 1, 1, 1, "c3");
-        auto conv4 = GenericOperator("Conv", 1, 1, 1, "c4");
+        auto conv = GenericOperator("Conv", 1, 0, 1, "c");
+        auto conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+        auto conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+        auto conv3 = GenericOperator("Conv", 1, 0, 1, "c3");
+        auto conv4 = GenericOperator("Conv", 1, 0, 1, "c4");
         conv->addChild(conv1);
         conv1->addChild(conv2);
         conv2->addChild(conv3);
@@ -96,13 +96,13 @@ TEST_CASE("[core/graph] GraphView(add)") {
 
 TEST_CASE("[core/graph] GraphView(addChild)") {
     std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-    std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-    std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
-    std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
-    std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 1, 1, "c3");
-    std::shared_ptr<Node> conv3_5 = GenericOperator("Conv", 1, 1, 1, "c3.5");
-    std::shared_ptr<Node> conv4 = GenericOperator("Conv", 1, 1, 1, "c4");
-    std::shared_ptr<Node> conv5 = GenericOperator("Conv", 1, 1, 1, "c5");
+    std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+    std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+    std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+    std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 0, 1, "c3");
+    std::shared_ptr<Node> conv3_5 = GenericOperator("Conv", 1, 0, 1, "c3.5");
+    std::shared_ptr<Node> conv4 = GenericOperator("Conv", 1, 0, 1, "c4");
+    std::shared_ptr<Node> conv5 = GenericOperator("Conv", 1, 0, 1, "c5");
 
     g1->add(conv);
     SECTION("add(node)") {
@@ -177,12 +177,12 @@ TEST_CASE("[core/graph] GraphView(outputs)") {
 
 TEST_CASE("[core/graph] GraphView(save)") {
     std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-    std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-    std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
-    std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
-    std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 1, 1, "c3");
-    std::shared_ptr<Node> conv4 = GenericOperator("Conv", 1, 1, 1, "c4");
-    std::shared_ptr<Node> conv5 = GenericOperator("Conv", 1, 1, 1, "c5");
+    std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+    std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+    std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+    std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 0, 1, "c3");
+    std::shared_ptr<Node> conv4 = GenericOperator("Conv", 1, 0, 1, "c4");
+    std::shared_ptr<Node> conv5 = GenericOperator("Conv", 1, 0, 1, "c5");
 
     g1->add(conv);
     g1->addChild(conv1, "c");
@@ -197,9 +197,9 @@ TEST_CASE("[core/graph] GraphView(save)") {
 
 TEST_CASE("[core/graph] GraphView(resetConnections)") {
     SECTION("disconnect data iput") {
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 3, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 2, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
         std::shared_ptr<Node> prod1 = GenericOperator("Prod", 0, 0, 1, "p1");
         std::shared_ptr<Node> prod2 = GenericOperator("Prod", 0, 0, 1, "p2");
         conv->addChild(conv1);
@@ -210,7 +210,7 @@ TEST_CASE("[core/graph] GraphView(resetConnections)") {
         conv1->resetConnections(false);
 
         REQUIRE(conv->output(0).size() == 0);
-        for (std::size_t i = 0; i < conv1->nbDataInputs(); ++i) {
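+        // nbData() counts only the data inputs; the parameter inputs wired to
+        // prod1/prod2 are expected to survive resetConnections(false).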
+        for (std::size_t i = 0; i < conv1->nbData(); ++i) {
             REQUIRE((conv1->input(i) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
         }
         REQUIRE((conv1->input(1) == std::pair<std::shared_ptr<Node>, IOIndex_t>(prod1, 0)));
@@ -222,9 +222,9 @@ TEST_CASE("[core/graph] GraphView(resetConnections)") {
     }
 
     SECTION("disconnect data iput + learnable parameters") {
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 3, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 2, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
         std::shared_ptr<Node> prod1 = GenericOperator("Prod", 0, 0, 1, "p1");
         std::shared_ptr<Node> prod2 = GenericOperator("Prod", 0, 0, 1, "p2");
         conv->addChild(conv1);
@@ -245,7 +245,7 @@ TEST_CASE("[core/graph] GraphView(resetConnections)") {
     }
 }
 
-TEST_CASE("Graph Forward dims", "[GraphView]") {
+TEST_CASE("[core/graph] GraphView(forwardDims)", "[GraphView][forwardDims]") {
     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
     auto conv1 = Conv(3, 32, {3, 3}, "conv1");
     auto conv2 = Conv(32, 64, {3, 3}, "conv2");
@@ -259,21 +259,21 @@ TEST_CASE("Graph Forward dims", "[GraphView]") {
     g->forwardDims();
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) == conv1->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getInput(1) == g->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getInput(2) == g->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
-        REQUIRE(conv2->getOperator()->getInput(1) == g->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getInput(2) == g->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
-        REQUIRE(conv3->getOperator()->getInput(1) == g->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(conv3->getOperator()->getInput(2) == g->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) == conv1->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(1) == g->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(2) == g->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == conv2->getOperator()->getRawInput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(1) == g->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(2) == g->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawOutput(0) == conv3->getOperator()->getRawInput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(1) == g->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(2) == g->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 
     SECTION("Check forwarded dims") {
-        REQUIRE(std::static_pointer_cast<Tensor>(conv1->getOperator()->getOutput(0))
+        REQUIRE(std::static_pointer_cast<Tensor>(conv1->getOperator()->getRawOutput(0))
                     ->dims() == std::vector<DimSize_t>({16, 32, 222, 222}));
-        REQUIRE(std::static_pointer_cast<Tensor>(conv2->getOperator()->getOutput(0))
+        REQUIRE(std::static_pointer_cast<Tensor>(conv2->getOperator()->getRawOutput(0))
                     ->dims() == std::vector<DimSize_t>({16, 64, 220, 220}));
     }
 }
@@ -286,10 +286,10 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
         auto otherInput = GenericOperator("Producer", 0, 0, 1, "other_input");
         auto matmulWeight = GenericOperator("Producer", 0, 0, 1, "matmul_w");
         auto addBias = GenericOperator("Producer", 0, 0, 1, "add_b");
-        auto other1 = GenericOperator("Other", 1, 1, 1, "other1");
-        auto other2 = GenericOperator("Other", 1, 1, 1, "other2");
-        auto matmul = GenericOperator("MatMul", 1, 2, 1, "matmul");
-        auto add = GenericOperator("Add", 1, 2, 1, "add");
+        auto other1 = GenericOperator("Other", 1, 0, 1, "other1");
+        auto other2 = GenericOperator("Other", 1, 0, 1, "other2");
+        auto matmul = GenericOperator("MatMul", 1, 1, 1, "matmul");
+        auto add = GenericOperator("Add", 1, 1, 1, "add");
         otherInput->addChild(other1);
         other1->addChild(matmul);
         matmul->addChild(add);
@@ -303,7 +303,7 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
         std::set<std::shared_ptr<Node>> nodeToReplace = std::set<std::shared_ptr<Node>>({matmulWeight, addBias, matmul, add});
 
         // create replacing graph
-        std::shared_ptr<Node> myFC = GenericOperator("FC", 1, 3, 1, "fc");
+        std::shared_ptr<Node> myFC = GenericOperator("FC", 1, 2, 1, "fc");
         auto newMatmulWeight = matmulWeight->cloneSharedOperators();
         newMatmulWeight->addChild(myFC, 0, 1);
         auto newAddBias = addBias->cloneSharedOperators();
@@ -319,9 +319,9 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
     SECTION("replace with nothing") {
         std::shared_ptr<GraphView> g = std::make_shared<GraphView>("TestGraph");
         auto r1 = GenericOperator("relu", 0, 0, 1);
-        auto r2 = GenericOperator("relu", 1, 1, 1);
-        auto r3 = GenericOperator("relu", 1, 1, 1);
-        auto r4 = GenericOperator("relu", 1, 1, 0);
+        auto r2 = GenericOperator("relu", 1, 0, 1);
+        auto r3 = GenericOperator("relu", 1, 0, 1);
+        auto r4 = GenericOperator("relu", 1, 0, 0);
         r1->addChild(r2);
         r2->addChild(r3);
         r3->addChild(r4);
@@ -337,20 +337,20 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
     SECTION("replace for tiling") {
         std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
         auto otherInput = GenericOperator("Producer", 0, 0, 1, "other_input");
-        auto other1 = GenericOperator("Other", 1, 1, 1, "other1");
-        auto myConv = GenericOperator("Conv", 1, 1, 1, "myConv");
-        auto other2 = GenericOperator("Other", 1, 1, 1, "other2");
+        auto other1 = GenericOperator("Other", 1, 0, 1, "other1");
+        auto myConv = GenericOperator("Conv", 1, 0, 1, "myConv");
+        auto other2 = GenericOperator("Other", 1, 0, 1, "other2");
         otherInput->addChild(other1);
         other1->addChild(myConv);
         myConv->addChild(other2);
         g->add({other1, myConv, other2});
 
         // create tiled Conv
-        auto conv1 =  GenericOperator("Conv", 1, 1, 1, "myConv1");
-        auto conv2 =  GenericOperator("Conv", 1, 1, 1, "myConv2");
-        auto conv3 =  GenericOperator("Conv", 1, 1, 1, "myConv3");
-        auto conv4 =  GenericOperator("Conv", 1, 1, 1, "myConv4");
-        auto concat = GenericOperator("Concat", 4, 4, 1, "myConcat");
+        auto conv1 =  GenericOperator("Conv", 1, 0, 1, "myConv1");
+        auto conv2 =  GenericOperator("Conv", 1, 0, 1, "myConv2");
+        auto conv3 =  GenericOperator("Conv", 1, 0, 1, "myConv3");
+        auto conv4 =  GenericOperator("Conv", 1, 0, 1, "myConv4");
+        auto concat = GenericOperator("Concat", 4, 0, 1, "myConcat");
         conv1->addChild(concat);
         conv2->addChild(concat);
         conv3->addChild(concat);
@@ -368,12 +368,12 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
     SECTION("Change every Nodes in a GraphView") {
         auto matmulWeight0 = GenericOperator("Producer", 0, 0, 1, "matmul_w0");
         auto addBias0 = GenericOperator("Producer", 0, 0, 1, "add_b0");
-        auto matmul0 = GenericOperator("MatMul", 1, 2, 1, "matmul0");
-        auto add0 = GenericOperator("Add", 1, 2, 1, "add0");
+        auto matmul0 = GenericOperator("MatMul", 1, 1, 1, "matmul0");
+        auto add0 = GenericOperator("Add", 1, 1, 1, "add0");
         auto matmulWeight1 = GenericOperator("Producer", 0, 0, 1, "matmul_w1");
         auto addBias1 = GenericOperator("Producer", 0, 0, 1, "add_b1");
-        auto matmul1 = GenericOperator("MatMul", 1, 2, 1, "matmul1");
-        auto add1 = GenericOperator("Add", 1, 2, 1, "add1");
+        auto matmul1 = GenericOperator("MatMul", 1, 1, 1, "matmul1");
+        auto add1 = GenericOperator("Add", 1, 1, 1, "add1");
 
         matmulWeight0 -> addChild(matmul0, 0, 1);
         addBias0 -> addChild(add0, 0, 1);
@@ -389,8 +389,8 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
         auto newAddBias0 = addBias0->cloneSharedOperators();
         auto newMatmulWeight1 = matmulWeight1->cloneSharedOperators();
         auto newAddBias1 = addBias1->cloneSharedOperators();
-        auto fc0 = GenericOperator("FC", 1, 3, 1, "fc0");
-        auto fc1 = GenericOperator("FC", 1, 3, 1, "fc1");
+        auto fc0 = GenericOperator("FC", 1, 2, 1, "fc0");
+        auto fc1 = GenericOperator("FC", 1, 2, 1, "fc1");
 
         newMatmulWeight0 -> addChild(fc0, 0, 1);
         newAddBias0 -> addChild(fc0, 0, 2);
@@ -417,15 +417,15 @@ TEST_CASE("[GraphView] clone") {
     g1->save("clone_g1");
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) == conv1->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getInput(1) == g1->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getInput(2) == g1->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
-        REQUIRE(conv2->getOperator()->getInput(1) == g1->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getInput(2) == g1->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
-        REQUIRE(conv3->getOperator()->getInput(1) == g1->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(conv3->getOperator()->getInput(2) == g1->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) == conv1->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(1) == g1->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(2) == g1->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == conv2->getOperator()->getRawInput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(1) == g1->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(2) == g1->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawOutput(0) == conv3->getOperator()->getRawInput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(1) == g1->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(2) == g1->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 
     auto g2 = g1->clone();
@@ -461,27 +461,27 @@ TEST_CASE("[GraphView] clone") {
     }
 
     SECTION("Check new connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) != g2->getNode("conv1")->getOperator()->getInput(0));
-        REQUIRE(g1->getNode("conv1")->getOperator()->getInput(1) != g2->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv1")->getOperator()->getInput(2) != g2->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv1")->getOperator()->getOutput(0) != g2->getNode("conv2")->getOperator()->getInput(0));
-        REQUIRE(g1->getNode("conv2")->getOperator()->getInput(1) != g2->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv2")->getOperator()->getInput(2) != g2->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv2")->getOperator()->getOutput(0) != g2->getNode("conv3")->getOperator()->getInput(0));
-        REQUIRE(g1->getNode("conv3")->getOperator()->getInput(1) != g2->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv3")->getOperator()->getInput(2) != g2->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) != g2->getNode("conv1")->getOperator()->getRawInput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getRawInput(1) != g2->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getRawInput(2) != g2->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getRawOutput(0) != g2->getNode("conv2")->getOperator()->getRawInput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getRawInput(1) != g2->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getRawInput(2) != g2->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getRawOutput(0) != g2->getNode("conv3")->getOperator()->getRawInput(0));
+        REQUIRE(g1->getNode("conv3")->getOperator()->getRawInput(1) != g2->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv3")->getOperator()->getRawInput(2) != g2->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider2->getOperator()->getOutput(0) == g2->getNode("conv1")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(1) == g2->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(2) == g2->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(1) == g2->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(2) == g2->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(1) == g2->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(2) == g2->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider2->getOperator()->getRawOutput(0) == g2->getNode("conv1")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawInput(1) == g2->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawInput(2) == g2->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawInput(1) == g2->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawInput(2) == g2->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getRawInput(1) == g2->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getRawInput(2) == g2->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 }
 
@@ -498,15 +498,15 @@ TEST_CASE("[GraphView] cloneSharedProducers") {
     g1->save("cloneSharedProducers_g1");
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) == conv1->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getInput(1) == g1->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getInput(2) == g1->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
-        REQUIRE(conv2->getOperator()->getInput(1) == g1->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getInput(2) == g1->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
-        REQUIRE(conv3->getOperator()->getInput(1) == g1->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(conv3->getOperator()->getInput(2) == g1->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) == conv1->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(1) == g1->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(2) == g1->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == conv2->getOperator()->getRawInput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(1) == g1->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(2) == g1->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawOutput(0) == conv3->getOperator()->getRawInput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(1) == g1->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(2) == g1->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 
     auto g2 = g1->cloneSharedProducers();
@@ -542,27 +542,27 @@ TEST_CASE("[GraphView] cloneSharedProducers") {
     }
 
     SECTION("Check new connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) != g2->getNode("conv1")->getOperator()->getInput(0));
-        REQUIRE(g1->getNode("conv1")->getOperator()->getInput(1) == g2->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv1")->getOperator()->getInput(2) == g2->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv1")->getOperator()->getOutput(0) != g2->getNode("conv2")->getOperator()->getInput(0));
-        REQUIRE(g1->getNode("conv2")->getOperator()->getInput(1) == g2->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv2")->getOperator()->getInput(2) == g2->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv2")->getOperator()->getOutput(0) != g2->getNode("conv3")->getOperator()->getInput(0));
-        REQUIRE(g1->getNode("conv3")->getOperator()->getInput(1) == g2->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv3")->getOperator()->getInput(2) == g2->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) != g2->getNode("conv1")->getOperator()->getRawInput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getRawInput(1) == g2->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getRawInput(2) == g2->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getRawOutput(0) != g2->getNode("conv2")->getOperator()->getRawInput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getRawInput(1) == g2->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getRawInput(2) == g2->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getRawOutput(0) != g2->getNode("conv3")->getOperator()->getRawInput(0));
+        REQUIRE(g1->getNode("conv3")->getOperator()->getRawInput(1) == g2->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv3")->getOperator()->getRawInput(2) == g2->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider2->getOperator()->getOutput(0) == g2->getNode("conv1")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(1) == g2->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(2) == g2->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(1) == g2->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(2) == g2->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(1) == g2->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(2) == g2->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider2->getOperator()->getRawOutput(0) == g2->getNode("conv1")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawInput(1) == g2->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawInput(2) == g2->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawInput(1) == g2->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawInput(2) == g2->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getRawInput(1) == g2->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getRawInput(2) == g2->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 }
 
@@ -579,15 +579,15 @@ TEST_CASE("[GraphView] cloneSharedOperators") {
     g1->save("cloneSharedOperators_g1");
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) == conv1->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getInput(1) == g1->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getInput(2) == g1->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
-        REQUIRE(conv2->getOperator()->getInput(1) == g1->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getInput(2) == g1->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
-        REQUIRE(conv3->getOperator()->getInput(1) == g1->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(conv3->getOperator()->getInput(2) == g1->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) == conv1->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(1) == g1->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(2) == g1->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == conv2->getOperator()->getRawInput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(1) == g1->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(2) == g1->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawOutput(0) == conv3->getOperator()->getRawInput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(1) == g1->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(2) == g1->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 
     auto g2 = g1->cloneSharedOperators();
@@ -619,15 +619,15 @@ TEST_CASE("[GraphView] cloneSharedOperators") {
     }
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) == g2->getNode("conv1")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(1) == g2->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(2) == g2->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(1) == g2->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(2) == g2->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(1) == g2->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(2) == g2->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) == g2->getNode("conv1")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawInput(1) == g2->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawInput(2) == g2->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawInput(1) == g2->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawInput(2) == g2->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getRawInput(1) == g2->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getRawInput(2) == g2->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 }
 
@@ -653,10 +653,10 @@ TEST_CASE("[core/graph] GraphView(insertParent)") {
         std::set<NodePtr> expectedConv1Children = {conv3, newConv};
         std::set<NodePtr> expectedNewConvChildren = {conv2};
 
-        REQUIRE(conv1->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) == newConv->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) != conv2->getOperator()->getInput(0));
-        REQUIRE(newConv->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == conv3->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == newConv->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) != conv2->getOperator()->getRawInput(0));
+        REQUIRE(newConv->getOperator()->getRawOutput(0) == conv2->getOperator()->getRawInput(0));
         REQUIRE((newConv->getChildren()) == expectedNewConvChildren);
         REQUIRE((conv1->getChildren()) == expectedConv1Children);
 
@@ -665,11 +665,11 @@ TEST_CASE("[core/graph] GraphView(insertParent)") {
         std::set<NodePtr> expectedConv1Children2 = {newConv};
         std::set<NodePtr> expectedNewConvChildren2 = {conv2, conv3};
 
-        REQUIRE(conv1->getOperator()->getOutput(0) != conv3->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) == newConv->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) != conv2->getOperator()->getInput(0));
-        REQUIRE(newConv->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
-        REQUIRE(newConv->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) != conv3->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == newConv->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) != conv2->getOperator()->getRawInput(0));
+        REQUIRE(newConv->getOperator()->getRawOutput(0) == conv2->getOperator()->getRawInput(0));
+        REQUIRE(newConv->getOperator()->getRawOutput(0) == conv3->getOperator()->getRawInput(0));
         REQUIRE((newConv->getChildren()) == expectedNewConvChildren2);
         REQUIRE((conv1->getChildren()) == expectedConv1Children2);
 
diff --git a/unit_tests/graph/Test_get.cpp b/unit_tests/graph/Test_get.cpp
index afd1f42ee9f5d6cd668dd5cab82172cdc298e149..7b396f22bbdedf3ae54b1e1cb78644de6e4a8056 100644
--- a/unit_tests/graph/Test_get.cpp
+++ b/unit_tests/graph/Test_get.cpp
@@ -23,15 +23,15 @@
 using namespace Aidge;
 TEST_CASE("get Delta") {
 
-    
+
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
-        std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 1, 1, "c3");
-        std::shared_ptr<Node> conv3_5 = GenericOperator("Conv", 1, 1, 1, "c3.5");
-        std::shared_ptr<Node> conv4 = GenericOperator("Conv", 1, 1, 1, "c4");
-        std::shared_ptr<Node> conv5 = GenericOperator("Conv", 1, 1, 1, "c5");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+        std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 0, 1, "c3");
+        std::shared_ptr<Node> conv3_5 = GenericOperator("Conv", 1, 0, 1, "c3.5");
+        std::shared_ptr<Node> conv4 = GenericOperator("Conv", 1, 0, 1, "c4");
+        std::shared_ptr<Node> conv5 = GenericOperator("Conv", 1, 0, 1, "c5");
 
         g1->add(conv);
         g1->addChild(conv1, "c");
diff --git a/unit_tests/graphRegex/Test_FsmMatch.cpp b/unit_tests/graphRegex/Test_FsmMatch.cpp
index 4b0a009a4b142f56334b133919025e5e83b7435a..008251feaac9d2dbe21aae3dfc7ebaa69e828ae7 100644
--- a/unit_tests/graphRegex/Test_FsmMatch.cpp
+++ b/unit_tests/graphRegex/Test_FsmMatch.cpp
@@ -34,8 +34,8 @@ TEST_CASE("FsmMatch") {
 
 
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
 
         g1->add(conv);
         g1->addChild(conv1, "c");
@@ -55,9 +55,9 @@ TEST_CASE("FsmMatch") {
     SECTION("2 branche graph"){
 
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Fc", 1, 1, 1, "c2");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Fc", 1, 0, 1, "c2");
 
         g1->add(conv);
         g1->addChild(conv1,conv);
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
index 19859fd16345ff7f8d85b24e43d23c02f9ec22ee..924aac79ea8492f6ea0f2cd4d93676876c5a8331 100644
--- a/unit_tests/graphRegex/Test_GraphRegex.cpp
+++ b/unit_tests/graphRegex/Test_GraphRegex.cpp
@@ -9,7 +9,7 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Recipies.hpp"
+#include "aidge/recipies/Recipies.hpp"
 
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/GenericOperator.hpp"
@@ -25,10 +25,10 @@ TEST_CASE("GraphRegexUser") {
         std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
 
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-        std::shared_ptr<Node> fc = GenericOperator("FC", 1, 1, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
-        std::shared_ptr<Node> fc2 = GenericOperator("FC", 1, 1, 1, "c3");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> fc = GenericOperator("FC", 1, 0, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+        std::shared_ptr<Node> fc2 = GenericOperator("FC", 1, 0, 1, "c3");
 
         g1->add(conv);
         g1->addChild(fc, "c");
@@ -38,7 +38,7 @@ TEST_CASE("GraphRegexUser") {
 
         sut->setKeyFromGraph(g1);
         sut->addQuery(query);
-        
+
         for (const auto& solution : sut->match(g1)) {
 
             REQUIRE(solution->getQuery() == query);
@@ -52,17 +52,17 @@ TEST_CASE("GraphRegexUser") {
             }
         }
         //REQUIRE( sut->match(g1)[1]->getAll() == std::set<NodePtr>{conv,fc});
-        
+
     }
 
    SECTION("2 query") {
         std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
 
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
-        std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 1, 1, "c3");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+        std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 0, 1, "c3");
 
         g1->add(conv);
         g1->addChild(conv1, "c");
@@ -84,7 +84,7 @@ TEST_CASE("GraphRegexUser") {
         for (const auto& solution : sut->match(g1)) {
             REQUIRE(solution->getQuery() == query);
         }
-        
+
     }
 
 
@@ -94,10 +94,10 @@ TEST_CASE("GraphRegexUser") {
         std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
 
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
-        std::shared_ptr<Node> conv3 = GenericOperator("FC", 1, 1, 1, "c3");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+        std::shared_ptr<Node> conv3 = GenericOperator("FC", 1, 0, 1, "c3");
 
         g1->add(conv);
         g1->addChild(conv1, "c");
@@ -119,17 +119,17 @@ TEST_CASE("GraphRegexUser") {
         for (const auto& solution : sut->match(g1)) {
             REQUIRE(solution->getQuery() == query);
         }
-        
+
     }
 
 
     SECTION("Applied Recipes"){
 
       // generate the original GraphView
-        auto matmul0 = MatMul(5, "matmul0");
-        auto add0 = Add<2>("add0");
-        auto matmul1 = MatMul(5, "matmul1");
-        auto add1 = Add<2>("add1");
+        auto matmul0 = MatMul(5, 5, "matmul0");
+        auto add0 = Add(2, "add0");
+        auto matmul1 = MatMul(5, 5, "matmul1");
+        auto add1 = Add(2, "add1");
 
         auto b0 = Producer({5}, "B0");
         auto w0 = Producer({5, 5}, "W0");
@@ -149,8 +149,8 @@ TEST_CASE("GraphRegexUser") {
         matmul1->addChild(add1, 0, 0);
         b1->addChild(add1, 0, 1);
 
-        auto fc = GenericOperator("FC", 1, 1, 1, "c");
-        auto fl = GenericOperator("Flatten", 1, 1, 1, "c");
+        auto fc = GenericOperator("FC", 1, 0, 1, "c");
+        auto fl = GenericOperator("Flatten", 1, 0, 1, "c");
 
 
         auto g = std::make_shared<GraphView>();
diff --git a/unit_tests/operator/Test_ConvDepthWise_Op.cpp b/unit_tests/operator/Test_ConvDepthWise_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ef68c439d3a3cdf95b7122c1b41bc9fc97311f2d
--- /dev/null
+++ b/unit_tests/operator/Test_ConvDepthWise_Op.cpp
@@ -0,0 +1,68 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+// TEST_CASE("[core/operator] ConvDepthWise_Op(computeReceptiveField)", "[Operator][computeReceptiveFiled][ConvDepthWise]") {
+//     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
+//     auto conv1 = ConvDepthWise({5, 5}, "conv1");         // output dims: {16, 3, 220, 220}
+//     auto conv2 = ConvDepthWise({3, 3}, "conv2");         // output dims: {16, 3, 218, 218}
+//     auto conv3 = ConvDepthWise({2, 2}, "conv3", {2,2});  // output dims: {16, 3, 109, 109}
+//     auto conv4 = ConvDepthWise({1, 1}, "conv4");         // output dims: {16, 3, 109, 109}
+
+//     auto g = std::make_shared<GraphView>("TestGraph");
+
+//     dataProvider->addChild(conv1, 0);
+//     g->add(conv1);
+//     g->addChild(conv2, conv1, 0);
+//     g->addChild(conv3, conv2, 0);
+//     g->addChild(conv4, conv3, 0);
+
+//     g->forwardDims();
+
+//     SECTION("Check individual receptive fields") {
+//         auto res1 = conv1->getOperator()->computeReceptiveField(0, {16,3,10,10});
+//         auto res2 = conv2->getOperator()->computeReceptiveField(conv2->getOperator()->output(0).getIdx({3,1,100,28}), {4,2,30,40});
+//         auto res3 = conv3->getOperator()->computeReceptiveField(0, {1,1,109,109});
+//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->input(0).getIdx({5,0,108,108}), {10,1,1,1});
+
+//         REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
+//         REQUIRE(((res2[0].first == conv2->getOperator()->input(0).getIdx({3,1,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 2, 32, 42}))));
+//         REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 1, 218, 218}))));
+//         REQUIRE(((res4[0].first == conv4->getOperator()->input(0).getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 1, 1, 1}))));
+//     }
+
+//     SECTION("Check receptive field propagation") {
+//         // input:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
+//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->input(0).getIdx({5,0,50,50}), {1,1,1,1});
+//         // conv4 RF:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
+//         auto res3 = conv3->getOperator()->computeReceptiveField(res4[0].first, res4[0].second);
+//         // conv3 RF:  first-{5, 0, 100, 100} dims-{1, 1, 2, 2}
+//         auto res2 = conv2->getOperator()->computeReceptiveField(res3[0].first, res3[0].second);
+//         // conv2 RF:  first-{5, 0, 100, 100} dims-{1, 1, 4, 4}
+//         auto res1 = conv1->getOperator()->computeReceptiveField(res2[0].first, res2[0].second);
+//         // conv1 RF:  first-{5, 0, 100, 100} dims-{1, 1, 8, 8}
+
+//         REQUIRE(((res1[0].first == conv1->getOperator()->input(0).getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 1, 8, 8}))));
+//     }
+// }
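+
+// Minimal sketch of the computeReceptiveField contract assumed by the
+// commented-out test above: given a first flattened output index and an output
+// extent, it returns, per input, the first flattened input index and the input
+// extent needed to produce that output patch. For conv1 (5x5 kernel, stride 1)
+// an output patch {16, 3, 10, 10} starting at index 0 maps back to an input
+// patch {16, 3, 14, 14} starting at index 0:
+//
+//     auto rf = conv1->getOperator()->computeReceptiveField(0, {16, 3, 10, 10});
+//     // rf[0].first  == 0
+//     // rf[0].second == std::vector<DimSize_t>({16, 3, 14, 14})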
+}  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/operator/Test_Conv_Op.cpp b/unit_tests/operator/Test_Conv_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ac667ec5af69dccc3e421530a17aca88018aab09
--- /dev/null
+++ b/unit_tests/operator/Test_Conv_Op.cpp
@@ -0,0 +1,79 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+// TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeReceptiveField][Conv]") {
+//     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
+//     auto conv1 = Conv(3, 32, {5, 5}, "conv1");          // output dims: {16, 32, 220, 220}
+//     auto conv2 = Conv(32, 64, {3, 3}, "conv2");         // output dims: {16, 64, 218, 218}
+//     auto conv3 = Conv(64, 10, {2, 2}, "conv3", {2,2});  // output dims: {16, 10, 109, 109}
+//     auto conv4 = Conv(10, 10, {1, 1}, "conv4");         // output dims: {16, 10, 109, 109}
+
+//     auto g = std::make_shared<GraphView>("TestGraph");
+
+//     dataProvider->addChild(conv1, 0);
+//     g->add(conv1);
+//     g->addChild(conv2, conv1, 0);
+//     g->addChild(conv3, conv2, 0);
+//     g->addChild(conv4, conv3, 0);
+
+//     g->forwardDims();
+
+//     SECTION("Check individual receptive fields") {
+//         auto res1 = conv1->getOperator()->computeReceptiveField(0, {16,32,10,10});
+//         auto res2 = conv2->getOperator()->computeReceptiveField(conv2->getOperator()->output(0).getIdx({3,20,100,28}), {4,20,30,40});
+//         auto res3 = conv3->getOperator()->computeReceptiveField(0, {1,1,109,109});
+//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->output(0).getIdx({5,0,108,108}), {10,10,1,1});
+
+//         REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
+//         REQUIRE(((res2[0].first == conv2->getOperator()->input(0).getIdx({3,0,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 32, 32, 42}))));
+//         REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 64, 218, 218}))));
+//         REQUIRE(((res4[0].first == conv4->getOperator()->input(0).getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 10, 1, 1}))));
+//     }
+
+//     SECTION("Check receptive field propagation") {
+//         // input:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
+//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->output(0).getIdx({5,0,50,50}), {1,1,1,1});
+//         // conv4 RF:  first-{5, 0, 50, 50}  dims-{1, 10, 1, 1}
+//         auto res3 = conv3->getOperator()->computeReceptiveField(res4[0].first, res4[0].second);
+//         // conv3 RF:  first-{5, 0, 100, 100} dims-{1, 64, 2, 2}
+//         auto res2 = conv2->getOperator()->computeReceptiveField(res3[0].first, res3[0].second);
+//         // conv2 RF:  first-{5, 0, 100, 100} dims-{1, 32, 4, 4}
+//         auto res1 = conv1->getOperator()->computeReceptiveField(res2[0].first, res2[0].second);
+//         // conv1 RF:  first-{5, 0, 100, 100} dims-{1, 3, 8, 8}
+
+//         REQUIRE(((res1[0].first == conv1->getOperator()->input(0).getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 3, 8, 8}))));
+
+
+//         // std::cout << "conv1: {";
+//         // std::cout << conv1->getOperator()->input(0).getCoord(res1[0].first)[0] << ", "
+//         //           << conv1->getOperator()->input(0).getCoord(res1[0].first)[1] << ", "
+//         //           << conv1->getOperator()->input(0).getCoord(res1[0].first)[2] << ", "
+//         //           << conv1->getOperator()->input(0).getCoord(res1[0].first)[3] << "} - {";
+//         // std::cout << res1[0].second[0] << ", "
+//         //           << res1[0].second[1] << ", "
+//         //           << res1[0].second[2] << ", "
+//         //           << res1[0].second[3] << "}" << std::endl;
+//     }
+// }
+}  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index c090427914390369452ce3259f47830f01ab1754..ef0c4e7f72d3148eccb97896a3d6e3d5ae5ad6e1 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -31,21 +31,22 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator]") {
         REQUIRE(microGraph->outputNodes().size() == 1);
         REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv");
         REQUIRE(op->nbInputs() == 3);
-        REQUIRE(op->nbDataInputs() == 1);
+        REQUIRE(op->nbData() == 1);
         REQUIRE(op->nbOutputs() == 1);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>();
         myInput->resize({2,3,5,5});
-        op->getOperator()->associateInput(0,myInput);
-        op->getOperator()->computeOutputDims();
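+        // computeOutputDims() and outputDimsForwarded() are defined on
+        // OperatorTensor, so the generic Operator is downcast first.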
+        std::shared_ptr<OperatorTensor> opTensor = std::static_pointer_cast<OperatorTensor>(op->getOperator());
+        opTensor->associateInput(0,myInput);
+        opTensor->computeOutputDims();
 
-        REQUIRE(op->getOperator()->outputDimsForwarded());
-        REQUIRE(op->getOperator()->getOutput(0)->dims() == std::vector<size_t>({2,3,5,5}));
-        REQUIRE(op->getOperator()->getInput(0) == myInput);
+        REQUIRE(opTensor->outputDimsForwarded());
+        REQUIRE(std::static_pointer_cast<Tensor>(opTensor->getRawOutput(0))->dims() == std::vector<size_t>({2,3,5,5}));
+        REQUIRE(std::static_pointer_cast<Tensor>(opTensor->getRawInput(0)) == myInput);
         // Order not guaranteed by the GraphView
-        //REQUIRE((*microGraph->inputNodes().begin())->getOperator()->getInput(0) == myInput);
-        REQUIRE(op->getOperator()->getOutput(0) == (*microGraph->outputNodes().begin())->getOperator()->getOutput(0));
-        
+        //REQUIRE((*microGraph->inputNodes().begin())->getOperator()->getRawInput(0) == myInput);
+        REQUIRE(opTensor->getRawOutput(0) == (*microGraph->outputNodes().begin())->getOperator()->getRawOutput(0));
+
         //op->getOperator()->updateConsummerProducer();  // requires implementation
         //auto microGraphScheduler = std::dynamic_pointer_cast<MetaOperator_Op>(op->getOperator())->getMicroGraphScheduler();
         //REQUIRE(microGraphScheduler->getStaticScheduling().size() == 2);
diff --git a/unit_tests/operator/Test_Operator.cpp b/unit_tests/operator/Test_Operator.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a050bbc4021b0c70a0d8faf6478eb2bd13ebdb58
--- /dev/null
+++ b/unit_tests/operator/Test_Operator.cpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/Producer.hpp"
+
+namespace Aidge {
+// TEST_CASE("[core/operator] Operator(computeReceptiveField)", "[Operator][computeReceptiveFiled]") {
+//     auto dataProvider1 = Producer({16, 3, 224, 224}, "dataProvider1");
+//     auto dataProvider2 = Producer({16, 3, 224, 224}, "dataProvider2");
+//     auto gen1 = Add(2);
+//     auto gen2 = ReLU();
+
+//     auto g = std::make_shared<GraphView>("TestGraph");
+
+//     dataProvider1->addChild(gen1, 0);
+//     dataProvider2->addChild(gen1, 0);
+//     g->add(gen1);
+//     g->addChild(gen2, gen1, 0);
+
+//     g->forwardDims();
+
+//     SECTION("Check individual receptive fields") {
+//         auto res1 = gen1->getOperator()->computeReceptiveField(0, {16,3,10,10});
+//         auto res2 = gen2->getOperator()->computeReceptiveField(gen2->getOperator()->output(0).getIdx({3,2,100,28}), {1,1,30,40});
+
+//         REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 10, 10}))));
+//         REQUIRE(((res1[1].first == 0) && (res1[1].second == std::vector<DimSize_t>({16, 3, 10, 10}))));
+//         REQUIRE(((res2[0].first == gen2->getOperator()->input(0).getIdx({3,2,100,28})) && (res2[0].second == std::vector<DimSize_t>({1, 1, 30, 40}))));
+//     }
+// }
+}  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/recipies/Test_FuseBatchNorm.cpp b/unit_tests/recipies/Test_FuseBatchNorm.cpp
index 13facefd2979a9b0ca4409ead6972013cb1bc0a8..5d9c02d5582e3c56aba9d374d7087946c7d94bde 100644
--- a/unit_tests/recipies/Test_FuseBatchNorm.cpp
+++ b/unit_tests/recipies/Test_FuseBatchNorm.cpp
@@ -42,7 +42,7 @@ namespace Aidge {
             BatchNorm<2>()
         });
 
-        g1->setDatatype(DataType::Float32);
+        g1->setDataType(DataType::Float32);
         g1->setBackend("cpu");
         g1->forwardDims();
 
@@ -59,12 +59,12 @@ namespace Aidge {
         SECTION("Check resulting nodes") {
             // REQUIRE(g1->getNodes().size() == 2);
             // REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
-            // REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
+            // REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
             // REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
-            // REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
+            // REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
             // REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
         }
     }
-    
+
 }
 */
\ No newline at end of file
diff --git a/unit_tests/recipies/Test_FuseMulAdd.cpp b/unit_tests/recipies/Test_FuseMulAdd.cpp
index b99de66d3e23377c13ed86526f6c1a318a00e4e8..0c65db98917e33a11f4b7bac678b271b1a10fb94 100644
--- a/unit_tests/recipies/Test_FuseMulAdd.cpp
+++ b/unit_tests/recipies/Test_FuseMulAdd.cpp
@@ -12,27 +12,23 @@
 #include <catch2/catch_test_macros.hpp>
 #include <set>
 
-// #include "aidge/backend/cpu/operator/AddImpl.hpp"
-// #include "aidge/backend/cpu/operator/ConvImpl.hpp"
-// #include "aidge/backend/cpu/operator/FCImpl.hpp"
-// #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Recipies.hpp"
+#include "aidge/recipies/Recipies.hpp"
 
 namespace Aidge {
 
 
 TEST_CASE("[cpu/recipies] FuseMulAdd", "[FuseMulAdd][recipies]") {
     // generate the original GraphView
-    auto matmul0 = MatMul(5, "matmul0");
-    auto add0 = Add<2>("add0");
-    auto matmul1 = MatMul(5, "matmul1");
-    auto add1 = Add<2>("add1");
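+    // Assumed signatures: MatMul(inDim, outDim, name) and Add(nbInputs, name);
+    // Add now takes its input count as a runtime argument rather than a
+    // template parameter.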
+    auto matmul0 = MatMul(5, 5, "matmul0");
+    auto add0 = Add(2, "add0");
+    auto matmul1 = MatMul(5, 5, "matmul1");
+    auto add1 = Add(2, "add1");
 
     auto b0 = Producer({5}, "B0");
     auto w0 = Producer({5, 5}, "W0");
diff --git a/unit_tests/recipies/Test_HorizontalTiling.cpp b/unit_tests/recipies/Test_HorizontalTiling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c9fb5ed6dc8a5d994ce2d3434a8176c29e418f95
--- /dev/null
+++ b/unit_tests/recipies/Test_HorizontalTiling.cpp
@@ -0,0 +1,200 @@
+// /********************************************************************************
+//  * Copyright (c) 2023 CEA-List
+//  *
+//  * This program and the accompanying materials are made available under the
+//  * terms of the Eclipse Public License 2.0 which is available at
+//  * http://www.eclipse.org/legal/epl-2.0.
+//  *
+//  * SPDX-License-Identifier: EPL-2.0
+//  *
+//  ********************************************************************************/
+
+// #include <catch2/catch_test_macros.hpp>
+// #include <set>
+
+// #include "aidge/graph/GraphView.hpp"
+// #include "aidge/graph/OpArgs.hpp"
+// #include "aidge/operator/Conv.hpp"
+// #include "aidge/operator/ReLU.hpp"
+// #include "aidge/recipies/Recipies.hpp"
+
+
+// namespace Aidge {
+
+// TEST_CASE("[core/recipies] Tiling(transformation)", "[Tiling][Recipies]") {
+
+//     SECTION("Transform a pre-generated GraphView") {
+
+//         SECTION("Simple Node: Conv") {
+//             std::shared_ptr<Node> myConv = Conv(3,4,{3,3}, "myconv");
+//             myConv->getOperator()->setDatatype(DataType::Int32);
+//             myConv->getOperator()->setBackend("cpu");
+//             std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
+//                 {
+//                     {
+//                         {{  0,   1,   2},
+//                          {  3,   4,   5},
+//                          {  6,   7,   8}},
+//                         {{  9,  10,  11},
+//                          { 12,  13,  14},
+//                          { 15,  16,  17}},
+//                         {{ 18,  19,  20},
+//                          { 21,  22,  23},
+//                          { 24,  25,  26}}
+//                     },
+//                     {
+//                         {{ 27,  28,  29},
+//                         { 30,  31,  32},
+//                         { 33,  34,  35}},
+//                         {{ 36,  37,  38},
+//                         { 39,  40,  41},
+//                         { 42,  43,  44}},
+//                         {{ 45,  46,  47},
+//                         { 48,  49,  50},
+//                         { 51,  52,  53}}
+//                     },
+//                     {
+//                         {{ 54,  55,  56},
+//                         { 57,  58,  59},
+//                         { 60,  61,  62}},
+//                         {{ 63,  64,  65},
+//                         { 66,  67,  68},
+//                         { 69,  70,  71}},
+//                         {{ 72,  73,  74},
+//                         { 75,  76,  77},
+//                         { 78,  79,  80}}
+//                     },
+//                     {
+//                         {{ 81,  82,  83},
+//                         { 84,  85,  86},
+//                         { 87,  88,  89}},
+//                         {{ 90,  91,  92},
+//                         { 93,  94,  95},
+//                         { 96,  97,  98}},
+//                         {{ 99, 100, 101},
+//                         {102, 103, 104},
+//                         {105, 106, 107}}
+//                     }
+//                 }
+//             });
+//             std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
+//             std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
+//                 {
+//                     {
+//                         {{  0,   1,   2,   3,   4},
+//                         {  5,   6,   7,   8,   9},
+//                         { 10,  11,  12,  13,  14},
+//                         { 15,  16,  17,  18,  19},
+//                         { 20,  21,  22,  23,  24}},
+
+//                         {{ 25,  26,  27,  28,  29},
+//                         { 30,  31,  32,  33,  34},
+//                         { 35,  36,  37,  38,  39},
+//                         { 40,  41,  42,  43,  44},
+//                         { 45,  46,  47,  48,  49}},
+
+//                         {{ 50,  51,  52,  53,  54},
+//                         { 55,  56,  57,  58,  59},
+//                         { 60,  61,  62,  63,  64},
+//                         { 65,  66,  67,  68,  69},
+//                         { 70,  71,  72,  73,  74}}
+//                     },
+//                     {
+//                         {{ 75,  76,  77,  78,  79},
+//                         { 80,  81,  82,  83,  84},
+//                         { 85,  86,  87,  88,  89},
+//                         { 90,  91,  92,  93,  94},
+//                         { 95,  96,  97,  98,  99}},
+
+//                         {{100, 101, 102, 103, 104},
+//                         {105, 106, 107, 108, 109},
+//                         {110, 111, 112, 113, 114},
+//                         {115, 116, 117, 118, 119},
+//                         {120, 121, 122, 123, 124}},
+
+//                         {{125, 126, 127, 128, 129},
+//                         {130, 131, 132, 133, 134},
+//                         {135, 136, 137, 138, 139},
+//                         {140, 141, 142, 143, 144},
+//                         {145, 146, 147, 148, 149}}
+//                     }
+//                 }
+//             });
+//             std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> {
+//                 {
+//                     {
+//                         {{ 15226,  15577,  15928},
+//                          { 16981,  17332,  17683},
+//                          { 18736,  19087,  19438}},
+
+//                         {{ 37818,  38898,  39978},
+//                          { 43218,  44298,  45378},
+//                          { 48618,  49698,  50778}},
+
+//                         {{ 60426,  62235,  64044},
+//                          { 69471,  71280,  73089},
+//                          { 78516,  80325,  82134}},
+
+//                         {{ 83016,  85554,  88092},
+//                          { 95706,  98244, 100782},
+//                          {108396, 110934, 113472}}
+//                     },
+//                     {
+//                         {{ 41551,  41902,  42253},
+//                          { 43306,  43657,  44008},
+//                          { 45061,  45412,  45763}},
+
+//                         {{118818, 119898, 120978},
+//                          {124218, 125298, 126378},
+//                          {129618, 130698, 131778}},
+
+//                         {{196101, 197910, 199719},
+//                          {205146, 206955, 208764},
+//                          {214191, 216000, 217809}},
+
+//                         {{273366, 275904, 278442},
+//                          {286056, 288594, 291132},
+//                          {298746, 301284, 303822}}
+//                     }
+//                 }
+//             });
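+//             // Reference result of the untiled convolution; the tiled run
+//             // below is expected to reproduce it exactly (see the final REQUIRE).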
+//             myConv->getOperator()->associateInput(0,myInput);
+//             myConv->getOperator()->associateInput(1,myWeights);
+//             myConv->getOperator()->associateInput(2,myBias);
+//             myConv->getOperator()->computeOutputDims();
+
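+//             // Wrap the node in a GraphView, tile it, then run the scheduler
+//             // on the resulting graph. The (nodes, nbTiles) signature of
+//             // horizontalTiling() is an assumption taken from this draft.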
+//             std::shared_ptr<GraphView> g = std::make_shared<GraphView>();
+//             g->add(myConv);
+//             horizontalTiling({myConv}, 3);
+
+//             SequentialScheduler s(g);
+//             s.forward();
+
+//             // myConv->getOperator()->getOutput(0)->print();
+//             REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
+//         }
+//     }
+// }
+// } // namespace Aidge
+//         // std::shared_ptr<GraphView> g = Sequential({
+//         //     Conv(3, 16, {3,3}, "conv1"),
+//         //     ReLU("relu1"),
+//         //     Conv(16, 32, {1,1}, "conv2"),
+//         //     Conv(32, 16, {1,1}, "conv3"),
+//         //     Conv(16, 10, {3,3}, "conv4"),
+//         //     ReLU("relu2")
+//         // });
+
+//     //     for (auto& individualConv : g->match("Conv")) {
+//     //         auto tiledConv = horizontalTiling(individualConv);
+//     //         g->replace(individualConv, tiledConv);
+//     //     }
+//     // }
+
+//     // SECTION("Create the GraphView with tiled layers") {
+//     //     std::shared_ptr<GraphView> g;
+//     //     g->addChild(horizontalTiling(Conv()))
+//     // }
+
\ No newline at end of file
diff --git a/unit_tests/recipies/Test_LabelGraph.cpp b/unit_tests/recipies/Test_LabelGraph.cpp
index 873ad68f3198c6b6adf44d8c7ae31e667c63a18d..e0ba9be6c80ef6109b59458bf52a23120efc7584 100644
--- a/unit_tests/recipies/Test_LabelGraph.cpp
+++ b/unit_tests/recipies/Test_LabelGraph.cpp
@@ -45,9 +45,9 @@ TEST_CASE("[LabelGraph] conv") {
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
         REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
     }
 }
@@ -56,7 +56,7 @@ TEST_CASE("[LabelGraph] deleted node") {
     auto g1 = Sequential({
         Producer({16, 3, 224, 224}, "dataProvider"),
         Conv(3, 32, {3, 3}, "conv1"),
-        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
+        GenericOperator("Dummy_to_be_removed", 1, 0, 1),
         Conv(32, 64, {3, 3}, "conv2"),
         Conv(64, 10, {1, 1}, "conv3", {2, 2})
     });
@@ -74,16 +74,16 @@ TEST_CASE("[LabelGraph] deleted node") {
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
         REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
     }
 
     SECTION("Check dimensions") {
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 222, 222}));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 220, 220}));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 110, 110}));
+        REQUIRE(std::static_pointer_cast<Tensor>(g2->getNode("conv1")->getOperator()->getRawOutput(0))->dims() == std::vector<DimSize_t>({16, 1, 222, 222}));
+        REQUIRE(std::static_pointer_cast<Tensor>(g2->getNode("conv2")->getOperator()->getRawOutput(0))->dims() == std::vector<DimSize_t>({16, 1, 220, 220}));
+        REQUIRE(std::static_pointer_cast<Tensor>(g2->getNode("conv3")->getOperator()->getRawOutput(0))->dims() == std::vector<DimSize_t>({16, 1, 110, 110}));
     }
 }
 
@@ -91,11 +91,11 @@ TEST_CASE("[LabelGraph] deleted nodes") {
     auto g1 = Sequential({
         Producer({16, 3, 224, 224}, "dataProvider"),
         Conv(3, 32, {3, 3}, "conv1"),
-        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
-        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
-        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
+        GenericOperator("Dummy_to_be_removed", 1, 0, 1),
+        GenericOperator("Dummy_to_be_removed", 1, 0, 1),
+        GenericOperator("Dummy_to_be_removed", 1, 0, 1),
         Conv(32, 64, {3, 3}, "conv2"),
-        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
+        GenericOperator("Dummy_to_be_removed", 1, 0, 1),
         Conv(64, 10, {1, 1}, "conv3")
     });
 
@@ -112,9 +112,9 @@ TEST_CASE("[LabelGraph] deleted nodes") {
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
         REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
     }
 }
@@ -140,15 +140,15 @@ TEST_CASE("[LabelGraph] pooling") {
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
         REQUIRE(g2->getNode("pool1")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("pool1")->getOperator()->getOutput(0) == g2->getNode("pool2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("pool1")->getOperator()->getRawOutput(0) == g2->getNode("pool2")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("pool2")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("pool2")->getOperator()->getOutput(0) == g2->getNode("pool3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("pool2")->getOperator()->getRawOutput(0) == g2->getNode("pool3")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("pool3")->getOperator()->type() == "MaxPooling");
     }
 
     SECTION("Check dimensions") {
-        REQUIRE(g2->getNode("pool1")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 223, 223}));
-        REQUIRE(g2->getNode("pool2")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 222, 222}));
-        REQUIRE(g2->getNode("pool3")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 111, 111}));
+        REQUIRE(std::static_pointer_cast<Tensor>(g2->getNode("pool1")->getOperator()->getRawOutput(0))->dims() == std::vector<DimSize_t>({16, 1, 223, 223}));
+        REQUIRE(std::static_pointer_cast<Tensor>(g2->getNode("pool2")->getOperator()->getRawOutput(0))->dims() == std::vector<DimSize_t>({16, 1, 222, 222}));
+        REQUIRE(std::static_pointer_cast<Tensor>(g2->getNode("pool3")->getOperator()->getRawOutput(0))->dims() == std::vector<DimSize_t>({16, 1, 111, 111}));
     }
 }