diff --git a/aidge_core/export/node_export.py b/aidge_core/export/node_export.py
index 980cb05a5814b7476d64757353e393ad6130218b..bea61551d6b4363d234fba4df6138ccef3154331 100644
--- a/aidge_core/export/node_export.py
+++ b/aidge_core/export/node_export.py
@@ -37,15 +37,15 @@ class ExportNode(ABC):
         for idx, parent_node in enumerate(self.node.get_parents()):
             self.inputs.append(parent_node)
             if parent_node is not None:
-                self.inputs_dims.append(self.operator.input(idx).dims())
+                self.inputs_dims.append(self.operator.get_input(idx).dims())
             else:
                 self.inputs_dims.append(None)
 
         for idx, child_node in enumerate(self.node.get_children()):
             self.outputs.append(child_node)
-        
+
         # Dirty hot fix, change it quickly
-        self.outputs_dims.append(self.operator.output(0).dims())
+        self.outputs_dims.append(self.operator.get_output(0).dims())
 
     @abstractmethod
     def export(self, export_folder:str, list_configs:list):
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index 7bd1e730a973810db89aa786b52fa05c53c43590..825ca6100382116443699a00bcff27b9bbca028a 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -16,14 +16,14 @@ class test_operator_binding(unittest.TestCase):
     Can be remove in later stage of the developpement.
     """
     def setUp(self):
-        self.generic_operator = aidge_core.GenericOperator("FakeConv", 1, 1, 1).get_operator()
+        self.generic_operator = aidge_core.GenericOperator("FakeConv", 1, 0, 1).get_operator()
 
     def tearDown(self):
         pass
 
     def test_default_name(self):
         op_type = "Conv"
-        gop = aidge_core.GenericOperator(op_type, 1, 1, 1, "FictiveName")
+        gop = aidge_core.GenericOperator(op_type, 1, 0, 1, "FictiveName")
         # check node name is not operator type
         self.assertNotEqual(gop.name(), "Conv")
         # check node name is not default
@@ -95,12 +95,12 @@ class test_operator_binding(unittest.TestCase):
     def test_compute_output_dims(self):
         in_dims=[25, 25]
         input = aidge_core.Producer(in_dims, name="In")
-        genOp = aidge_core.GenericOperator("genOp", 1, 1, 1, name="genOp")
+        genOp = aidge_core.GenericOperator("genOp", 1, 0, 1, name="genOp")
         _ = aidge_core.sequential([input, genOp])
-        self.assertListEqual(genOp.get_operator().output(0).dims(), [])
+        self.assertListEqual(genOp.get_operator().get_output(0).dims(), [])
         genOp.get_operator().set_compute_output_dims(lambda x:x)
         genOp.get_operator().compute_output_dims()
-        self.assertListEqual(genOp.get_operator().output(0).dims(), in_dims)
+        self.assertListEqual(genOp.get_operator().get_output(0).dims(), in_dims)
 
     def test_set_impl(self):
 
@@ -116,7 +116,7 @@ class test_operator_binding(unittest.TestCase):
                 """
                 self.idx += 1
 
-        generic_node = aidge_core.GenericOperator("Relu", 1, 1, 1, name="myReLu")
+        generic_node = aidge_core.GenericOperator("Relu", 1, 0, 1, name="myReLu")
         generic_op = generic_node.get_operator()
         customImpl = PythonCustomImpl(generic_op)
 
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index 566650713c36236c19763f466ee906970466c02e..620beb160fb3494f156c1a4b512d386447081154 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -32,15 +32,17 @@ class test_attributes(unittest.TestCase):
         self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
 
     def test_fc(self):
+        in_channels = 4
         out_channels = 8
         nb_bias = True
-        fc_op = aidge_core.FC(out_channels, nb_bias).get_operator()
+        fc_op = aidge_core.FC(in_channels, out_channels, nb_bias).get_operator()
         self.assertEqual(fc_op.get_attr("OutChannels"), out_channels)
         self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
 
     def test_matmul(self):
+        in_channels = 4
         out_channels = 8
-        matmul_op = aidge_core.MatMul(out_channels).get_operator()
+        matmul_op = aidge_core.MatMul(in_channels, out_channels).get_operator()
         self.assertEqual(matmul_op.get_attr("OutChannels"), out_channels)
 
     def test_producer_1D(self):
diff --git a/aidge_core/unit_tests/test_recipies.py b/aidge_core/unit_tests/test_recipies.py
index 754907443530f7e73d1e10ed9549d0c8eb78a011..6cf89a45fd0d4cf1dc970d199d074e886b131896 100644
--- a/aidge_core/unit_tests/test_recipies.py
+++ b/aidge_core/unit_tests/test_recipies.py
@@ -22,8 +22,8 @@ class test_recipies(unittest.TestCase):
 
     def test_remove_flatten(self):
         graph_view = aidge_core.sequential([
-            aidge_core.GenericOperator("Flatten", 1, 1, 1, name="Flatten0"),
-            aidge_core.FC(50, name='0')
+            aidge_core.GenericOperator("Flatten", 1, 0, 1, name="Flatten0"),
+            aidge_core.FC(10, 50, name='0')
         ])
         old_nodes = graph_view.get_nodes()
         aidge_core.remove_flatten(graph_view)
@@ -33,10 +33,10 @@ class test_recipies(unittest.TestCase):
         self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()]))
 
     def test_fuse_matmul_add(self):
-        matmul0 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul0")
-        add0 = aidge_core.Add(name="Add0")
-        matmul1 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul1")
-        add1 = aidge_core.Add(name="Add1")
+        matmul0 = aidge_core.MatMul(1, 1, name="MatMul0")
+        add0 = aidge_core.Add(2, name="Add0")
+        matmul1 = aidge_core.MatMul(1, 1, name="MatMul1")
+        add1 = aidge_core.Add(2, name="Add1")
 
         graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
 
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index cc8763580076957d550c7c0702468a593e218569..6782392a77159814c9c363e236e21b87ca5480d9 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -14,21 +14,24 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/backend/TensorImpl.hpp"
+
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
+
 #include "aidge/graph/Connector.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/OpArgs.hpp"
-#include "aidge/graphmatching/GRegex.hpp"
 #include "aidge/graphmatching/Match.hpp"
 #include "aidge/graphmatching/NodeRegex.hpp"
 #include "aidge/graphmatching/SeqStm.hpp"
 #include "aidge/graphmatching/StmFactory.hpp"
 #include "aidge/graphmatching/Utile.hpp"
+
 #include "aidge/operator/Add.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/Concat.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/Div.hpp"
@@ -45,14 +48,18 @@
 #include "aidge/operator/Pow.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Scaling.hpp"
+#include "aidge/operator/Slice.hpp"
 #include "aidge/operator/Softmax.hpp"
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/operator/Sub.hpp"
+
 #include "aidge/scheduler/Scheduler.hpp"
+
+#include "aidge/recipies/Recipies.hpp"
+
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
-#include "aidge/utils/Recipies.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 //#include "aidge/utilsParsing/AstNode.hpp"
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 58c434bccc7c8dd39a93c46ecf74c38d7d834d1a..f8c3a48f7d5169dfee2cdceff37465f61bbb546c 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -299,7 +299,7 @@ class Tensor : public Data,
      */
     Tensor &operator=(const Tensor &t) {
         resize(t.dims());
-        setDatatype(t.dataType());
+        setDataType(t.dataType());
         if (t.hasImpl()) {
             setBackend(t.mImpl->backend());
             mImpl->copy(t.mImpl->rawPtr(), size());
@@ -362,7 +362,7 @@ class Tensor : public Data,
      * if the Tensor has already been initialized.
      * @param dt DataType.
      */
-    void setDatatype(const DataType dt) {
+    void setDataType(const DataType dt) {
         if (mImpl && (dataType() != dt)) {
             // get ptr before changing Tensor backend or the type difference will trigger a warning
             const void *data = mImpl->rawPtr();
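With `setDatatype()` renamed to `setDataType()`, call sites need the same update as the copy-assignment above. A minimal usage sketch; the default constructor and the initializer-list `resize()` overload are assumptions here:

```cpp
#include "aidge/data/Tensor.hpp"

int main() {
    Aidge::Tensor t;
    t.resize({2, 3});                          // allocate a 2x3 shape
    t.setDataType(Aidge::DataType::Float32);   // renamed from setDatatype()
    return 0;
}
```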
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 63f034d4a79384640fcdddbeb0f8557ed544c4b0..859956efb8ccd8d20fef2a09378fa839ca217f9a 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -190,6 +190,21 @@ public:
     std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs(
             std::string nodeName) const;
 
+    /**
+     * @brief Assert Datatype, Backend, data format and dimensions along the GraphView are coherent.
+     * If not, apply the required transformations.
+     * @details Sets the GraphView ready for computation in four steps:
+     * 1 - Assert each input Tensor's datatype is compatible with its Operator's datatype.
+     * If not, insert a conversion Operator.
+     * 2 - Assert each input Tensor's backend is compatible with its Operator's backend.
+     * If not, insert a Transmitter Operator.
+     * 3 - Assert the data format (NCHW, NHWC, ...) of each Operator's input Tensor is
+     * compatible with the selected kernel.
+     * If not, insert a Transpose Operator.
+     * 4 - Propagate Tensor dimensions through the consecutive Operators.
+     */
+    void compile(const std::string& backend, const Aidge::DataType datatype);
+
     /**
      * @brief Compute dimensions of input/output Tensors for each Operator of the
      * GraphView object's Nodes.
@@ -199,7 +214,7 @@ public:
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string &backend);
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
-    void setDatatype(const DataType &datatype);
+    void setDataType(const DataType &datatype);
 
 ///////////////////////////////////////////////////////
 //        TOPOLOGY
@@ -477,4 +492,4 @@ private:
 };
 }  // namespace Aidge
 
-#endif /* AIDGE_CORE_GRAPH_GRAPHVIEW_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_GRAPH_GRAPHVIEW_H_ */
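The new `compile()` entry point bundles into one call what previously required separate `setBackend()`/`setDataType()` calls plus dimension propagation. A sketch of the intended call pattern; the `"cpu"` backend name is an assumption:

```cpp
#include "aidge/graph/GraphView.hpp"

void prepareForInference(std::shared_ptr<Aidge::GraphView> graphView) {
    // Before: graphView->setBackend("cpu");
    //         graphView->setDataType(Aidge::DataType::Float32);
    //         ...then propagate dimensions separately...
    // After: one call performs the four coherence steps documented above.
    graphView->compile("cpu", Aidge::DataType::Float32);
}
```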
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index c1636734fe1f1fb14a518c16663632de288b602f..5ae4eb5d893244fa842e6bb0435c0a8ab3bc0ac5 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -164,13 +164,6 @@ public:
     return std::pair<NodePtr, IOIndex_t>(mParents[inID], mIdOutParents[inID]);
   }
 
-  /**
-   * @brief Set fix value for the specified input by creating a Producer wrapping the given Tensor.
-   *
-   * @param idx Input index.
-   * @param tensor Constant Tensor to add as parent for specified index.
-   */
-  void setInput(const IOIndex_t idx, const std::shared_ptr<Tensor> tensor);
 
   /**
    * @brief Get the lowest index in the InputData Parent list equal to the
@@ -180,9 +173,9 @@ public:
    */
   inline IOIndex_t getFirstFreeDataInput() const {
     IOIndex_t i = 0;
-    for (; (i < nbDataInputs()) && (input(i).second != gk_IODefaultIndex); ++i) {}
-    // assert((i<nbDataInputs()) && "No free data input for Node");
-    return (i < nbDataInputs()) ? i : gk_IODefaultIndex;
+    for (; (i < nbData()) && (input(i).second != gk_IODefaultIndex); ++i) {}
+    // assert((i<nbData()) && "No free data input for Node");
+    return (i < nbData()) ? i : gk_IODefaultIndex;
   }
 
 
@@ -219,8 +212,8 @@ public:
    * @details [data, data, weight, bias] => 2
    * @return IOIndex_t
    */
-  inline IOIndex_t nbDataInputs() const noexcept {
-    return getOperator()->nbDataInputs();
+  inline IOIndex_t nbData() const noexcept {
+    return getOperator()->nbData();
   }
 
   /**
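The `nbDataInputs()` to `nbData()` rename ripples through any caller that scans data inputs. A small sketch mirroring the `getFirstFreeDataInput()` loop above, using only the `input()` and `gk_IODefaultIndex` names shown in this header:

```cpp
#include "aidge/graph/Node.hpp"

// Returns true if the node still has at least one unconnected data input.
bool hasFreeDataInput(const Aidge::Node& node) {
    for (Aidge::IOIndex_t i = 0; i < node.nbData(); ++i) {
        if (node.input(i).second == Aidge::gk_IODefaultIndex) {
            return true;
        }
    }
    return false;
}
```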
diff --git a/include/aidge/graphRegex/GraphFsmInterpreter.hpp b/include/aidge/graphRegex/GraphFsmInterpreter.hpp
index 9e92b6fe8fc9d5e44cb8051e687e33d7192e0eb7..e2fd43b9e641e8cb4a695e3a3eecf5975610d564 100644
--- a/include/aidge/graphRegex/GraphFsmInterpreter.hpp
+++ b/include/aidge/graphRegex/GraphFsmInterpreter.hpp
@@ -19,13 +19,16 @@ namespace Aidge {
         std::size_t mActGroupe;
         std::map<std::string,std::shared_ptr<ConditionalInterpreter>> mNodesCondition;
 
+        const std::string mGraphMatchExpr;
     public:
-        GraphFsmInterpreter(const std::string graphMatchExpr,std::map<std::string,std::shared_ptr<ConditionalInterpreter>> nodesCondition);
+        GraphFsmInterpreter(const std::string graphMatchExpr, std::vector<std::shared_ptr<ConditionalInterpreter>>& nodesCondition);
         virtual ~GraphFsmInterpreter() =default;
 
 
         std::shared_ptr<FsmGraph>  interpret(void);
 
+
+
         private:
 
 
diff --git a/include/aidge/graphRegex/GraphLexer.hpp b/include/aidge/graphRegex/GraphLexer.hpp
index e4137ab093c466b7349007da91e032dae48eda51..bd65dfc15d18533676b19e148a98185d3844acbd 100644
--- a/include/aidge/graphRegex/GraphLexer.hpp
+++ b/include/aidge/graphRegex/GraphLexer.hpp
@@ -36,6 +36,9 @@ namespace Aidge {
     bool isEnd(void);
 
 
+    const std::string getQuery();
+
+
     /**
      * @brief Get the representation of the class
      * @return string
@@ -46,7 +49,7 @@ namespace Aidge {
 
     /**
      * @brief Constructs an error message to display the character not understood by the lexer
-     * @return error mesage
+     * @return error message
      */
     std::runtime_error badTokenError(const std::string& currentChars,std::size_t position);
 
diff --git a/include/aidge/graphRegex/GraphParser.hpp b/include/aidge/graphRegex/GraphParser.hpp
index 73406203a8be87e1df75cc694ab1ff281c27fbfa..cfe25c22709a3516b4f55ba774a616e3b94a055c 100644
--- a/include/aidge/graphRegex/GraphParser.hpp
+++ b/include/aidge/graphRegex/GraphParser.hpp
@@ -21,8 +21,6 @@ class GraphParser{
      */
     GraphParser(const std::string gRegexExpressions);
 
-    virtual ~GraphParser() = default;
-
     /**
      * @brief AST graph creation function
      * @return The AST tree
@@ -30,6 +28,13 @@ class GraphParser{
     std::shared_ptr<AstNode<gRegexTokenTypes>> parse(void);
 
 
+    /**
+     * @brief get the query used in the parsing
+     * @return the query
+     */
+    const std::string getQuery();
+
+
     private:
     /**
      * @brief restart at the start of the ConditionalExpressions for LEXER and restart  mCurrentToken
diff --git a/include/aidge/graphRegex/GraphRegex.hpp b/include/aidge/graphRegex/GraphRegex.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..b62a42fcfeb258e5c659eaeb6681190482f37aa4
--- /dev/null
+++ b/include/aidge/graphRegex/GraphRegex.hpp
@@ -0,0 +1,107 @@
+#ifndef AIDGE_CORE_GRAPH_REGEX_H_
+#define AIDGE_CORE_GRAPH_REGEX_H_
+
+#include <string>
+
+#include "aidge/graphRegex/matchFsm/MatchResult.hpp"
+#include "aidge/graphRegex/matchFsm/FsmGraph.hpp"
+#include "aidge/graphRegex/GraphFsmInterpreter.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+
+namespace Aidge{
+
+/**
+ * @brief type of the recipe functions used in query and resolve
+*/
+using RecipesFunctionType = std::function<void(std::shared_ptr<MatchSolution>)>;
+
+/**
+ * @brief high-level interface for graph matching, used to simplify match definitions
+ *
+ */
+class GraphRegex{
+
+    private:
+
+    //std::vector<std::string> mQuery;
+    std::vector<std::shared_ptr<ConditionalInterpreter>> mAllTest;
+    std::map<std::string, std::function<bool(NodePtr)>> mAllLambda;
+    std::map<std::string,RecipesFunctionType> mQueryRecipe;
+
+    public:
+    GraphRegex(){};
+    virtual ~GraphRegex() = default;
+
+    /**
+     * @brief add a topology query to the match 
+     * @param query the topology query to find 
+    **/
+    //void addQuery(const std::string query);
+
+    /**
+     * @brief add a topology query to match, with an optional recipe function
+     * @param query the topology query to find
+     * @param f the recipe function applied to each solution of the query
+    **/
+    void addQuery(const std::string query,RecipesFunctionType f = nullptr);
+
+
+    /**
+     * @brief get all the node types of a graph and register them as type keys for the query
+     * @param ref reference graph used to get all the node types
+    **/
+    void setKeyFromGraph(std::shared_ptr<GraphView> ref);
+
+    /**
+     * @brief set a node test manually
+     * @param key the reference to this test, used in the query
+     * @param conditionalExpressions the expression used to test the node
+    **/
+    void setNodeKey(const std::string key, const std::string conditionalExpressions );
+
+    /**
+     * @brief set a specific lambda that can be used in the conditional expressions
+     * @param key the reference to the lambda, usable in a query
+     * @param f the lambda used to test the node
+    **/
+    void setNodeKey(const std::string key,std::function<bool(NodePtr)> f);
+
+    /**
+     *  @brief match the queries in the graph
+     *  @param ref the graph in which the queries are searched
+     *  @return the set of matching solutions
+    */
+    std::set<std::shared_ptr<MatchSolution>> match(std::shared_ptr<GraphView> ref);
+
+    /**
+     *  @brief match the queries in the graph and apply the recipe functions
+     *  @param ref the graph in which the queries are searched
+    */
+    void appliedRecipes(std::shared_ptr<GraphView> ref);
+
+    private:
+
+    void _generateCombinationsStart(const std::set<NodePtr>& elements, std::size_t n, std::size_t index,
+                                    std::vector<NodePtr>& current, std::set<std::vector<NodePtr>>& combinations);
+
+
+
+  void _findLargestCompatibleSet(
+      const std::vector<std::shared_ptr<MatchSolution>>& solutions,
+      std::set<std::shared_ptr<MatchSolution>>& currentSet,
+      std::set<std::shared_ptr<MatchSolution>>& largestSet,
+      size_t currentIndex
+  );
+
+  std::set<std::shared_ptr<MatchSolution>> _findLargestCompatibleSet(
+      const std::vector<std::shared_ptr<MatchSolution>>& solutions
+  );
+
+  void _majConditionalInterpreterLambda();
+
+};
+}
+
+
+#endif //AIDGE_CORE_GRAPH_REGEX_H_
\ No newline at end of file
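A usage sketch of the new high-level interface; the method names come from this header, while the query/expression syntax and the `Node::type()` accessor are illustrative assumptions:

```cpp
#include <memory>
#include <set>

#include "aidge/graphRegex/GraphRegex.hpp"

void applyFuseRecipe(std::shared_ptr<Aidge::GraphView> graphView) {
    Aidge::GraphRegex regex;
    // Register the node tests referenced by the query, here as plain lambdas.
    regex.setNodeKey("MatMul", [](Aidge::NodePtr node) { return node->type() == "MatMul"; });
    regex.setNodeKey("Add", [](Aidge::NodePtr node) { return node->type() == "Add"; });
    // Attach a recipe function to a topological query (syntax illustrative).
    regex.addQuery("MatMul -> Add", [](std::shared_ptr<Aidge::MatchSolution> solution) {
        const std::set<Aidge::NodePtr>& matmuls = solution->at("MatMul");
        (void)matmuls;  // rewrite the matched subgraph here
    });
    // Match the queries on the graph and apply the recipe to each solution.
    regex.appliedRecipes(graphView);
}
```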
diff --git a/include/aidge/graphRegex/matchFsm/FsmEdge.hpp b/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
index c3eae528808dbdb8023718c961b7c45cbf4afac9..a6cc3e59247d4be98caa9881182bfba1c44e0178 100644
--- a/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
@@ -87,7 +87,7 @@ namespace Aidge{
          * @brief set a new source to the edge
          * @return FsmNode
         */
-        void reSetSouceNode(const std::shared_ptr<FsmNode>& newSource);
+        void reSetSourceNode(const std::shared_ptr<FsmNode>& newSource);
           /**
          * @brief get dest FsmNode
          * @return FsmNode
@@ -116,7 +116,7 @@ namespace Aidge{
     };
 
     /**
-     * @brief class spesialisation for not commun node (node that must be match one Unique) transition
+     * @brief class specialization for non-common node (node that must be matched uniquely) transition
     */
     class FsmEdgeUnique:public FsmEdge
     {
@@ -127,7 +127,7 @@ namespace Aidge{
     };
 
     /**
-     * @brief class spesialisation for  commun node transition
+     * @brief class specialization for common node transition
      * @see FsmEdge
     */
     class FsmEdgeCommon:public FsmEdge
@@ -181,7 +181,7 @@ namespace Aidge{
     };
 
     /**
-     * @brief class spesialisation for ref empty transition
+     * @brief class specialization for the empty transition
      * @see FsmEdge
     */
     class FsmEdgeEmpty:public FsmEdge
@@ -195,6 +195,20 @@ namespace Aidge{
     };
 
 
+    /**
+     * @brief class specialization for the none transition
+     * @see FsmEdge
+    */
+    class FsmEdgeNone:public FsmEdge
+    {
+
+        public:
+        FsmEdgeNone(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNode> dest);
+        const EdgeTestResult test(const std::shared_ptr<FsmRunTimeContext> /*stmContext*/) override;
+
+    };
+
+
 
 ////////////////////////
 // FACTORY
diff --git a/include/aidge/graphRegex/matchFsm/FsmGraph.hpp b/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
index 0a74551367dd492cb0abb820e4c5ce5a601d071e..d718009e87e5360981ff93ff808124581917c089 100644
--- a/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
@@ -18,78 +18,89 @@ class FsmGraph
 {
 private:
     /**
-     * @brief all node origine
+     * @brief origin of all nodes
     */
-    std::set<std::size_t> mAllOrigine;
+    std::set<std::size_t> mAllOrigin;
     std::set<std::shared_ptr<FsmEdge>> mEdges;
+
+
+    const std::string mQuery;
+
 public:
-    FsmGraph(/* args */);
+
+    FsmGraph(const std::string query);
     virtual ~FsmGraph() = default;
 
-std::shared_ptr<MatchResult> test(std::vector<NodePtr>& StartNodes);
-
-
-
-const std::set<std::shared_ptr<FsmEdge>>& getEdge(void);
-/**
- * @brief add edge in the graph, as FsmEdge know the source and dest FsmNode these nodes are also add to the graph
-*/
-void addEdge(std::shared_ptr<FsmEdge>& edge);
-
-/**
- * @brief get the liste of the starting states
- * @details we need to use a vector because the order of the nodes is important for start node initialization \ref test()
-*/
-const std::vector<std::shared_ptr<FsmNode>> getStartNodes(void);
-
-/**
- * @brief get the set of the valide states
- * @return set of valide state
-*/
-const std::set<std::shared_ptr<FsmNode>> getValidNodes(void);
-
-/**
- * @brief get the set of all the node in the graph
- * @return set of all nodes
-*/
-const std::set<std::shared_ptr<FsmNode>> getNodes(void);
-
-/**
- * @brief set a groupe idx for all the nodes in the graph
-*/
-void setGroupe(std::size_t groupeIdx);
-
-/**
- * @brief make the union beteen this graph and an input graph
- * @param fsmGraph graph to union
-*/
-void unionG(const std::shared_ptr<FsmGraph> fsmGraph);
-
-
-/**
- * @brief make the union beteen this graph and an input graph and merge the valide state to the start state
- * @param fsmGraph graph to merge
-*/
-void mergeOneStartOneValid(const std::shared_ptr< FsmGraph> fsmGraph);
-/**
- * @brief get the number of sub FSM
- * @return number of sub Fsm
-*/
-std::size_t getNbSubFsm(void);
-
-/**
- * @brief increment the origine of all node in the graph
- * @param incr the incrémentation value
-*/
-void incOrigineAllNodeBy(std::size_t incr);
+    std::vector<std::shared_ptr<MatchSolution>> test(const std::vector<NodePtr>& StartNodes);
 
-private:
 
-/**
- * @brief merge tow node of the graph
- * @param node
-*/
-void _mergeNode(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNode> dest);
+
+    const std::set<std::shared_ptr<FsmEdge>>& getEdge(void);
+    /**
+     * @brief add an edge to the graph; as the FsmEdge knows its source and dest FsmNodes, these nodes are also added to the graph
+    */
+    void addEdge(std::shared_ptr<FsmEdge>& edge);
+
+    /**
+     * @brief get the list of the starting states
+     * @details we need to use a vector because the order of the nodes is important for start node initialization \ref test()
+    */
+    const std::vector<std::shared_ptr<FsmNode>> getStartNodes(void);
+
+    /**
+     * @brief get the set of the valid states
+     * @return set of valid states
+    */
+    const std::set<std::shared_ptr<FsmNode>> getValidNodes(void);
+
+    /**
+     * @brief get the set of all the node in the graph
+     * @return set of all nodes
+    */
+    const std::set<std::shared_ptr<FsmNode>> getNodes(void);
+
+    /**
+     * @brief set a group index for all the nodes in the graph
+    */
+    void setGroupe(std::size_t groupeIdx);
+
+    /**
+     * @brief make the union between this graph and an input graph
+     * @param fsmGraph the graph to union with
+    */
+    void unionG(const std::shared_ptr<FsmGraph> fsmGraph);
+
+
+    /**
+     * @brief make the union between this graph and an input graph, and merge the valid states into the start states
+     * @param fsmGraph the graph to merge
+    */
+    void mergeOneStartOneValid(const std::shared_ptr< FsmGraph> fsmGraph);
+    /**
+     * @brief get the number of sub-FSMs
+     * @return number of sub-FSMs
+    */
+    std::size_t getNbSubFsm(void);
+
+    /**
+     * @brief get the number of start states
+     * @return number of start states
+    */
+    std::size_t getNbStart(void);
+
+    /**
+     * @brief increment the origin of all nodes in the graph
+     * @param incr the increment value
+    */
+    void incOriginAllNodeBy(std::size_t incr);
+
+    private:
+
+    /**
+     * @brief merge two nodes of the graph
+     * @param source, dest the nodes to merge
+    */
+    void _mergeNode(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNode> dest);
 
 };
 
diff --git a/include/aidge/graphRegex/matchFsm/FsmNode.hpp b/include/aidge/graphRegex/matchFsm/FsmNode.hpp
index 2776ff8eb297fd5ad9a4c425fb386adde0a25269..7987c5ce33522ca7d43de1918d53e68738af6d18 100644
--- a/include/aidge/graphRegex/matchFsm/FsmNode.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmNode.hpp
@@ -33,7 +33,7 @@ namespace Aidge{
      * @details a state can be and/or :
      * - a valide state, the match is valide if it stop on this edge
      * - a start state , the match start on this state
-     * The state is also define by this origine (is the unique id of it's expretion )
+     * The state is also defined by its origin (the unique id of its expression)
      * and it's groupe (for inner expression TODO)
     */
     class FsmNode : public std::enable_shared_from_this<FsmNode>
@@ -49,8 +49,8 @@ namespace Aidge{
         */
         std::set<std::weak_ptr<FsmNode>,lex_compare<FsmNode>> mParents;
 
-        std::size_t mOrigineStm = 0;
-        std::size_t mGroupeStm = 0;
+        std::size_t mOriginFsm = 0;
+        std::size_t mGroupeFsm = 0;
 
         bool mIsAValid;
         bool mIsAStart;
@@ -59,7 +59,7 @@ namespace Aidge{
         FsmNode(bool isAValid,bool isAStart );
         virtual ~FsmNode() = default;
         /**
-         * @brief use to MAG the actual context , and return all the posible new context
+         * @brief used to update the actual context, and return all the possible new contexts
          * @details one input context can generate a multitude of contexts because a graph node
          *  can have more than one child, and each traversal possibility is a new context.
          * @param actContext the actual context
@@ -68,8 +68,8 @@ namespace Aidge{
         const std::vector<std::shared_ptr<FsmRunTimeContext>> test( std::shared_ptr<FsmRunTimeContext>);
 
 
-        std::size_t getOrigine(void);
-        void incOrigine(std::size_t inc);
+        std::size_t getOrigin(void);
+        void incOrigin(std::size_t inc);
 
 
         void rmEdge(std::shared_ptr<FsmEdge>);
diff --git a/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp b/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
index 6f1b9fc2bfe68195f67cfc0bf17d57aed5345219..2f6066ba4cd97284c43b509c9d5eb988b65b53a5 100644
--- a/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
@@ -152,7 +152,7 @@ namespace Aidge{
         std::set<NodePtr> getValidNodes(void);
 
         std::set<NodePtr> getValidNodesNoCommon(void);
-        std::map<std::shared_ptr<ConditionalInterpreter>,std::set<NodePtr>> getValid(void);
+        std::map<std::shared_ptr<ConditionalInterpreter>,std::set<NodePtr>>& getValid(void);
 
 
         NodePtr getActNode(void);
diff --git a/include/aidge/graphRegex/matchFsm/MatchResult.hpp b/include/aidge/graphRegex/matchFsm/MatchResult.hpp
index ac2f2a627a9d88b3cabeac4b181af2f3b7566d72..4f7f9bf1dd9b0612e71a1f7894bfc382713c0ad0 100644
--- a/include/aidge/graphRegex/matchFsm/MatchResult.hpp
+++ b/include/aidge/graphRegex/matchFsm/MatchResult.hpp
@@ -1,19 +1,45 @@
 #ifndef AIDGE_CORE_MATCH_RESULT_H_
 #define AIDGE_CORE_MATCH_RESULT_H_
 
+#include <cstddef>
+#include <map>
 #include <memory>
+#include <string>
+#include <set>
 #include <vector>
-#include <map>
-
 
 #include "aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp"
 #include "aidge/graph/Node.hpp"
 
 namespace Aidge{
 
+/**
+ * @brief contains the result of one match: the associated keys, the query and the start nodes
+*/
+
+class MatchSolution{
+private:
+    std::map<std::string, std::set<NodePtr>> mSolution;
+    const std::string mQueryFrom;
+    const std::vector<NodePtr> mStartNode;
+
+public:
+    MatchSolution(std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence,const std::string query,const std::vector<NodePtr> startNode);
+    inline const std::set<NodePtr>& at(const std::string key) {
+        return mSolution[key];
+    }
+    const std::set<NodePtr> getAll();
+    bool areCompatible(std::shared_ptr<MatchSolution> solution);
+
+    inline const std::string& getQuery() const noexcept { return mQueryFrom; }
+    inline const std::vector<NodePtr>& getStartNode() const noexcept { return mStartNode; }
+
+};
+
+
 /**
  * @brief class that old the result of a matching
- * give acess to all node ant there tag in the expression
+ * gives access to all nodes and their tags in the expression
 */
 class MatchResult
 {
@@ -22,34 +48,38 @@ private:
     std::vector<std::shared_ptr<FsmRunTimeContext>> mAllValid;
 
     /*
-    the Run time of eatch sub FSM , to have a valide match we need a set of one run time per FSM compatible
-    the id must be contigue
+    the runtime of each sub-FSM; to have a valid match we need a compatible set with one runtime per FSM,
+    and the ids must be contiguous
     */
     std::vector<std::vector<std::shared_ptr<FsmRunTimeContext>>> mIdToRunTime;
 
-    std::vector<std::set<NodePtr>> mSolve;
+    std::vector<std::shared_ptr<MatchSolution>> mSolve;
 
     std::size_t mNbSubStm;
 
-public:
-    MatchResult(std::vector<std::shared_ptr<FsmRunTimeContext>> allValid, std::size_t nbSubStm);
 
-    virtual ~MatchResult() = default;
+
+public:
+    MatchResult(std::vector<std::shared_ptr<FsmRunTimeContext>> allValid, std::size_t nbSubStm,
+                const std::string& query, const std::vector<NodePtr>& startNodes);
 
     /**
      * @brief get the set of the node match for une expression
      * @return the set of node of the graph that corresponding to an expression
     */
-    std::set<NodePtr> getBiggerSolution(void);
+    std::shared_ptr<MatchSolution> getBiggerSolution(void);
+
+    inline std::vector<std::shared_ptr<MatchSolution>> getSolutions(void) const noexcept {
+        return mSolve;
+    }
 
 private:
 
 /**
- * @brief recurent function use to inite mSolve in the constructor
+ * @brief recursive function used to init mSolve in the constructor
  *
  **/
-
-void _generateCombinationd( std::size_t idxSubStm, std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence);
+void _generateCombination( std::size_t idxSubStm, std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence,const std::string& query,const std::vector<NodePtr>& startNodes);
 
 };
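`MatchResult` now exposes structured `MatchSolution` objects instead of raw node sets; a consumption sketch using only the accessors declared above:

```cpp
#include <iostream>

#include "aidge/graphRegex/matchFsm/MatchResult.hpp"

void dumpSolutions(const Aidge::MatchResult& result) {
    for (const auto& solution : result.getSolutions()) {
        std::cout << "query: " << solution->getQuery()
                  << ", start nodes: " << solution->getStartNode().size()
                  << ", matched nodes: " << solution->getAll().size() << '\n';
    }
}
```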
 
diff --git a/include/aidge/graphmatching/GRegex.hpp b/include/aidge/graphmatching/GRegex.hpp
deleted file mode 100644
index fd2d0c52ab47e0f03b3307bdbcfcb5a7b81d78d9..0000000000000000000000000000000000000000
--- a/include/aidge/graphmatching/GRegex.hpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-
-#ifndef AIDGE_GREGEX_H_
-#define AIDGE_GREGEX_H_
-
-#include <stdexcept>    // for exception, runtime_error, out_of_range
-#include <regex>
-#include <memory>       // for shared_ptr
-#include <algorithm>    // for next_permutation
-
-#include "aidge/graphmatching/Utile.hpp"
-#include "aidge/graphmatching/StmFactory.hpp"
-#include "aidge/graphmatching/SeqStm.hpp"
-#include "aidge/graphmatching/NodeRegex.hpp"
-#include "aidge/graphmatching/Match.hpp"
-
-
-namespace Aidge{
-
-class GRegex {
-// __init__(self,nodes_regex:dict,seq_regexps:list)
-
-    StmFactory mStmFab;
-    std::vector<SeqStm*> mStmInit;
-
-public:
-    GRegex(const std::map<std::string,NodeRegex*>& nodesRegex,std::vector<std::string>& seqRegexps );
-
-    std::set<NodeTmp> matchFromStartNodes(const std::vector<NodeTmp> startNodes,const std::shared_ptr<GraphView> graphToMatch);
-
-    bool walk_validation_all_stm_are_valid(const std::vector<std::vector<SeqStm*>> all_stm);
-
-    bool walk_validation_all_node_read_validate_by_one_stm(const std::vector<std::vector<SeqStm*>> all_stm);
-
-    bool walk_validation_common_nodes_same_tag_for_all_stm(const std::vector<std::vector<SeqStm*>> all_stm);
-
-    std::set<NodeTmp> get_all_validate_nodes(const std::vector<std::vector<SeqStm*>> all_stm);
-
-    std::vector<SeqStm*> getStmInit() const {
-        return mStmInit;
-    }
-
-    StmFactory getStmFab() const {
-        return mStmFab;
-    }
-
-    //std::set<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>> match(const std::shared_ptr<GraphView> graphToMatch);
-    Match match(const std::shared_ptr<GraphView> graphToMatch);
-
-};
-
-}
-#endif //AIDGE_GREGEX_H_
\ No newline at end of file
diff --git a/include/aidge/graphmatching/Match.hpp b/include/aidge/graphmatching/Match.hpp
deleted file mode 100644
index fc617a22869fde6531fba67c8641581572cbffc4..0000000000000000000000000000000000000000
--- a/include/aidge/graphmatching/Match.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_MATCH_H_
-#define AIDGE_MATCH_H_
-
-#include <vector>
-#include <set>
-#include <iostream>
-#include <cassert>
-#include "aidge/graphmatching/Utile.hpp"
-
-
-namespace Aidge{
-
-class Match {
-
-public:
-    Match();
-
-    size_t getNbMatch();
-
-    void insert(std::vector<NodeTmp> startnodes, std::set<NodeTmp> matchnodes);
-
-    std::vector<std::vector<NodeTmp>> getStartNodes();
-
-    std::vector<std::set<NodeTmp>> getMatchNodes();
-
-protected:
-    std::vector<std::vector<NodeTmp>> mStartNodes;
-    std::vector<std::set<NodeTmp>> mMatchNodes;
-
-};
-
-}
-#endif //AIDGE_MATCH_H_
\ No newline at end of file
diff --git a/include/aidge/graphmatching/NodeRegex.hpp b/include/aidge/graphmatching/NodeRegex.hpp
deleted file mode 100644
index 10ba7225834e4abfb7f0f5cd45ffa91b22f2f87d..0000000000000000000000000000000000000000
--- a/include/aidge/graphmatching/NodeRegex.hpp
+++ /dev/null
@@ -1,41 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_NODEREGEX_H_
-#define AIDGE_NODEREGEX_H_
-#include <cstdlib>
-#include <iostream>
-#include <cstring>
-#include "aidge/graph/Node.hpp"
-
-
-namespace Aidge {
-
-class NodeRegex
-{
-    public:
-    std::string mCondition;
-
-    NodeRegex(const std::string c){
-        mCondition = c;
-    };
-
-    // Version 1 - Only test the type of the node (no need for a lexer)
-    // Input : Node_op
-    // Output : bool
-    // return mCondition == Node_op.type
-    bool _is(std::shared_ptr<Node> &Node_op);
-    bool isA(std::string NodeType);
-};
-
-}
-
-#endif /* _AIDGE_NODEREGEX_H__ */
\ No newline at end of file
diff --git a/include/aidge/graphmatching/SeqStm.hpp b/include/aidge/graphmatching/SeqStm.hpp
deleted file mode 100755
index 0823b5fc0f292d8cf28f7ead53d01bd8dd8adbfe..0000000000000000000000000000000000000000
--- a/include/aidge/graphmatching/SeqStm.hpp
+++ /dev/null
@@ -1,127 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_SEQSTM_H_
-#define AIDGE_SEQSTM_H_
-
-#include <iostream>
-#include <map>
-#include <regex>
-#include <set>
-#include <stdexcept> // for exception, runtime_error, out_of_range
-#include <string>
-#include <utility>
-#include <vector>
-
-
-#include "aidge/graphmatching/NodeRegex.hpp"
-#include "aidge/graphmatching/Utile.hpp"
-
-
-namespace Aidge {
-
-class SeqStm {
-
-private:
-  const int mStmIdx;
-  const std::vector<std::vector<int>> mTransitionMatrix;
-  // str key of type like 'A' that ce use in the A->B .. extpr
-  const std::map<std::string, NodeRegex *> mNodesRegex;
-  // mTypeToIdxTransition.first = std::pair node_type , common_tag
-  // mTypeToIdxTransition.segond = idx in trans matrix
-  const std::map<NodeTypeKey, int> mTypeToIdxTransition;
-
-  int mActSt;
-  std::set<NodeTmp> mAllNodeValidated;
-  std::set<NodeTmp> mAllNodeTested;
-  std::set<std::pair<NodeTmp, std::string>> mAllCommonNode;
-  bool mStmIsValid;
-
-  std::pair<NodeRegex *, std::string> getNodeRegexAndCommonAt(int idxType);
-
-  /**
-   * @brief test the stm on a type
-   * @return the common tag
-   */
-  std::string transitionOnNodeType(NodeType nodeType);
-
-public:
-  SeqStm(const int mStmIdx,
-         const std::vector<std::vector<int>> &mTransitionMatrix,
-         const std::map<std::string, NodeRegex *> &mNodesRegex,
-         const std::map<NodeTypeKey, int> &mTypeToIdxTransition, int mActSt,
-         std::set<NodeTmp> mAllNodeValidated, std::set<NodeTmp> mAllNodeTested,
-         std::set<std::pair<NodeTmp, std::string>> mAllCommonNode,
-         bool mStmIsValid);
-
-  //////////////////////////////////////
-  // STM test
-  /////////////////////////////////////
-
-  /**
-   * @brief get if a st is a valide one
-   * @return bool
-   */
-  bool isAValidSt(int st) {
-    std::size_t size = mTransitionMatrix.size();
-    return st == static_cast<int>(size - 1) ? true : false;
-  }
-
-  /**
-   * @brief true if the stm is blocked into st
-   * @return bool
-   */
-  bool isStmBlocked() { return mActSt == -1 ? true : false; }
-
-  /**
-   * @brief true if the stm into valide st
-   * @return bool
-   */
-  bool isValid() { return mStmIsValid; }
-
-  /////////////////////////////////////
-  // utile
-  /////////////////////////////////////
-  /**
-   * @brief extract from a node is type
-   * @return bool
-   */
-  NodeType getTheNodeType(NodeTmp node);
-
-  void drawStm();
-  /////////////////////////////////////
-  // geter
-  /////////////////////////////////////
-
-  std::set<std::pair<NodeTmp, std::string>> getAllCommonNode() {
-    return mAllCommonNode;
-  }
-  std::set<NodeTmp> getAllNodeTested() { return mAllNodeTested; }
-
-  std::set<NodeTmp> getAllNodeValidated() { return mAllNodeValidated; }
-
-  SeqStm *duplicateStm();
-
-  int getStmIdx() { return mStmIdx; }
-
-  int getState() { return mActSt; }
-  //////////////////////////////////////////
-  // USE
-  //////////////////////////////////////////
-  /**
-   * @brief test the stm on a node
-   * @return  pair new stm state, the common tag
-   */
-  std::pair<int, std::string> testNode(const NodeTmp node);
-};
-} // namespace Aidge
-
-#endif /* AIDGE_SEQSTM_H_ */
\ No newline at end of file
diff --git a/include/aidge/graphmatching/StmFactory.hpp b/include/aidge/graphmatching/StmFactory.hpp
deleted file mode 100644
index b5850e4a00691ef6c808554a86a6ceec8c38ad19..0000000000000000000000000000000000000000
--- a/include/aidge/graphmatching/StmFactory.hpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_STMFACTORY_H_
-#define AIDGE_STMFACTORY_H_
-
-#include <map>
-#include <utility>
-#include <set>
-#include <string>
-#include <vector>
-#include <iostream>
-#include <stdexcept>   // for exception, runtime_error, out_of_range
-#include <regex>
-
-#include "aidge/graphmatching/NodeRegex.hpp"
-#include "aidge/graphmatching/SeqStm.hpp"
-#include "aidge/graphmatching/Utile.hpp"
-
-namespace Aidge{
-
-
-
-class StmFactory {
-
-    const std::map<std::string,NodeRegex*>& mNodesRegex;
-    std::size_t mCmptStm = 0;
-public:
-    StmFactory(const std::map<std::string,NodeRegex*>& nodesRegex);
-    //StmFactory(){};
-
-    SeqStm* makeNewStm(const std::string& sequRegex);
-    SeqStm* duplicateStm(SeqStm* stm);
-
-    std::size_t getNumberOfStm(){
-        return mCmptStm;
-    }
-private:
-
-    ParsingReturn initParsingSequRegex(const std::string& sequRegex);
-
-    std::vector<std::vector<int>> initTransitionMatrix(ParsingReturn& parsing);
-
-};
-}
-
-#endif //AIDGE_STMFACTORY_H_
\ No newline at end of file
diff --git a/include/aidge/graphmatching/Utile.hpp b/include/aidge/graphmatching/Utile.hpp
deleted file mode 100644
index acda78cd181519c86ab0b14d5b01bf91223cec9d..0000000000000000000000000000000000000000
--- a/include/aidge/graphmatching/Utile.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-
-/**
- * @file
- * @brief
- * @version file 1.0.0
- * @author vl241552
- * @copyright
- *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory.
- * All rights reserved.
- */
-
-#ifndef _utile_H_
-#define _utile_H_
-
-#include <map>
-
-#include "aidge/graph/Node.hpp"
-#include <map>
-
-namespace Aidge {
-
-using NodeTmp = std::shared_ptr<Node>;
-using NodeType = std::string;
-using CommonTag = std::string;
-using NodeTypeKey = std::pair<NodeType, CommonTag>;
-
-// type def
-// struct NodeTypeKey {
-//     NodeType nodeType;
-//     std::string commonTag;
-
-//     // for map find
-//     bool operator<(const NodeTypeKey& other) const {
-//         if (nodeType != other.nodeType or commonTag != other.commonTag) {
-//             return false;
-//         } else {
-//             return true;
-//         }
-//     }
-
-// };
-
-struct ParsingReturn {
-  std::map<NodeTypeKey, int> typeToIdxTransition;
-  std::vector<std::pair<NodeTypeKey, std::string>> transition;
-};
-
-} // namespace Aidge
-
-#endif //_utile_H_
\ No newline at end of file
diff --git a/include/aidge/hook/ExecTime.hpp b/include/aidge/hook/ExecTime.hpp
index 212fef58696be702e89c8ad973dcc0dd0fc389ae..0964d9575b7ad345d5e07c9f19c7e56a3b69c813 100644
--- a/include/aidge/hook/ExecTime.hpp
+++ b/include/aidge/hook/ExecTime.hpp
@@ -18,7 +18,7 @@
 #define execTime_H_
 
 #include "aidge/operator/Operator.hpp"
-#include "aidge/hook/hook.hpp"
+#include "aidge/hook/Hook.hpp"
 #include <memory>
 #include <chrono>
 #include <vector>
diff --git a/include/aidge/hook/OutputRange.hpp b/include/aidge/hook/OutputRange.hpp
index a2da2a997d594c0ef78fb7c31f33b32c3495c4eb..355f4aaa15a6bcd77d99ec2dad344a45f8f9edc0 100644
--- a/include/aidge/hook/OutputRange.hpp
+++ b/include/aidge/hook/OutputRange.hpp
@@ -18,7 +18,7 @@
 #define AIDGE_CORE_HOOK_OUTPUTRANGE_H_
 
 #include "aidge/operator/Operator.hpp"
-#include "aidge/hook/hook.hpp"
+#include "aidge/hook/Hook.hpp"
 #include <memory>
 #include <chrono>
 #include <vector>
diff --git a/include/aidge/nodeTester/ConditionalInterpreter.hpp b/include/aidge/nodeTester/ConditionalInterpreter.hpp
index 165fac1c2ae98bf76b73c039de9fc975e9845cc9..af6a3b920bb9ca389724860d55250d7ef4540677 100644
--- a/include/aidge/nodeTester/ConditionalInterpreter.hpp
+++ b/include/aidge/nodeTester/ConditionalInterpreter.hpp
@@ -22,7 +22,7 @@ namespace Aidge{
 /////////////////////////////
 /**
  * @brief class used to register any lambda function without context,
- * it encapsulates the source lambda in a lambda which takes as argument ConditionalData* which are any type.
+ * it encapsulates the source lambda in a lambda taking std::shared_ptr<ConditionalData> arguments, which can hold any type.
  * @see ConditionalData
  */
 class ConditionalRegisterFunction {
@@ -31,12 +31,12 @@ class ConditionalRegisterFunction {
     //////////////////////////
 
     /**
-     * @brief recast the ConditionalData* to the argument type of the lambda
+     * @brief recast the std::shared_ptr<ConditionalData> to the argument type of the lambda
      * @tparam T type of the lambda argument
      * @see ConditionalData
      */
     template <typename T>
-    T safeCastInput(ConditionalData* data) {
+    T safeCastInput(std::shared_ptr<ConditionalData> data) {
         //cnvertion and type cheking
         if (data->isTypeEqualTo<T>()){
             return data->getValue<T>();
@@ -48,14 +48,14 @@ class ConditionalRegisterFunction {
 
 
     /**
-     * @brief recaste the output of the lambda to a  ConditionalData*
+     * @brief recast the output of the lambda to a std::shared_ptr<ConditionalData>
      * @tparam T type of the lambda return
      * @see ConditionalData
      */
     template <typename T>
-    ConditionalData* safeCastOutput(T data) {
+    std::shared_ptr<ConditionalData> safeCastOutput(T data) {
 
-        ConditionalData* out = new ConditionalData;
+        std::shared_ptr<ConditionalData> out = std::make_shared<ConditionalData>();
         out->setValue<T>(data);
 
         return out;
@@ -111,11 +111,11 @@ class ConditionalRegisterFunction {
     };
 
     /////////////////////
-    //change the function to ConditionalData*(std::vector<ConditionalData*>)
+    //change the function to std::shared_ptr<ConditionalData>(std::vector<std::shared_ptr<ConditionalData>>)
     /////////////////////
 
     /**
-     * @brief Converts a function to a ConditionalData*(std::vector<ConditionalData*>).
+     * @brief Converts a function to a std::shared_ptr<ConditionalData>(std::vector<std::shared_ptr<ConditionalData>>).
      * @tparam F The type of the function to convert.
      * @tparam ParamsIdx The indices of the function parameters.
      * @param f The function to convert.
@@ -124,25 +124,31 @@ class ConditionalRegisterFunction {
     template <class F, std::size_t... ParamsIdx>
     auto funcPointer(F f, std::index_sequence<ParamsIdx...>) {
         //wrapp the lambda in a new one that as ConditionalData as inputs and output
-    	return [this,f](std::vector<ConditionalData*>  &args) {
-            if (args.size() != sizeof...(ParamsIdx)){
+    	return [this,f](std::vector<std::shared_ptr<ConditionalData>>& args) {
+            if (args.size() < sizeof...(ParamsIdx)){
                 std::ostringstream errorMessage;
                 errorMessage << "bad Number of argument: get " << args.size() << " need " << sizeof...(ParamsIdx) << "\n";
                 throw std::runtime_error(errorMessage.str());
             }
-    		//assert(args.size() == sizeof...(ParamsIdx));//the size of the vector valide
+    		//the argument vector is used as a stack: consume the last sizeof...(ParamsIdx) entries
+            std::size_t offset = args.size()-sizeof...(ParamsIdx);
 
     		using FuncTraits = function_traits<decltype(f)>;
     		using outType = typename FuncTraits::return_type;
 
-    		outType result = f(safeCastInput<typename FuncTraits::template argument<ParamsIdx>::type>(args[ParamsIdx])...);
+    		outType result = f(safeCastInput<typename FuncTraits::template argument<ParamsIdx>::type>(args[offset+ParamsIdx])...);
+
+            //remove the arguments we consumed
+            for (size_t i = 0; i < sizeof...(ParamsIdx); ++i) {
+                args.pop_back();
+            }
     		//typename
     		return safeCastOutput<outType>(result);
     	};
     }
 
     /**
-     * @brief Converts a function pointer to a ConditionalData*(std::vector<ConditionalData*>).
+     * @brief Converts a function pointer to a std::shared_ptr<ConditionalData>(std::vector<std::shared_ptr<ConditionalData>>).
      * @tparam R The return type of the function.
      * @tparam Params The parameter types of the function.
      * @param f The function pointer to convert.
@@ -154,7 +160,7 @@ class ConditionalRegisterFunction {
     }
 
     /**
-     * @brief Converts a std::function to a ConditionalData*(std::vector<ConditionalData*>).
+     * @brief Converts a std::function to a std::shared_ptr<ConditionalData>(std::vector<std::shared_ptr<ConditionalData>>).
      * @tparam R The return type of the function.
      * @tparam Params The parameter types of the function.
      * @param f The function pointer to convert.
@@ -196,11 +202,18 @@ class ConditionalRegisterFunction {
      * @param datas The vector of input data.
      * @return A pointer to the output ConditionalData object.
      */
-    ConditionalData* run(const std::string key,std::vector<ConditionalData*> & datas);
+    std::shared_ptr<ConditionalData> run(const std::string key, std::vector<std::shared_ptr<ConditionalData>>& datas);
+
+    bool isLambdaRegister(const std::string &key) {
+        return mWlambda.find(key) != mWlambda.end();
+    }
 
     private:
     /// @brief map of name and the converted function.
-    std::map<const std::string, std::function<ConditionalData*(std::vector<ConditionalData*>  &)>> mWlambda;
+    std::map<const std::string, std::function<std::shared_ptr<ConditionalData>(std::vector<std::shared_ptr<ConditionalData>>&)>> mWlambda;
 };
 
 ///////////////////
@@ -227,28 +240,38 @@ class ConditionalInterpreter
      * @brief the registery for the lambda fuction
      * @see ConditionalRegisterFunction
     */
-    ConditionalRegisterFunction mLambdaRegiter;
+    ConditionalRegisterFunction mLambdaRegister;
 
 
-    std::vector<ConditionalData*> mResolution ;
+    std::vector<std::shared_ptr<ConditionalData>> mResolution;
 
-    void clearRes(){
+    // void clearRes(){
 
-        for (std::size_t i = 0; i < mResolution.size(); ++i) {
-            delete mResolution[i];
-        }
-        mResolution.clear();
-    }
+    //     for (std::size_t i = 0; i < mResolution.size(); ++i) {
+    //         delete mResolution[i];
+    //     }
+    //     mResolution.clear();
+    // }
 
     public:
+
+    const std::string mKey;
+
     /**
      * @brief Constructor
      * @param ConditionalExpressions The expression of the test to be performed on the nodes
      */
 
-    ConditionalInterpreter(const std::string ConditionalExpressions);
+    ConditionalInterpreter(const std::string key,const std::string ConditionalExpressions);
 
-    ~ConditionalInterpreter(){clearRes();}
+    ~ConditionalInterpreter() = default;
+
+    /**
+     * @brief get the condition key
+     * @return the key
+    */
+
+    const std::string& getKey();
 
     /**
      * @brief Test a node depending of the ConditionalExpressions
@@ -266,7 +289,7 @@ class ConditionalInterpreter
      */
     void insertLambda(const std::string key,std::function<bool(Aidge::NodePtr)> f);
 
-
+    bool isLambdaRegister(const std::string &key);
     /////
 
     private:
@@ -276,12 +299,12 @@ class ConditionalInterpreter
      * @param NodeOp The node currently being tested
      * @param nodes The AST given by the parsing process
      */
-    std::vector<ConditionalData*> visit(const ASTNodeCh& nodes, const NodePtr NodeOp );
+    std::vector< std::shared_ptr<ConditionalData>> visit(const ASTNodeCh& nodes, const NodePtr NodeOp );
 
     /**
      * @defgroup ASTnodeInterpreterF Functions for interpreting AST nodes
      * @brief For each node type in the AST, function defines the processing to be performed
-     *          they return a  std::vector<ConditionalData*> which corresponds to the value(s) obtained
+     *          they return a std::vector<std::shared_ptr<ConditionalData>> which corresponds to the value(s) obtained
      */
 
     /**
@@ -291,38 +314,38 @@ class ConditionalInterpreter
     void fLambda(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node);
     /**
      * @ingroup ASTnodeInterpreterF
-     * @brief Converted the lexeme to a int and to ConditionalData*
+     * @brief Convert the lexeme to an int and to a std::shared_ptr<ConditionalData>
      */
     void fStrToInteger(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node);
     /**
      * @ingroup ASTnodeInterpreterF
+     * @brief Convert the lexeme to a float and to a std::shared_ptr<ConditionalData>
+     * @brief Converted the lexeme to a float and to  std::shared_ptr<ConditionalData>
      */
     void fStrToFloat(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node);
     /**
      * @ingroup ASTnodeInterpreterF
-     * @brief Converted the lexeme to a str and to ConditionalData*
+     * @brief Convert the lexeme to a str and to a std::shared_ptr<ConditionalData>
      */
     void fStrToStr(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node);
 
     /**
      * @ingroup ASTnodeInterpreterF
-     * @brief makes the == operation between two previously converted ConditionalData*
+     * @brief makes the == operation between two previously converted std::shared_ptr<ConditionalData>
      */
     void fEq(void);
        /**
      * @ingroup ASTnodeInterpreterF
-     * @brief makes the != operation between two previously converted ConditionalData*
+     * @brief makes the != operation between two previously converted std::shared_ptr<ConditionalData>
      */
     void fNeq(void);
     /**
      * @ingroup ASTnodeInterpreterF
-     * @brief makes the && operation between two previously converted ConditionalData* in bool
+     * @brief makes the && operation between two previously converted std::shared_ptr<ConditionalData>, as booleans
      */
     void fAnd(void);
         /**
      * @ingroup ASTnodeInterpreterF
-     * @brief makes the || operation between two previously converted ConditionalData* in bool
+     * @brief makes the || operation between two previously converted std::shared_ptr<ConditionalData>, as booleans
      */
     void fOr(void);
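With `ConditionalData` now held in `std::shared_ptr` and the interpreter keyed at construction, registration looks like the sketch below; the expression syntax and the `test()` entry point are assumptions based on the header comments:

```cpp
#include "aidge/nodeTester/ConditionalInterpreter.hpp"

bool isConvNode(Aidge::NodePtr node) {
    // The first constructor argument is the new mandatory condition key.
    Aidge::ConditionalInterpreter interpreter("testConv", "isConv($)==true");
    // Register the lambda referenced by the expression (name illustrative).
    interpreter.insertLambda("isConv",
        [](Aidge::NodePtr n) { return n->type() == "Conv"; });
    return interpreter.test(node);
}
```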
 
diff --git a/include/aidge/nodeTester/ConditionalParser.hpp b/include/aidge/nodeTester/ConditionalParser.hpp
index a99f5374182f57c0adca3b4d44691ff4e37de44d..c21eca0407b77808287138fd39e33c00d241fb70 100644
--- a/include/aidge/nodeTester/ConditionalParser.hpp
+++ b/include/aidge/nodeTester/ConditionalParser.hpp
@@ -38,7 +38,6 @@ class ConditionalParser{
      */
     ConditionalParser(const std::string ConditionalExpressions);
 
-    virtual ~ConditionalParser() = default;
     /**
      * @brief AST graph creation function
      * @return The AST tree
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 65c7e8ce0e47bd470e2a1499a682ed2f2c8c2dbc..0c285402929ab7b071d732180891de1b738dc4a8 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -16,52 +16,38 @@
 #include <vector>
 #include <cmath>
 #include <memory>
-#include <array>
+#include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
 
-template <std::size_t NUM>
-class Add_Op : public Operator,
-    public Registrable<Add_Op<NUM>, std::string, std::unique_ptr<OperatorImpl>(const Add_Op<NUM>&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, NUM> mInputs;
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
+class Add_Op : public OperatorTensor,
+    public Registrable<Add_Op, std::string, std::unique_ptr<OperatorImpl>(const Add_Op&)> {
 public:
     static constexpr const char* Type = "Add";
 
-    constexpr Add_Op()
-            : Operator(Type)
+    Add_Op(const IOIndex_t nbIn)
+        : OperatorTensor(Type, nbIn, 0, 1)
     {
-        assert(NUM > 0 && "Add should have at least one input");
-        for (std::size_t i = 0; i<NUM; ++i) {
-            mInputs[i] = std::make_shared<Tensor>();
+        if (nbIn == 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
         }
-        setDatatype(DataType::Float32);
     }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Add_Op(const Add_Op<NUM>& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    Add_Op(const Add_Op& op)
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        assert(NUM > 0 && "Add should have at least one input");
-        for (std::size_t i = 0; i<NUM; ++i) {
-            mInputs[i] = std::make_shared<Tensor>();
-        }
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Add_Op<NUM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Add_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -81,88 +67,26 @@ public:
     //     return *in;
     // }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            const auto expectedDims =  mInputs[0]->dims();
-            std::size_t nonEmptyInputTensor = 1;
-            for (; nonEmptyInputTensor<NUM && (!mInputs[nonEmptyInputTensor]->empty()); ++nonEmptyInputTensor) {
-                assert(expectedDims == mInputs[nonEmptyInputTensor]->dims());
-            }
-            if (nonEmptyInputTensor == NUM) {
-                mOutput->resize(expectedDims);
-            }
-        }
-    }
-
-    bool outputDimsForwarded() const override final {
-        std::size_t forwarded = 0;
-        for (; forwarded < NUM && (!mInputs[forwarded]->empty()); ++forwarded) {}
-        return ((forwarded==NUM) && !(mOutput->empty()));
-    }
 
     // void checkDims() const override final {
     //     assert(outputDimsForwarded());
     //     for (const auto& in : mInputs) {
-    //         assert(in->dims() == mOutput->dims());
+    //         assert(in->dims() == mOutputs[0]->dims());
     //     }
     // }
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "Add Operators has only 1 outputs");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
 
 
     void setBackend(const std::string& name) override {
-        mImpl = Registrar<Add_Op<NUM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        for (std::size_t i = 0; i < NUM; ++i) {
-            mInputs[i]->setBackend(name);
-        }
-    }
-
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mImpl = Registrar<Add_Op>::create(name)(*this);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        for (std::size_t i = 0; i < NUM; ++i) {
-            mInputs[i]->setDatatype(datatype);
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            getInput(i)->setBackend(name);
         }
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return NUM; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return NUM; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-        static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName(){
         return {"data_input_0", "data_input_n"};
     }
     static const std::vector<std::string> getOutputsName(){
@@ -170,9 +94,8 @@ public:
     }
 };
 
-template <std::size_t NUM>
-inline std::shared_ptr<Node> Add(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Add_Op<NUM>>(), name);
+inline std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
 }
 }
 
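With the input count moved from the NUM template parameter to a constructor argument, arity is now chosen at runtime. A minimal usage sketch of the new factory, assuming only the headers in the hunk above (the node name is illustrative):

    #include "aidge/operator/Add.hpp"

    // Before this change the arity was a compile-time template parameter:
    //     auto add = Aidge::Add<3>("add1");
    // It is now a runtime argument; nbIn == 0 triggers AIDGE_THROW_OR_ABORT.
    auto add = Aidge::Add(3, "add1");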
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index dfcd0d5b3b4d892f201485e85710d42cd5b71dba..f0f9f6c54ed1953ed31b713ce19edc7a8e594d4a 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -19,7 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,15 +29,11 @@ namespace Aidge {
 enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
-class AvgPooling_Op : public Operator,
+class AvgPooling_Op : public OperatorTensor,
                 public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
                 public StaticAttributes<AvgPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>> {
-private:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
     static constexpr const char *Type = "AvgPooling";
@@ -52,24 +48,19 @@ public:
 
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
-                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {
-        setDatatype(DataType::Float32);
-    }
+                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     AvgPooling_Op(const AvgPooling_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -80,83 +71,73 @@ public:
         return std::make_shared<AvgPooling_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 1 && "operators supports only 3 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInput->empty()) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
+        // check inputs have been associated
+        if (!getInput(0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        if (!(getInput(0)->empty())) {
+            std::array<DimSize_t, DIM + 2> outputDims;
+            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+            outputDims[0] = inputDims[0];
+            outputDims[1] = inputDims[1];
 
             for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                            std::floor(static_cast<float>(mInput->dims()[dim+2] -
+                                            std::floor(static_cast<float>(inputDims[dim+2] -
                                                                     this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
                                             static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
             }
-            outputDims[1] = mInput->dims()[1];
-            outputDims[0] = mInput->dims()[0];
-            mOutput->resize(outputDims);
+            getOutput(0)->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return *(mInput.get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "AvgPooling Operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "AvgPooling Operators has only 1 outputs");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
 
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
+    // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+    //     if (outputIdx != 0) {
+    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "AvgPooling_Op Operator has only one output Tensor.");
+    //     }
+    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+    //         // Offset
+    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
+    //         std::vector<DimSize_t> inputIdxDims = outputIdxDims;
+
+    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+    //             }
+    //         }
+
+    //         // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
+    //         // Width
+    //         std::vector<DimSize_t> inputDims;
+    //         inputDims.push_back(outputDims[0]); // same batch value
+    //         inputDims.push_back(outputDims[1]); // same channel value
+
+    //         for (DimIdx_t i = 0; i < DIM; ++i) {
+    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+    //                         * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
+    //                         + 1
+    //                         + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
+    //             inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
+    //         }
+    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
+    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInput->getIdx(inputIdxDims), inputDims));
+    //         return res;
+    //     }
+    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    // }
 
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -190,4 +171,4 @@ const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
                                                           "KernelDims"};
 }
 
-#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
+#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
\ No newline at end of file
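The refactored computeOutputDims() keeps the usual pooling arithmetic, now read from a local copy of the input dims rather than from mInput. A worked instance of the rule, with illustrative values:

    // outputDims[dim+2] = 1 + floor((inputDims[dim+2] - kernel[dim]) / stride[dim])
    // For a spatial extent of 32 with a kernel of 2 and a stride of 2:
    //     1 + floor((32 - 2) / 2) = 16
    // so a 1x3x32x32 input pooled with 2x2 kernels at stride 2 gives a 1x3x16x16 output.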
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index da7360c8ba3816cdfe1d2d00f80b08808a80f961..09a9bb9efac81431673ef3449f717fbcb9af5108 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -19,27 +19,20 @@
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class BatchNormAttr { Epsilon, Momentum };
 
+enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
-class BatchNorm_Op : public Operator,
+class BatchNorm_Op : public OperatorTensor,
                 public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
                 public StaticAttributes<BatchNormAttr, float, float> {
 public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 5> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>(), std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
-   public:
     static constexpr const char *Type = "BatchNorm";
 
     BatchNorm_Op() = delete;
@@ -49,25 +42,19 @@ public:
     using attr = typename Attributes_::template attr<e>;
 
     constexpr BatchNorm_Op(float epsilon, float momentum)
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 4, 1),
           Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
-                           attr<BatchNormAttr::Momentum>(momentum)),
-          mOutput(std::make_shared<Tensor>()) {
-        setDatatype(DataType::Float32);
-    }
+                           attr<BatchNormAttr::Momentum>(momentum)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     BatchNorm_Op(const BatchNorm_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -87,83 +74,41 @@ public:
     //     return *in;
     // }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 5 && "operators supports only 5 inputs");
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) {
-                if(mInputs[i]->size() != mInputs[0]->dims()[1]) {
-                    mInputs[i]->resize(std::array<DimSize_t, 1>({mInputs[0]->dims()[1]}));
+        // check inputs have been associated
+        bool associated = true;
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            const DimSize_t nbChannels = getInput(0)->dims()[1];
+            for (std::size_t i = nbData(); i < nbInputs(); ++i) {
+                if (getInput(i)->size() != nbChannels) {
+                    // /!\ Input size should be handled BEFORE calling this function
+                    // This should raise an error
+                    getInput(i)->resize(std::array<DimSize_t, 1>({nbChannels}));
                 }
             }
-            mOutput->resize(mInputs[0]->dims());
+            mOutputs[0]->resize(getInput(0)->dims());
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 5 && "operators supports only 5 inputs");
-        return *(mInputs[inputIdx].get()); }
-
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 5 && "BatchNorm Operators supports only 5 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "BatchNorm Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 5 && "operators supports only 5 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string &name) override {
         mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[1]->setBackend(name);
-        mInputs[2]->setBackend(name);
-        mInputs[3]->setBackend(name);
-        mInputs[4]->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[1]->setDatatype(datatype);
-        mInputs[2]->setDatatype(datatype);
-        mInputs[3]->setDatatype(datatype);
-        mInputs[4]->setDatatype(datatype);
+        getInput(1)->setBackend(name);
+        getInput(2)->setBackend(name);
+        getInput(3)->setBackend(name);
+        getInput(4)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 5; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-    static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName() {
         return {"data_input", "scale", "shift", "mean", "variance"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
@@ -187,4 +132,4 @@ template <>
 const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
 }
 
-#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
+#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
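getInputsName() fixes the input layout: index 0 is the data tensor and indices 1 to 4 are scale, shift, mean and variance. computeOutputDims() then sizes the parameter tensors from the channel dimension of input 0. A sketch of the resulting shapes, with illustrative dims:

    // data input dims {N, C, H, W} = {8, 16, 32, 32}:
    //     scale, shift, mean, variance -> each resized to a 1-D tensor of size C = 16
    //     output                       -> resized to the full input dims {8, 16, 32, 32}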
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..01d590aa7425cb62ab665c0078019a6c8ab4a66a
--- /dev/null
+++ b/include/aidge/operator/Concat.hpp
@@ -0,0 +1,134 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_CONCAT_H_
+#define AIDGE_CORE_OPERATOR_CONCAT_H_
+
+#include <numeric>
+#include <vector>
+#include <cmath>
+#include <memory>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ConcatAttr { Axis };
+
+class Concat_Op : public OperatorTensor,
+    public Registrable<Concat_Op, std::string, std::unique_ptr<OperatorImpl>(const Concat_Op&)>,
+    public StaticAttributes<ConcatAttr, DimSize_t> {
+public:
+    static constexpr const char* Type = "Concat";
+
+    using Attributes_ = StaticAttributes<ConcatAttr, DimSize_t>;
+    template <ConcatAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
+    Concat_Op(const IOIndex_t nbIn, const DimSize_t axis)
+        : OperatorTensor(Type, nbIn, 0, 1),
+          Attributes_(attr<ConcatAttr::Axis>(axis))
+    {
+        if (nbIn == 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
+        }
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Concat_Op(const Concat_Op& op)
+        : OperatorTensor(op),
+          Attributes_(op)
+    {
+        mImpl = op.mImpl ? Registrar<Concat_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Concat_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Concat_Op>(*this);
+    }
+
+    // Data operator[](const char* inputName) override final {
+    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
+    //         (strcmp(inputName, "weight") ? mInputs[1] :
+    //         (strcmp(inputName, "bias") ? mInputs[2] :
+    //         nullptr));
+    //     assert((in!=nullptr) && "No such parameter");
+    //     return *in;
+    // }
+
+
+    void computeOutputDims() override final {
+        // Every input is non-empty with the same number of dimensions
+        if (!getInput(0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        bool associated = !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if the first input is empty or the axis is out of range
+        auto outputDims = getInput(0)->dims();
+        const auto firstInputNbDims = getInput(0)->nbDims();
+        for (IOIndex_t i = 1; i < nbInputs(); ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= (getInput(i)->nbDims() == firstInputNbDims);
+            for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
+                if (dim == getAttr<ConcatAttr::Axis>()) {
+                    outputDims[dim] += getInput(i)->dims()[dim];
+                }
+                else {
+                    associated &= (getInput(i)->dims()[dim] == outputDims[dim]);
+                }
+            }
+        }
+        if (associated) {
+            getOutput(0)->resize(outputDims);
+        }
+    }
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Concat_Op>::create(name)(*this);
+        mOutputs[0]->setBackend(name);
+
+        // FIXME: temporary workaround
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            getInput(i)->setBackend(name);
+        }
+    }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input_0", "data_input_n"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const DimIdx_t axis = 0, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
+}
+}
+
+namespace {
+    template <>
+    const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
+        "Axis"
+    };
+}
+
+#endif /* AIDGE_CORE_OPERATOR_CONCAT_H_ */
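computeOutputDims() accumulates the Axis dimension across inputs and requires every other dimension to match. A worked instance plus the matching factory call, with illustrative values:

    // inputs {2, 3, 4} and {2, 5, 4}, Axis = 1:
    //     outputDims starts as {2, 3, 4}; the second input contributes 5 on axis 1 -> {2, 8, 4}
    auto concat = Aidge::Concat(2, 1, "concat1");   // nbIn = 2, axis = 1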
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index b1e3e34b0eff681632d90cb8314ebd8c96722eec..4f0fb1ea2717c1fdf4443c450000ec3a56bb9b5b 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -14,12 +14,13 @@
 
 #include <array>
 #include <cmath>
+#include <cstddef>
 #include <numeric>
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,17 +30,12 @@ namespace Aidge {
 enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
 
 template <DimIdx_t DIM>
-class Conv_Op : public Operator,
+class Conv_Op : public OperatorTensor,
                 public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
                 public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
                                        DimSize_t, std::array<DimSize_t, DIM>> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
-   public:
+public:
     static constexpr const char *Type = "Conv";
 
     Conv_Op() = delete;
@@ -49,32 +45,27 @@ public:
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr Conv_Op(DimSize_t in_channels,
-                      DimSize_t out_channels,
-                      const std::array<DimSize_t, DIM> &kernel_dims,
-                      const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
-        : Operator(Type),
-          Attributes_(attr<ConvAttr::StrideDims>(stride_dims),
-                      attr<ConvAttr::DilationDims>(dilation_dims),
-                      attr<ConvAttr::InChannels>(in_channels),
-                      attr<ConvAttr::OutChannels>(out_channels),
-                      attr<ConvAttr::KernelDims>(kernel_dims)) {
-        setDatatype(DataType::Float32);
-    }
+    constexpr Conv_Op(DimSize_t inChannels,
+                      DimSize_t outChannels,
+                      const std::array<DimSize_t, DIM> &kernelDims,
+                      const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, 1, 2, 1),
+          Attributes_(attr<ConvAttr::StrideDims>(strideDims),
+                      attr<ConvAttr::DilationDims>(dilationDims),
+                      attr<ConvAttr::InChannels>(inChannels),
+                      attr<ConvAttr::OutChannels>(outChannels),
+                      attr<ConvAttr::KernelDims>(kernelDims)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Conv_Op(const Conv_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -98,16 +89,18 @@ public:
 
     // }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
+        // check inputs have been associated
+        bool associated = true;
+        for (IOIndex_t i = 0; i < 3; ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            std::array<DimSize_t, DIM + 2> outputDims{};
+            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
             for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
                 const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
@@ -115,68 +108,76 @@ public:
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
+                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
                               static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
             }
 
             outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
-            outputDims[0] = mInputs[0]->dims()[0];
-            mOutput->resize(outputDims);
+            outputDims[0] = inputDims[0];
+            mOutputs[0]->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "Conv Operators supports only 3 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Conv Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
 
+    // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+    //     if (outputIdx != 0) {
+    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has only one output Tensor.");
+    //     }
+    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+    //         // Offset
+    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
+    //         auto inputIdxDims = outputIdxDims; // batch idx is the same
+    //         inputIdxDims[1] = 0; // each channel is used so start with the first one
+
+    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+    //             }
+    //         }
+
+    //         // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
+    //         // Input
+    //         // same batch value, every input channel is used
+    //         std::vector<DimSize_t> inputDims{outputDims[0], mInputs[0]->dims()[1]};
+    //         for (DimIdx_t i = 0; i < DIM; ++i) {
+    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+    //                         * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+    //                         + 1
+    //                         + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+    //                         * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+    //             inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
+    //         }
+
+    //         // Weight
+    //         // same output value, every input channel is used
+    //         std::vector<DimSize_t> weightDims{outputDims[0], mInputs[0]->dims()[1]};
+    //         weightDims.insert(weightDims.end(), this->template getAttr<ConvAttr::KernelDims>()[0], this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(DIM)]);
+    //         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+    //         weightIdxDims[0] = outputIdxDims[1];
+
+    //         // Bias
+    //         const std::vector<DimSize_t> biasDims{outputDims[0]};
+    //         const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
+
+    //         // Result
+    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
+    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
+    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[1]->getIdx(weightIdxDims), weightDims));
+    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[2]->getIdx(biasIdxDims), biasDims));
+    //         return res;
+    //     }
+    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    // }
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[1]->setBackend(name);
-        mInputs[2]->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-        mInputs[2]->setDatatype(datatype);
+        getInput(1)->setBackend(name);
+        getInput(2)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
@@ -186,32 +187,32 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Conv(DimSize_t in_channels,
-                                  DimSize_t out_channels,
-                                  const std::array<DimSize_t, DIM> &kernel_dims,
+inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
+                                  DimSize_t outChannels,
+                                  const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
-                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+                                  const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims), name);
     // addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
-    addProducer(conv, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
-    addProducer(conv, 2, std::array<DimSize_t, 1>({out_channels}), "b");
+    addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
+    addProducer(conv, 2, std::array<DimSize_t, 1>({outChannels}), "b");
     return conv;
 }
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> Conv(
-    DimSize_t in_channels,
-    DimSize_t out_channels,
-    DimSize_t const (&kernel_dims)[DIM],
+    DimSize_t inChannels,
+    DimSize_t outChannels,
+    DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    return Conv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, dilation_dims);
+    return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims);
 }
 }  // namespace Aidge
 
@@ -226,4 +227,4 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
 };
 }
 
-#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
+#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
\ No newline at end of file
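computeOutputDims() folds dilation into an effective kernel extent before the usual stride division. A worked instance of that arithmetic and a call to the renamed factory, with illustrative values:

    // kernelExtent = dilation * (kernel - 1) + 1
    // For kernel 3, dilation 2, stride 1 on a spatial extent of 32:
    //     kernelExtent = 2 * (3 - 1) + 1 = 5
    //     out = 1 + floor((32 - 5) / 1) = 28
    const std::array<Aidge::DimSize_t, 2> kernel{3, 3};
    auto conv = Aidge::Conv<2>(3, 16, kernel, "conv1");   // inChannels, outChannels, kernelDims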
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 4caec2032a3c61529d452ae855f00c1da411af10..ca6401e0ed3ac888f12858853f0d8f494c226041 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -19,7 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,20 +29,14 @@ namespace Aidge {
 enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
 
 template <DimIdx_t DIM>
-class ConvDepthWise_Op : public Operator,
+class ConvDepthWise_Op : public OperatorTensor,
                 public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
                 public StaticAttributes<ConvDepthWiseAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        DimSize_t,
                                        std::array<DimSize_t, DIM>> {
-   public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
-                                                      std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
-   public:
+public:
     static constexpr const char *Type = "ConvDepthWise";
 
     ConvDepthWise_Op() = delete;
@@ -55,29 +49,25 @@ class ConvDepthWise_Op : public Operator,
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+    constexpr ConvDepthWise_Op(const DimSize_t nbChannels,
+                               const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
                       attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-                      attr<ConvDepthWiseAttr::Channels>(0),
-                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {
-        setDatatype(DataType::Float32);
-    }
+                      attr<ConvDepthWiseAttr::Channels>(nbChannels),
+                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -88,16 +78,20 @@ class ConvDepthWise_Op : public Operator,
         return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
+        // check inputs have been associated
+        // TODO: add a check of input dimensions?
+        bool associated = true;
+        for (IOIndex_t i = 0; i < 3; ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
+            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
             for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
                 const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
@@ -105,10 +99,9 @@ class ConvDepthWise_Op : public Operator,
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
+                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
                               static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
             }
-            this->template getAttr<ConvDepthWiseAttr::Channels>() = mInputs[0]->dims()[1];
             // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
             // if (mInputs[1]->empty()) {
             //     mInputs[1]->resize(weightDims);
@@ -116,66 +109,57 @@ class ConvDepthWise_Op : public Operator,
             // if (mInputs[2]->empty()) {
             //     mInputs[2]->resize({mInputs[0]->dims()[1]});
             // }
-            outputDims[1] = mInputs[0]->dims()[1];
-            outputDims[0] = mInputs[0]->dims()[0];
-            mOutput->resize(outputDims);
+            outputDims[1] = inputDims[1];
+            outputDims[0] = inputDims[0];
+            mOutputs[0]->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "ConvDepthWise Operators supports only 3 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "ConvDepthWise Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
+    // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+    //     if (outputIdx != 0) {
+    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "ConvDepthWise_Op Operator has only one output Tensor.");
+    //     }
+    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+    //         // Offset
+    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
+    //         auto inputIdxDims = outputIdxDims; // batch idx is the same
+
+    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+    //             }
+    //         }
+
+    //         // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
+    //         // Width
+    //         std::vector<DimSize_t> inputDims;
+    //         inputDims.push_back(outputDims[0]); // same batch value
+    //         inputDims.push_back(outputDims[1]); // same channel value
+
+    //         for (DimIdx_t i = 0; i < DIM; ++i) {
+    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+    //                         * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+    //                         + 1
+    //                         + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+    //                         * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+    //             inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
+    //         }
+    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
+    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
+    //         return res;
+    //     }
+    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    // }
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[1]->setBackend(name);
-        mInputs[2]->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-        mInputs[2]->setDatatype(datatype);
+        getInput(1)->setBackend(name);
+        getInput(2)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
@@ -185,27 +169,29 @@ class ConvDepthWise_Op : public Operator,
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
+inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
+                                           const std::array<DimSize_t, DIM> &kernelDims,
                                            const std::string& name = "",
-                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                           const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+                                           const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                                           const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), name);
-    addProducer(convDW, 1, std::array<DimSize_t,0>({}), "w");
-    addProducer(convDW, 2, std::array<DimSize_t,0>({}), "b");
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name);
+    addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
+    addProducer(convDW, 2, std::array<DimSize_t, 1>({nbChannels}), "b");
     return convDW;
 }
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> ConvDepthWise(
-    DimSize_t const (&kernel_dims)[DIM],
+    const DimSize_t nbChannels,
+    DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    return ConvDepthWise(to_array(kernel_dims), name, stride_dims, dilation_dims);
+    return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims);
 }
 }  // namespace Aidge
 
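Because the channel count is now a constructor argument instead of being inferred during computeOutputDims(), the factory can seed correctly shaped weight and bias producers up front. A sketch of the shapes it creates, with illustrative values:

    // nbChannels = 16, 3x3 kernels:
    //     weight producer dims: append(16, append(1, {3, 3})) = {16, 1, 3, 3}
    //     bias producer dims:   {16}
    const std::array<Aidge::DimSize_t, 2> kernel{3, 3};
    auto convDW = Aidge::ConvDepthWise<2>(16, kernel, "dw1");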
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 4213f979cf9d675f523a228095edc5606f9412ee..ba76c0bdecfaf86644a3336a1076064b96b36046 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -17,42 +17,30 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 
-class Div_Op : public Operator,
+class Div_Op : public OperatorTensor,
     public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
     static constexpr const char* Type = "Div";
 
-    Div_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Div_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Div_Op(const Div_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,73 +51,18 @@ public:
         return std::make_shared<Div_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty())
-            mOutput->resize(mInputs[0]->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Div Operator has 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Div Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
+    void computeOutputDims() override final;
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Div_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
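computeOutputDims() is now only declared in the header; its definition moves out of line. A plausible sketch of that definition, assuming it keeps the behaviour of the removed inline version (resize the output to the first input's dims) together with the missing-input guard the other refactored operators use:

    void Aidge::Div_Op::computeOutputDims() {
        // Guard against unassociated inputs, as FC and MatMul now do.
        if (!getInput(0) || !getInput(1)) {
            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
        }
        if (!getInput(0)->empty()) {
            mOutputs[0]->resize(getInput(0)->dims());
        }
    }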
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index b949527c51b9330077dd3bd8f8b4bf1f1b9d719c..4cece292cb322c0a58f96380eb0f0083771d3c19 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -21,7 +21,7 @@
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,16 +29,11 @@
 namespace Aidge {
 enum class FCAttr { OutChannels, NoBias };
 
-class FC_Op : public Operator,
+class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
                                  std::unique_ptr<OperatorImpl>(const FC_Op &)>,
               public StaticAttributes<FCAttr, DimSize_t, bool> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "FC";
 
@@ -48,26 +43,21 @@ public:
     template <FCAttr e> using attr = typename Attributes_::template attr<e>;
 
     FC_Op(DimSize_t out_channels, bool noBias)
-            : Operator(Type),
-            Attributes_(
-                attr<FCAttr::OutChannels>(out_channels),
-                attr<FCAttr::NoBias>(noBias))
-    {
-        setDatatype(DataType::Float32);
-    }
+    : OperatorTensor(Type, 1, 2, 1),
+      Attributes_(
+        attr<FCAttr::OutChannels>(out_channels),
+        attr<FCAttr::NoBias>(noBias))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     FC_Op(const FC_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -78,7 +68,7 @@ public:
         return std::make_shared<FC_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
         assert(inputIdx < 3 && "operator supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         if (inputIdx == 2) {
@@ -86,78 +76,35 @@ public:
             assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
         }
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-        if (inputIdx == 0 && mInputs[0]->nbDims() == 1)
-            mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, mInputs[inputIdx]->size()}));
+        if (inputIdx == 0 && getInput(0)->nbDims() == 1)
+            mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, getInput(inputIdx)->size()}));
     }
 
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            // <in_features**, out_channels>
-            std::array<DimSize_t, 2> weightDims = {this->template getAttr<FCAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
-            // <out_channels, batch>
-            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<FCAttr::OutChannels>()};
-
-            mInputs[1]->resize(weightDims);
-            mOutput->resize(outputDims);
+        bool associated = true;
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            // <batch, OutChannels>
+            mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()});
         }
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(const IOIndex_t /*inputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "FC Operators supports only 3 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "FC Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 3 && "operators supports only 3 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
     }
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<FC_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-        mInputs[2]->setBackend(name);
-    }
-
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-        mInputs[2]->setDatatype(datatype);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
+        getInput(2)->setBackend(name);
     }
 
-
-    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
@@ -166,11 +113,11 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") {
+inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every case
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
-    addProducer(fc, 1, std::array<DimSize_t, 2>({out_channels, 1}), "w");
-    addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({out_channels})), "b"); // already sets bias dims
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(outChannels, noBias), name);
+    addProducer(fc, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
+    addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({outChannels})), "b"); // already sets bias dims
     return fc;
 }
 } // namespace Aidge
@@ -181,4 +128,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
 
-#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
+#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
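The FC factory now also takes the input channel count, so the weight producer receives its final {outChannels, inChannels} shape at graph-construction time instead of a {outChannels, 1} placeholder. Illustrative call (values are made up):

    // New signature: FC(inChannels, outChannels, noBias, name)
    auto fc = Aidge::FC(784, 128, /*noBias=*/false, "fc1");
    // weight producer: {128, 784}; bias producer: {128}
    // old call: Aidge::FC(128, false, "fc1");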
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 55ccbf1516fa79663d57e1e44bc4017bc5c8b843..505c5344990453c8f4ab84fa3893e75b216d7a54 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -19,7 +19,7 @@
 #include <cstring>
 
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -27,50 +27,26 @@
 
 namespace Aidge {
 class GenericOperator_Op
-    : public Operator,
+    : public OperatorTensor,
       public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
       public DynamicAttributes {
-   private:
+private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
-    IOIndex_t mNbDataIn;
-    IOIndex_t mNbIn;
-    IOIndex_t mNbOut;
-    std::vector<std::shared_ptr<Tensor>> mInputs;
-    std::vector<std::shared_ptr<Tensor>> mOutputs;
     ComputeDimsFunc mComputeOutputDims;
 
-   public:
-    GenericOperator_Op(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut)
-        : Operator(type), mNbDataIn(nbDataIn), mNbIn(nbIn), mNbOut(nbOut)
-    {
-        mInputs = std::vector<std::shared_ptr<Tensor>>(nbIn);
-        for (std::size_t i = 0; i < nbIn; ++i) {
-            mInputs[i] = std::make_shared<Tensor>();
-        }
-        mOutputs = std::vector<std::shared_ptr<Tensor>>(nbOut);
-        for (std::size_t i = 0; i < nbOut; ++i) {
-            mOutputs[i] = std::make_shared<Tensor>();
-        }
-    }
+public:
+    GenericOperator_Op(const char *type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
+        : OperatorTensor(type, nbData, nbParam, nbOut)
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     GenericOperator_Op(const GenericOperator_Op& op)
-        : Operator(op.type().c_str()), mNbDataIn(op.mNbDataIn), mNbIn(op.mNbIn), mNbOut(op.mNbOut)
-    {
-        // cpy-ctor
-        mInputs = std::vector<std::shared_ptr<Tensor>>(mNbIn);
-        for (std::size_t i = 0; i < mNbIn; ++i) {
-            mInputs[i] = std::make_shared<Tensor>();
-        }
-        mOutputs = std::vector<std::shared_ptr<Tensor>>(mNbOut);
-        for (std::size_t i = 0; i < mNbOut; ++i) {
-            mOutputs[i] = std::make_shared<Tensor>(*op.mOutputs[i]);
-        }
-    }
+        : OperatorTensor(op)
+    {}
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -87,28 +63,19 @@ class GenericOperator_Op
         mComputeOutputDims = func;
     }
 
-    // Override Virtual Opertor methods
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < mNbIn && "operators supports only x inputs");
-
-        if (strcmp(data->type(), Tensor::Type) == 0) {
-            // TODO: associate input only if of type Tensor, otherwise do nothing
-            mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-        }
-    }
 
     void computeOutputDims() override final {
         if (mComputeOutputDims) {
-            std::vector<std::vector<size_t>> inputsDims(mNbIn, std::vector<size_t>());
-            for (std::size_t i = 0; i < mNbIn; ++i) {
-                if (mInputs[i]) {
-                    inputsDims[i] = mInputs[i]->dims();
+            std::vector<std::vector<size_t>> inputsDims(nbInputs(), std::vector<size_t>());
+            for (std::size_t i = 0; i < nbInputs(); ++i) {
+                if (getInput(i)) {
+                    inputsDims[i] = getInput(i)->dims();
                 }
             }
 
             const auto& outputsDims = mComputeOutputDims(inputsDims);
-            assert(outputsDims.size() == mNbOut && "The provided ComputeDimsFunc function returns the wrong number of outputs");
-            for (std::size_t i = 0; i < mNbOut; ++i) {
+            assert(outputsDims.size() == nbOutputs() && "The provided ComputeDimsFunc function returns the wrong number of outputs");
+            for (std::size_t i = 0; i < nbOutputs(); ++i) {
                 mOutputs[i]->resize(outputsDims[i]);
             }
         }
@@ -127,47 +94,11 @@ class GenericOperator_Op
         }
     }
 
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
-        printf("Info: using getRawInput() on a GenericOperator.\n");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
-        printf("Info: using input() on a GenericOperator.\n");
-        return *mInputs[inputIdx];
-    }
-
-
-    std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < mNbIn) && "input index out of range for this instance of GenericOperator");
-        printf("Info: using getInput() on a GenericOperator.\n");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
-        printf("Info: using getOutput() on a GenericOperator.\n");
-        return mOutputs[outputIdx];
-    }
-
-
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
-        printf("Info: using getRawOutput() on a GenericOperator.\n");
-        return std::static_pointer_cast<Data>(mOutputs[outputIdx]);
-    }
-
-    Tensor& output(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx < mNbOut) && "output index out of range for this instance of GenericOperator");
-        printf("Info: using output() on a GenericOperator.\n");
-        return *mOutputs[outputIdx];
-    }
 
     ~GenericOperator_Op() = default;
 
     void setBackend(const std::string & /*name*/) override { printf("setBackend: not available yet.\n"); }
-    void setDatatype(const DataType & /*datatype*/) override { printf("setDatatype: not available yet.\n"); }
+    void setDataType(const DataType& /*datatype*/) const override { printf("setDataType: not available yet.\n"); }
     void forward() override final {
         if(mImpl){
             mImpl->forward();
@@ -182,9 +113,6 @@ class GenericOperator_Op
             printf("backward: No implementation is linked.\n");
         }
     }
-    inline IOIndex_t nbInputs() const noexcept override final { return mNbIn; };
-    inline IOIndex_t nbDataInputs() const noexcept override final { return mNbDataIn; };
-    inline IOIndex_t nbOutputs() const noexcept override final { return mNbOut; };
 };
 
 /**
@@ -197,9 +125,9 @@ class GenericOperator_Op
  * @param name (optional) name of the Operator.
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
  */
-inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut,
+inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
                                              const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbDataIn, nbIn, nbOut), name);
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
 }
 }  // namespace Aidge
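Note the changed meaning of the factory arguments: the middle two parameters are now the number of data inputs and of parameter inputs (nbData, nbParam), matching the OperatorTensor constructor, instead of the old (nbDataIn, nbIn) pair. A hypothetical C++ usage sketch, assuming the compute-dims setter is named setComputeOutputDims:

    // One data input, no parameter inputs, one output.
    auto node = Aidge::GenericOperator("MyOp", 1, 0, 1, "my_op");
    auto op = std::static_pointer_cast<Aidge::GenericOperator_Op>(node->getOperator());
    // Forward the input dims unchanged.
    op->setComputeOutputDims([](const std::vector<std::vector<std::size_t>>& dims) { return dims; });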
 
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c5cd9bb62e0097c9a0e646caaf14cddd73bf512d
--- /dev/null
+++ b/include/aidge/operator/Identity.hpp
@@ -0,0 +1,126 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_IDENTITY_H_
+#define AIDGE_CORE_OPERATOR_IDENTITY_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Identity_Op is a helper operator made to ease the declaration of MetaNodes.
+ * This Operator has no implementation; it simply forwards its input Tensor.
+ * Note: errors may occur if new methods relying on an implementation are added to Operator,
+ * as this class would then need to be updated to remove its use of Impl.
+ *
+ */
+class Identity_Op : public OperatorTensor,
+    public Registrable<Identity_Op, std::string, std::unique_ptr<OperatorImpl>(const Identity_Op&)> {
+public:
+    static constexpr const char* Type = "Identity";
+
+    Identity_Op()
+            : OperatorTensor(Type, 1, 0, 0)
+    {
+        mImpl = std::make_shared<OperatorImpl>(*this);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Identity_Op(const Identity_Op& op)
+        : OperatorTensor(op)
+    {
+        mImpl = std::make_shared<OperatorImpl>(*this);
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Identity_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Identity_Op>(*this);
+    }
+
+    void computeOutputDims() override final {} // Do nothing
+
+    bool outputDimsForwarded() const override final {
+        if (mInputs[0])
+            return !mInputs[0]->empty();
+        else
+            return false;
+    }
+
+
+    void forward() override final { runHooks(); }
+
+    void backward() override final { }
+
+    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override final {
+        if (strcmp(data->type(), "Tensor") != 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as outputs", type().c_str());
+        }
+        if (outputIdx >= nbInputs()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbInputs());
+        }
+        *mInputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override final {
+        if (strcmp(data->type(), "Tensor") != 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as outputs", type().c_str());
+        }
+        if (outputIdx >= nbInputs()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbInputs());
+        }
+        *mInputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
+    }
+
+    const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const override final {
+        if (outputIdx >= nbInputs()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbInputs());
+        }
+        return mInputs[outputIdx];
+    }
+    void setBackend(const std::string& /*name*/) override final {
+        // setBackend does nothing: the Identity operator has no backend, it simply passes its input Tensor through
+    }
+    void setDataType(const DataType& /*dataType*/) const override final {
+        // setDataType does nothing: the Identity operator has no backend, it simply passes its input Tensor through
+    }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Identity(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Identity_Op>(), name);
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_IDENTITY_H_ */
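Typical use, following the doc-comment above: an Identity node can close a MetaNode boundary without adding any computation, since getOutput() returns the input Tensor itself. Hypothetical snippet:

    // No backend, no implementation work: the node only aliases its input.
    auto skip = Aidge::Identity("skip");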
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index bcdcbc7cabd8eda46a7c0c4930f317e562fb46a0..800c8c61d876b6f33cce1af3365179b7eb14b68d 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -17,7 +17,7 @@
 
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -29,14 +29,9 @@ enum class LeakyReLUAttr {
     NegativeSlope
 };
 
-class LeakyReLU_Op : public Operator,
+class LeakyReLU_Op : public OperatorTensor,
     public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
     public StaticAttributes<LeakyReLUAttr, float> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "LeakyReLU";
 
@@ -46,25 +41,20 @@ public:
     template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
 
     LeakyReLU_Op(float negativeSlope)
-            : Operator(Type),
-            Attributes_(
-                attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
-    {
-        setDatatype(DataType::Float32);
-    }
+        : OperatorTensor(Type, 1, 0, 1),
+          Attributes_(
+            attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     LeakyReLU_Op(const LeakyReLU_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -75,69 +65,17 @@ public:
         return std::make_shared<LeakyReLU_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "LeakyReLU Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "LeakyReLU Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-        static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
     static const std::vector<std::string> getOutputsName(){
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index eed1ec04535aa5896aa3d01a27d8023d37a42183..23c12d45802e25f29891c48164acfb2d3ad137ac 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -21,7 +21,7 @@
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,15 +29,11 @@
 namespace Aidge {
 enum class MatMulAttr { OutChannels };
 
-class MatMul_Op : public Operator,
+class MatMul_Op : public OperatorTensor,
               public Registrable<MatMul_Op,
                                  std::string,
                                  std::unique_ptr<OperatorImpl>(const MatMul_Op &)>,
               public StaticAttributes<MatMulAttr, DimSize_t> {
-public:
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "MatMul";
 
@@ -47,25 +43,20 @@ public:
     template <MatMulAttr e> using attr = typename Attributes_::template attr<e>;
 
     MatMul_Op(DimSize_t out_channels)
-            : Operator(Type),
+            : OperatorTensor(Type, 1, 1, 1),
             Attributes_(
                 attr<MatMulAttr::OutChannels>(out_channels))
-    {
-        setDatatype(DataType::Float32);
-    }
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     MatMul_Op(const MatMul_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -76,78 +67,31 @@ public:
         return std::make_shared<MatMul_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operators supports only 2 inputs");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInputs[0]->empty()) {
-            // <in_features**, out_channels>
-            std::array<DimSize_t, 2> weightDims = {this->template getAttr<MatMulAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
-            // <out_channels, batch>
-            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()};
-
-            mInputs[1]->resize(weightDims);
-            mOutput->resize(outputDims);
+        bool associated = true;
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            // <batch, OutChannels>
+            mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()});
         }
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operators supports only 2 inputs");
-        return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "MatMul Operators has 2 inputs");
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "MatMul Operators has 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operators supports only 2 inputs");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
     }
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<MatMul_Op>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-    }
-
-
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight"};
     }
@@ -156,10 +100,10 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> MatMul(DimSize_t out_channels, const std::string& name = "") {
+inline std::shared_ptr<Node> MatMul(DimSize_t inChannels, DimSize_t outChannels, const std::string& name = "") {
     // FIXME: properly handle default w initialization in every case
-    auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(out_channels), name);
-    addProducer(matmul, 1, std::array<DimSize_t, 2>({out_channels, 1}), "w");
+    auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(outChannels), name);
+    addProducer(matmul, 1, std::array<DimSize_t, 2>({outChannels, inChannels}), "w");
     return matmul;
 }
 } // namespace Aidge
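MatMul follows the same API change as FC: the input channel count is now explicit, so the weight producer is created with its final {outChannels, inChannels} shape. Illustrative call (values are made up):

    // New signature: MatMul(inChannels, outChannels, name)
    auto mm = Aidge::MatMul(64, 10, "proj");
    // weight producer: {10, 64}; old call Aidge::MatMul(10, "proj") produced {10, 1}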
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index bcf47f13cc34132f668ea1ffcb2c91ed6f06f44d..ad50a27a94a2217c94445fb556c84ec7f121c6b9 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -19,7 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -29,17 +29,12 @@ namespace Aidge {
 enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
-class MaxPooling_Op : public Operator,
+class MaxPooling_Op : public OperatorTensor,
                 public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
                 public StaticAttributes<MaxPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        bool> {
-private:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char *Type = "MaxPooling";
 
@@ -55,26 +50,21 @@ public:
     constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                             bool ceil_mode = false)
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
                       attr<MaxPoolingAttr::KernelDims>(kernel_dims),
-                      attr<MaxPoolingAttr::CeilMode>(ceil_mode)),
-          mOutput(std::make_shared<Tensor>()) {
-        setDatatype(DataType::Float32);
-    }
+                      attr<MaxPoolingAttr::CeilMode>(ceil_mode))
+        {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     MaxPooling_Op(const MaxPooling_Op<DIM>& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -85,17 +75,14 @@ public:
         return std::make_shared<MaxPooling_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 1 && "operators supports only 3 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInput->empty()) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
+        if (!getInput(0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        if (!(getInput(0)->empty())) {
+            std::array<DimSize_t, DIM + 2> outputDims{};
+            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
             std::function<float(float)> roundingFunction;
             if (this->template getAttr<MaxPoolingAttr::CeilMode>()) {
@@ -106,69 +93,25 @@ public:
 
             for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                            roundingFunction(static_cast<float>(mInput->dims()[dim+2] -
+                                            roundingFunction(static_cast<float>(inputDims[dim+2] -
                                                                     this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
                                             static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
             }
-            outputDims[1] = mInput->dims()[1];
-            outputDims[0] = mInput->dims()[0];
-            mOutput->resize(outputDims);
+            outputDims[1] = inputDims[1];
+            outputDims[0] = inputDims[0];
+            mOutputs[0]->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return *(mInput.get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "MaxPooling Operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "MaxPooling Operators has only 1 outputs");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
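A worked instance of the formula above: with CeilMode false the rounding function is std::floor, so each spatial dim becomes 1 + floor((in - kernel) / stride).

    // Illustrative sizes: input {1, 3, 32, 32}, kernel {2, 2}, stride {2, 2}
    //   outputDims[2] = outputDims[3] = 1 + floor((32 - 2) / 2) = 16
    //   batch and channels pass through: the output resizes to {1, 3, 16, 16}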
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index b9d04967819805397389aa8f29a786aa9c917b2b..991c1c60dbd7eab79c132447b78cc429a928426b 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -12,18 +12,16 @@
 #ifndef AIDGE_CORE_OPERATOR_METAOPERATOR_H_
 #define AIDGE_CORE_OPERATOR_METAOPERATOR_H_
 
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 
 namespace Aidge {
-class MetaOperator_Op : public Operator,
+class MetaOperator_Op : public OperatorTensor,
                 public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)> {
 public:
-    std::vector<std::shared_ptr<Tensor>> mInputs;
-    std::vector<std::shared_ptr<Tensor>> mOutputs; // These are shared with micro-graph outputs tensors
-
+    // Output Tensors are shared with the micro-graph's outputs
     // Micro-graph handling:
     std::shared_ptr<GraphView> mGraph; // Meta operator micro-graph
     std::shared_ptr<SequentialScheduler> mScheduler;
@@ -36,11 +34,9 @@ public:
      * @param op Operator to copy.
      */
     MetaOperator_Op(const MetaOperator_Op& op)
-        : Operator(op.type().c_str()),
+        : OperatorTensor(op),
           mGraph(op.mGraph->clone())
-    {
-        // cpy-ctor
-    }
+    {}
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -58,7 +54,7 @@ public:
         return mScheduler;
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
 
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
@@ -71,46 +67,8 @@ public:
     void computeOutputDims() override final {
         // Forward dims of micro-graph
         mGraph->forwardDims();
-
-        // Associate outputs to micro-graph outputs for custom implementation
-        for (size_t outputIdx = 0; outputIdx < mGraph->getOrderedOutputs().size(); ++outputIdx) {
-            const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
-            mOutputs[outputIdx] = outputOp.first->getOperator()->getOutput(outputOp.second);
-        }
-    }
-
-    bool outputDimsForwarded() const override final { return !(mOutputs[0]->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < mInputs.size() && "inputIdx out of range");
-        return *(mInputs[inputIdx].get());
-    }
-
-    inline Tensor& output(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
-        return *(mOutputs[outputIdx].get());
     }
 
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < mInputs.size() && "inputIdx out of range");
-        return mInputs[inputIdx];
-    }
-
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
-        return mOutputs[outputIdx];
-    }
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < mInputs.size() && "inputIdx out of range");
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
-        return std::static_pointer_cast<Data>(mOutputs[outputIdx]);
-    }
 
     void setBackend(const std::string &name) override {
         if (Registrar<MetaOperator_Op>::exists({name, type()})) {
@@ -124,17 +82,13 @@ public:
         mGraph->setBackend(name);
     }
 
-    void setDatatype(const DataType &datatype) override {
+    void setDataType(const DataType &datatype) const override {
         // The micro-graph should always be set to the right data type, since it
         // shares input/output tensors.
         // Input/output tensors data type are updated here.
-        mGraph->setDatatype(datatype);
+        mGraph->setDataType(datatype);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return mGraph->inputs().size(); }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return mGraph->dataInputs().size(); }
-    inline IOIndex_t nbOutputs() const noexcept override final { return mGraph->outputs().size(); }
-
     NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
     NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override;
     NbElts_t getNbProducedData(IOIndex_t outputIdx) const override;
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index a53ce18f73cf121d24d400edb5a0c8b699c8228c..615b8960403270efa1fe97235dbfeeb129338d5b 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -54,7 +54,8 @@ inline std::shared_ptr<Node> PaddedConv(
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
+inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
@@ -62,7 +63,7 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
 
     auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, std::array<DimSize_t,0>({}), "w");
@@ -73,13 +74,14 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> PaddedConvDepthWise(
+    const DimSize_t nb_channels,
     DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
 {
-    return PaddedConvDepthWise(to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
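The padded depthwise helpers track the ConvDepthWise change: the channel count comes first and is forwarded to the inner ConvDepthWise_Op, while the padding arguments keep their 2*DIM layout. Illustrative call (sizes are made up):

    auto pdw = Aidge::PaddedConvDepthWise(16,             // nb_channels (new first argument)
                                          {3, 3},         // kernel dims
                                          "pdw1",
                                          {1, 1},         // stride
                                          {1, 1, 1, 1});  // padding, 2*DIM values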
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 4ea79fe52622b22f8ea8fbd9191d50d45e26acac..5b9ab4eb8c3924133f32ddfeb0a5f05963381771 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -12,47 +12,38 @@
 #ifndef AIDGE_CORE_OPERATOR_MUL_H_
 #define AIDGE_CORE_OPERATOR_MUL_H_
 
-#include <cassert>
 #include <memory>
+#include <string>
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 
-class Mul_Op : public Operator,
+/**
+ * @brief Tensor element-wise multiplication.
+ */
+class Mul_Op : public OperatorTensor,
     public Registrable<Mul_Op, std::string, std::unique_ptr<OperatorImpl>(const Mul_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Mul";
 
-    Mul_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Mul_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Mul_Op(const Mul_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,73 +54,17 @@ public:
         return std::make_shared<Mul_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty())
-            mOutput->resize(mInputs[0]->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Mul Operator has 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Mul Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
+    void computeOutputDims() override final;
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Mul_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -141,6 +76,6 @@ public:
 inline std::shared_ptr<Node> Mul(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
 }
-}
+} // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
+#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
\ No newline at end of file
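Editor's note: `computeOutputDims()` for Mul (and likewise Pow and Sub below) is now only declared in the header; its out-of-line definition is not part of this patch. As a reference, a minimal sketch of what that definition could look like, mirroring the inline body removed above (output resized to the first input's dimensions; whether the real definition also handles broadcasting is an assumption):

```cpp
#include "aidge/operator/Mul.hpp"
#include "aidge/utils/ErrorHandling.hpp"  // assumed location of AIDGE_THROW_OR_ABORT

// Hypothetical out-of-line definition (e.g. in src/operator/Mul.cpp).
void Aidge::Mul_Op::computeOutputDims() {
    // both inputs must be associated before dims can be forwarded
    if (!getInput(0) || !getInput(1)) {
        AIDGE_THROW_OR_ABORT(std::runtime_error,
                             "Every input should be associated with a Tensor");
    }
    if (!getInput(0)->empty()) {
        mOutputs[0]->resize(getInput(0)->dims());  // same shape as the first input
    }
}
```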
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 903b6362adf3db0c867dc419086e0cb6ddaa65c7..b0f8435bd0126cf3fba9f956a432017585a4d873 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -15,50 +15,91 @@
 #include <memory>
 #include <string>
 #include <vector>
+#include <utility>
+#include <cstddef>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Data.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/hook/Hook.hpp"
 
 namespace Aidge {
 
+enum class OperatorType {
+    Data,
+    Tensor
+};
+
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
-  std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
-  std::map<std::string, std::shared_ptr<Hook>> mHooks;
+    std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
+    std::map<std::string, std::shared_ptr<Hook>> mHooks;
 
 private:
-  std::string mType;
+    std::string mType;
+    const OperatorType mOperatorType;
+    const IOIndex_t mNbData;
+    const IOIndex_t mNbParam;
+    const IOIndex_t mNbOut;
 
 public:
-  Operator() = delete;
-  Operator(const char* type) : mType(type) {}
-  virtual std::shared_ptr<Operator> clone() const = 0;
-  virtual ~Operator();
-
-  Operator(const Operator& op):
-    std::enable_shared_from_this<Operator>()
-  {
-    mType = op.mType;
-    mImpl = nullptr;
-    // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
-    // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
-    // Hooks are not copied.
-  }
+    Operator() = delete;
+    Operator(const char* type, const IOIndex_t nbData, const IOIndex_t nbParam, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
+    : mType(type),
+      mOperatorType(operatorType),
+      mNbData(nbData),
+      mNbParam(nbParam),
+      mNbOut(nbOut)
+    {
+        // ctor
+    }
+
+    Operator(const Operator& op):
+        std::enable_shared_from_this<Operator>(),
+        mOperatorType(op.mOperatorType),
+        mNbData(op.mNbData),
+        mNbParam(op.mNbParam),
+        mNbOut(op.mNbOut)
+    {
+        mType = op.mType;
+        mImpl = nullptr;
+        // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
+        // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
+        // Hooks are not copied.
+    }
+
+    virtual ~Operator() noexcept;
 
 public:
+    virtual std::shared_ptr<Operator> clone() const = 0;
 
-    virtual void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) = 0;
-    virtual void computeOutputDims() = 0;
-    virtual bool outputDimsForwarded() const = 0;
+    virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
+    /**
+     * @brief For a given output feature area, compute the associated receptive
+     * field for each data input.
+     * @param firstIdx First index of the output feature.
+     * @param outputDims Size of output feature.
+     * @param outputIdx Index of the output. Default 0.
+     * @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
+     */
+    // virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
+
+    /**
+     * @brief Set the specified input by performing a deep copy of the given data.
+     * The pointer itself is not changed, thus keeping the current connections.
+     * @param inputIdx Index of the input to set.
+     */
+    virtual void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
+    virtual void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) = 0;
     virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0;
-    virtual std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const = 0;
-    virtual Tensor& input(const IOIndex_t /*inputIdx*/) const = 0;
+    /**
+     * @brief Set the specified output by performing a deep copy of the given data.
+     * The pointer itself is not changed, thus keeping the current connections.
+     * @param outputIdx Index of the output to set.
+     */
+    virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) = 0;
+    virtual void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) = 0;
     virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
-    virtual std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const = 0;
-    virtual Tensor& output(const IOIndex_t /*outputIdx*/) const = 0;
 
     std::shared_ptr<Hook> getHook(std::string hookName) {
         return mHooks[hookName];
@@ -74,7 +115,7 @@ public:
 ///////////////////////////////////////////////////////
 
     virtual void setBackend(const std::string& name) = 0;
-    virtual void setDatatype(const DataType& datatype) = 0;
+    virtual void setDataType(const DataType& dataType) const = 0;
 
     /**
      * @brief Set the a new OperatorImpl to the Operator
@@ -117,14 +158,20 @@ public:
 //        INNER
 ///////////////////////////////////////////////////////
 
-    std::string type() const {
+    inline std::string type() const noexcept {
         return mType;
     }
 
-    virtual IOIndex_t nbInputs() const noexcept = 0;
-    virtual IOIndex_t nbDataInputs() const noexcept = 0;
-    virtual IOIndex_t nbOutputs() const noexcept = 0;
-      static const std::vector<std::string> getInputsName(){
+    inline OperatorType operatorType() const noexcept {
+        return mOperatorType;
+    }
+
+    inline IOIndex_t nbInputs() const noexcept { return mNbData + mNbParam; }
+    inline IOIndex_t nbData() const noexcept { return mNbData; }
+    inline IOIndex_t nbParam() const noexcept { return mNbParam; }
+    inline IOIndex_t nbOutputs() const noexcept { return mNbOut; }
+
+    static const std::vector<std::string> getInputsName(){
         return {};
     }
     static const std::vector<std::string> getOutputsName(){
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a55d7ac2842f948d923f9e1e54d2ffed1fd0f954
--- /dev/null
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -0,0 +1,101 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_
+#define AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class OperatorTensor : public Operator {
+    /* TODO: Add an attribute specifying the type of Data used by the Operator,
+     * the same way the ``Type`` attribute specifies the type of Operator. This
+     * attribute could then be checked in the forwardDims function to assert that
+     * Operators being used work with Tensors and cast them to OperatorTensor
+     * instead of Operator.
+     */
+    /* TODO: Maybe change the type attribute of the Data object to an enum instead
+     * of an array of char, for faster comparisons.
+     */
+protected:
+    std::vector<std::shared_ptr<Tensor>> mInputs;
+    std::vector<std::shared_ptr<Tensor>> mOutputs;
+
+public:
+    OperatorTensor() = delete;
+
+    OperatorTensor(const char* type, const IOIndex_t nbData, const IOIndex_t nbParam,
+                   const IOIndex_t nbOut)
+        : Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor),
+          mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)),
+          mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) {
+        for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) {
+            mOutputs[i] = std::make_shared<Tensor>();
+            mOutputs[i]->setDataType(DataType::Float32);
+        }
+    }
+
+    OperatorTensor(const OperatorTensor& other)
+        : Operator(other),
+          mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)),
+          mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) {
+        for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
+            mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i)));
+            // datatype already copied
+        }
+    }
+
+    ~OperatorTensor();
+
+public:
+    ///////////////////////////////////////////////////
+    virtual void associateInput(const IOIndex_t inputIdx,
+                                const std::shared_ptr<Data>& data) override;
+    ///////////////////////////////////////////////////
+
+    ///////////////////////////////////////////////////
+    // Tensor access
+    // input management
+    void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
+    void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final;
+    const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const;
+    inline std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        return std::static_pointer_cast<Data>(getInput(inputIdx));
+    }
+
+    // output management
+    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override;
+    void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override;
+    virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const;
+    inline std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        return std::static_pointer_cast<Data>(getOutput(outputIdx));
+    }
+    ///////////////////////////////////////////////////
+
+    ///////////////////////////////////////////////////
+    // Tensor dimensions
+    virtual void computeOutputDims();
+    virtual bool outputDimsForwarded() const;
+    ///////////////////////////////////////////////////
+
+    virtual void setDataType(const DataType& dataType) const override;
+};
+}  // namespace Aidge
+
+#endif  // AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_
\ No newline at end of file
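Editor's note: all tensor storage is now centralized in this class, so the `getRaw*` accessors reduce to upcasts of the stored pointers. A short usage sketch, assuming any concrete OperatorTensor subclass (the `demo` wrapper is illustrative):

```cpp
#include <cassert>
#include <memory>

#include "aidge/operator/OperatorTensor.hpp"

void demo(Aidge::OperatorTensor& op) {
    auto t = std::make_shared<Aidge::Tensor>();

    op.associateInput(0, t);       // stored in mInputs[0]: shared, not deep-copied
    auto in  = op.getInput(0);     // typed access: shared_ptr<Tensor>
    auto raw = op.getRawInput(0);  // same pointer, viewed as shared_ptr<Data>
    assert(raw == std::static_pointer_cast<Aidge::Data>(in));

    // Outputs are allocated by the constructor (one Float32 Tensor per output),
    // so getOutput(0) is never null on a freshly constructed operator.
    auto out = op.getOutput(0);
    assert(out != nullptr);
}
```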
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index cbebb16e1e24501b0ea371fb45211047f6e2b5e7..279b8b3d2c173d18c65c17e50385954a88fde77e 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -19,7 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -30,17 +30,12 @@ enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
 enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 
 template <DimIdx_t DIM>
-class Pad_Op : public Operator,
+class Pad_Op : public OperatorTensor,
                 public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
                 public StaticAttributes<PadAttr,
                                        std::array<DimSize_t, 2*DIM>,
                                        PadBorderType,
                                        double> {
-private:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char *Type = "Pad";
 
@@ -56,25 +51,19 @@ public:
     constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                      const PadBorderType &borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
-        : Operator(Type),
+        : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
                            attr<PadAttr::BorderType>(borderType),
-                           attr<PadAttr::BorderValue>(borderValue)) {
-        setDatatype(DataType::Float32);
-    }
+                           attr<PadAttr::BorderValue>(borderValue)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Pad_Op(const Pad_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
-    {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-    }
+        : OperatorTensor(op),
+          Attributes_(op)
+    {}
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -84,82 +73,38 @@ public:
         return std::make_shared<Pad_Op<DIM>>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 1 && "operators supports only 3 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
-
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
 
     void computeOutputDims() override final {
-        if (!mInput->empty()) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
+        bool associated = true;
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            }
+            associated &= !(getInput(i)->empty());
+        }
+        if (associated) {
+            std::array<DimSize_t, DIM + 2> outputDims{};
+            const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
 
             for (std::size_t dim = 0; dim < DIM; ++dim) {
                 outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
-                                    + mInput->dims()[dim+2]
+                                    + inputDims[dim+2]
                                     + this->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
             }
-            outputDims[1] = mInput->dims()[1];
-            outputDims[0] = mInput->dims()[0];
-            mOutput->resize(outputDims);
+            outputDims[1] = inputDims[1];
+            outputDims[0] = inputDims[0];
+            mOutputs[0]->resize(outputDims);
         }
     }
 
-    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return *(mInput.get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "Pad Operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "Pad Operators has only 1 outputs");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operators supports only 1 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string &name) override {
         mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-
-    void setDatatype(const DataType &datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
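Editor's note: the arithmetic is unchanged from the removed version: each spatial output size is the input size plus its leading and trailing borders, while batch and channel dimensions pass through untouched. A worked example for DIM = 2 (values illustrative):

```cpp
// Pad<2> with BeginEndBorders = {1, 2, 3, 4} on an input of dims
// {N=8, C=3, H=32, W=32} (tensors are DIM + 2 = 4-D):
//   outputDims[2] = 1 + 32 + 2 = 35   // H: borders[0] + H + borders[1]
//   outputDims[3] = 3 + 32 + 4 = 39   // W: borders[2] + W + borders[3]
//   outputDims[1] = 3                 // channels unchanged
//   outputDims[0] = 8                 // batch unchanged
// => mOutputs[0] resized to {8, 3, 35, 39}
```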
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index 732cf36b4ef7e7640648c542191acd02d0875a4f..0b0ae82f012eace8b5a2d5eb362a359386495b79 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -17,7 +17,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,33 +26,21 @@
 
 namespace Aidge {
 
-class Pow_Op : public Operator,
+class Pow_Op : public OperatorTensor,
     public Registrable<Pow_Op, std::string, std::unique_ptr<OperatorImpl>(const Pow_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Pow";
 
-    Pow_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Pow_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Pow_Op(const Pow_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,73 +51,18 @@ public:
         return std::make_shared<Pow_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty())
-            mOutput->resize(mInputs[0]->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Pow Operator has 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Pow Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
+    void computeOutputDims() override final;
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Pow_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -141,6 +74,6 @@ public:
 inline std::shared_ptr<Node> Pow(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
 }
-}
+} // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
+#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index d747b340618cc7e321f2cfc2ed9169798e5d77e9..fb6a20403adc1ee5cddb5869fd9d39ef59fb776e 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -18,49 +18,40 @@
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
 
 class Producer_Op
-    : public Operator,
+    : public OperatorTensor,
       public Registrable<Producer_Op, std::string, std::unique_ptr<OperatorImpl>(
                                           const Producer_Op &)> {
-private:
-    std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Producer";
 
     template <std::size_t DIM>
     Producer_Op(const std::array<DimSize_t, DIM>& dims)
-        : Operator(Type)
+        : OperatorTensor(Type, 0, 0, 1)
     {
-        //ctor
-        setDatatype(DataType::Float32);
-        mOutput->resize(dims);
+        mOutputs[0]->resize(dims);
     }
 
     Producer_Op(const std::shared_ptr<Tensor> tensor)
-        : Operator(Type),
-          mOutput(tensor)
+        : OperatorTensor(Type, 0, 0, 1)
     {
-        setDatatype(tensor->dataType());
+        mOutputs[0] = tensor; // share the Tensor: the pointer is copied, not the data
     }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @param op OperatorTensor to copy.
      */
     Producer_Op(const Producer_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -71,18 +62,8 @@ public:
         return std::make_shared<Producer_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final {
-        assert(false && "Producer operator takes no input");
-    }
-
-    /**
-     * @brief Set the Output Tensor of the Producer operator.
-     * This method will create a copy of the Tensor.
-     *
-     * @param newOutput Tensor containing the values to copy
-     */
-    void setOutputTensor(const Tensor& newOutput) {
-        *mOutput = newOutput;
+    void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
     }
 
     void computeOutputDims() override final {}
@@ -90,48 +71,13 @@ public:
     bool outputDimsForwarded() const override final {return true;}
 
 
-    [[noreturn]] inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final {
-      assert(false);
-      exit(-1);
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t /*inputIdx*/) const override final {
-      assert(false && "Producer Operator has no input");
-      return nullptr;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-      assert((outputIdx == 0) && "Producer Operator has only 1 output");
-      (void) outputIdx; // avoid unused warning
-      return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t /*inputIdx*/) const override final {
-        assert(false && "Producer operator takes no input");
-        return nullptr;
-    }
-
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-    inline const std::vector<DimSize_t> dims() const noexcept { return mOutput->dims(); }
+    inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Producer_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 0; };
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 0; };
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; };
     static const std::vector<std::string> getInputsName(){
         return {};
     }
@@ -181,4 +127,4 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, Dim
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
+#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
\ No newline at end of file
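Editor's note: one behavioral change worth flagging here: the removed `setOutputTensor()` deep-copied the given Tensor, whereas the Tensor constructor now stores the caller's pointer, so the Producer and the caller share the same storage. A short sketch of the consequence:

```cpp
#include <cassert>
#include <memory>

#include "aidge/operator/Producer.hpp"

void demo() {
    auto t = std::make_shared<Aidge::Tensor>();
    Aidge::Producer_Op prod(t);

    // The operator's output *is* t, not a copy of it:
    assert(prod.getOutput(0) == t);
    // so any later write through t is visible via the Producer's output.
}
```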
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 52f13f1c5ce1d0b7a0d4ccaa4d7fe9927bcc3e53..3444c25fc2e1572e78a1377b3273580f494ac8f9 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -17,42 +17,29 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 
-class ReLU_Op : public Operator,
+class ReLU_Op : public OperatorTensor,
     public Registrable<ReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const ReLU_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "ReLU";
 
-    ReLU_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    ReLU_Op() : OperatorTensor(Type, 1, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     ReLU_Op(const ReLU_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,68 +50,15 @@ public:
         return std::make_shared<ReLU_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "ReLU Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "ReLU Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<ReLU_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -138,4 +72,4 @@ inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
 }
 }
 
-#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
+#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 353666fb3950d034a7dbe8ec1d3ebdb312679f95..fd6d6bcfccc36829671538e1f2e31b13644e3938 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -15,58 +15,47 @@
 #include <vector>
 #include <memory>
 
-
-
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 enum class ScalingAttr {
-    scalingFactor
+    scalingFactor, quantizedNbBits, isOutputUnsigned
 };
 
-class Scaling_Op : public Operator,
+class Scaling_Op : public OperatorTensor,
     public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
-    public StaticAttributes<ScalingAttr, float> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
+    public StaticAttributes<ScalingAttr, float, std::size_t, bool> {
 public:
     static constexpr const char* Type = "Scaling";
 
     Scaling_Op() = delete;
 
-    using Attributes_ = StaticAttributes<ScalingAttr, float>;
+    using Attributes_ = StaticAttributes<ScalingAttr, float, std::size_t, bool>;
     template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
 
-    Scaling_Op(float scalingFactor)
-            : Operator(Type),
-            Attributes_(
-                attr<ScalingAttr::scalingFactor>(scalingFactor))
-    {
-        setDatatype(DataType::Float32);
-    }
+    Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
+        : OperatorTensor(Type, 1, 0, 1),
+          Attributes_(
+            attr<ScalingAttr::scalingFactor>(scalingFactor),
+            attr<ScalingAttr::quantizedNbBits>(nbBits),
+            attr<ScalingAttr::isOutputUnsigned>(isOutputUnsigned))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Scaling_Op(const Scaling_Op& op)
-        : Operator(Type),
-          Attributes_(op),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op),
+          Attributes_(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -77,92 +66,35 @@ public:
         return std::make_shared<Scaling_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        (void) inputIdx; //avoid unused warning
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return *(mInput.get());
-    }
-    inline Tensor& output(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return *(mOutput.get());
-    }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning;
-        return mOutput;
-    }
-
-
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Scaling_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
-
+        mOutputs[0]->setBackend(name);
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        mInputs[0]->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
-    static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
+/*
 inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
 }
+*/
+inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, std::size_t quantizedNbBits = 8, bool isOutputUnsigned = true, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor, quantizedNbBits, isOutputUnsigned), name);
 }
+} // namespace Aidge
 
 namespace {
 template <>
 const char* const EnumStrings<Aidge::ScalingAttr>::data[]
-    = {"scalingFactor"};
+    = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
+#endif /* __AIDGE_CORE_OPERATOR_SCALING_H__ */
\ No newline at end of file
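Editor's note: the updated factory exposes the two new attributes alongside the scaling factor. Illustrative calls:

```cpp
// Defaults: 8-bit quantization, unsigned output.
auto s1 = Aidge::Scaling(0.5f);
// Explicit: 16-bit quantization, signed output, named node.
auto s2 = Aidge::Scaling(0.5f, 16, false, "rescale");
```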
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7bdbd8099ab79c9f1714989dc41cfc0893427bc9
--- /dev/null
+++ b/include/aidge/operator/Slice.hpp
@@ -0,0 +1,132 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SLICE_H_
+#define AIDGE_CORE_OPERATOR_SLICE_H_
+
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class SliceAttr { Beginning, SliceDims };
+
+template <DimIdx_t DIM>
+class Slice_Op
+    : public OperatorTensor,
+      public Registrable<Slice_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op<DIM> &)>,
+      public StaticAttributes<SliceAttr, std::size_t, std::array<DimSize_t, DIM>> {
+public:
+    static constexpr const char *Type = "Slice";
+
+    Slice_Op() = delete;
+
+    using Attributes_ = StaticAttributes<SliceAttr, std::size_t, std::array<DimSize_t, DIM>>;
+    template <SliceAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
+    Slice_Op(std::size_t beginningPos, std::array<DimSize_t, DIM> sliceDims)
+        : OperatorTensor(Type, 1, 0, 1),
+          Attributes_(attr<SliceAttr::Beginning>(beginningPos),
+                      attr<SliceAttr::SliceDims>(sliceDims))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
+     * input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Slice_Op(const Slice_Op &op)
+        : OperatorTensor(op),
+          Attributes_(op)
+    {
+        mImpl = op.mImpl ? Registrar<Slice_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this)
+                         : nullptr;
+    }
+
+public:
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Slice_Op
+     */
+    std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
+
+    void computeOutputDims() override final {
+        if (!getInput(0) || (getInput(0)->empty())) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        // Check input dimensions is compatible with slice dimensions
+        if (getInput(0)->nbDims() != DIM) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Error: input and slice dimensions are not the same size.");
+        }
+        std::array<DimSize_t, DIM> outputDims;
+        const std::array<DimSize_t, DIM> inputDims = getInput(0)->template dims<DIM>();
+
+        // Check that the sliced Tensor is actually part of the input Tensor
+        // For a 5*5 tensor ('x') and a 3*3 slice kernel ('o'):
+        // xxxxx               xxxxx
+        // xxxxx               xxxxx
+        // xxooo  --> ok       xxxoo --> out of bound
+        // xxooo               xxxoo
+        // xxooo               xxxoo
+        std::vector<std::size_t> beginningCoords = mInputs[0]->getCoord(this->template getAttr<SliceAttr::Beginning>());
+        for (std::size_t i = 0; i < DIM; ++i) {
+            if (beginningCoords[i] + this->template getAttr<SliceAttr::SliceDims>()[i] > inputDims[i]) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds");
+            } else {
+                outputDims[i] = this->template getAttr<SliceAttr::SliceDims>()[i];
+            }
+        }
+        mOutputs[0]->resize(outputDims);
+    }
+
+    void setBackend(const std::string &name) override {
+        mImpl = Registrar<Slice_Op>::create(name)(*this);
+        mOutputs[0]->setBackend(name);
+
+        // FIXME: temporary workaround
+        getInput(0)->setBackend(name);
+    }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+template <std::size_t DIM>
+inline std::shared_ptr<Node> Slice(std::size_t beginningPos, std::array<DimSize_t, DIM> sliceDims,
+                                   const std::string &name = "") {
+    // FIXME: properly handle default w&b initialization in every case
+    return std::make_shared<Node>(std::make_shared<Slice_Op<DIM>>(beginningPos, sliceDims), name);
+}
+
+template <DimIdx_t DIM>
+inline std::shared_ptr<Node> Slice(std::size_t beginningPos, DimSize_t const (&sliceDims)[DIM], const std::string& name = "") {
+    return Slice(beginningPos, to_array(sliceDims), name);
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "Beginning", "SliceDims" };
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SLICE_H_ */
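Editor's note: `Beginning` is a flat (row-major) index into the input, which computeOutputDims() converts back to coordinates via getCoord() before the bounds check. A worked example matching the 5x5/3x3 diagram in the code above (values illustrative):

```cpp
#include <array>

#include "aidge/operator/Slice.hpp"

// For a 5x5 input, flat index 12 maps to coordinates (2, 2): 12 = 2*5 + 2.
// A 3x3 slice anchored there fits: 2 + 3 = 5 <= 5 on both axes, whereas
// Beginning = 13 -> (2, 3) would fail on the second axis: 3 + 3 = 6 > 5.
std::array<Aidge::DimSize_t, 2> sliceDims{3, 3};
auto slice = Aidge::Slice<2>(/*beginningPos=*/12, sliceDims, "mySlice");
```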
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index ba6132a5ee00325d0f7de57db117a169d42352e9..cc19cb8210af516f349de124f65cdd55308609fb 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -17,7 +17,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,33 +26,21 @@
 
 namespace Aidge {
 
-class Softmax_Op : public Operator,
+class Softmax_Op : public OperatorTensor,
     public Registrable<Softmax_Op, std::string, std::unique_ptr<OperatorImpl>(const Softmax_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static constexpr const char* Type = "Softmax";
 
-    Softmax_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Softmax_Op() : OperatorTensor(Type, 1, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Softmax_Op(const Softmax_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Softmax_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Softmax_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,68 +51,14 @@ public:
         return std::make_shared<Softmax_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Softmax Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Softmax Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Softmax_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 90b2ae6a8ae1311aef14e4eba4d3563a28a3d18e..a4069b59bbe7e7586d02b71a39d811d9bf972b77 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -17,7 +17,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,7 +26,7 @@
 
 namespace Aidge {
 
-class Sqrt_Op : public Operator,
+class Sqrt_Op : public OperatorTensor,
     public Registrable<Sqrt_Op, std::string, std::unique_ptr<OperatorImpl>(const Sqrt_Op&)> {
 public:
     // FIXME: change accessibility
@@ -36,23 +36,16 @@ public:
 public:
     static constexpr const char* Type = "Sqrt";
 
-    Sqrt_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Sqrt_Op() : OperatorTensor(Type, 1, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Sqrt_Op(const Sqrt_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,68 +56,14 @@ public:
         return std::make_shared<Sqrt_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInput = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInput->empty())
-            mOutput->resize(mInput->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx == 0) && "Sqrt Operator has only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return mInput;
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Sqrt Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx == 0 && "operator supports only 1 input");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInput);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
-
-
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Sqrt_Op>::create(name)(*this);
-        mOutput->setBackend(name);
-
-        // FIXME: temporary workaround
-        mInput->setBackend(name);
-    }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInput->setDatatype(datatype);
+        getInput(0)->setBackend(name);
     }
 
-    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 451cba08f58e7a580576531ce2a97c92fb9be3ae..becf98926d2da777c6551e8ed2fbd7b5fcf50017 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -17,7 +17,7 @@
 #include <vector>
 
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
@@ -26,7 +26,7 @@
 
 namespace Aidge {
 
-class Sub_Op : public Operator,
+class Sub_Op : public OperatorTensor,
     public Registrable<Sub_Op, std::string, std::unique_ptr<OperatorImpl>(const Sub_Op&)> {
 public:
     // FIXME: change accessibility
@@ -36,23 +36,16 @@ public:
 public:
     static constexpr const char* Type = "Sub";
 
-    Sub_Op()
-            : Operator(Type)
-    {
-        setDatatype(DataType::Float32);
-    }
+    Sub_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Sub_Op(const Sub_Op& op)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>(*op.mOutput))
+        : OperatorTensor(op)
     {
-        // cpy-ctor
-        setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Sub_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Sub_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -63,73 +56,18 @@ public:
         return std::make_shared<Sub_Op>(*this);
     }
 
-    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
-        if (!mInputs[0]->empty())
-            mOutput->resize(mInputs[0]->dims());
-    }
-
-    bool outputDimsForwarded() const override final {
-        return !(mOutput->empty());
-    }
-
-
-    inline Tensor& input(const IOIndex_t inputIdx) const override final {
-        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
-        return *(mInputs[inputIdx].get());
-    }
-    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
-
-
-    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
-        assert((inputIdx < 2) && "Sub Operator has 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return mInputs[inputIdx];
-    }
-    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
-        assert((outputIdx == 0) && "Sub Operator has only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return mOutput;
-    }
-
-
-    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < 2 && "operator supports only 2 inputs");
-        (void) inputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }
-    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx == 0 && "operator supports only 1 output");
-        (void) outputIdx; // avoid unused warning
-        return std::static_pointer_cast<Data>(mOutput);
-    }
+    void computeOutputDims() override final;
 
 
     void setBackend(const std::string& name) override {
         mImpl = Registrar<Sub_Op>::create(name)(*this);
-        mOutput->setBackend(name);
+        mOutputs[0]->setBackend(name);
 
         // FIXME: temporary workaround
-        mInputs[0]->setBackend(name);
-        mInputs[1]->setBackend(name);
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) override {
-        mOutput->setDatatype(datatype);
 
-        // FIXME: temporary workaround
-        mInputs[0]->setDatatype(datatype);
-        mInputs[1]->setDatatype(datatype);
-    }
-
-    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
-    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -141,6 +79,6 @@ public:
 inline std::shared_ptr<Node> Sub(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
 }
-}
+} // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
+#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
\ No newline at end of file
diff --git a/include/aidge/utils/Recipies.hpp b/include/aidge/recipies/Recipies.hpp
similarity index 67%
rename from include/aidge/utils/Recipies.hpp
rename to include/aidge/recipies/Recipies.hpp
index 7428d9d226668a1d2268a071d9f534f47bab6840..8c5ba8d085482eaaba75dfd8716eda2aa58c3bb5 100644
--- a/include/aidge/utils/Recipies.hpp
+++ b/include/aidge/recipies/Recipies.hpp
@@ -17,8 +17,10 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
+#include "aidge/graphRegex/matchFsm/MatchResult.hpp"
 
-namespace Aidge{
+
+namespace Aidge {
 
 // FUSE MATMUL + ADD -> FC
 
@@ -27,7 +29,12 @@ namespace Aidge{
  *
  * @param nodes Strict set of Node to merge.
  */
-void fuseMulAdd(std::set<std::shared_ptr<Node>> nodes);
+//void fuseMulAdd(std::set<std::shared_ptr<Node>> nodes);
+
+void fuseMulAdd(std::shared_ptr<MatchSolution> solution);
+
+void fuseMulAdd(std::shared_ptr<Node> matmul, std::shared_ptr<Node> add);
+
 /**
  * @brief Merge ``MatMul`` and :cpp:function:`Aidge::Add` Node into a :cpp:function:`Aidge::FC` Node.
  *
@@ -43,7 +50,11 @@ void fuseMulAdd(std::shared_ptr<GraphView> graphView);
  *
  * @param nodes Strict set of Node to merge.
  */
-void removeFlatten(std::set<std::shared_ptr<Node>> nodes);
+void removeFlatten(std::shared_ptr<Node> flatten);
+
+
+void removeFlatten(std::shared_ptr<MatchSolution> solution);
+
 /**
  * @brief Remove ``Flatten`` before :cpp:function:`Aidge::FC` Node.
  *
@@ -59,7 +70,12 @@ void removeFlatten(std::shared_ptr<GraphView> graphView);
  *
  * @param nodes Strict set of Node to merge.
  */
-void fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes);
+void fuseBatchNorm(std::shared_ptr<Node> conv, std::shared_ptr<Node> batchnorm);
+
+
+
+void fuseBatchNorm(std::shared_ptr<MatchSolution> solution);
+
 /**
  * @brief Fuse :cpp:function:`Aidge::BatchNorm` with :cpp:function:`Aidge::Conv` or :cpp:function:`Aidge::FC` Nodes.
  * Ref: https://nenadmarkus.com/p/fusing-batchnorm-and-conv/
@@ -68,6 +84,11 @@ void fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes);
  */
 void fuseBatchNorm(std::shared_ptr<GraphView> graphView);
 
-}
+// std::set<std::shared_ptr<Node>> getHorizontalTiling(const std::shared_ptr<Node>& node, const DimIdx_t axis, const std::size_t nbSlices);
+// void horizontalTiling(std::shared_ptr<Node> node, DimIdx_t dim, std::size_t nbSlices);
+// std::set<std::shared_ptr<Node>> getHorizontalTiling(std::set<std::shared_ptr<Node>> setOfNodes, DimIdx_t dim, std::size_t nbSlices);
+// void horizontalTiling(std::set<std::shared_ptr<Node>> setOfNodes, DimIdx_t dim, std::size_t nbSlices);
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 1896894ee8690cedaef696394da0829604e36211..faf6c49bdbe28e7214f06a4d116cf23a1739154f 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -64,6 +64,9 @@ public:
     std::vector<std::shared_ptr<Node>> getStaticScheduling(){
         return mStaticSchedule;
     }
+    std::shared_ptr<GraphView> getGraphView(){
+        return mGraphView;
+    }
 
 private:
     /**
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index b67f69ae7afc2c22f3b424812ec994b10974b668..50ed0895e82bb468dee57264534f0ec3a486a815 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -22,8 +22,8 @@
 
 namespace Aidge {
 /**
- * @brief This class is designed to handle static attributes (i.e. known at compile-time) 
- * with named accessors, with minimal overhead (the name strings are not stored in each object 
+ * @brief This class is designed to handle static attributes (i.e. known at compile-time)
+ * with named accessors, with minimal overhead (the name strings are not stored in each object
  * instance and it remains possible to access attribute without overhead at compile-time).
 */
 template <class ATTRS_ENUM, class ...T>
@@ -97,6 +97,17 @@ public:
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"%s\" not found", name);
     }
 
+    template <typename R>
+    const R& getAttr(const char* name) const {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (strcmp(EnumStrings<ATTRS_ENUM>::data[i], name) == 0) {
+                return getAttr<R>(i);
+            }
+        }
+
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"%s\" not found", name);
+    }
+
     template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
     typename std::enable_if<(SIZE > 0), R&>::type getAttr(std::size_t i) {
         if (i == SIZE-1) {
@@ -117,6 +128,26 @@ public:
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
     }
 
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    typename std::enable_if<(SIZE > 0), const R&>::type getAttr(std::size_t i) const {
+        if (i == SIZE-1) {
+            if (std::is_same<R, typename std::tuple_element<SIZE-1,std::tuple<T...>>::type>::value) {
+                return reinterpret_cast<const R&>(std::get<SIZE-1>(mAttrs));
+            }
+            else {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "wrong type for attribute with index %lu", i);
+            }
+        }
+        else {
+            return getAttr<R, SIZE-1>(i);
+        }
+    }
+
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    [[noreturn]] typename std::enable_if<(SIZE == 0), const R&>::type getAttr(std::size_t /*i*/) const {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
+    }
+
     template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
     constexpr typename std::enable_if<(SIZE > 0), const std::type_info&>::type getAttrType(std::size_t i) const {
         if (i == SIZE-1) {
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 31470e0eb2c50b5386b64498f89419801b133d3a..babc534bdc23e87e17e21312d18b51b04baee7ca 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -35,7 +35,7 @@ void addCtor(py::class_<Tensor,
         /* Request a buffer descriptor from Python */
         py::buffer_info info = b.request();
         Tensor* newTensor = new Tensor();
-        newTensor->setDatatype(NativeType<T>::type);
+        newTensor->setDataType(NativeType<T>::type);
         const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
         newTensor->resize(dims);
         // TODO : Find a better way to choose backend
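For orientation, a minimal Python sketch of the buffer-protocol constructor registered by `addCtor` above. It assumes the module is built from this branch and that `Tensor.dims` is bound; numpy is used only to supply a buffer:

```python
import numpy as np
import aidge_core

# The bound constructor requests a buffer descriptor, maps the dtype through
# NativeType<T> (now via setDataType) and resizes the tensor to the buffer shape.
arr = np.arange(6, dtype=np.float32).reshape(2, 3)
t = aidge_core.Tensor(arr)
print(t.dims())  # expected: [2, 3]
```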
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 6ac2199b4ba59faba16c9815277ad134c6f183f4..61392470adaeb7db8812a3063edc5f8eee1d3083 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -51,9 +51,18 @@ void init_GraphView(py::module& m) {
           Include a Node to the current GraphView object.
 
           :param other_node: Node to add
-          :type oth_Node: Node
-          :param includeLearnableParameter: include non-data inputs, like weights and biases. Default True.
-          :type includeLearnableParameter: bool
+          :type other_node: Node
+          :param include_learnable_parameters: include non-data inputs, like weights and biases, default True.
+          :type include_learnable_parameters: bool, optional
+          )mydelimiter")
+
+          .def("add", (void (GraphView::*)(std::shared_ptr<GraphView>)) & GraphView::add,
+               py::arg("other_graph"),
+          R"mydelimiter(
+          Include a GraphView to the current GraphView object.
+
+          :param other_graph: GraphView to add
+          :type other_graph: GraphView
           )mydelimiter")
 
           .def("add_child",
@@ -89,7 +98,7 @@ void init_GraphView(py::module& m) {
           .def("get_node", &GraphView::getNode, py::arg("node_name"))
           .def("forward_dims", &GraphView::forwardDims)
           .def("__call__", &GraphView::operator(), py::arg("connectors"))
-          .def("set_datatype", &GraphView::setDatatype, py::arg("datatype"))
+          .def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
           .def("set_backend", &GraphView::setBackend, py::arg("backend"))
           //   .def("__getitem__", [](Tensor& b, size_t idx)-> py::object {
           //      // TODO : Should return error if backend not compatible with get
@@ -105,4 +114,4 @@ void init_GraphView(py::module& m) {
           //      })
             ;
 }
-}  // namespace Aidge
\ No newline at end of file
+}  // namespace Aidge
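A short sketch of the new GraphView-to-GraphView `add` overload bound above; `sequential` and `GenericOperator` are used only to build throwaway graphs:

```python
import aidge_core

g1 = aidge_core.sequential([aidge_core.GenericOperator("A", 1, 0, 1, name="a")])
g2 = aidge_core.sequential([aidge_core.GenericOperator("B", 1, 0, 1, name="b")])

g1.add(g2)               # merge g2's content into g1 (new overload)
print(g1.get_node("b"))  # "b" is now reachable from g1
```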
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index e3666d247324fc419570611f41bbe67c7c68cc4e..aa5c21372730536662106a035307d885fa011107 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -16,136 +16,150 @@
 
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/graph/Connector.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 void init_Node(py::module& m) {
     py::class_<Node, std::shared_ptr<Node>>(m, "Node")
-            .def("name", &Node::name,
-            R"mydelimiter(
-            Name of the Node.
-            )mydelimiter")
-
-            .def("type", &Node::type,
-            R"mydelimiter(
-            Type of the node.
-            )mydelimiter")
-
-            .def("get_operator", &Node::getOperator,
-            R"mydelimiter(
-            Get the Operator object of the Node.
-            )mydelimiter")
-
-            .def("set_name", &Node::setName, py::arg("name"),
-            R"mydelimiter(
-            Set the Node name.
-
-            :param name: New name for the node.
-            :type name: str
-            :rtype: str
-            )mydelimiter")
-
-            .def("add_child",
-                 (void (Node::*)(std::shared_ptr<Node>, const IOIndex_t, IOIndex_t)) &
-                         Node::addChild,
-                 py::arg("other_node"), py::arg("out_id") = 0, py::arg("other_in_id") = gk_IODefaultIndex,
-            R"mydelimiter(
-            Link another Node to an output of the current Node.
-
-            :param other_node: Pointer to the other Node.
-            :type other_node: :py:class: Node
-            :param out_id: ID of the current Node output to connect to the other Node. Default to 0.
-            :type out_id: int
-            :param other_in_id: ID of the other Node input to connect to the current Node. Default to the first avaible data input.
-            :type other_in_id: int
-            )mydelimiter")
-
-            .def("add_child",
-                 (void (Node::*)(std::shared_ptr<GraphView>, const IOIndex_t,
-                                 std::pair<std::shared_ptr<Node>, IOIndex_t>)) &
-                         Node::addChild,
-                 py::arg("other_graph"), py::arg("out_id") = 0,
-                 py::arg("other_in_id") =
-                         std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex),
-                        R"mydelimiter(
-            Link a Node from a specific GraphView to the current Node.
-
-            :param other_view: Pointer to the GraphView whose content should be linked to the current Node.
-            :type other_view: :py:class: GraphView
-            :param out_id: ID of the current Node output to connect to the other Node. Default to 0.
-            :type out_id: int
-            :param other_in_id: Pair of Node and input connection ID for specifying the connection. If the GraphView whose content is linked has only one input Node, then it defaults to the first available data input ID of this Node.
-            :type other_in_id: tuple[:py:class: Node, int]
-            )mydelimiter")
-
-            .def("inputs", &Node::inputs,
-            R"mydelimiter(
-            Get ordered list of parent Node and the associated output index connected to the current Node's inputs.
-
-            :return: List of connections. When an input is not linked to any parent, the default value is (None, default_index)
-            :rtype: list[tuple[Node, int]]
-            )mydelimiter")
-
-            .def("input", &Node::input, py::arg("in_id"),
-            R"mydelimiter(
-            Get the parent Node and the associated output index connected to the i-th input of the current Node.
-            
-            :param in_id: input index of the current Node object.
-            :type in_id: int
-            :return: i-th connection. When an input is not linked to any parent, the default value is (None, default_index)
-            :rtype: tuple[Node, int]
-            )mydelimiter")
-
-            .def("outputs", &Node::outputs,
-            R"mydelimiter(
-            Get, for each output of the Node, a list of the children Node and the associated input index connected to it.
-
-            :return: List of a list of connections. When an outut is not linked to any child,  its list a empty.
-            :rtype: list[list[tuple[Node, int]]]
-            )mydelimiter")
-
-            .def("output", &Node::output, py::arg("out_id"),
-            R"mydelimiter(
-            Get a list of the children Node for a specific output and the associated input index connected to it.
-            
-            :param out_id: input index of the current Node object.
-            :type out_id: int
-            :return: i-th connection. When an input is not linked to any parent, the default value is (None, default_index)
-            :rtype: list[tuple[Node, int]]
-            )mydelimiter")
-
-            .def("get_nb_inputs", &Node::nbInputs,
-            R"mydelimiter(
-            Number of inputs.
-
-            :rtype: int
-            )mydelimiter")
-
-            .def("get_nb_datainputs", &Node::nbDataInputs,
-            R"mydelimiter(
-            Number of data inputs.
-
-            :rtype: int
-            )mydelimiter")
-
-            .def("get_nb_outputs", &Node::nbOutputs,
-            R"mydelimiter(
-            Number of outputs.
-
-            :rtype: int
-            )mydelimiter")
-
-            .def("get_parents", &Node::getParents,
-            R"mydelimiter(
-            Get parents.
-            )mydelimiter")
-
-            .def("get_children", (std::set<std::shared_ptr<Node>> (Node::*)() const) &Node::getChildren,
-            R"mydelimiter(
-            Get children.
-            )mydelimiter")
-
-            .def("__call__", &Node::operator(), py::arg("connectors"));
+    .def("name", &Node::name,
+    R"mydelimiter(
+    Name of the Node.
+    )mydelimiter")
+
+    .def("type", &Node::type,
+    R"mydelimiter(
+    Type of the node.
+    )mydelimiter")
+
+    .def("get_operator", &Node::getOperator,
+    R"mydelimiter(
+    Get the Operator object of the Node.
+    )mydelimiter")
+
+    .def("set_name", &Node::setName, py::arg("name"),
+    R"mydelimiter(
+    Set the Node name.
+
+    :param name: New name for the node.
+    :type name: str
+    :rtype: str
+    )mydelimiter")
+
+    .def("add_child",
+         (void (Node::*)(std::shared_ptr<Node>, const IOIndex_t, IOIndex_t)) &
+                 Node::addChild,
+         py::arg("other_node"), py::arg("out_id") = 0, py::arg("other_in_id") = gk_IODefaultIndex,
+    R"mydelimiter(
+    Link another Node to an output of the current Node.
+
+    :param other_node: Pointer to the other Node.
+    :type other_node: :py:class:`Node`
+    :param out_id: ID of the current Node output to connect to the other Node. Defaults to 0.
+    :type out_id: int
+    :param other_in_id: ID of the other Node input to connect to the current Node. Defaults to the first available data input.
+    :type other_in_id: int
+    )mydelimiter")
+
+    .def("add_child",
+        (void (Node::*)(std::shared_ptr<GraphView>, const IOIndex_t,
+                        std::pair<std::shared_ptr<Node>, IOIndex_t>)) &
+                Node::addChild,
+        py::arg("other_graph"), py::arg("out_id") = 0,
+        py::arg("other_in_id") =
+                std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex),
+               R"mydelimiter(
+    Link a Node from a specific GraphView to the current Node.
+
+    :param other_graph: Pointer to the GraphView whose content should be linked to the current Node.
+    :type other_graph: :py:class:`GraphView`
+    :param out_id: ID of the current Node output to connect to the other Node. Defaults to 0.
+    :type out_id: int
+    :param other_in_id: Pair of Node and input connection ID specifying the connection. If the linked GraphView has only one input Node, this defaults to the first available data input ID of that Node.
+    :type other_in_id: tuple[:py:class:`Node`, int]
+    )mydelimiter")
+
+    .def("inputs", &Node::inputs,
+    R"mydelimiter(
+    Get the ordered list of parent Nodes and the associated output indices connected to the current Node's inputs.
+
+    :return: List of connections. When an input is not linked to any parent, the default value is (None, default_index).
+    :rtype: list[tuple[Node, int]]
+    )mydelimiter")
+
+    .def("input", &Node::input, py::arg("in_id"),
+    R"mydelimiter(
+    Get the parent Node and the associated output index connected to the i-th input of the current Node.
+
+    :param in_id: input index of the current Node object.
+    :type in_id: int
+    :return: i-th connection. When an input is not linked to any parent, the default value is (None, default_index)
+    :rtype: tuple[Node, int]
+    )mydelimiter")
+
+    .def("outputs", &Node::outputs,
+    R"mydelimiter(
+    Get, for each output of the Node, a list of the children Node and the associated input index connected to it.
+
+    :return: List of lists of connections. When an output is not linked to any child, its list is empty.
+    :rtype: list[list[tuple[Node, int]]]
+    )mydelimiter")
+
+    .def("output", &Node::output, py::arg("out_id"),
+    R"mydelimiter(
+    Get a list of the children Node for a specific output and the associated input index connected to it.
+
+    :param out_id: output index of the current Node object.
+    :type out_id: int
+    :return: List of connections for the out_id-th output. When an output is not linked to any child, the list is empty.
+    :rtype: list[tuple[Node, int]]
+    )mydelimiter")
+
+    .def("get_nb_inputs", &Node::nbInputs,
+    R"mydelimiter(
+    Number of inputs.
+
+    :rtype: int
+    )mydelimiter")
+
+    .def("get_nb_data", &Node::nbData,
+    R"mydelimiter(
+    Number of data inputs.
+
+    :rtype: int
+    )mydelimiter")
+
+    .def("get_nb_outputs", &Node::nbOutputs,
+    R"mydelimiter(
+    Number of outputs.
+
+    :rtype: int
+    )mydelimiter")
+
+    .def("get_parents", &Node::getParents,
+    R"mydelimiter(
+    Get parents.
+    )mydelimiter")
+
+    .def("get_children", (std::set<std::shared_ptr<Node>> (Node::*)() const) &Node::getChildren,
+    R"mydelimiter(
+    Get children.
+    )mydelimiter")
+
+    .def("__call__",
+        [](Node &self, pybind11::args args) {
+            std::vector<Connector> connectors;
+            for (const auto &arg : args) {
+                // Check if the argument is an instance of Connector
+                if (pybind11::isinstance<Connector>(arg)) {
+                    // Convert the Python object to a C++ Connector and push it onto the vector
+                    connectors.push_back(arg.cast<Connector>());
+                } else {
+                    throw std::runtime_error("One of the arguments was not a Connector.");
+                }
+            }
+            return self(connectors);
+        });
 }
 }  // namespace Aidge
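Taken together, a hypothetical sketch of the Node surface documented above, using `GenericOperator` as a stand-in for real operators:

```python
import aidge_core

a = aidge_core.GenericOperator("OpA", 1, 0, 1, name="a")
b = aidge_core.GenericOperator("OpB", 1, 0, 1, name="b")

a.add_child(b, out_id=0)   # link a's output 0 to b's first free data input
print(b.input(0))          # (a, 0): parent Node and the parent's output index
print(a.get_nb_outputs())  # 1
print(b.get_nb_data())     # renamed from get_nb_datainputs
```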
diff --git a/python_binding/graphRegex/pybind_GraphRegex.cpp b/python_binding/graphRegex/pybind_GraphRegex.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..be3cd9e9124ba1306226dcbdc13ee39748cf0606
--- /dev/null
+++ b/python_binding/graphRegex/pybind_GraphRegex.cpp
@@ -0,0 +1,69 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include "aidge/graphRegex/GraphRegex.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+void init_GraphRegex(py::module& m){
+
+
+    py::class_<GraphRegex, std::shared_ptr<GraphRegex>>(m, "GraphRegex", "GraphRegex class describes a regex to test a graph.")
+    .def(py::init<>())
+
+    .def("add_query", &GraphRegex::addQuery, R"mydelimiter(
+    :rtype: str
+    )mydelimiter")
+
+    .def("set_key_from_graph", &GraphRegex::setKeyFromGraph, R"mydelimiter(
+    :param ref: The graph used to define the types of Nodes.
+    :type ref: :py:class:`aidge_core.GraphView`
+    )mydelimiter")
+
+//      void setNodeKey(const std::string key, const std::string conditionalExpressions );
+//  void setNodeKey(const std::string key,std::function<bool(NodePtr)> f);
+
+    .def("match", &GraphRegex::match, R"mydelimiter(
+    :param graphToMatch: The graph to perform the matching algorithm on.
+    :type graphToMatch: :py:class:`aidge_core.GraphView`
+    )mydelimiter")
+
+
+
+    .def("set_node_key",
+            (void (GraphRegex::*)(const std::string, const std::string )) &
+                    GraphRegex::setNodeKey,
+            py::arg("key"), py::arg("conditionalExpressions"),
+    R"mydelimiter(
+    Add a node test.
+
+    :param key: The key of the node test to use in the query.
+    :param conditionalExpressions: The test to perform.
+    )mydelimiter")
+
+    
+    .def("set_node_key",
+            (void (GraphRegex::*)(const std::string, std::function<bool(NodePtr)>)) &
+                    GraphRegex::setNodeKey,
+            py::arg("key"), py::arg("f"),
+    R"mydelimiter(
+    Add a node test.
+
+    :param key: The key of the lambda test to use in the conditional expressions.
+    :param f: A boolean lambda taking a NodePtr.
+    )mydelimiter")
+
+
+
+    ;
+}
+}
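A sketch of the intended matching flow for this new binding. The conditional expression and query strings below are illustrative placeholders, not a documented syntax, and `graph_view` is assumed to be an existing `aidge_core.GraphView`:

```python
import aidge_core

gr = aidge_core.GraphRegex()
gr.set_node_key("M", "getType($) == 'MatMul'")  # placeholder node test
gr.add_query("M -> Add")                        # placeholder topological query

# for solution in gr.match(graph_view):
#     ...  # hand each MatchSolution to a recipe such as fuseMulAdd
```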
diff --git a/python_binding/graphmatching/pybind_GRegex.cpp b/python_binding/graphmatching/pybind_GRegex.cpp
deleted file mode 100644
index 48d0e19ff22c1480636b67b5bde70bf1caa1f1b5..0000000000000000000000000000000000000000
--- a/python_binding/graphmatching/pybind_GRegex.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/graphmatching/GRegex.hpp"
-
-namespace py = pybind11;
-namespace Aidge {
-void init_GRegex(py::module& m){
-    py::class_<GRegex, std::shared_ptr<GRegex>>(m, "GRegex", "GRegex class combines a Node Regex and a list of Graph Regex that together describes a graph pattern as a graph regular expression. GRegex find patterns in a given graph that matches the graph regular expression.")
-    .def(py::init<const std::map<std::string,NodeRegex*>&, std::vector<std::string>&>(), py::arg("nodesRegex"), py::arg("seqRegexps"), R"mydelimiter(
-    Constructor of GRegex
-
-    :param nodesRegex: Describe the conditions an operator has to fulfill.
-    :type nodesRegex: Dict[str,:py:class:`aidge_core.NodeRegex`]
-    :param seqRegexps: Describe the graph topological pattern. List of Graph Regex as strings.
-    :type seqRegexps: List[str]
-
-    )mydelimiter")
-    .def("match", &GRegex::match, py::arg("graphToMatch"), R"mydelimiter(
-    Launch the graph matching algorithm on a given graph.
-    
-    :param graphToMatch: The graph to perform the matching algorithm on.
-    :type graphToMatch: :py:class:`aidge_core.GraphView`
-
-    :returns: Matched graph patterns.
-    :rtype: :py:class:`aidge_core.Match`
-
-    )mydelimiter")
-    ;
-}
-}
diff --git a/python_binding/graphmatching/pybind_Match.cpp b/python_binding/graphmatching/pybind_Match.cpp
deleted file mode 100644
index a2d2654f40ed50e20e8761be57e2c8bb98ce4e3b..0000000000000000000000000000000000000000
--- a/python_binding/graphmatching/pybind_Match.cpp
+++ /dev/null
@@ -1,34 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#include "aidge/graphmatching/Match.hpp"
-
-namespace py = pybind11;
-namespace Aidge {
-void init_Match(py::module& m){
-    py::class_<Match, std::shared_ptr<Match>>(m, "Match", "Match class stores the matched patterns resulting from a graph matching query. A matched pattern is the combinaison of the graph pattern start nodes and the set of all the nodes in the matched pattern (including the start nodes)")
-    .def(py::init<>())
-    .def("get_nb_match", &Match::getNbMatch, R"mydelimiter(
-    :returns: The number of graph patterns matched
-    :rtype: int
-    )mydelimiter")
-    .def("get_start_nodes", &Match::getStartNodes, R"mydelimiter(
-    :returns: All matched graph patterns start nodes
-    :rtype: List[List[:py:class:`aidge_core.Nodes`]]
-    )mydelimiter")
-    .def("get_match_nodes", &Match::getMatchNodes, R"mydelimiter(
-    :returns: All matched graph patterns sets of matched nodes
-    :rtype: List[Set[:py:class:`aidge_core.Nodes`]]
-    )mydelimiter");
-}
-}
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 0b2323c5cfb660415ec3ae009beaa7aa78afca0b..74ec11c28e746856fe767f16a4380651271d8fe4 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -13,21 +13,21 @@
 
 #include "aidge/operator/Add.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
-template <std::size_t NUM> void declare_Add(py::module &m) {
-  py::class_<Add_Op<NUM>, std::shared_ptr<Add_Op<NUM>>, Operator>(m, "AddOp", py::multiple_inheritance())
-  .def("get_inputs_name", &Add_Op<NUM>::getInputsName)
-  .def("get_outputs_name", &Add_Op<NUM>::getOutputsName);
+void declare_Add(py::module &m) {
+  py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
+  .def("get_inputs_name", &Add_Op::getInputsName)
+  .def("get_outputs_name", &Add_Op::getOutputsName);
 
-  m.def("Add", &Add<NUM>, py::arg("name") = "");
+  m.def("Add", &Add, py::arg("nbIn"), py::arg("name") = "");
 }
 
 void init_Add(py::module &m) {
-  declare_Add<2>(m);
+  declare_Add(m);
 }
 } // namespace Aidge
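`Add_Op` is no longer templated on its arity; the input count is now a runtime argument of the factory. A minimal sketch:

```python
import aidge_core

# Previously only declare_Add<2> was instantiated; the arity is now passed in.
add3 = aidge_core.Add(3, name="sum3")  # nbIn = 3
print(add3.get_nb_inputs())            # 3
```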
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index fe67fcb7a26f6ea1f05577b47444df5cb271110a..f87cd5dd66f44535ff895f73b160fc5988e1009a 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -18,7 +18,7 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/AvgPooling.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
-  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Operator, Attributes>(
+  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor, Attributes>(
     m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index cabaa2edd7053718160fa5013492d1914ee4cf16..ff0b9e0dfcb0d1c5e5567a938b1ca74faf242bed 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -13,7 +13,7 @@
 #include <string>
 
 #include "aidge/operator/BatchNorm.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
@@ -21,7 +21,7 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, Attributes>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor, Attributes>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
     .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index f4f7946c6ecc180f83e4bf58eee16102752f0c6e..71231b8218ac6af28c97ec29039301bc25b2d195 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -18,14 +18,14 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Conv.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
-  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Operator, Attributes>(
+  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor, Attributes>(
     m, ("ConvOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<DimSize_t,
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 4745ef345264763f1a890d566235be072c8e50d8..15f2c1c8acb4a1b59cfb0f35ebb78cb611647d3b 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -18,7 +18,7 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 
@@ -26,19 +26,22 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
-  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Operator, Attributes>(
+  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor, Attributes>(
     m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
-  .def(py::init<const std::array<DimSize_t, DIM> &,
+  .def(py::init<const DimSize_t,
+                const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &>(),
+        py::arg("nb_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
         py::arg("dilation_dims"))
   .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName);
 
-  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
+                                                                  const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
                                                                   const std::vector<DimSize_t> &dilation_dims) {
@@ -46,8 +49,9 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
 
-        return ConvDepthWise<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
-    }, py::arg("kernel_dims"),
+        return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+    }, py::arg("nb_channenls"),
+       py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index 3492bf244952ba6ed0d77cb16de758e61fb26383..6d14510f34349c001289096a7fc9b08681a25bc8 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Div.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Div(py::module& m) {
-    py::class_<Div_Op, std::shared_ptr<Div_Op>, Operator>(m, "DivOp", py::multiple_inheritance())
+    py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance())
     .def("get_inputs_name", &Div_Op::getInputsName)
     .def("get_outputs_name", &Div_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index c6a1c70000e3e6d604a6652716667efa1c18e956..606b9ae948847f98d5a1129c08db21e073311879 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -13,18 +13,18 @@
 
 #include "aidge/operator/FC.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, Attributes>(m, "FCOp", py::multiple_inheritance())
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor, Attributes>(m, "FCOp", py::multiple_inheritance())
   .def("get_inputs_name", &FC_Op::getInputsName)
   .def("get_outputs_name", &FC_Op::getOutputsName);
 
-  m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
+  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
 
 void init_FC(py::module &m) {
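`FC` now requires `in_channels` as well as `out_channels`, mirroring the C++ factory. A minimal sketch:

```python
import aidge_core

fc = aidge_core.FC(64, 10, nobias=False, name="classifier")
```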
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 241fc7f4a003f53de15a42859b078c54cc98b63a..154fdfa64f279d8d6bb40ea7077acdb4c0fd51b9 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -16,18 +16,18 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/GenericOperator.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 namespace py = pybind11;
 namespace Aidge {
 
 void init_GenericOperator(py::module& m) {
-    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, Operator, DynamicAttributes>(m, "GenericOperatorOp",
+    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, OperatorTensor, DynamicAttributes>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
     .def_readonly_static("identity", &GenericOperator_Op::Identity)
     .def("compute_output_dims", &GenericOperator_Op::computeOutputDims)
     .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function"));
 
-    m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nb_data_in"), py::arg("nb_in"), py::arg("nb_out"),
+    m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"),
           py::arg("name") = "");
 }
 }  // namespace Aidge
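The renamed keywords reflect the new data/parameter split of the inputs. A minimal sketch using them explicitly:

```python
import aidge_core

# nb_data + nb_param inputs, nb_out outputs
gop = aidge_core.GenericOperator(type="MyCustomOp", nb_data=1, nb_param=0,
                                 nb_out=1, name="custom")
```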
diff --git a/python_binding/operator/pybind_Identity.cpp b/python_binding/operator/pybind_Identity.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b1b1e8888976c578ff490f35776c890ba59911dc
--- /dev/null
+++ b/python_binding/operator/pybind_Identity.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Identity.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Identity(py::module& m) {
+    py::class_<Identity_Op, std::shared_ptr<Identity_Op>, Operator>(m, "IdentityOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Identity_Op::getInputsName)
+    .def("get_outputs_name", &Identity_Op::getOutputsName);
+
+    m.def("Identity", &Identity, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index af7689f0e64dd4ca8f798dcb34ea968972ace464..07300633ad1fb8163d4456afd744c4eb5d7b0ed1 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/LeakyReLU.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, Attributes>(m, "LeakyReLUOp", py::multiple_inheritance())
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor, Attributes>(m, "LeakyReLUOp", py::multiple_inheritance())
     .def("get_inputs_name", &LeakyReLU_Op::getInputsName)
     .def("get_outputs_name", &LeakyReLU_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index fdb51b24a87ce358c1e7808873ebc569ca2227c8..242bf2c451723677e1b9063edfc3098d4159e5a4 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -13,18 +13,18 @@
 
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void declare_MatMul(py::module &m) {
-  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Operator, Attributes>(m, "MatMulOp", py::multiple_inheritance())
+  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor, Attributes>(m, "MatMulOp", py::multiple_inheritance())
   .def("get_inputs_name", &MatMul_Op::getInputsName)
   .def("get_outputs_name", &MatMul_Op::getOutputsName);
 
-  m.def("MatMul", &MatMul, py::arg("out_channels"), py::arg("name") = "");
+  m.def("MatMul", &MatMul, py::arg("in_channels"), py::arg("out_channels"), py::arg("name") = "");
 }
 
 void init_MatMul(py::module &m) {
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 907e8cfaa6cde2451677b72beab38bd9a3938735..0ee3d9df80d7ea7b7be2b8d5c456d5d739506882 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -18,7 +18,7 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/MaxPooling.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
-  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, Attributes>(
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor, Attributes>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index aa9f3c50e6b8c6ab9e7be46776d5fba30d775be2..6df5a43f64bf8335108ccd99a1588a1367955b77 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -18,7 +18,6 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/MetaOperatorDefs.hpp"
-#include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
@@ -49,7 +48,8 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
 }
 
 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
-  m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+  m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
+                                                         const std::vector<DimSize_t>& kernel_dims,
                                                          const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
@@ -60,8 +60,9 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
 
-        return PaddedConvDepthWise<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
-    }, py::arg("kernel_dims"),
+        return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+    }, py::arg("nb_channels"),
+       py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
@@ -121,6 +122,15 @@ void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedMaxPoolingOp<2>(m);
   declare_PaddedMaxPoolingOp<3>(m);
 
+  py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, Operator>(m, "MetaOperator_Op", py::multiple_inheritance());
+
+  m.def("meta_operator", &MetaOperator,
+    py::arg("type"),
+    py::arg("graph"),
+    py::arg("name") = "",
+    py::arg("input_nodes") = std::vector<NodePtr>(),
+    py::arg("output_nodes") = std::vector<NodePtr>()
+  );
 
 }
 } // namespace Aidge
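A sketch of wrapping a small GraphView behind a single Node with the newly bound `meta_operator` factory; the inner graph is a throwaway two-operator chain:

```python
import aidge_core

inner = aidge_core.sequential([aidge_core.GenericOperator("A", 1, 0, 1),
                               aidge_core.GenericOperator("B", 1, 0, 1)])
meta = aidge_core.meta_operator("AB", inner, name="fused_ab")
```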
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 2627c99005b009769e8fbb97b1f5d79e2424c997..21f510d98728fbe5401288a366294241b5f10a3f 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Mul.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Mul(py::module& m) {
-    py::class_<Mul_Op, std::shared_ptr<Mul_Op>, Operator>(m, "MulOp", py::multiple_inheritance())
+    py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance())
     .def("get_inputs_name", &Mul_Op::getInputsName)
     .def("get_outputs_name", &Mul_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index 6b535e8cf3293b26aaa64f95ca2f9a394768935f..f9482eda2f93b5492cfcc89175da69d140f23df8 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -12,21 +12,29 @@
 #include <pybind11/pybind11.h>
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Types.h"
 #include <pybind11/stl.h>
 
 namespace py = pybind11;
 namespace Aidge {
 void init_Operator(py::module& m){
     py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
-    .def("output", &Operator::output, py::arg("outputIdx"))
-    .def("input", &Operator::input, py::arg("inputIdx"))
-    .def("nb_data_inputs", &Operator::nbDataInputs)
+    .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput), py::arg("outputIdx"), py::arg("data"))
+    .def("get_raw_output", &Operator::getRawOutput, py::arg("outputIdx"))
+    .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
+    .def("get_raw_input", &Operator::getRawInput, py::arg("inputIdx"))
+    .def("nb_inputs", &Operator::nbInputs)
+    .def("nb_data", &Operator::nbData)
+    .def("nb_param", &Operator::nbParam)
+    .def("nb_outputs", &Operator::nbOutputs)
     .def("associate_input", &Operator::associateInput, py::arg("inputIdx"), py::arg("data"))
-    .def("set_datatype", &Operator::setDatatype, py::arg("datatype"))
+    .def("set_datatype", &Operator::setDataType, py::arg("dataType"))
     .def("set_backend", &Operator::setBackend, py::arg("name"))
     .def("forward", &Operator::forward)
     // py::keep_alive forbids Python from garbage collecting the implementation while the Operator is still alive!
     .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>())
+    .def("get_hook", &Operator::getHook)
+    .def("add_hook", &Operator::addHook)
     ;
 }
-}
+}
\ No newline at end of file
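A hypothetical sketch of the reworked base `Operator` surface: the typed `input()`/`output()` accessors are gone, leaving raw `Data` access plus the new I/O counts:

```python
import aidge_core

node = aidge_core.GenericOperator("Any", 1, 0, 1, name="any")
op = node.get_operator()
print(op.nb_inputs(), op.nb_data(), op.nb_param(), op.nb_outputs())  # 1 1 0 1
raw = op.get_raw_output(0)  # a Data handle rather than a typed Tensor
```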
diff --git a/python_binding/graphmatching/pybind_NodeRegex.cpp b/python_binding/operator/pybind_OperatorTensor.cpp
similarity index 50%
rename from python_binding/graphmatching/pybind_NodeRegex.cpp
rename to python_binding/operator/pybind_OperatorTensor.cpp
index 034987f9ccae200a1b8877ecd8b3e878c84e8fc3..ce34dea158e6df1466db415b2539962c2113d42b 100644
--- a/python_binding/graphmatching/pybind_NodeRegex.cpp
+++ b/python_binding/operator/pybind_OperatorTensor.cpp
@@ -10,19 +10,18 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
-#include "aidge/graphmatching/NodeRegex.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Operator.hpp"
+#include <pybind11/stl.h>
 
 namespace py = pybind11;
 namespace Aidge {
-void init_NodeRegex(py::module& m){
-    py::class_<NodeRegex, std::shared_ptr<NodeRegex>>(m, "NodeRegex", "NodeRegex class describes a condition to test on any operator. Current version only supports testing the type of the operator.")
-    .def(py::init<const std::string>(), py::arg("condition"), R"mydelimiter(
-    Constructor of NodeRegex
-    
-    :param condition: Condition to be fulfilled by an operator.
-    :type condition: str
-
-    )mydelimiter")
+void init_OperatorTensor(py::module& m){
+    py::class_<OperatorTensor, std::shared_ptr<OperatorTensor>, Operator>(m, "OperatorTensor")
+    .def("get_output", &OperatorTensor::getOutput, py::arg("outputIdx"))
+    .def("get_input", &OperatorTensor::getInput, py::arg("inputIdx"))
+    .def("output_dims_forwarded", &OperatorTensor::outputDimsForwarded)
     ;
 }
 }
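Tensor-typed accessors now live on `OperatorTensor` rather than being re-declared by every operator. A minimal sketch:

```python
import aidge_core

node = aidge_core.GenericOperator("Any", 1, 0, 1, name="any")
op = node.get_operator()           # GenericOperator_Op is an OperatorTensor
print(op.get_output(0).dims())     # [] until dims are forwarded
print(op.output_dims_forwarded())  # False on a fresh operator
```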
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index 22866c5460381b6f494948c7410bcd67e7e46edb..09d1e4ad2ad6413901c28bc9d9fe16995483da05 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Pow.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Pow(py::module& m) {
-    py::class_<Pow_Op, std::shared_ptr<Pow_Op>, Operator>(m, "PowOp", py::multiple_inheritance())
+    py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance())
     .def("get_inputs_name", &Pow_Op::getInputsName)
     .def("get_outputs_name", &Pow_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 107b7ba00e4077d9f7c215257bf7fd46629481c1..3dae24b620fe99098205d7d5f23591780f1e9cb7 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -14,7 +14,7 @@
 
 #include "aidge/utils/Types.h"
 // #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/data/Tensor.hpp"
 
@@ -30,12 +30,11 @@ void declare_Producer(py::module &m) {
 
 
 void init_Producer(py::module &m) {
-    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, Operator>(
+    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, OperatorTensor>(
         m,
         "ProducerOp",
         py::multiple_inheritance())
     .def("dims", &Producer_Op::dims)
-    .def("set_output_tensor", &Producer_Op::setOutputTensor)
     .def("get_inputs_name", &Producer_Op::getInputsName)
     .def("get_outputs_name", &Producer_Op::getOutputsName);
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index dbcb483e8089373bc8599c2d09fed00049e2a2ac..24ae96649a87ff9acc996715d3cd00a97c393578 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/ReLU.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_ReLU(py::module& m) {
-    py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, Operator>(m, "ReLUOp", py::multiple_inheritance())
+    py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance())
     .def("get_inputs_name", &ReLU_Op::getInputsName)
     .def("get_outputs_name", &ReLU_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 8e50ab7c83bf43285b357cb803c0ce3eb42f4cc7..dc29e2171ff6f0fbbb5c80183778d8f20cbe085b 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -13,13 +13,13 @@
 #include <string>
 
 #include "aidge/operator/Softmax.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Softmax(py::module& m) {
-    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Operator>(m, "SoftmaxOp", py::multiple_inheritance())
+    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
     .def("get_inputs_name", &Softmax_Op::getInputsName)
     .def("get_outputs_name", &Softmax_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index b70171814662c861f19b3048b018260170d37491..98d65242e8ff199992bbfc740192ae25e6d7b738 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Sqrt.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Sqrt(py::module& m) {
-    py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, Operator>(m, "SqrtOp", py::multiple_inheritance())
+    py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance())
     .def("get_inputs_name", &Sqrt_Op::getInputsName)
     .def("get_outputs_name", &Sqrt_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index 10c95939646a6b605f23c42618bfbdd00ceb6e2e..dce1ab6cb27cc7da02e6c817a6bc49ec64bcf364 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -12,13 +12,13 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Sub.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_Sub(py::module& m) {
-    py::class_<Sub_Op, std::shared_ptr<Sub_Op>, Operator>(m, "SubOp", py::multiple_inheritance())
+    py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance())
     .def("get_inputs_name", &Sub_Op::getInputsName)
     .def("get_outputs_name", &Sub_Op::getOutputsName);
 
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index a482191c78ff56b000e043cd7350ca1c150d1d6e..23b54e46b23a341add8ba7291551c0f84f705bea 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -19,6 +19,7 @@ void init_Tensor(py::module&);
 void init_OperatorImpl(py::module&);
 void init_Attributes(py::module&);
 void init_Operator(py::module&);
+void init_OperatorTensor(py::module&);
 
 void init_Add(py::module&);
 void init_AvgPooling(py::module&);
@@ -39,15 +40,14 @@ void init_ReLU(py::module&);
 void init_Softmax(py::module&);
 void init_Sqrt(py::module&);
 void init_Sub(py::module&);
+void init_Identity(py::module&);
 
 void init_Node(py::module&);
 void init_GraphView(py::module&);
 void init_OpArgs(py::module&);
 void init_Connector(py::module&);
 
-void init_Match(py::module&);
-void init_NodeRegex(py::module&);
-void init_GRegex(py::module&);
+void init_GraphRegex(py::module&);
 
 void init_Recipies(py::module&);
 
@@ -67,6 +67,7 @@ void init_Aidge(py::module& m){
     init_OperatorImpl(m);
     init_Attributes(m);
     init_Operator(m);
+    init_OperatorTensor(m);
     init_Add(m);
     init_AvgPooling(m);
     init_BatchNorm(m);
@@ -85,11 +86,11 @@ void init_Aidge(py::module& m){
     init_Softmax(m);
     init_Sqrt(m);
     init_Sub(m);
+    init_Identity(m);
 
     init_Producer(m);
-    init_Match(m);
-    init_NodeRegex(m);
-    init_GRegex(m);
+    init_GraphRegex(m);
+
     init_Recipies(m);
     init_Scheduler(m);
     init_TensorUtils(m);
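
Ordering matters here: init_Operator runs before init_OperatorTensor, which
runs before the concrete operator initializers, because pybind11 must see a
base class before any py::class_ that lists it as a parent. A self-contained
sketch of that constraint (the demo types are hypothetical):

    #include <memory>
    #include <pybind11/pybind11.h>
    namespace py = pybind11;

    struct Base { virtual ~Base() = default; };
    struct Derived : Base {};

    PYBIND11_MODULE(demo, m) {
        // Swapping these two registrations makes the module fail at import
        // time with an "unknown base type" error.
        py::class_<Base, std::shared_ptr<Base>>(m, "Base");
        py::class_<Derived, std::shared_ptr<Derived>, Base>(m, "Derived");
    }
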
diff --git a/python_binding/recipies/pybind_Recipies.cpp b/python_binding/recipies/pybind_Recipies.cpp
index 93c131ef7417135bfdbc657c5c809339430616ed..820b6e12b11116b874170bd25a6dc75675894257 100644
--- a/python_binding/recipies/pybind_Recipies.cpp
+++ b/python_binding/recipies/pybind_Recipies.cpp
@@ -14,7 +14,7 @@
 
 #include <string>
 
-#include "aidge/utils/Recipies.hpp"
+#include "aidge/recipies/Recipies.hpp"
 
 namespace py = pybind11;
 
@@ -28,12 +28,13 @@ void init_Recipies(py::module &m) {
     :param graph_view: Graph view on which we want to apply the recipie
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
-  m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
-    Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+    
+  // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
+  //   Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
-    :param nodes: The MatMul and Add nodes to fuse.
-    :type nodes: list of :py:class:`aidge_core.Node`
-    )mydelimiter");
+  //   :param nodes: The MatMul and Add nodes to fuse.
+  //   :type nodes: list of :py:class:`aidge_core.Node`
+  //   )mydelimiter");
 
   m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
     Recipie to remove a flatten operator.
@@ -41,18 +42,20 @@ void init_Recipies(py::module &m) {
     :param graph_view: Graph view on which we want to apply the recipie
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
-  m.def("remove_flatten", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(removeFlatten), py::arg("nodes"), R"mydelimiter(
-    Recipie to remove a flatten operator.
 
-    :param nodes: The flatten operator to remove.
-    :type nodes: list of :py:class:`aidge_core.Node`
-    )mydelimiter");
-  m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
-    Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+  // m.def("remove_flatten", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(removeFlatten), py::arg("nodes"), R"mydelimiter(
+  //   Recipie to remove a flatten operator.
 
-    :param nodes: The MatMul and Add nodes to fuse.
-    :type nodes: list of :py:class:`aidge_core.Node`
-    )mydelimiter");
+  //   :param nodes: The flatten operator to remove.
+  //   :type nodes: list of :py:class:`aidge_core.Node`
+  //   )mydelimiter");
+
+  // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
+  //   Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+
+  //   :param nodes: The MatMul and Add nodes to fuse.
+  //   :type nodes: list of :py:class:`aidge_core.Node`
+  //   )mydelimiter");
 
   m.def("fuse_batchnorm", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseBatchNorm), py::arg("graph_view"), R"mydelimiter(
     Recipie to fuse a BatchNorm operator into the preceding operator.
@@ -60,11 +63,12 @@ void init_Recipies(py::module &m) {
     :param graph_view: Graph view on which we want to apply the recipie
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
-  m.def("fuse_batchnorm", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseBatchNorm), py::arg("nodes"), R"mydelimiter(
-    Recipie to remove a flatten operator.
+    
+  // m.def("fuse_batchnorm", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseBatchNorm), py::arg("nodes"), R"mydelimiter(
+  //   Recipie to remove a flatten operator.
 
-    :param nodes: The flatten operator to remove.
-    :type nodes: list of :py:class:`aidge_core.Node`
-    )mydelimiter");
+  //   :param nodes: The flatten operator to remove.
+  //   :type nodes: list of :py:class:`aidge_core.Node`
+  //   )mydelimiter");
 }
 } // namespace Aidge
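
With the node-set overloads commented out, only the GraphView flavours of the
recipes stay callable. A sketch of the surviving C++ entry points (assuming
the GraphView overload of fuseMulAdd bound above this hunk is kept as well):

    #include <memory>
    #include "aidge/recipies/Recipies.hpp"

    void applyRecipes(std::shared_ptr<Aidge::GraphView> graph) {
        Aidge::fuseMulAdd(graph);     // bound as aidge_core.fuse_mul_add
        Aidge::removeFlatten(graph);  // bound as aidge_core.remove_flatten
        Aidge::fuseBatchNorm(graph);  // bound as aidge_core.fuse_batchnorm
    }
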
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index 166754cc9fe9774d922ef523ab35f569673701fd..b76bf33367221add6273e02590d6ec315cfa4544 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -25,25 +25,25 @@ Aidge::OperatorImpl::OperatorImpl(const Operator& op):
 }
 
 Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    assert(mOp.getInput(inputIdx) && "requires valid input");
+    assert(mOp.getRawInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensor by default
-    return std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->size();
+    return std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->size();
 }
 
 Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
-    assert(mOp.getInput(inputIdx) && "requires valid input");
+    assert(mOp.getRawInput(inputIdx) && "requires valid input");
 
     // Protect the whole tensor by default
-    return std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->size();
+    return std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->size();
 }
 
 Aidge::NbElts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                          const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    assert(mOp.getOutput(outputIdx) && "requires valid output");
+    assert(mOp.getRawOutput(outputIdx) && "requires valid output");
 
     // Requires the whole tensor by default, regardless of available data on inputs
-    return std::static_pointer_cast<Tensor>(mOp.getOutput(outputIdx))->size();
+    return std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx))->size();
 }
 
 Aidge::NbElts_t Aidge::OperatorImpl::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
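
At the base Operator level only the type-erased accessors remain; the typed
ones now live in OperatorTensor. A sketch of the assumed split (return types
inferred from the casts above, so treat them as an assumption):

    #include <memory>
    #include "aidge/data/Tensor.hpp"
    #include "aidge/operator/Operator.hpp"
    #include "aidge/utils/Types.h"

    // Operator::getRawInput(i)    -> generic Data pointer (type-erased)
    // OperatorTensor::getInput(i) -> std::shared_ptr<Tensor> (typed)
    Aidge::NbElts_t inputSize(const Aidge::Operator& op, Aidge::IOIndex_t idx) {
        return std::static_pointer_cast<Aidge::Tensor>(op.getRawInput(idx))->size();
    }
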
diff --git a/src/graph/Connector.cpp b/src/graph/Connector.cpp
index cd2ceff8b58076a5054269e4676120b94c8b5beb..98f58259a97b7c4194b29ae7b75a4140885ee122 100644
--- a/src/graph/Connector.cpp
+++ b/src/graph/Connector.cpp
@@ -41,6 +41,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::generateGraph(std::vector<Connector> ct
             std::vector<std::shared_ptr<Node>> parents = nodesToAdd.back()->getParents();
             const std::set<std::shared_ptr<Node>>& alreadyAdded = graph->getNodes();
             for (std::shared_ptr<Node> parent : parents) {
+                if (!parent) continue;
                 if (alreadyAdded.find(parent) == alreadyAdded.end()) {
                     buffer.push_back(parent);
                 }
@@ -51,4 +52,4 @@ std::shared_ptr<Aidge::GraphView> Aidge::generateGraph(std::vector<Connector> ct
         buffer = {};
     }
     return graph;
-}
\ No newline at end of file
+}
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index b77283ed012f92380e87faa6680d9efc8c4d9137..ec02f2dd8f9718fbdd72ce8915991088218c607a 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -13,10 +13,12 @@
 #include <cassert>
 #include <iterator>
 #include <utility>
+#include <numeric>
 
 #include "aidge/utils/Types.h"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
 ///////////////////////////////////////////////////////
@@ -28,7 +30,7 @@ Aidge::Connector Aidge::GraphView::operator()(
   // TODO: allow for multiple inputNodes?
   assert((inputNodes().size() == 1U) && "Too many input Nodes for the GraphView, undefined behaviour");
   std::shared_ptr<Node> inNode = *inputNodes().begin();
-  assert((ctors.size() == static_cast<std::size_t>(inNode->nbDataInputs())) && "Wrong number of arguments.\n");
+  assert((ctors.size() == static_cast<std::size_t>(inNode->nbData())) && "Wrong number of arguments.\n");
   for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inNode->inputs()) {
     assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n");
     (void)input; // avoid unused warning
@@ -237,6 +239,19 @@ Aidge::GraphView::inputs(std::string name) const {
   return mNodeRegistry.at(name)->inputs();
 }
 
+void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType datatype) {
+    // Backend
+    // TODO: add Backend attribute to Operator
+    setBackend(backend);
+    // Data type
+    // TODO: manage Datatype attribute in OperatorImpl
+    setDataType(datatype);
+    // Data Format
+    // TODO: check actual parent output data format and the needed one. Add a Transpose Operator if necessary
+    // Forward dimensions
+    forwardDims();
+}
+
 void Aidge::GraphView::forwardDims() {
     // setInputs
     // Link every tensor to the right pointer
@@ -267,41 +282,46 @@ void Aidge::GraphView::forwardDims() {
 }
 
 void Aidge::GraphView::_forwardDims(std::set<std::shared_ptr<Node>> listNodes) {
-  // TODO: support multi-inputs/outputs
-  std::set<std::shared_ptr<Node>> nextList = std::set<std::shared_ptr<Node>>();
-  for (std::shared_ptr<Node> nodePtr : listNodes) {
-    if (!nodePtr->getOperator()->outputDimsForwarded()) {
-      nodePtr->getOperator()->computeOutputDims();
+    // TODO: support multi-inputs/outputs
+    std::set<std::shared_ptr<Node>> nextList = std::set<std::shared_ptr<Node>>();
+    for (std::shared_ptr<Node> nodePtr : listNodes) {
+        if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
+            const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
+            if (!op->outputDimsForwarded()) {
+                op->computeOutputDims();
+            }
+            if (!op->outputDimsForwarded()) { // try to compute output dimensions again later
+                nextList.insert(nodePtr);
+            } else { // compute output dimensions of children
+                std::set<std::shared_ptr<Node>> children = nodePtr->getChildren();
+                nextList.insert(children.begin(), children.end());
+            }
+        }
     }
-    if (!nodePtr->getOperator()->outputDimsForwarded()) {
-      nextList.insert(nodePtr);
-    } else {
-      std::set<std::shared_ptr<Node>> children = nodePtr->getChildren();
-      nextList.insert(children.begin(), children.end());
+    if (nextList.empty()) {
+        for (std::shared_ptr<Node> nodePtr : getNodes()) {
+            if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
+                if (!std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator())->outputDimsForwarded()) {
+                    nextList.insert(nodePtr);
+                }
+            }
+        }
     }
-  }
-  if (nextList.empty()) {
-    for (std::shared_ptr<Node> nodePtr : getNodes()) {
-      if (!nodePtr->getOperator()->outputDimsForwarded()) {
-        nextList.insert(nodePtr);
-      }
+    if (!nextList.empty()) {
+        _forwardDims(nextList);
     }
-  }
-  if (!nextList.empty()) {
-    _forwardDims(nextList);
-  }
 }
 
 void Aidge::GraphView::setBackend(const std::string &backend) {
-  for (auto node : getNodes()) {
-    node->getOperator()->setBackend(backend);
-  }
+    for (auto node : getNodes()) {
+        node->getOperator()->setBackend(backend);
+    }
 }
 
-void Aidge::GraphView::setDatatype(const DataType &datatype) {
-  for (auto node : getNodes()) {
-    node->getOperator()->setDatatype(datatype);
-  }
+void Aidge::GraphView::setDataType(const Aidge::DataType &datatype) {
+    for (auto node : getNodes()) {
+        node->getOperator()->setDataType(datatype);
+    }
 }
 
 std::vector<
@@ -356,7 +376,7 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
 
   // add learnable parameters to the graph
   if (includeLearnableParam) {
-    for (IOIndex_t i = node->nbDataInputs(); i < node->nbInputs(); ++i) {
+    for (IOIndex_t i = node->nbData(); i < node->nbInputs(); ++i) {
       std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
       if (parentNode) {
           parentNode->addView(shared_from_this());
@@ -601,7 +621,7 @@ Aidge::GraphView::getNode(const std::string& nodeName) const {
 void Aidge::GraphView::remove(std::shared_ptr<Node> nodePtr, bool includeLearnableParam) {
   // remove learnable params
   if (includeLearnableParam) {
-    for (IOIndex_t i = nodePtr->nbDataInputs(); i < nodePtr->nbInputs(); ++i) {
+    for (IOIndex_t i = nodePtr->nbData(); i < nodePtr->nbInputs(); ++i) {
       auto inputI = nodePtr->input(i);
       bool removeNode = true;
       for (const auto& parentOutput : inputI.first->outputs()) {
@@ -1042,7 +1062,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
     auto clonedNode = cloneNode(node_ptr);
     if (clonedNode == nullptr) {
       AIDGE_ASSERT(node_ptr->getChildren().size() <= 1, "deleted nodes in GraphView::clone() cannot have multiple children");
-      AIDGE_ASSERT(node_ptr->nbDataInputs() <= 1, "deleted nodes in GraphView::clone() cannot have multiple data input parents");
+      AIDGE_ASSERT(node_ptr->nbData() <= 1, "deleted nodes in GraphView::clone() cannot have multiple data input parents");
     }
     oldToNewNodes[node_ptr] = clonedNode;
   }
@@ -1060,7 +1080,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
         while (oldToNewNodes[parent.first] == nullptr) {
           // Find next valid parent in line, going backward in the graph
           AIDGE_INTERNAL_ASSERT(parent.first->getChildren().size() == 1);
-          AIDGE_INTERNAL_ASSERT(parent.first->nbDataInputs() <= 1);
+          AIDGE_INTERNAL_ASSERT(parent.first->nbData() <= 1);
           const auto& parents = parent.first->dataInputs();
 
           if (!parents.empty() && parents[0].first != nullptr // a valid parent exists
@@ -1143,7 +1163,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
     // If output node was removed, find previous valid output
     while (oldToNewNodes[it->first] == nullptr) {
       // Removed node should have only one connected data input, otherwise cloning is invalid
-      AIDGE_INTERNAL_ASSERT(it->first->nbDataInputs() <= 1);
+      AIDGE_INTERNAL_ASSERT(it->first->nbData() <= 1);
       auto parents = it->first->dataInputs();
 
       if (!parents.empty() && parents[0].first != nullptr // a valid parent exists
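
GraphView::compile bundles the three preparation steps that previously had to
be called by hand before running a scheduler. A minimal sketch of the
equivalence (the backend name and data type are illustrative):

    #include <memory>
    #include "aidge/graph/GraphView.hpp"

    void prepare(std::shared_ptr<Aidge::GraphView> graph) {
        // What compile() does internally:
        graph->setBackend("cpu");                      // pick the implementations
        graph->setDataType(Aidge::DataType::Float32);  // propagate the data type
        graph->forwardDims();                          // then forward dimensions
        // Equivalent one-liner:
        // graph->compile("cpu", Aidge::DataType::Float32);
    }
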
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index e6a53c871f5312c68f40dc5c9a2777729470298b..5a7b05e469daab10a4abd468177a3ad137096f63 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -15,6 +15,7 @@
 #include "aidge/operator/Producer.hpp"
 #include <memory>
 #include <vector>
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
 Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
@@ -34,7 +35,7 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
 ///////////////////////////////////////////////////////
 
 Aidge::Connector Aidge::Node::operator()(const std::vector<Connector> &ctors) {
-    assert((ctors.size() == nbDataInputs()) && "Wrong number of arguments.\n");
+    assert((ctors.size() == nbData()) && "Wrong number of arguments.\n");
     for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inputs()) {
         assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n");
         (void) input; // avoid unused warning
@@ -94,8 +95,8 @@ Aidge::IOIndex_t Aidge::Node::getNbFreeDataInputs() const {
 std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>
 Aidge::Node::dataInputs() const {
     std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
-            std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbDataInputs());
-    for (std::size_t i = 0; i < static_cast<std::size_t>(nbDataInputs()); ++i) {
+            std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbData());
+    for (std::size_t i = 0; i < static_cast<std::size_t>(nbData()); ++i) {
         res[i] = std::pair<std::shared_ptr<Node>, IOIndex_t>(mParents[i], mIdOutParents[i]);
     }
     return res;
@@ -111,18 +112,18 @@ std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> Aidge::No
     return res;
 }
 
-void Aidge::Node::setInput(const Aidge::IOIndex_t idx, const std::shared_ptr<Aidge::Tensor> tensor) {
-    assert(((idx != gk_IODefaultIndex) && (idx < nbInputs())) && "Parent index out of bound.");
-    if (mParents[idx] != nullptr) {
-        mParents[idx]->removeChild(shared_from_this(), mIdOutParents[idx]);
-        removeParent(idx);
-    }
-    std::shared_ptr<Node> newConstantNode = Producer(tensor);
-    newConstantNode->addChild(shared_from_this(), 0, idx);
-    for (auto& graphPtr : views()) {
-        graphPtr->add(newConstantNode);
-    }
-}
+// void Aidge::Node::setInput(const Aidge::IOIndex_t idx, const std::shared_ptr<Aidge::Tensor> tensor) {
+//     assert(((idx != gk_IODefaultIndex) && (idx < nbInputs())) && "Parent index out of bound.");
+//     if (mParents[idx] != nullptr) {
+//         mParents[idx]->removeChild(shared_from_this(), mIdOutParents[idx]);
+//         removeParent(idx);
+//     }
+//     std::shared_ptr<Node> newConstantNode = Producer(tensor);
+//     newConstantNode->addChild(shared_from_this(), 0, idx);
+//     for (auto& graphPtr : views()) {
+//         graphPtr->add(newConstantNode);
+//     }
+// }
 
 std::vector<std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>>
 Aidge::Node::outputs() const {
@@ -295,7 +296,7 @@ bool Aidge::Node::removeChild(const std::shared_ptr<Aidge::Node> nodePtr, const
 
 void Aidge::Node::resetConnections(bool includeLearnableParam) {
     // remove every parents reference to it
-    IOIndex_t nbRemovedInputs = includeLearnableParam ? nbInputs() : nbDataInputs();
+    IOIndex_t nbRemovedInputs = includeLearnableParam ? nbInputs() : nbData();
     for (IOIndex_t i = 0; i < nbRemovedInputs; ++i) {
         std::pair<std::shared_ptr<Node>, IOIndex_t> parent = input(i);
         if (parent.first) {
@@ -367,7 +368,7 @@ std::set<Aidge::NodePtr> Aidge::Node::getNodeDelta(int delta,std::set<Aidge::Nod
             }
         }
     }
-    
+
     return out;
 }
 /////////////////////////////////////////////////////////////////////////////////////////////
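
The nbDataInputs() -> nbData() rename keeps the usual input layout: data
inputs occupy the first slots and learnable parameters the remaining ones.
Sketch of the indexing convention assumed by all the call sites above:

    #include <memory>
    #include "aidge/graph/Node.hpp"

    void walkInputs(const std::shared_ptr<Aidge::Node>& node) {
        for (Aidge::IOIndex_t i = 0; i < node->nbData(); ++i) {
            // data input i, fed by a parent operator
        }
        for (Aidge::IOIndex_t i = node->nbData(); i < node->nbInputs(); ++i) {
            // parameter input i (weights, bias, ...), usually a Producer
        }
    }
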
diff --git a/src/graphRegex/GraphFsmInterpreter.cpp b/src/graphRegex/GraphFsmInterpreter.cpp
index 2984ab4fb3864244c9e32dbfcda9ef2ae080acf0..18b768c6567e64caf6841ed4a339f13fd16f69d6 100644
--- a/src/graphRegex/GraphFsmInterpreter.cpp
+++ b/src/graphRegex/GraphFsmInterpreter.cpp
@@ -3,15 +3,24 @@
 using namespace Aidge; 
 
 
-GraphFsmInterpreter::GraphFsmInterpreter(const std::string graphMatchExpr,std::map<std::string,std::shared_ptr<ConditionalInterpreter>> nodesCondition):mParser(graphMatchExpr){
+GraphFsmInterpreter::GraphFsmInterpreter(const std::string graphMatchExpr, std::vector<std::shared_ptr<ConditionalInterpreter>>& nodesCondition):mParser(graphMatchExpr){
     mActGroupe = 0;
-    mNodesCondition = nodesCondition;
+
+    for (const auto &obj : nodesCondition) {
+        if(mNodesCondition.find(obj->getKey()) == mNodesCondition.end()){
+            mNodesCondition[obj->getKey()] = obj;
+        }else{
+            throw std::logic_error("GraphFsmInterpreter bad key: " + obj->getKey());
+        }
+    }
 }
 std::shared_ptr<FsmGraph>  GraphFsmInterpreter::interpret(void){
     mActGroupe = 0;
     std::shared_ptr<AstNode<gRegexTokenTypes>> tree = mParser.parse();
-    return visit(tree);
+    std::shared_ptr<FsmGraph> out = visit(tree);
+    return out;
 }
+
 std::shared_ptr<FsmGraph> GraphFsmInterpreter::visit(std::shared_ptr<AstNode<gRegexTokenTypes>> AstTree){
 
     std::vector<std::shared_ptr<AstNode<gRegexTokenTypes>>> nextAstNodes = AstTree->getChilds();
@@ -44,7 +53,7 @@ std::shared_ptr<FsmGraph> GraphFsmInterpreter::keyF(std::shared_ptr<AstNode<gReg
 
     std::shared_ptr<FsmNode>  start = std::make_shared<FsmNode>(false,true);
     std::shared_ptr<FsmNode>  valid = std::make_shared<FsmNode>(true,false);
-    std::shared_ptr<FsmGraph> graph = std::make_shared<FsmGraph>();
+    std::shared_ptr<FsmGraph> graph = std::make_shared<FsmGraph>(mParser.getQuery());
     std::shared_ptr<FsmEdge> edge;
     
 
@@ -66,7 +75,7 @@ std::shared_ptr<FsmGraph> GraphFsmInterpreter::keyF(std::shared_ptr<AstNode<gReg
 std::shared_ptr<FsmGraph> GraphFsmInterpreter::sepF(std::shared_ptr<FsmGraph> leftFsm,std::shared_ptr<FsmGraph> rigthFsm){
 
     size_t idxLeft = leftFsm->getNbSubFsm();
-    rigthFsm->incOrigineAllNodeBy(idxLeft);
+    rigthFsm->incOriginAllNodeBy(idxLeft);
     leftFsm->unionG(rigthFsm);
     //the rigthFsm is no longer useful
     return leftFsm;
@@ -119,7 +128,7 @@ std::shared_ptr<FsmGraph> GraphFsmInterpreter::qomF(std::shared_ptr<FsmGraph> fs
                 for(auto valid : allValid){
                     if(haveCommon){
                         /*
-                        the // quantif case 
+                        the // quantifier case
                         get the go back and make a lexeme id(number)
                         we need to go back to the ref delta min #TODO
                         */
@@ -136,7 +145,7 @@ std::shared_ptr<FsmGraph> GraphFsmInterpreter::qomF(std::shared_ptr<FsmGraph> fs
                         edge = FsmEdgeFactory::make(valid,start,FsmEdgeTypes::REF,mNodesCondition, lexem.str());
                     }else{
                         /*
-                        the sequensial quantif case 
+                        the sequential quantifier case
                         no reference to common 
                         */
                         edge = FsmEdgeFactory::make(valid,start,FsmEdgeTypes::EMPTY,mNodesCondition,"");
diff --git a/src/graphRegex/GraphLexer.cpp b/src/graphRegex/GraphLexer.cpp
index 61214f96a090fef5d28cb0ce1a009644d9570880..f504ad025940c88058ce5949259c464ae2cedfb6 100644
--- a/src/graphRegex/GraphLexer.cpp
+++ b/src/graphRegex/GraphLexer.cpp
@@ -133,6 +133,11 @@ bool GraphLexer::isEnd(void){
     return mPosition >= mRegularExpressions.length();
 }
 
+
+const std::string GraphLexer::getQuery(){
+    return mRegularExpressions;
+}
+
 std::runtime_error GraphLexer::badTokenError(const std::string& currentChars,std::size_t position){
     std::ostringstream errorMessage;
     errorMessage << "\nBad syntax " << currentChars << " :\n" << mRegularExpressions << "\n";
diff --git a/src/graphRegex/GraphParser.cpp b/src/graphRegex/GraphParser.cpp
index 5aa653c482dae82c2e9fa02bfc36b2ffc821785f..9c3d10114d777cf7755432a5723a3b70b81d37a1 100644
--- a/src/graphRegex/GraphParser.cpp
+++ b/src/graphRegex/GraphParser.cpp
@@ -9,6 +9,10 @@ mLexer(gRegexExpressions)
 }
 
 
+const std::string GraphParser::getQuery(){
+    return mLexer.getQuery();
+}
+
 std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::parse(void){
 
     std::shared_ptr<AstNode<gRegexTokenTypes>> astTree = constructAstAllExpr();
diff --git a/src/graphRegex/GraphRegex.cpp b/src/graphRegex/GraphRegex.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9a9b53da615f77dbdb8e597763411a2e84920b2a
--- /dev/null
+++ b/src/graphRegex/GraphRegex.cpp
@@ -0,0 +1,158 @@
+#include "aidge/graphRegex/GraphRegex.hpp"
+using namespace Aidge; 
+
+
+void GraphRegex::setKeyFromGraph(std::shared_ptr<GraphView> ref){
+
+    for (const NodePtr& node : ref->getNodes()) {
+        std::string type =  node->type();
+        bool isIn = false;
+        for(const auto &test:mAllTest){
+            if(test->getKey() == type){
+                isIn = true;
+                break;
+            }
+        }
+        if(!isIn){
+             mAllTest.push_back(std::make_shared<ConditionalInterpreter>(type,"getType($) =='" + type + "'"));
+        }
+        // auto it = mAllTest.find(type);
+        // if (it == mAllTest.end()) {
+        //    mAllTest[type] = std::make_shared<ConditionalInterpreter>(type,"getType($) =='" + type + "'");
+        // }
+        // //if the key exists it's ok, but don't make 2 ConditionalInterpreter
+    }
+}
+
+
+
+// void GraphRegex::addQuery(const std::string query){
+//     //TODO: only one query per string; identical strings are the same query,
+//     //but 2 different strings may still denote the same query, we need to check the AST
+//     mQueryRecipe[query] = nullptr;
+// }
+
+void GraphRegex::addQuery(const std::string query,RecipesFunctionType f ){
+
+    mQueryRecipe[query] = f;
+
+}
+
+
+// Function to generate all combinations of n elements from a set
+void GraphRegex::_generateCombinationsStart(const std::set<NodePtr>& elements, std::size_t n, std::size_t index, std::vector<NodePtr>& current, std::set<std::vector<NodePtr>>& combinations) {
+    if (n == 0) {
+        combinations.insert(current);
+        return;
+    }
+    for (auto it = elements.begin(); it != elements.end(); ++it) {
+        current.push_back(*it);
+        _generateCombinationsStart(elements, n - 1, index + 1, current, combinations);
+        current.pop_back();
+    }
+}
+
+
+void GraphRegex::_findLargestCompatibleSet(
+    const std::vector<std::shared_ptr<MatchSolution>>& solutions,
+    std::set<std::shared_ptr<MatchSolution>>& currentSet,
+    std::set<std::shared_ptr<MatchSolution>>& largestSet,
+    size_t currentIndex
+) {
+    if (currentIndex >= solutions.size()) {
+        if (currentSet.size() > largestSet.size()) {
+            largestSet = currentSet;
+        }
+        return;
+    }
+
+    for (size_t i = currentIndex; i < solutions.size(); ++i) {
+        if (std::all_of(currentSet.begin(), currentSet.end(),
+            [&](const std::shared_ptr<MatchSolution>& solution) {
+                return solution->areCompatible(solutions[i]);
+            }
+        )) {
+            currentSet.insert(solutions[i]);
+            _findLargestCompatibleSet(solutions, currentSet, largestSet, i + 1);
+            currentSet.erase(solutions[i]);
+        }
+    }
+}
+
+std::set<std::shared_ptr<MatchSolution>> GraphRegex::_findLargestCompatibleSet(
+    const std::vector<std::shared_ptr<MatchSolution>>& solutions
+) {
+    std::set<std::shared_ptr<MatchSolution>> largestSet;
+    std::set<std::shared_ptr<MatchSolution>> currentSet;
+    _findLargestCompatibleSet(solutions, currentSet, largestSet, 0);
+    return largestSet;
+}
+
+
+
+std::set<std::shared_ptr<MatchSolution>> GraphRegex::match(std::shared_ptr<GraphView> ref){
+
+    std::vector<std::shared_ptr<MatchSolution>> solutions = {};
+
+    //for (const std::string& query : mQuery) {
+    for (auto it = mQueryRecipe.begin(); it != mQueryRecipe.end(); ++it) {
+        const std::string query  = it->first;
+
+        std::shared_ptr<GraphFsmInterpreter>  fsmGenerator = std::make_shared<GraphFsmInterpreter>(query,mAllTest);
+        std::shared_ptr<FsmGraph> fsm = fsmGenerator->interpret();
+
+        // generate all the possible start combinations
+        std::size_t nb_startSt =  fsm->getNbStart();
+        std::set<std::vector<NodePtr>> combinations;
+        std::vector<NodePtr> current;
+        _generateCombinationsStart(ref->getNodes(), nb_startSt, 0, current, combinations);
+        
+
+        // test every start combination
+        for (const auto& combination : combinations) {
+            std::vector<std::shared_ptr<MatchSolution>> solution = fsm->test(combination);
+            solutions.insert(solutions.end(), solution.begin(), solution.end());
+        }
+    }
+    return _findLargestCompatibleSet(solutions);
+}
+
+void GraphRegex::appliedRecipes(std::shared_ptr<GraphView> ref){
+    std::set<std::shared_ptr<MatchSolution>> matchRef  = match(ref);
+    for (const auto& solution : matchRef) {
+        if(mQueryRecipe[solution->getQuery()] != nullptr){
+            mQueryRecipe[solution->getQuery()](solution);
+        }
+    }
+}
+
+void GraphRegex::setNodeKey(const std::string key, const std::string conditionalExpressions ){
+    mAllTest.push_back(std::make_shared<ConditionalInterpreter>(key,conditionalExpressions));
+    _majConditionalInterpreterLambda();
+}
+
+
+void GraphRegex::setNodeKey(const std::string key,std::function<bool(NodePtr)> f){
+    //we could apply it to all keys but it's not efficient
+    if(mAllLambda.find(key) != mAllLambda.end()){
+        throw std::runtime_error(key + " is already defined");
+    }
+    mAllLambda[key] = f;
+    _majConditionalInterpreterLambda();
+}
+
+void GraphRegex::_majConditionalInterpreterLambda(){
+       
+    for (const auto& test : mAllTest) {
+        for (const auto& pair : mAllLambda) {
+            const std::string& key = pair.first;
+            const std::function<bool(NodePtr)>& lambda = pair.second;
+
+            if(!test->isLambdaRegister(key)){
+                test->insertLambda(key,lambda);
+            }
+            
+        }
+    }
+}
+
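
The workflow this new class enables, as a sketch; the query syntax shown and
the lambda signature behind RecipesFunctionType are assumptions based on how
addQuery and appliedRecipes use them, not taken verbatim from this patch:

    #include <memory>
    #include "aidge/graphRegex/GraphRegex.hpp"

    void matchAndRewrite(std::shared_ptr<Aidge::GraphView> graph) {
        Aidge::GraphRegex regex;
        // register one key per node type, e.g. "getType($) == 'MatMul'"
        regex.setKeyFromGraph(graph);
        regex.addQuery("MatMul->Add", [](std::shared_ptr<Aidge::MatchSolution> solution) {
            // recipe body: rewrite the matched nodes, e.g. via solution->getAll()
        });
        regex.appliedRecipes(graph); // match, keep the largest compatible set, apply
    }
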
diff --git a/src/graphRegex/matchFsm/FsmEdge.cpp b/src/graphRegex/matchFsm/FsmEdge.cpp
index 593da06abe18576d435ae55718d379aa5b682d60..638aad3bc3f5c94d5b20420ed8cc0799daa08cc0 100644
--- a/src/graphRegex/matchFsm/FsmEdge.cpp
+++ b/src/graphRegex/matchFsm/FsmEdge.cpp
@@ -24,7 +24,7 @@ void FsmEdge::updateRelative( const std::map<size_t,int>& relativePos ){
 std::shared_ptr<FsmNode> FsmEdge::getSourceNode(void){
     return mNodeSource;
 }
-void FsmEdge::reSetSouceNode(const std::shared_ptr<FsmNode>& newSource){
+void FsmEdge::reSetSourceNode(const std::shared_ptr<FsmNode>& newSource){
     mNodeSource->rmEdge(shared_from_this());
     mNodeSource = newSource;
     mNodeSource->addEdge(shared_from_this());
@@ -42,7 +42,7 @@ void FsmEdge::reSetDestNode(const std::shared_ptr<FsmNode>& newDest){
 }
 void FsmEdge::propagateRelativePos(void){
 
-    std::set<int> myRelativeID;
+    std::set<std::size_t> myRelativeID;
     for (const auto& kvp : mRelativePos) {
         myRelativeID.insert(kvp.first);
     }
@@ -56,31 +56,31 @@ void FsmEdge::propagateRelativePos(void){
             }
             
 
-            std::set<int> nextRelativeID;
+            std::set<std::size_t> nextRelativeID;
             for (const auto& kvp : nextEdge->getRelative()) {
                 nextRelativeID.insert(kvp.first);
             }
 
             // Find elements in myRelativeID but not in nextRelativeID
-            std::set<int> idxsToPush;
+            std::set<std::size_t> idxsToPush;
             std::set_difference(myRelativeID.begin(), myRelativeID.end(),
                                 nextRelativeID.begin(), nextRelativeID.end(),
                                 std::inserter(idxsToPush, idxsToPush.begin()));
 
             // Find elements in nextRelativeID but not in myRelativeID
-            std::set<int> idxsToGet;
+            std::set<std::size_t> idxsToGet;
             std::set_difference(nextRelativeID.begin(), nextRelativeID.end(),
                                 myRelativeID.begin(), myRelativeID.end(),
                                 std::inserter(idxsToGet, idxsToGet.begin()));
 
-            //  test for integrity we look if 2 edge refert to the samme
+            //  integrity test: we check that when 2 edges refer to the same
             //  ref and are linked, the ref delta is exactly one
             //  not working for common node
             //  we could go deeper by finding all the paths to a ref and checking that the delta is good
 
             // Find elements present in both myRelativeID and nextRelativeID
-            std::set<int> idxsTotest;
-            for (int idx : nextRelativeID){
+            std::set<std::size_t> idxsTotest;
+            for (auto idx : nextRelativeID){
                 if (myRelativeID.find(idx) != myRelativeID.end()){
                     if (std::abs(getRelative().at(idx) - nextEdge->getRelative().at(idx)) != 1) {
                         throw std::runtime_error("Bad relative");
@@ -90,10 +90,10 @@ void FsmEdge::propagateRelativePos(void){
 
 
             
-            // this egde have more relative info than the next
+            // this edge has more relative info than the next
             std::map<size_t,int> tmpRelative;
             // we push this info to the next 
-            for( auto idxToPush :idxsToPush ){
+            for(auto idxToPush : idxsToPush){
                 tmpRelative.insert( std::make_pair(idxToPush, getRelative().at(idxToPush) +1));
             }
             if(tmpRelative.size() != 0){
@@ -104,7 +104,7 @@ void FsmEdge::propagateRelativePos(void){
 
 
             // the next node has more info than this one, we need to get it
-            for( auto idxToGet :idxsToGet ){
+            for(auto idxToGet : idxsToGet){
                 tmpRelative.insert( std::make_pair(idxToGet, nextEdge->getRelative().at(idxToGet) -1));
             }
             if(tmpRelative.size() != 0){
@@ -226,6 +226,14 @@ const EdgeTestResult FsmEdgeEmpty::test(const std::shared_ptr<FsmRunTimeContext>
     }
     return {true,std::set<NodePtr>({opNode})};//none
 }
+//////////////
+
+FsmEdgeNone::FsmEdgeNone(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNode> dest)
+:FsmEdge(source,dest,nullptr)
+{}
+const EdgeTestResult FsmEdgeNone::test(const std::shared_ptr<FsmRunTimeContext> /*stmContext*/){
+    return {false,std::set<NodePtr>()};
+}
 
 /// factory
 std::shared_ptr<FsmEdge> FsmEdgeFactory::make(
@@ -258,6 +266,14 @@ const std::string lexeme)
                 std::string commonId =  m[2];
                 size_t commonIdx = commonId.empty() ? 0 : std::stoi(commonId) + 1;
                 std::string commonKey = edgeType + std::to_string(commonIdx);
+            
+                if(allTest.find(edgeType) == allTest.end()){
+                    //if the key is not linked to a condition 
+                    //by default, it is initialized with an edge that is always false
+                    return std::make_shared<FsmEdgeNone>(source, dest);
+                    //throw std::invalid_argument("Bad Node Test " + edgeType );
+                }
+
                 return  std::make_shared<FsmEdgeCommon> (source, dest, allTest.at(edgeType), commonKey);
             } else {
                 throw std::invalid_argument("error lexem COMMON " + lexeme);
@@ -267,6 +283,15 @@ const std::string lexeme)
             std::smatch m;
             if (std::regex_match(lexeme, m, uniqueRegex)) {
                 std::string edgeType = m[1];
+
+                if(allTest.find(edgeType) == allTest.end()){
+
+                    //if the key is not linked to a condition 
+                    //by default, it is initialized with an edge that is always false
+                    return std::make_shared<FsmEdgeNone>(source, dest);
+                    //throw std::invalid_argument("Bad Node Test " + edgeType );
+                }
+
                 return  std::make_shared<FsmEdgeUnique>(source, dest, allTest.at(edgeType));
             } else {
                 throw std::invalid_argument("error lexem UNIQUE \"" + std::string(lexeme) +" eee\"");
diff --git a/src/graphRegex/matchFsm/FsmGraph.cpp b/src/graphRegex/matchFsm/FsmGraph.cpp
index 5a9f00d728cd2cd9f58c2228361f8393de2a3d9d..a56474e042cc44a68938b1d19e19a0c6841cb8cb 100644
--- a/src/graphRegex/matchFsm/FsmGraph.cpp
+++ b/src/graphRegex/matchFsm/FsmGraph.cpp
@@ -4,12 +4,13 @@ using namespace Aidge;
 
 
 
-FsmGraph::FsmGraph(/* args */){
+FsmGraph::FsmGraph(const std::string query):mQuery(query){
 
 }
 
 //TODO
-    std::shared_ptr<MatchResult> FsmGraph::test(std::vector<NodePtr>& startNodes){
+std::vector<std::shared_ptr<MatchSolution>> FsmGraph::test(const std::vector<NodePtr>& startNodes){
+
     std::vector<std::shared_ptr<Aidge::FsmNode>> startNodesFsm = getStartNodes();
     if(startNodes.size() != startNodesFsm.size()){
          throw std::runtime_error("bad number of Start nodes");
@@ -60,9 +61,9 @@ FsmGraph::FsmGraph(/* args */){
         walks.swap(nextWalks);
         nextWalks.clear();
     }
-
-
-    return std::make_shared<MatchResult>(allValidContext,getNbSubFsm());
+    
+    MatchResult allMatch(allValidContext,getNbSubFsm(),mQuery,startNodes);
+    return allMatch.getSolutions();
 
 }
 
@@ -77,8 +78,8 @@ const std::set<std::shared_ptr<FsmEdge>>& FsmGraph::getEdge(void){
 void FsmGraph::addEdge(std::shared_ptr<FsmEdge>& edge){
     edge->updateWeak();
     mEdges.insert(edge);
-    mAllOrigine.insert(edge->getDestNode()->getOrigine());
-    mAllOrigine.insert(edge->getSourceNode()->getOrigine());
+    mAllOrigin.insert(edge->getDestNode()->getOrigin());
+    mAllOrigin.insert(edge->getSourceNode()->getOrigin());
 }
 
 const std::vector<std::shared_ptr<FsmNode>> FsmGraph::getStartNodes(void){
@@ -151,19 +152,23 @@ void FsmGraph::mergeOneStartOneValid(const std::shared_ptr<FsmGraph> fsmGraph){
 }
 
 std::size_t FsmGraph::getNbSubFsm(void){
-    return mAllOrigine.size();
+    return mAllOrigin.size();
+}
+
+std::size_t FsmGraph::getNbStart(void){
+    return getStartNodes().size();
 }
 
-void FsmGraph::incOrigineAllNodeBy(std::size_t incr){
+void FsmGraph::incOriginAllNodeBy(std::size_t incr){
     std::set<std::shared_ptr<FsmNode>> nodes = getNodes();
     for(auto node :nodes){
-        node->incOrigine(incr);
+        node->incOrigin(incr);
     }
     std::set<std::size_t> updatedOrigin;
-    for(auto origin : mAllOrigine){
+    for(auto origin : mAllOrigin){
         updatedOrigin.insert(origin + incr);
     }
-    mAllOrigine.swap(updatedOrigin);
+    mAllOrigin.swap(updatedOrigin);
 }
 
 void FsmGraph::_mergeNode(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNode> dest){
@@ -187,7 +192,7 @@ void FsmGraph::_mergeNode(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNod
         if(edge->getDestNode() == source ){
             edge->reSetDestNode(dest);
         }else if(edge->getSourceNode() == source ){
-            edge->reSetSouceNode(dest);
+            edge->reSetSourceNode(dest);
         }
 
     }
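
Each FsmGraph now carries the query string it was interpreted from, so every
MatchSolution can be routed back to the recipe registered for that query.
Sketch (the header path and the "A->B" query syntax are assumptions):

    #include <cassert>
    #include <memory>
    #include <vector>
    #include "aidge/graphRegex/GraphFsmInterpreter.hpp"

    // conditions/startNodes are assumed built as in GraphRegex::match.
    void checkTagging(std::vector<std::shared_ptr<Aidge::ConditionalInterpreter>>& conditions,
                      const std::vector<Aidge::NodePtr>& startNodes) {
        Aidge::GraphFsmInterpreter interpreter("A->B", conditions);
        std::shared_ptr<Aidge::FsmGraph> fsm = interpreter.interpret();
        for (const auto& solution : fsm->test(startNodes)) {
            assert(solution->getQuery() == "A->B"); // tagged with its originating query
        }
    }
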
diff --git a/src/graphRegex/matchFsm/FsmNode.cpp b/src/graphRegex/matchFsm/FsmNode.cpp
index 84b4a0c3fdbe0730a12a2a62db9158e2538d646f..7bc4cf105b43a540bd0e9c686af35dd220611a09 100644
--- a/src/graphRegex/matchFsm/FsmNode.cpp
+++ b/src/graphRegex/matchFsm/FsmNode.cpp
@@ -53,11 +53,11 @@ const std::vector<std::shared_ptr<FsmRunTimeContext>> FsmNode::test( std::shared
 
 
 
-std::size_t FsmNode::getOrigine(void){
-    return mOrigineStm;
+std::size_t FsmNode::getOrigin(void){
+    return mOriginFsm;
 }
-void FsmNode::incOrigine(std::size_t inc){
-    mOrigineStm += inc;
+void FsmNode::incOrigin(std::size_t inc){
+    mOriginFsm += inc;
 }
 void FsmNode::rmEdge(std::shared_ptr<FsmEdge> edge){
     mEdges.erase(edge);
@@ -93,7 +93,7 @@ const std::set<std::weak_ptr<FsmEdge>,lex_compare<FsmEdge>>& FsmNode::getEdges(v
 }
 
 void FsmNode::setGroupe(std::size_t groupeIdx){
-    mGroupeStm = groupeIdx;
+    mGroupeFsm = groupeIdx;
     
 }
 
diff --git a/src/graphRegex/matchFsm/FsmRunTimeContext.cpp b/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
index 787cf2322a5b8e7001cdc59325345000dbb61553..ddf6a46cc7c75dc853d71ba98b051b4263a31164 100644
--- a/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
+++ b/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
@@ -155,7 +155,7 @@ void FsmRunTimeContext::setValid(NodePtr node,std::shared_ptr<ConditionalInterpr
 }
 
 std::size_t FsmRunTimeContext::getSubStmId(void){
-    return mActState->getOrigine();
+    return mActState->getOrigin();
 }
 
 NodePtr FsmRunTimeContext::getCommonNodeFromIdx(std::size_t commonIdx){
@@ -207,7 +207,7 @@ std::set<NodePtr> FsmRunTimeContext::getValidNodesNoCommon(void){
     return differenceSet;
 }
 
-std::map<std::shared_ptr<ConditionalInterpreter>,std::set<NodePtr>> FsmRunTimeContext::getValid(void){
+std::map<std::shared_ptr<ConditionalInterpreter>,std::set<NodePtr>>& FsmRunTimeContext::getValid(void){
     return mValidNodes;
 }
 
diff --git a/src/graphRegex/matchFsm/MatchResult.cpp b/src/graphRegex/matchFsm/MatchResult.cpp
index c35f1a7348e365baa8a27854ee6b0a833e342ee7..08be00dea66c66a46dbbf2b225efd0df3f332188 100644
--- a/src/graphRegex/matchFsm/MatchResult.cpp
+++ b/src/graphRegex/matchFsm/MatchResult.cpp
@@ -1,37 +1,88 @@
+#include <algorithm> // set_intersection, std::sort
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
 #include "aidge/graphRegex/matchFsm/MatchResult.hpp"
 
-using namespace Aidge; 
+Aidge::MatchSolution::MatchSolution(std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence,
+                                    const std::string query,
+                                    const std::vector<NodePtr> startNode)
+    : mQueryFrom(query),
+      mStartNode(startNode)
+{
+    //reformat the solution
+    for (const auto& context : precedence) {
+        for (const auto& pair : context->getValid()) {
+            if(mSolution.find(pair.first->getKey()) == mSolution.end()){
+                mSolution[pair.first->getKey()] = pair.second;
+            }else{
+                mSolution[pair.first->getKey()].insert(pair.second.begin(), pair.second.end());
+            }
+        }
+    }
+}
+
+const std::set<Aidge::NodePtr> Aidge::MatchSolution::getAll(){
 
+        // Create a unique set to store all the elements
+        std::set<NodePtr> uniqueSet;
 
-MatchResult::MatchResult(std::vector<std::shared_ptr<FsmRunTimeContext>> allValid, std::size_t nbSubStm):mIdToRunTime(nbSubStm){
-        mAllValid = allValid;
-        mNbSubStm = nbSubStm;
+        // Iterate through the map and insert elements from each set into the unique set
+        for (const auto& pair : mSolution) {
+            const std::set<NodePtr>& nodeSet = pair.second;
 
-        //mIdToRunTimm
-        for (const auto& contextPtr : allValid) {
-            mIdToRunTime[contextPtr->getSubStmId()].push_back(contextPtr);
+            // Insert elements from the current set into the unique set
+            uniqueSet.insert(nodeSet.begin(), nodeSet.end());
         }
 
-        std::vector<std::shared_ptr<FsmRunTimeContext>> precedence;
-        //make all solution posible 
-        _generateCombinationd(0,precedence);
-        //sort by solution number of elements
-        std::sort(mSolve.begin(), mSolve.end(), [](const std::set<NodePtr>& set1, const std::set<NodePtr>& set2) {
-        return set1.size() < set2.size();
-        });
-
+        return uniqueSet;
+}
 
+bool Aidge::MatchSolution::areCompatible(std::shared_ptr<Aidge::MatchSolution> solution){
+    std::set<NodePtr> set1 = solution->getAll();
+    std::set<NodePtr> set2 = getAll();
+    std::set<NodePtr> intersection;
+    std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(), std::inserter(intersection, intersection.begin()));
+    return intersection.empty();
 }
 
-void MatchResult::_generateCombinationd( std::size_t idxSubStm, std::vector<std::shared_ptr<FsmRunTimeContext>>& precedence){
 
+////////////////////////////////
+//
+////////////////////////////////
+Aidge::MatchResult::MatchResult(std::vector<std::shared_ptr<Aidge::FsmRunTimeContext>> allValid,
+                                std::size_t nbSubStm,
+                                const std::string& query,
+                                const std::vector<Aidge::NodePtr>& startNodes)
+        : mIdToRunTime(nbSubStm),
+          mNbSubStm(nbSubStm)
+{
+    mAllValid = allValid;
+
+    //fill mIdToRunTime
+    for (const auto& contextPtr : allValid) {
+        mIdToRunTime[contextPtr->getSubStmId()].push_back(contextPtr);
+    }
+
+    std::vector<std::shared_ptr<FsmRunTimeContext>> precedence;
+    //generate all the possible solutions
+    _generateCombination(0,precedence,query,startNodes);
+    //sort the solutions by number of elements
+    std::sort(mSolve.begin(), mSolve.end(), [](std::shared_ptr<MatchSolution>& set1, std::shared_ptr<MatchSolution>& set2) {
+        return set1->getAll().size() < set2->getAll().size();
+    });
+}
+
+void Aidge::MatchResult::_generateCombination( std::size_t idxSubStm,
+                                        std::vector<std::shared_ptr<Aidge::FsmRunTimeContext>>& precedence,
+                                        const std::string& query,
+                                        const std::vector<Aidge::NodePtr>& startNodes)
+{
     //it's the end: we went through all the stm
     if (idxSubStm == mNbSubStm)
     {
-        //precedence containe a liste of FSM compatible, we just need to 
-        //check if all the node have been valide by at least one contetext 
-        
-        //1) make the set of all node for the comput graph that are valide in all the  FsmRunTimeContext
+        //precedence contains a list of compatible FSM contexts, we just need to
+        //check if all the nodes have been validated by at least one context
+
+        //1) make the set of all nodes of the compute graph that are valid in all the FsmRunTimeContext
         std::set<NodePtr> validNode;
         std::set<NodePtr> rejectNode;
         for (const auto& contextPtr : precedence) {
@@ -40,25 +91,25 @@ void MatchResult::_generateCombinationd( std::size_t idxSubStm, std::vector<std:
             std::set<NodePtr> tmpR =  contextPtr->getRejectedNodes();
             rejectNode.insert(tmpR.begin(),tmpR.end());
         }
-        // 2) all  RejectedNodes need to be valide by an others stm 
+        // 2) all RejectedNodes need to be validated by another stm
         // if it's not the case the match is not valid
         if(std::includes(validNode.begin(), validNode.end(), rejectNode.begin(), rejectNode.end())){
-            //we can save the solution 
-            mSolve.push_back(validNode);
+            //we can save the solution
+            mSolve.push_back(std::make_shared<MatchSolution>(precedence,query,startNodes));
         }
         precedence.pop_back();
         return;
     }
 
 
-    for (const auto& contextPtrOneFsm : mIdToRunTime[idxSubStm]) 
+    for (const auto& contextPtrOneFsm : mIdToRunTime[idxSubStm])
     {
         if(idxSubStm == 0){
             precedence.push_back(contextPtrOneFsm);
-            _generateCombinationd(idxSubStm+1,precedence);
-            
+            _generateCombination(idxSubStm+1,precedence,query,startNodes);
+
         }else{
-            //test if the new context is compatible whith all the context in the precedence
+            //test if the new context is compatible with all the contexts in the precedence
             //
             bool compatibleSolutionFsm = true;
             for (const auto& contextPtrOfOtherFsm : precedence) {
@@ -70,7 +121,7 @@ void MatchResult::_generateCombinationd( std::size_t idxSubStm, std::vector<std:
 
             if(compatibleSolutionFsm){
                 precedence.push_back(contextPtrOneFsm);
-                _generateCombinationd(idxSubStm+1,precedence);
+                _generateCombination(idxSubStm+1,precedence,query,startNodes);
             }
 
         }
@@ -83,11 +134,6 @@ void MatchResult::_generateCombinationd( std::size_t idxSubStm, std::vector<std:
 
 }
 
-std::set<NodePtr> MatchResult::getBiggerSolution(void){
-    if(mSolve.empty()){
-        return std::set<NodePtr>();
-    }else{
-        return mSolve[0];
-    }
-    
+std::shared_ptr<Aidge::MatchSolution> Aidge::MatchResult::getBiggerSolution(void){
+    return mSolve.empty() ? nullptr : mSolve[0];
 }
\ No newline at end of file
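
Two MatchSolution objects are compatible exactly when their matched node sets
are disjoint, which is what the set_intersection in areCompatible computes.
A standalone sketch of that rule:

    #include <algorithm>
    #include <iterator>
    #include <set>

    // No node may belong to both solutions, so the two matches can be
    // rewritten independently of each other.
    template <typename T>
    bool disjoint(const std::set<T>& a, const std::set<T>& b) {
        std::set<T> inter;
        std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                              std::inserter(inter, inter.begin()));
        return inter.empty();
    }
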
diff --git a/src/graphmatching/GRegex.cpp b/src/graphmatching/GRegex.cpp
deleted file mode 100644
index 6b54c5a476e0319c3fab0751c0528a2084ebc0a7..0000000000000000000000000000000000000000
--- a/src/graphmatching/GRegex.cpp
+++ /dev/null
@@ -1,301 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include "aidge/graphmatching/GRegex.hpp"
-#include "aidge/graph/GraphView.hpp"
-
-using namespace Aidge; 
-
-GRegex::GRegex(const std::map<std::string,NodeRegex*>& nodesRegex,std::vector<std::string>& seqRegexps ):mStmFab(nodesRegex){
-
- 
-    //setup all the STM
-    for (const std::string& sequRegex : seqRegexps) {
-        mStmInit.push_back(mStmFab.makeNewStm(sequRegex));
-    }
-    
-}
-
-bool GRegex::walk_validation_all_stm_are_valid(const std::vector<std::vector<SeqStm*>> all_stm){
-    //test if all stm type are in a valid state
-    std::vector<int> number_of_valid;
-    number_of_valid.resize(all_stm.size());
-
-    for (std::size_t i = 0; i < all_stm.size(); ++i) {
-        number_of_valid[i] = 0;
-        for (auto it = all_stm[i].begin(); it != all_stm[i].end(); ++it) {
-            SeqStm* stm = *it;
-            if (stm->isValid()){ 
-                number_of_valid[i] +=1;
-            }
-        }   
-    }
-
-    for (std::size_t i = 0; i < number_of_valid.size(); ++i) {
-        if (number_of_valid[i] == 0) {
-            //std::cout << "NO MATCH at least one stm are not valid" << std::endl;
-            return false;
-        }
-        if (number_of_valid[i] > 1) {
-            //std::cout << "NO MATCH multiple brach match of stm (// quantification)" << std::endl;
-            return false;
-        }
-    }
-    return true;
-}
-
-bool GRegex::walk_validation_all_node_read_validate_by_one_stm(const std::vector<std::vector<SeqStm*>> all_stm){
-    std::set<NodeTmp> all_stm_node_tested;
-    std::set<NodeTmp> all_stm_node_validated;
-
-    for (std::size_t i = 0; i < all_stm.size(); ++i) {
-        //std::cout << "all stm index " << i <<  " on dimension 1 of size " << all_stm.size() <<std::endl;
-        for (std::size_t j = 0; j < all_stm[i].size(); ++j) {
-            //std::cout << "all stm index " << j <<  " on dimension 2 of size " << all_stm[i].size() <<std::endl;
-
-            std::set<NodeTmp> stm_node_tested = all_stm[i][j]->getAllNodeTested();
-            std::set<NodeTmp> stm_node_validated = all_stm[i][j]->getAllNodeValidated();
-
-            all_stm_node_tested.insert(stm_node_tested.begin(), stm_node_tested.end()); 
-            all_stm_node_validated.insert(stm_node_validated.begin(), stm_node_validated.end()); 
-        }   
-    }
-    
-
-    std::set<NodeTmp> test_but_not_valid;
-    for (const auto& x : all_stm_node_tested) {
-        if (all_stm_node_validated.find(x) == all_stm_node_validated.end()) {
-            test_but_not_valid.insert(x);
-        }
-    }
-
-
-    if (!test_but_not_valid.empty()) {
-        std::cout << "NO MATCH. The node(s) ";
-        for (const auto& x : test_but_not_valid) {
-            std::cout << x.get() << ", ";
-        }
-        std::cout << " have been tested but not validated." << std::endl;
-        return false;
-    }
-    return true;
-
-}
-
-bool GRegex::walk_validation_common_nodes_same_tag_for_all_stm(const std::vector<std::vector<SeqStm*>> all_stm){
-    std::map<NodeTmp, std::pair<std::string,int>> node_to_common_tag;
-    for (std::size_t i = 0; i < all_stm.size(); ++i) {
-        for (auto it = all_stm[i].begin(); it != all_stm[i].end(); ++it) {
-            SeqStm* stm = *it;
-            
-            if (!stm->isValid()){ 
-                continue;
-            }
-            
-            for (const auto& pair : stm->getAllCommonNode()) {
-                const NodeTmp node = pair.first;
-                const std::string common_tag = pair.second;
-
-                if (node_to_common_tag.find(node) != node_to_common_tag.end()) {
-                    std::string tag = node_to_common_tag[node].first;
-                    int& occurence = node_to_common_tag[node].second;
-                    if (tag!=common_tag){
-                        std::cout << "NO MATCH. The node " << node << " have two different tags "<< tag << " and " << common_tag  << std::endl;
-                        return false;
-                    } else {
-                        occurence += 1;
-                    }
-                } else {
-                    node_to_common_tag.insert(std::make_pair(node, std::make_pair(common_tag, 1)));
-                }
-            }
-        }   
-    }
-    /*std::cout << "Node to common tag ";
-    for (const auto& x : node_to_common_tag) {
-        std::cout << "(" << x.first << ", " << "[" << x.second.first << ", " << x.second.second << "]" << ") ; ";
-    }
-    std::cout << std::endl;*/
-
-
-    for (const auto& pair : node_to_common_tag) {
-        const std::pair<std::string, int> tag_occurence_pair = pair.second;
-        if (tag_occurence_pair.second < 1){
-            //std::cout << "NO MATCH. The common tag " << tag_occurence_pair.first  << " did not match " << std::endl;
-            return false;
-        }
-    }
-    
-    return true;
-}
-
-std::set<NodeTmp> GRegex::get_all_validate_nodes(const std::vector<std::vector<SeqStm*>> all_stm){
-    std::set<NodeTmp> all_stm_node_validated;
-
-    for (std::size_t i = 0; i < all_stm.size(); ++i) {
-        for (std::size_t j = 0; j < all_stm[i].size(); ++j) {
-            std::set<NodeTmp> stm_node_validated = all_stm[i][j]->getAllNodeValidated();
-            all_stm_node_validated.insert(stm_node_validated.begin(), stm_node_validated.end()); 
-        }   
-    }
-    return all_stm_node_validated;
-}
-
-
-std::set<NodeTmp> GRegex::matchFromStartNodes(const std::vector<NodeTmp> startNodes,const std::shared_ptr<GraphView> graphToMatch){
-    std::set<NodeTmp> empty_set_return;
-    //ASSERT
-    if(startNodes.size() != mStmInit.size()){
-        throw std::runtime_error ("bad GRegex start nodes");
-    }
-
-        //init the walk
-        std::vector<std::vector<SeqStm*>> allStm;
-        std::vector<std::pair<NodeTmp,SeqStm*>> currentWalk;
-
-        for (SeqStm* seqStmPtr : mStmInit) {
-            SeqStm* newStm = mStmFab.duplicateStm(seqStmPtr);
-            std::size_t idxStart = newStm->getStmIdx();
-            currentWalk.push_back(std::make_pair(startNodes[idxStart],newStm));
-            allStm.push_back(std::vector<SeqStm*>());
-        }
-
-        //walk
-        while (currentWalk.size()!=0)
-        {
-            std::vector<std::pair<NodeTmp,SeqStm*>> newWalk;
-            for (const auto& pair : currentWalk) {
-                const NodeTmp node = pair.first;
-                SeqStm* stmPtr = pair.second;
-
-                std::pair<int,std::string> test = stmPtr->testNode(node);
-                int res = test.first;
-                std::string commonTag = test.second;
-
-                std::set<NodeTmp> next_nodes = graphToMatch->getChildren(node);
-
-                /*std::cout << "Next nodes : " ;
-                for (const auto& x : next_nodes) {
-                    std::cout << x->name() << ", ";
-                }
-                std::cout << std::endl;*/
-                
-                // Test Match
-                if (commonTag == "" && next_nodes.size() > 1) {
-                    std::cout << "NO MATCH. The node " << node.get() << " is not common and has more than one child" << std::endl;
-                    return empty_set_return;
-                }
-
-                // If there is no more nodes --> Archive the branch
-                if (res == -1 || next_nodes.empty()) {
-                    int indexToInsert = stmPtr->getStmIdx();
-                    allStm[indexToInsert].push_back(stmPtr);
-                    //std::cout << "No more nodes --> STM archived : " << indexToInsert << std::endl;
-                    continue; // TODEV : replace this with 'else' that encapsulates the rest of the function ?
-                }
-
-                bool first = true;
-
-                // Use an iterator to read through the next_nodes
-                std::set<NodeTmp>::iterator it;
-                for (it = next_nodes.begin(); it != next_nodes.end(); ++it) {
-                    // Access the current element using the iterator
-                    std::shared_ptr<Aidge::Node> next_node = *it;
-                    if (first){
-                        newWalk.push_back(std::make_pair(next_node, stmPtr));
-                        first = false;
-                    } else {
-                        SeqStm* new_stmPtr = mStmFab.duplicateStm(stmPtr);
-                        newWalk.push_back(std::make_pair(next_node, new_stmPtr));
-                    }
-                }
-            }
-            currentWalk = newWalk;
-        }
-    
-    //std::cout << "Walk finished" << std::endl;    
-
-    if (!walk_validation_all_stm_are_valid(allStm)){
-        return empty_set_return;
-    }
-    //std::cout << "walk_validation_all_stm_are_valid finished" << std::endl;
-    
-
-    if (!walk_validation_all_node_read_validate_by_one_stm(allStm)){
-        return empty_set_return;
-    }
-    //std::cout << "walk_validation_all_node_read_validate_by_one_stm finished" << std::endl;
-    
-
-    if (!walk_validation_common_nodes_same_tag_for_all_stm(allStm)){
-        return empty_set_return;
-    }
-    //std::cout << "walk_validation_common_nodes_same_tag_for_all_stm finished" << std::endl;
-
-    //std::cout << "MATCH" << std::endl;
-    
-    return get_all_validate_nodes(allStm);
-        
-}
-
-
-
-Match GRegex::match(const std::shared_ptr<GraphView> graphToMatch){
-
-    //std::vector<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>>  matches;
-    //std::set<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>>  matches;
-    Match  matches;
-    std::size_t nbStartNodes = mStmInit.size();
-    std::set<NodeTmp> allNodes = graphToMatch->getNodes();
-    std::size_t nbAllNodes = allNodes.size();
-
-    std::vector<std::size_t> indices(nbStartNodes, 0);
-
-       while (true) {
-        // Generate all permutations of the current combination
-        do {
-            std::vector<NodeTmp> startNodes;
-            //std::cout <<"start nodes :";
-            for (std::size_t i = 0; i < nbStartNodes; ++i) {
-                auto it = std::begin(allNodes);
-                std::advance(it, indices[i]);
-                //std::cout << (*it).get() << " ";
-                startNodes.push_back(*it);
-            }
-            //std::cout <<"\n";
-
-           std::set<NodeTmp> match =  matchFromStartNodes(startNodes, graphToMatch);
-           //std::cout << "match size : " << match.size() << " ";
-           if(match.size() != 0){
-                //matches.push_back(std::make_pair(startNodes,match));
-                //matches.insert(std::make_pair(startNodes,match));
-                matches.insert(startNodes,match);
-           }
-            
-        } while (std::next_permutation(indices.begin(), indices.end()));
-        
-        // Generate the next combination with replacement
-        std::size_t i = nbStartNodes - 1;
-        while (true) {
-            if (indices[i] < nbAllNodes - 1) {
-                ++indices[i];
-                break;
-            }
-            if (i == 0) {
-                return matches;
-            }
-            --i;
-        }
-        std::fill(indices.begin() + i + 1, indices.end(), indices[i]);
-    }
-
-    return matches;
-}
\ No newline at end of file
diff --git a/src/graphmatching/Match.cpp b/src/graphmatching/Match.cpp
deleted file mode 100644
index 6c08b30b11ab220310b476bab2c6d17ed86e4fd1..0000000000000000000000000000000000000000
--- a/src/graphmatching/Match.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include "aidge/graphmatching/Match.hpp"
-
-using namespace Aidge; 
-
-Match::Match(){
-    //ctr
-}
-
-size_t Match::getNbMatch(){
-    assert(mStartNodes.size() == mMatchNodes.size() && "Match corrupted");
-    return mStartNodes.size();
-}
-
-void Match::insert(std::vector<NodeTmp> startnodes, std::set<NodeTmp> matchnodes){
-    assert(mStartNodes.size() == mMatchNodes.size() && "Match corrupted");
-    mStartNodes.push_back(startnodes);
-    mMatchNodes.push_back(matchnodes);
-}
-
-std::vector<std::vector<NodeTmp>> Match::getStartNodes(){
-    return mStartNodes;
-}
-
-std::vector<std::set<NodeTmp>> Match::getMatchNodes(){
-    return mMatchNodes;
-}
\ No newline at end of file
diff --git a/src/graphmatching/NodeRegex.cpp b/src/graphmatching/NodeRegex.cpp
deleted file mode 100644
index 9bf164f60255c17492e528b0f27dec8c53f74979..0000000000000000000000000000000000000000
--- a/src/graphmatching/NodeRegex.cpp
+++ /dev/null
@@ -1,46 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include "aidge/graphmatching/NodeRegex.hpp"
-
-
-// Verification done by the Attribute system
-
-
-// Version 1 - Only test the type of the node (no need for a lexer)
-// Input : Node_op
-// Output : bool
-// return mCondition == Node_op.type
-bool Aidge::NodeRegex::_is(std::shared_ptr<Node> &Node_op){
-
-    std::string NodeType = Node_op->type();
-
-    return strcmp(NodeType.c_str(), mCondition.c_str()) == 0;
-}
-
-
-bool Aidge::NodeRegex::isA(std::string NodeType){
-
-    return strcmp(NodeType.c_str(), mCondition.c_str()) == 0;
-}
-
-// Version 2 - Test the node to an advanced condition
-// Input : Node_op
-// Output : bool
-// return mCondition applied on Node
-/**bool NodeRegex::_is(string &Node_op){
-    // Parsing the condition is done in the initialization of the NodeRegex
-    
-    // assert attributes exist in the node with the attribute function hasAttr()
-
-    // get the attributes
-
-}*/
diff --git a/src/graphmatching/SeqStm.cpp b/src/graphmatching/SeqStm.cpp
deleted file mode 100755
index 84553cb44cb898535943b31b8c955378e73ccbd5..0000000000000000000000000000000000000000
--- a/src/graphmatching/SeqStm.cpp
+++ /dev/null
@@ -1,247 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include "aidge/graphmatching/SeqStm.hpp"
-
-using namespace Aidge; 
-
-
-
-
-    ///////////////////////////////////////////////////////
-
-    SeqStm::SeqStm( 
-        const int stmIdx,
-        const std::vector<std::vector<int>>& transitionMatrix,
-        const std::map<std::string,NodeRegex*>& nodesRegex,
-        const std::map<NodeTypeKey,int>& typeToIdxTransition,
-        int actSt, 
-        std::set<NodeTmp> allNodeValidated,
-        std::set<NodeTmp> allNodeTested,
-        std::set<std::pair<NodeTmp,std::string>> allCommonNode,
-        bool stmIsValid):mStmIdx(stmIdx),
-        mTransitionMatrix(transitionMatrix),
-        mNodesRegex(nodesRegex),
-        mTypeToIdxTransition(typeToIdxTransition)
-        {
-
-        //assert
-        if (transitionMatrix.size() == 0){
-            throw std::runtime_error ("no transitionMatrix");
-        }
-        if(transitionMatrix[0].size() == 0 || transitionMatrix[0].size() != typeToIdxTransition.size()){
-            throw std::runtime_error ("bad transitionMatrix");
-        }
-        int size = static_cast<int>(transitionMatrix.size());
-        if (actSt >= size){
-            throw std::runtime_error ("bad actSt");
-        }
-
-
-        mActSt              = actSt;
-        mAllNodeValidated   = allNodeValidated;
-        mAllNodeTested      = allNodeTested;
-        mAllCommonNode      = allCommonNode;
-        mStmIsValid         = stmIsValid;
-
-    }
-
-    SeqStm* SeqStm::duplicateStm(){
-
-        //deep copy of the set 
-        // std::set<Node> cAllNodeValidated(mAllNodeValidated.begin(), mAllNodeValidated.end());
-        // std::set<Node> cAllNodeTested(mAllNodeTested.begin(), mAllNodeTested.end());
-
-        // std::set<std::pair<Node,std::string>> cAllCommonNode;
-        // for (const auto& p : mAllCommonNode) {
-        //     cAllCommonNode.insert(p);
-        // }
-
-        auto newStm = new SeqStm(
-            mStmIdx,
-            mTransitionMatrix,
-            mNodesRegex,
-            mTypeToIdxTransition,
-            mActSt,
-            mAllNodeValidated,
-            mAllNodeTested,
-            mAllCommonNode,
-            mStmIsValid
-        );
-
-        return newStm;
-    }
-
-
-    std::pair<NodeRegex*,std::string> SeqStm::getNodeRegexAndCommonAt(int idxType)
-    {
-        //std::cout << "!" << idxType << "\n";
-        for (auto const& x : mTypeToIdxTransition)
-        {
-            //x.second is the value: the idx in mTransitionMatrix for the type
-            //x.first is a pair of the node type and a string that is the common tag: '', #, #n
-            if (x.second == idxType ){
-
-                if (mNodesRegex.find(x.first.first) != mNodesRegex.end()){
-                    return std::make_pair(mNodesRegex.find(x.first.first)->second, x.first.second);  
-                }else{
-                    throw std::runtime_error ("a type is not define in NodesRegex");
-                }
-            }
-        }
-        throw std::runtime_error ("bad idx in mNodesRegex");
-        return std::make_pair(nullptr,nullptr);
-    }
-
-
-    NodeType SeqStm::getTheNodeType(NodeTmp node)
-    {
-        //the node is a str of '{type}{idx}' and we just want the type
-        // // std::regex re("([a-zA-Z]+)[0-9]+");
-        // // std::smatch match;
-        // // if (std::regex_search(node, match, re) == true) {
-        // //     return match.str(1);
-        // // }
-        // // throw std::runtime_error ("Type node not found");
-        // // return "";
-
-        //return node->name();
-        return node->type();
-    }
-
-
-    std::string SeqStm::transitionOnNodeType(NodeType nodeType){
-
-        if (!isStmBlocked()){
-            int idxType = 0;
-            for (auto & nextSt : mTransitionMatrix[mActSt]) {
-                // There is a next step for this type
-                //std::cout << "transition matrix next state -> "<< nextSt<<"\n" ;
-                if (nextSt != -1){
-                    //std::cout << "next -> "<< nextSt<< " "<< isAValidSt(nextSt) <<"\n" ;
-                    auto nodeRegex = getNodeRegexAndCommonAt(idxType);
-                    //std::cout << "-> "<< nodeRegex.second<<"\n" ;
-                    if (nodeRegex.first->isA(nodeType)){
-                        //std::cout << "nodetype tested !"<<"\n" ;
-                        if(isAValidSt(nextSt)){
-                            //std::cout << "Valid state !"<<"\n" ;
-                            mStmIsValid = true;
-                        }
-                        mActSt = nextSt;
-                        return nodeRegex.second;
-                    }
-                    
-                }
-                idxType += 1;
-            }
-
-            mActSt =-1;
-        }
-
-        return "";
-    }
-
-
-    std::pair<int,std::string> SeqStm::testNode(const NodeTmp node){
-        
-        std::string commonTag = "";
-        //std::cout << "0\n" ;
-        if (!isStmBlocked()){
-            bool isNextStEnd = std::all_of(mTransitionMatrix[mActSt].begin(), mTransitionMatrix[mActSt].end(), [&](int x){ return x == -1; });
-            //std::cout << "1:"<< isNextStEnd <<"\n" ;
-            //if the next state is full of -1, should we really add the node to the tested set?
-            //ok to test it, but it is certain not to be valid
-            if(!isNextStEnd){
-                mAllNodeTested.insert(node);
-            }
-            //std::cout << "2\n" ;
-            //recursion avoidance
-            if(mAllNodeValidated.find(node) == mAllNodeValidated.end()){
-                
-                NodeType nodeType = getTheNodeType(node);
-                //std::cout << "3 " << nodeType << "\n" ;
-                commonTag = transitionOnNodeType(nodeType);
-                //after the transition test, if the state is != -1 the node is valid for the stm
-                //std::cout << " mActSt = " << mActSt << "\n" ;
-                if( mActSt != -1 ){
-                    mAllNodeValidated.insert(node);
-                }
-            }else{
-                mActSt = -1;
-            }
-        }
-
-        if(commonTag != ""){
-            mAllCommonNode.insert(std::make_pair(node,commonTag));
-        }
-        return std::make_pair(mActSt,commonTag);
-    }
-
-
-void SeqStm::drawStm(){
-
-    //mTransitionMatrix
-    // Find the maximum width of each column
-    std::vector<std::size_t> max_widths(mTransitionMatrix[0].size(), 0);
-    for (std::size_t i = 0; i < mTransitionMatrix.size(); ++i)
-    {
-        for (std::size_t j = 0; j < mTransitionMatrix[i].size(); ++j)
-        {
-            std::size_t width = std::to_string(mTransitionMatrix[i][j]).length();
-            if (width > max_widths[j])
-            {
-                max_widths[j] = width;
-            }
-        }
-    }
-
-    // Print the vector with aligned columns
-    for (std::size_t i = 0; i < mTransitionMatrix.size(); ++i)
-    {
-        for (std::size_t j = 0; j < mTransitionMatrix[i].size(); ++j)
-        {
-            int i_int = static_cast<int>(i);
-            if (mActSt == -1 ){
-                if(mStmIsValid){
-                    std::cout << "\033[48;5;40m";
-                }else{
-                     std::cout << "\033[48;5;9m";
-                }
-            }
-            else if (mActSt == i_int){
-                std::cout << "\033[48;5;30m";
-            }else{
-                std::cout << "\033[48;5;27m";
-            }
-
-            // Pad the value with spaces to align it with the maximum width
-            std::size_t width = std::to_string(mTransitionMatrix[i][j]).length();
-            std::string padding(max_widths[j] - width, ' ');
-            std::cout << padding << mTransitionMatrix[i][j] << " ";
-            std::cout << "\033[0m";
-        }
-        std::cout << "\n";
-    }
-
-    std::cout << "mAllNodeTested : ";
-    for (const auto& x : mAllNodeTested) {
-        std::cout << x << ", ";
-    }
-    std::cout << "\n";
-
-
-    std::cout << "mAllNodeValidated : ";
-    for (const auto& x : mAllNodeValidated) {
-        std::cout << x << ", ";
-    }
-    std::cout << "\n";
-}
-
diff --git a/src/graphmatching/StmFactory.cpp b/src/graphmatching/StmFactory.cpp
deleted file mode 100644
index 30b1fad81fc9e7f97dab03f7e6d091a27eeec32b..0000000000000000000000000000000000000000
--- a/src/graphmatching/StmFactory.cpp
+++ /dev/null
@@ -1,150 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include "aidge/graphmatching/StmFactory.hpp"
-
-using namespace Aidge;
-
-StmFactory::StmFactory(const std::map<std::string, NodeRegex *> &nodesRegex)
-    : mNodesRegex(nodesRegex) {}
-
-SeqStm *StmFactory::duplicateStm(SeqStm *stm) { return stm->duplicateStm(); }
-
-SeqStm *StmFactory::makeNewStm(const std::string &sequRegex) {
-
-  ParsingReturn parsing = initParsingSequRegex(sequRegex);
-  std::vector<std::vector<int>> transitionMatrix =
-      initTransitionMatrix(parsing);
-
-  std::set<NodeTmp> allNodeValidated;
-  std::set<NodeTmp> allNodeTested;
-  std::set<std::pair<NodeTmp, std::string>> allCommonNode;
-
-  SeqStm *newStm = new SeqStm(static_cast<int>(mCmptStm), transitionMatrix, mNodesRegex,
-                              parsing.typeToIdxTransition, 0, allNodeValidated,
-                              allNodeTested, allCommonNode, false);
-  mCmptStm += 1;
-
-  return newStm;
-}
-
-ParsingReturn StmFactory::initParsingSequRegex(const std::string &sequRegex) {
-
-  std::string toMatch;
-  std::regex re("\\s*([A-Za-z]+)(#\\d*)?([+*])?\\s*(->|;)");
-  std::smatch matches;
-
-  int idxType = 0;
-  // return
-  ParsingReturn parsing;
-  // std::map<std::pair<NodeType,std::string>,int> typeToIdxTransition;
-  // std::vector<std::pair<std::pair<NodeType,std::string>,std::string>>
-  // transition;
-  // assert
-  std::map<NodeType, std::string> assertCommonNodeTypes;
-
-  for (std::size_t i = 0; i < sequRegex.length(); i++) {
-    toMatch += sequRegex[i];
-    if (std::regex_match(toMatch, matches, re)) {
-
-      std::string type = matches.str(1);
-      std::string commonTag = matches.str(2);
-      std::string quantification = matches.str(3);
-
-      if ((commonTag != "") && (quantification != "")) {
-        throw std::runtime_error("bad commonTag and quantification");
-      }
-
-      // make the typeToIdxTransition
-      NodeTypeKey typeTag = std::make_pair(type, commonTag);
-      /*std::cout << "              typeTag: " << type << "  " << commonTag
-                << parsing.typeToIdxTransition.size() << std::endl;*/
-      if (parsing.typeToIdxTransition.find(typeTag) ==
-          parsing.typeToIdxTransition.end()) {
-        parsing.typeToIdxTransition[typeTag] = idxType;
-        idxType += 1;
-      }
-      ////////////////////////////////////////////////////////////
-      // ASSERT
-      // SAME Common node in the sequ
-      if (commonTag != "") {
-        if (assertCommonNodeTypes.find(type) != assertCommonNodeTypes.end()) {
-          if (assertCommonNodeTypes[type] == commonTag) {
-            throw std::runtime_error("same common node in the sequ regex");
-          }
-        } else {
-          assertCommonNodeTypes[type] = commonTag;
-        }
-      }
-
-      // save all transition
-      parsing.transition.push_back(std::make_pair(typeTag, quantification));
-
-      /*std::cout << "Match found: " << matches.str() << std::endl;
-      std::cout << "Type: " << matches.str(1) << std::endl;
-      std::cout << "Common tag: " << matches.str(2) << std::endl;
-      std::cout << "Quantification: " << matches.str(3) << std::endl;*/
-
-      toMatch = "";
-    }
-  }
-  if (parsing.transition.size() == 0) {
-    throw std::runtime_error("Bad Parsing SequRegex ");
-  }
-
-  return parsing;
-}
-
-std::vector<std::vector<int>>
-StmFactory::initTransitionMatrix(ParsingReturn &parsing) {
-
-  // std::pair<NodeTypeKey,std::string>
-  std::vector<std::vector<int>> transitionMatrix;
-  std::size_t numberOfType = parsing.typeToIdxTransition.size();
-
-  if (numberOfType == 0) {
-    throw std::runtime_error("Bad number Of Type ");
-  }
-  // init start st
-  transitionMatrix.push_back(std::vector<int>(numberOfType, -1));
-
-  std::size_t idxTransition = 0;
-  int idxState = 0;
-  for (const auto &pair : parsing.transition) {
-    const NodeTypeKey &nodeTypeKey = pair.first;
-    const std::string &quant = pair.second;
-
-    /*std::cout << "Key: {" << nodeTypeKey.first << ", " << nodeTypeKey.second
-              << "}, Value: " << quant << std::endl;
-    std::cout << "idxState " << idxState << " TM: " << transitionMatrix.size()
-              << std::endl;*/
-    std::size_t idxType = parsing.typeToIdxTransition[nodeTypeKey];
-    /*std::cout << "idxType " << idxType << " TM: " << transitionMatrix[0].size()
-              << "type" << numberOfType << std::endl;*/
-
-    if (quant == "*") {
-      transitionMatrix[idxTransition][idxType] = idxState;
-    } else if (quant == "+") {
-      idxState += 1;
-      transitionMatrix[idxTransition][idxType] = idxState;
-      transitionMatrix.push_back(std::vector<int>(numberOfType, -1));
-      idxTransition += 1;
-      transitionMatrix[idxTransition][idxType] = idxState;
-    } else {
-
-      idxState += 1;
-      transitionMatrix[idxTransition][idxType] = idxState;
-      transitionMatrix.push_back(std::vector<int>(numberOfType, -1));
-      idxTransition += 1;
-    }
-  }
-  return transitionMatrix;
-}
\ No newline at end of file
diff --git a/src/nodeTester/ConditionalInterpreter.cpp b/src/nodeTester/ConditionalInterpreter.cpp
index e01bdd76a28576451a1a09202d5fd1e87a4856e5..f40e62305334f740057f88ef21cdab749d64bd99 100644
--- a/src/nodeTester/ConditionalInterpreter.cpp
+++ b/src/nodeTester/ConditionalInterpreter.cpp
@@ -8,7 +8,7 @@ using namespace Aidge;
 //ConditionalRegisterFunction
 ///////////////////////////////
 
-    ConditionalData* ConditionalRegisterFunction::run(const std::string key,std::vector<ConditionalData*> & datas){
+     std::shared_ptr<ConditionalData> ConditionalRegisterFunction::run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & datas){
 
         auto lambdaIt = mWlambda.find(key);
         if (lambdaIt != mWlambda.end()) {
@@ -18,37 +18,46 @@ using namespace Aidge;
         }
     }
 
+
 //////////////////////
 //ConditionalInterpreter
 ///////////////////////
-    ConditionalInterpreter::ConditionalInterpreter(const std::string ConditionalExpressions)
-    :mLambdaRegiter()
+    ConditionalInterpreter::ConditionalInterpreter(const std::string key,const std::string ConditionalExpressions)
+    :mLambdaRegister(),mKey(key)
     {
 
         ConditionalParser conditionalParser = ConditionalParser(ConditionalExpressions);
         mTree = conditionalParser.parse();
+        
         ///lambda by default
-        mLambdaRegiter.insert("getType",+[](NodePtr NodeOp){return NodeOp->type();});
+        mLambdaRegister.insert("getType",+[](NodePtr NodeOp){return NodeOp->type();});
 
     }
+    
+    bool ConditionalInterpreter::isLambdaRegister(const std::string &key){
+        return mLambdaRegister.isLambdaRegister(key);
+    }
+    
+    const std::string& ConditionalInterpreter::getKey(){
+        return mKey;
+    }
 
 
     bool ConditionalInterpreter::test( const NodePtr nodeOp)
     {
-
-        clearRes();
+        mResolution.clear();
         try{
-            std::vector<ConditionalData*> r =  visit({mTree},nodeOp);
-
-        if (mResolution.size() != 1){
-            throw std::runtime_error("Multi-output interpretation output");
-        }else{
-            if (!mResolution[0]->isTypeEqualTo<bool>()){
-                throw std::runtime_error("TEST OUT MUST BE A BOOL ");
+            std::vector< std::shared_ptr<ConditionalData>> r =  visit({mTree},nodeOp);
+   
+            if (mResolution.size() != 1){
+                throw std::runtime_error("Multi output interpretation output");
             }else{
-                return mResolution[0]->getValue<bool>();
+                if (!mResolution[0]->isTypeEqualTo<bool>()){
+                    throw std::runtime_error("TEST OUT MUST BE A BOOL ");
+                }else{
+                    return mResolution[0]->getValue<bool>();
+                }
             }
-        }
 
         }catch(const std::exception& e){
             std::ostringstream errorMessage;
@@ -58,12 +67,12 @@ using namespace Aidge;
     }
 
     void ConditionalInterpreter::insertLambda(const std::string key,std::function<bool(Aidge::NodePtr)> f){
-        mLambdaRegiter.insert<std::function<bool(Aidge::NodePtr)> >(key, f);
+        mLambdaRegister.insert<std::function<bool(Aidge::NodePtr)> >(key, f);
     }
 
     /////
-    std::vector<ConditionalData*> ConditionalInterpreter::visit(const ASTNodeCh& nodes, const NodePtr nodeOp ){
-            std::vector<ConditionalData*> dataVector;
+    std::vector< std::shared_ptr<ConditionalData>> ConditionalInterpreter::visit(const ASTNodeCh& nodes, const NodePtr nodeOp ){
+            std::vector< std::shared_ptr<ConditionalData>> dataVector;
 
             for ( std::shared_ptr<AstNode<ConditionalTokenTypes>> node : nodes) {
                 try{
@@ -130,7 +139,7 @@ using namespace Aidge;
                         case ConditionalTokenTypes::NODE: //TODO
                             {
 
-                                ConditionalData* data = new ConditionalData;
+                                std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
                                 data->setValue<NodePtr>(nodeOp);
                                 mResolution.push_back(data);
 
@@ -147,7 +156,7 @@ using namespace Aidge;
 
                         case ConditionalTokenTypes::BOOL: //TODO
                             {
-                            ConditionalData* data = new ConditionalData;
+                             std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
 
                             if(node->getValue() == "true"){
                                 data->setValue<bool>(true);
@@ -169,8 +178,8 @@ using namespace Aidge;
                     }
                 }catch(const std::exception& e){
                     std::ostringstream errorMessage;
-                    errorMessage << "Error in visiting AST for node"<< nodeOp->name() << "\n\t" << e.what()  << "\n";
-                    throw std::runtime_error(errorMessage.str());
+                    errorMessage << "Error in visiting AST for node "<< nodeOp->name() << "\n\t" << e.what()  << "\n";
+                    throw std::runtime_error(errorMessage.str()); 
                 }
             }
 
@@ -185,7 +194,8 @@ using namespace Aidge;
 
     void ConditionalInterpreter::fStrToInteger(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node)
     {
-        ConditionalData* data = new ConditionalData;
+         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
+
         data->setValue<int>(std::stoi(node->getValue()));
         mResolution.push_back(data);
     }
@@ -193,14 +203,14 @@ using namespace Aidge;
     void ConditionalInterpreter::fStrToFloat(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node)
     {
 
-        ConditionalData* data = new ConditionalData;
+         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
         data->setValue<float>(std::stof(node->getValue()));
         mResolution.push_back(data);
     }
 
     void ConditionalInterpreter::fStrToStr(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node)
     {
-        ConditionalData* data = new ConditionalData;
+         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
         data->setValue<std::string>(node->getValue());
         mResolution.push_back(data);
     }
@@ -208,34 +218,37 @@ using namespace Aidge;
     void ConditionalInterpreter::fLambda(const std::shared_ptr<AstNode<ConditionalTokenTypes>>& node)
     {
         //if the lambda has inputs
-        ConditionalData* data;
+         std::shared_ptr<ConditionalData> data;
         try {
-            data = mLambdaRegiter.run(node->getValue(),mResolution);
+            data = mLambdaRegister.run(node->getValue(),mResolution);
         } catch (const std::exception& e) {
             std::ostringstream errorMessage;
             errorMessage << "Error in conditional interpretation when run the "<<  node->getValue() <<" Lambda\n\t" << e.what()  << "\n";
             throw std::runtime_error(errorMessage.str());
         }
 
-        clearRes();
+        //clearRes();
         mResolution.push_back(data);
     }
 
     void ConditionalInterpreter::fEq(void)
     {
-        if (mResolution.size() != 2){
+        if (mResolution.size() < 2){
             throw std::runtime_error("EQ need 2 arg and get :" + std::to_string(mResolution.size()));
         }
-        auto a = mResolution[0];
-        auto b = mResolution[1];
+        auto a = mResolution.back();
+        mResolution.pop_back();
+        auto b = mResolution.back();
+        mResolution.pop_back();
+
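+        // Note (sketch of the assumed evaluation model): mResolution acts as an
+        // operand stack; each fXxx() now pops its operands from the back and pushes
+        // a single result instead of clearing the whole buffer, which lets nested
+        // expressions keep their intermediate results alive.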
 
         if (a->getType() != b->getType()){
-            throw std::runtime_error("EQ Unsuported between type :" + a->getType() +" "+ b->getType());
+            throw std::runtime_error("EQ Unsupported between type :" + a->getType() +" "+ b->getType());
         }
 
 
 
-        ConditionalData* data = new ConditionalData;
+         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
 
         if (a->isTypeEqualTo<int>()) {
            data->setValue<bool>( a->getValue<int>() == b->getValue<int>());
@@ -249,23 +262,25 @@ using namespace Aidge;
            throw std::runtime_error("EQ Unknown type encountered :" + a->getType() );
         }
 
-        clearRes();
+        
         mResolution.push_back(data);
     }
 
     void ConditionalInterpreter::fNeq(void)
     {
-        if (mResolution.size() != 2){
+        if (mResolution.size() < 2){
              throw std::runtime_error("NEQ need 2 arg and get :" + std::to_string(mResolution.size()));
         }
-        auto a = mResolution[0];
-        auto b = mResolution[1];
+        auto a = mResolution.back();
+        mResolution.pop_back();
+        auto b = mResolution.back();
+        mResolution.pop_back();
 
         if (a->getType() != b->getType()){
-            throw std::runtime_error("NEQ Unsuported between type :" + a->getType() +" "+ b->getType());
+            throw std::runtime_error("NEQ Unsupported between type :" + a->getType() +" "+ b->getType());
         }
 
-        ConditionalData* data = new ConditionalData;
+         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
 
         if (a->isTypeEqualTo<int>()) {
            data->setValue<bool>( a->getValue<int>() != b->getValue<int>());
@@ -278,67 +293,72 @@ using namespace Aidge;
            throw std::runtime_error("NEQ Unknown type encountered :" + a->getType() );
         }
 
-        clearRes();
+        
         mResolution.push_back(data);
     }
 
     void ConditionalInterpreter::fAnd(void)
     {
-        if (mResolution.size() != 2){
+        if (mResolution.size() < 2){
            throw std::runtime_error("AND need 2 arg and get :" + std::to_string(mResolution.size()));
         }
-        auto a = mResolution[0];
-        auto b = mResolution[1];
+        auto a = mResolution.back();
+        mResolution.pop_back();
+        auto b = mResolution.back();
+        mResolution.pop_back();
 
 
         if (a->getType() != typeid(bool).name() || b->getType() != typeid(bool).name()){
             throw std::runtime_error("AND Unknown type encountered need bool get :" + a->getType() );
         }
 
-        ConditionalData* data = new ConditionalData;
+         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
         data->setValue<bool>( a->getValue<bool>() && b->getValue<bool>());
 
 
-        clearRes();
+        
         mResolution.push_back(data);
     }
 
     void ConditionalInterpreter::fOr(void)
     {
-        if (mResolution.size() != 2){
+        if (mResolution.size() < 2){
              throw std::runtime_error("OR need 2 arg and get :" + std::to_string(mResolution.size()));
         }
-        auto a = mResolution[0];
-        auto b = mResolution[1];
+        auto a = mResolution.back();
+        mResolution.pop_back();
+        auto b = mResolution.back();
+        mResolution.pop_back();
 
 
         if (a->getType() != typeid(bool).name() || b->getType() != typeid(bool).name()){
              throw std::runtime_error("OR Unknown type encountered need bool get :" + a->getType() );
         }
 
-        ConditionalData* data = new ConditionalData;
+         std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
         data->setValue<bool>( a->getValue<bool>() || b->getValue<bool>());
 
 
-        clearRes();
+        
         mResolution.push_back(data);
     }
 
     void ConditionalInterpreter::fNot()
         {
-            if (mResolution.size() != 1){
+            if (mResolution.size() < 1){
                 throw std::runtime_error("NOT need 1 arg and get :" + std::to_string(mResolution.size()));
             }
-            auto a = mResolution[0];
+            auto a = mResolution.back();
+            mResolution.pop_back();
 
             if (a->getType() != typeid(bool).name()){
                 throw std::runtime_error("NOT Unknown type encountered need bool get :" + a->getType() );
             }
 
-            ConditionalData* data = new ConditionalData;
+             std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
             data->setValue<bool>( !a->getValue<bool>() );
 
-            clearRes();
+            
             mResolution.push_back(data);
 
         }
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..273eac2e8fa9623e617d1be204ac2ae46d8da02d
--- /dev/null
+++ b/src/operator/Div.cpp
@@ -0,0 +1,35 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstddef>
+#include <vector>
+#include <utility>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Div.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+void Aidge::Div_Op::computeOutputDims() {
+    // check inputs have been associated
+    if (!getInput(0) || !getInput(1)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
+    }
+
+    if ((!getInput(0)->empty()) &&
+        ((getInput(1)->size() == 1) || // div by a single value
+        (getInput(1)->size() == getInput(0)->size()) || // div elem-wise
+        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // div by a Tensor with one dimension of output size
+    {
+        mOutputs[0]->resize(getInput(0)->dims());
+    }
+}
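+
+// Illustrative note (assumption, not upstream documentation): with
+// getInput(0)->dims() == {2, 3, 4} (size 24), the output is resized to {2, 3, 4}
+// when getInput(1) holds a single value (size 1), has the same total size (24),
+// or is a 1-D Tensor of size 4 matching the last dimension of input 0. Any other
+// second-input shape leaves mOutputs[0] unresized.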
\ No newline at end of file
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 183c7cbd85c73d3fc8881a529bcf75c466754608..530357085a16ca3e834669cebd2d26882ca8ddab 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -13,16 +13,18 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph)
-    : Operator(type),
+    : OperatorTensor(type, graph->dataInputs().size(), (graph->inputs().size() - graph->dataInputs().size()), graph->outputs().size()),
         mGraph(graph)
 {
     mInputs = std::vector<std::shared_ptr<Tensor>>(mGraph->inputs().size());
     for (std::size_t i = 0; i < mInputs.size(); ++i) {
         mInputs[i] = std::make_shared<Tensor>();
     }
-    mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->outputs().size());
-    for (std::size_t i = 0; i < mOutputs.size(); ++i) {
-        mOutputs[i] = std::make_shared<Tensor>();
+    // Associate outputs to micro-graph outputs for custom implementation
+    mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->getOrderedOutputs().size());
+    for (size_t outputIdx = 0; outputIdx < mOutputs.size(); ++outputIdx) {
+        const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
+        mOutputs[outputIdx] = std::dynamic_pointer_cast<Tensor>(outputOp.first->getOperator()->getRawOutput(outputOp.second));
     }
 }
 
@@ -65,7 +67,8 @@ void Aidge::MetaOperator_Op::updateConsummerProducer() {
             // Lazy initialization
             mScheduler = std::make_shared<SequentialScheduler>(mGraph);
         }
-        
+
+
         // TODO: check that generateScheduling() can be called multiple time to iteratively update the schedule.
         // It could be a good idea to unify updateConsummerProducer() and generateScheduling() into a "updateScheduling()"
         mScheduler->generateScheduling();
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2e3e77288bf1e0613f0aa572e3c50e94599a902f
--- /dev/null
+++ b/src/operator/Mul.cpp
@@ -0,0 +1,35 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstddef>
+#include <vector>
+#include <utility>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+void Aidge::Mul_Op::computeOutputDims() {
+    // check inputs have been associated
+    if (!getInput(0) || !getInput(1)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
+    }
+
+    if ((!getInput(0)->empty()) &&
+        ((getInput(1)->size() == 1) || // mul by a single value
+        (getInput(1)->size() == getInput(0)->size()) || // mul elem-wise
+        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // mul by a Tensor with one dimension of output size
+    {
+        mOutputs[0]->resize(getInput(0)->dims());
+    }
+}
\ No newline at end of file
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index 09a17a428e1de91c0318f710e6f097573cf529a6..eb94db87df250767967348c3adfed8a1e35b4c5f 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -10,10 +10,14 @@
  ********************************************************************************/
 
 #include <cassert>
+#include <cstddef>
+#include <vector>
+#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 // constexpr Aidge::Operator::Operator(const char* type)
 //     : mType(type)
@@ -21,12 +25,35 @@
 // 	// ctor
 // }
 
-Aidge::Operator::~Operator() = default;
+Aidge::Operator::~Operator() noexcept = default;
 
 ///////////////////////////////////////////////////////
 //        IMPLEMENTATION
 ///////////////////////////////////////////////////////
 
+// std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::Operator::computeReceptiveField(
+//         const std::size_t firstIdx, const std::vector<Aidge::DimSize_t>& outputDims, const Aidge::IOIndex_t outputIdx) const
+// {
+//     static_cast<void>(outputIdx);
+//     if (outputIdx >= nbOutputs()) {
+//         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range.");
+//     }
+//     if (nbInputs() != nbDataInputs()) {
+//         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overrided function.");
+//     }
+//     if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
+//         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+//     }
+//     const auto outputIdxDims = getOutput(0)->getCoord(firstIdx);
+//     for (DimIdx_t i = 0; i < outputDims.size(); ++i) {
+//         if (((outputDims[i] + outputIdxDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) {
+//             AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+//         }
+//     }
+//     // return the same Tensor description as given in function parameter for each data input
+//     return std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>>(nbDataInputs(),std::pair<std::size_t, std::vector<Aidge::DimSize_t>>(firstIdx, outputDims));
+// }
+
 Aidge::NbElts_t Aidge::Operator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     return mImpl->getNbRequiredData(inputIdx);
 }
@@ -48,8 +75,12 @@ void Aidge::Operator::runHooks() const {
     }
 }
 void Aidge::Operator::forward() {
-    mImpl->forward();
-    runHooks();
+    if(mImpl) {
+        mImpl->forward();
+        runHooks();
+    } else {
+        printf("forward: No implementation is linked.\n");
+    }
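+    // Note (assumption about intent): printing instead of dereferencing a null
+    // mImpl keeps graphs containing operators without a linked backend
+    // implementation usable, at the cost of silently skipping their forward().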
 }
 
 void Aidge::Operator::backward() { mImpl->backward(); }
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1d16e9064010269174501d3c824c705c36971641
--- /dev/null
+++ b/src/operator/OperatorTensor.cpp
@@ -0,0 +1,135 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <memory>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+
+void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
+    if (inputIdx >= nbInputs()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu inputs", type().c_str(), nbInputs());
+    }
+    if (strcmp((data)->type(), Tensor::Type) != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input data must be of Tensor type");
+    }
+    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+}
+
+void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
+    if (strcmp(data->type(), "Tensor") != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
+    }
+    if (getInput(inputIdx)) {
+        *mInputs[inputIdx] = *std::dynamic_pointer_cast<Tensor>(data);
+    } else {
+        mInputs[inputIdx] = std::make_shared<Tensor>(*std::dynamic_pointer_cast<Tensor>(data));
+    }
+}
+
+Aidge::OperatorTensor::~OperatorTensor() = default;
+
+void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, std::shared_ptr<Aidge::Data>&& data) {
+    if (strcmp(data->type(), "Tensor") != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
+    }
+    if (getInput(inputIdx)) {
+        *mInputs[inputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
+    } else {
+        mInputs[inputIdx] = std::make_shared<Tensor>(std::move(*std::dynamic_pointer_cast<Tensor>(data)));
+    }
+}
+
+const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidge::IOIndex_t inputIdx) const {
+    if (inputIdx >= nbInputs()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu inputs", type().c_str(), nbInputs());
+    }
+    return mInputs[inputIdx];
+}
+
+void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) {
+    if (strcmp(data->type(), "Tensor") != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
+    }
+    if (outputIdx >= nbOutputs()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbOutputs());
+    }
+    *mOutputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data);
+}
+
+void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) {
+    if (strcmp(data->type(), "Tensor") != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
+    }
+    if (outputIdx >= nbOutputs()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbOutputs());
+    }
+    *mOutputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
+}
+
+const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aidge::IOIndex_t outputIdx) const {
+    if (outputIdx >= nbOutputs()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbOutputs());
+    }
+    return mOutputs[outputIdx];
+}
+
+
+void Aidge::OperatorTensor::computeOutputDims() {
+    // check inputs have been associated
+    bool associated = (nbInputs() > 0); // do not compute anything if no input
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        associated &= !(getInput(i)->empty());
+    }
+    if (associated) {
+        const auto expectedDims =  getInput(0)->dims();
+        for (std::size_t i = 1; i < nbInputs(); ++i) {
+            if (expectedDims != getInput(i)->dims()) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator's inputs should have the same dimensions");
+            }
+        }
+        mOutputs[0]->resize(expectedDims);
+    }
+}
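+
+// Sketch of the contract inferred from the code above (not upstream documentation):
+// this default computeOutputDims() only covers the "all inputs share one shape"
+// case and resizes output 0 to that shape. Operators with looser shape rules
+// (e.g. Div, Mul, Pow, Sub introduced in this changeset) override it with their
+// own acceptance logic.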
+
+bool Aidge::OperatorTensor::outputDimsForwarded() const {
+    bool forwarded = true;
+    // check both inputs and outputs have been filled
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
+    }
+    for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
+        forwarded &= !(getOutput(i)->empty());
+    }
+    return forwarded;
+}
+
+void Aidge::OperatorTensor::setDataType(const DataType& dataType) const {
+    for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
+        getOutput(i)->setDataType(dataType);
+    }
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not set");
+        }
+        else {
+            getInput(i)->setDataType(dataType);
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c213a47a4a590026c07625aeb532d303ca8dbced
--- /dev/null
+++ b/src/operator/Pow.cpp
@@ -0,0 +1,35 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstddef>
+#include <vector>
+#include <utility>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Pow.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+void Aidge::Pow_Op::computeOutputDims() {
+    // check inputs have been associated
+    if (!getInput(0) || !getInput(1)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
+    }
+
+    if ((!getInput(0)->empty()) &&
+        ((getInput(1)->size() == 1) || // pow by a single value
+        (getInput(1)->size() == getInput(0)->size()) || // pow elem-wise
+        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // pow by a Tensor with one dimension of output size
+    {
+        mOutputs[0]->resize(getInput(0)->dims());
+    }
+}
\ No newline at end of file
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8175f1b7ae5bb5eccd36267c1d739f764bd3c236
--- /dev/null
+++ b/src/operator/Sub.cpp
@@ -0,0 +1,35 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstddef>
+#include <vector>
+#include <utility>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Sub.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+void Aidge::Sub_Op::computeOutputDims() {
+    // check inputs have been associated
+    if (!getInput(0) || !getInput(1)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
+    }
+
+    if ((!getInput(0)->empty()) &&
+        ((getInput(1)->size() == 1) || // sub by a single value
+        (getInput(1)->size() == getInput(0)->size()) || // sub elem-wise
+        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // sub by a Tensor with one dimension of output size
+    {
+        mOutputs[0]->resize(getInput(0)->dims());
+    }
+}
\ No newline at end of file
diff --git a/src/recipies/FuseBatchNorm.cpp b/src/recipies/FuseBatchNorm.cpp
index 4b2f7a811c022ee80eec98548049853d56951edb..ffb4599d83ba922ce5991460810f5d248806617c 100644
--- a/src/recipies/FuseBatchNorm.cpp
+++ b/src/recipies/FuseBatchNorm.cpp
@@ -12,95 +12,82 @@
 #include <cassert>
 #include <memory>
 #include <string>
+
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/Conv.hpp"
-
-#include "aidge/utils/Recipies.hpp"
+#include "aidge/recipies/Recipies.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/GenericOperator.hpp"
-// Graph Regex
-#include "aidge/graphmatching/GRegex.hpp"
-#include "aidge/graphmatching/NodeRegex.hpp"
-using namespace Aidge;
-
-void Aidge::fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes){
-
-    assert(nodes.size() == 2 && "Wrong number of nodes to replace\n");
-
-    // Assert the nodes types are correct to be fused
-    std::shared_ptr<Node> conv;
-    std::shared_ptr<Node> batchnorm;
-    for (const auto& element : nodes) {
-        assert((element->type() == "Conv" || element->type() == "BatchNorm") && "Wrong type for the nodes to replace");
-        if (element->type() == "Conv"){
-            conv = element;
-        }
-        else if (element->type() == "BatchNorm") {
-            batchnorm = element;
-        }
-    }
-    // TODO : check if batchnorm is the only child of the Conv or FC
-    std::shared_ptr<Tensor> scale  = batchnorm->input(1).first->getOperator()->getOutput(batchnorm->input(1).second);
-    std::shared_ptr<Tensor> shift  = batchnorm->input(2).first->getOperator()->getOutput(batchnorm->input(2).second);
-    std::shared_ptr<Tensor> b_mean = batchnorm->input(3).first->getOperator()->getOutput(batchnorm->input(3).second);
-    std::shared_ptr<Tensor> b_var  = batchnorm->input(4).first->getOperator()->getOutput(batchnorm->input(4).second);
 
 
-    // TODO : Find a way to remove the template
-    const float epsilon = std::static_pointer_cast<BatchNorm_Op<2>>(batchnorm->getOperator())->getAttr<float>("Epsilon");
-    DimSize_t convOutDims = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("OutChannels");
+//Graph Regex
+#include "aidge/graphRegex/GraphRegex.hpp"
+
+void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode, std::shared_ptr<Aidge::Node> batchnormNode) {
 
+    // TODO: Find a way to remove the template
+    // A feature map with 2 dimensions is assumed
+    const std::shared_ptr<BatchNorm_Op<2>> batchOp = std::static_pointer_cast<BatchNorm_Op<2>>(batchnormNode->getOperator());
+    const std::shared_ptr<Conv_Op<2>> convOp = std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
 
-    assert(scale->size()  == convOutDims);
-    assert(shift->size()  == convOutDims);
-    assert(b_mean->size() == convOutDims);
-    assert(b_var->size()  == convOutDims);
+    const std::shared_ptr<Tensor> scale  = batchOp->getInput(1);
+    const std::shared_ptr<Tensor> shift  = batchOp->getInput(2);
+    const std::shared_ptr<Tensor> b_mean = batchOp->getInput(3);
+    const std::shared_ptr<Tensor> b_var  = batchOp->getInput(4);
+
+    const float epsilon = batchOp -> getAttr<float>("Epsilon");
+    const DimSize_t convNbOutChannels = convOp -> getAttr<DimSize_t>("OutChannels");
+    const DimSize_t channelsSize = convOp -> getAttr<DimSize_t>("InChannels");
+    const std::array<DimSize_t, 2> kernelDims = convOp -> getAttr<std::array<DimSize_t, 2>>("KernelDims");
+
+
+    assert(scale->size()  == convNbOutChannels);
+    assert(shift->size()  == convNbOutChannels);
+    assert(b_mean->size() == convNbOutChannels);
+    assert(b_var->size()  == convNbOutChannels);
     assert(epsilon > 0.0);
     // TODO : no no_bias attribute ?
+
+
     float meanVariance = 0.0;
     unsigned int count = 0;
 
-    for (std::size_t output = 0; output < convOutDims; ++output) {
-        // TODO : get suppose datatype is float ..
-        if (b_var->get<float>(output) > 1.0e-12) {
-            meanVariance += b_var->get<float>(output);
+    for (std::size_t outChId = 0; outChId < convNbOutChannels; ++outChId) {
+        // TODO: get() assumes the dataType is float...
+        if (b_var->get<float>(outChId) > 1.0e-12) {
+            meanVariance += b_var->get<float>(outChId);
             ++count;
         }
         else {
-            printf("Zero-variance: %s [%lu]\n", conv->name().c_str(), output);
+            printf("Zero-variance: %s [%zu]\n", convNode->name().c_str(), outChId);
         }
     }
     if (count > 0)
         meanVariance /= count;
     else {
-        printf("variance < 1e-12 for all outputs! Is the network correctly trained?\n");
+        printf("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
     }
 
-    const DimSize_t channelsSize = std::dynamic_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("InChannels");
-
-    // TODO : suppose we have Conv2D ...
-    const std::array<DimSize_t, 2> kernelDims = std::dynamic_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+    std::shared_ptr<Tensor> weight = convOp->getInput(1);
+    std::shared_ptr<Tensor> bias = convOp->getInput(2);
 
-    std::shared_ptr<Tensor> weight  = conv->input(1).first->getOperator()->getOutput(conv->input(1).second);
-    std::shared_ptr<Tensor> bias  = conv->input(2).first->getOperator()->getOutput(conv->input(2).second);
-
-    for (std::size_t output = 0; output < convOutDims; ++output) {
+    for (std::size_t outChId = 0; outChId < convNbOutChannels; ++outChId) {
         // Corrected for zero-variance issue:
         // "A Quantization-Friendly Separable Convolution for MobileNets"
         // https://arxiv.org/pdf/1803.08607.pdf
         // to help post-training quantization
-        const float factor = scale->get<float>(output)
-            / std::sqrt(epsilon + ((b_var->get<float>(output) > 1.0e-12 || count == 0)
-                        ? b_var->get<float>(output) : meanVariance));
+        const float factor = scale->get<float>(outChId)
+            / std::sqrt(epsilon + ((b_var->get<float>(outChId) > 1.0e-12 || count == 0)
+                        ? b_var->get<float>(outChId) : meanVariance));
         // Weights adjustments
         for (std::size_t channel = 0; channel < channelsSize; ++channel) {
             // TODO : Suppose kerneldims = 2
             for(std::size_t k0 = 0; k0 < kernelDims[0]; ++ k0){
                 for(std::size_t k1 = 0; k1 < kernelDims[1]; ++ k1){
-                    std::vector<DimSize_t> currentIdx = {output, channel, k0, k1};
+                    std::vector<DimSize_t> currentIdx = {outChId, channel, k0, k1};
                     // TODO : suppose weights are float
                     float weightValue = weight->get<float>(currentIdx);
                     weight->set<float>(currentIdx, weightValue*factor); // update the Conv weights in place
@@ -109,37 +96,49 @@ void Aidge::fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes){
         }
 
         // TODO : check if noBias==true is set, then set biasValue to 0
-        float biasValue = bias->get<float>(output);
+        float biasValue = bias->get<float>(outChId);
 
-        biasValue = shift->get<float>(output) + (biasValue - b_mean->get<float>(output)) * factor;
+        biasValue = shift->get<float>(outChId) + (biasValue - b_mean->get<float>(outChId)) * factor;
 
-        bias->set<float>(output, biasValue);
+        bias->set<float>(outChId, biasValue);
 
     }
 
     GraphView::replace(std::set<std::shared_ptr<Node>>({
-        batchnorm,
-        batchnorm->input(1).first,
-        batchnorm->input(2).first,
-        batchnorm->input(3).first,
-        batchnorm->input(4).first
+        batchnormNode,
+        batchnormNode->input(1).first,
+        batchnormNode->input(2).first,
+        batchnormNode->input(3).first,
+        batchnormNode->input(4).first
         }), {});
 
 }
 
-void Aidge::fuseBatchNorm(std::shared_ptr<GraphView> graphView){
-    std::map<std::string,NodeRegex*> nodesRegex ;
-    nodesRegex["BatchNorm"] = new NodeRegex("BatchNorm");
-    nodesRegex["Conv"] = new NodeRegex("Conv");
-    nodesRegex["FC"] = new NodeRegex("FC");
+void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::MatchSolution> solution) {
 
+    assert(solution->at("BatchNorm").size() == 1 && "Wrong number of BatchNorm nodes to replace\n");
+    assert(solution->at("OP").size() == 1 && "Wrong number of OP nodes to replace\n");
 
-    std::vector<std::string> seqRegex;
-    seqRegex.push_back("Conv -> BatchNorm;"); // TODO: Add (Conv | FC)
-    GRegex GReg(nodesRegex, seqRegex);
-    Match matches = GReg.match(graphView);
-    std::vector<std::set<std::shared_ptr<Node>>> matchNodes = matches.getMatchNodes();
-    for (size_t i = 0; i < matches.getNbMatch(); ++i) {
-        fuseBatchNorm(matchNodes[i]);
+    for (const auto& op : solution->at("OP")) {
+        for (const auto& batchNorm : solution->at("BatchNorm")) {
+            fuseBatchNorm(op, batchNorm);
+        }
     }
+
 }
+
+void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::GraphView> graphView) {
+
+    std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
+    regex->setNodeKey("BatchNorm","getType($) =='BatchNorm'");
+    regex->setNodeKey("OP","getType($) =='Conv'");//  || getType($) =='FC' ");
+
+    regex->addQuery("OP -> BatchNorm");
+
+    for (const auto& solution : regex->match(graphView)) {
+        fuseBatchNorm(solution);
+    }
+}
\ No newline at end of file
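
For reference, the folding arithmetic applied by this recipe can be checked in isolation. The following is a minimal, self-contained sketch, not the Aidge API: the function name and the flattened weight layout are illustrative. It mirrors the per-channel update above, including the zero-variance fallback to the mean variance.

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Illustrative only: folds BatchNorm statistics into per-channel Conv
    // weights and bias, mirroring the recipe above.
    void foldBatchNorm(std::vector<float>& weights,  // flattened [outCh * weightsPerChannel]
                       std::vector<float>& bias,     // [outCh]
                       const std::vector<float>& scale,
                       const std::vector<float>& shift,
                       const std::vector<float>& mean,
                       const std::vector<float>& var,
                       float epsilon, float meanVariance, unsigned int count)
    {
        const std::size_t outCh = bias.size();
        const std::size_t weightsPerChannel = weights.size() / outCh;
        for (std::size_t c = 0; c < outCh; ++c) {
            // Zero-variance channels fall back to the mean variance.
            const float v = (var[c] > 1.0e-12f || count == 0) ? var[c] : meanVariance;
            const float factor = scale[c] / std::sqrt(epsilon + v);
            for (std::size_t k = 0; k < weightsPerChannel; ++k)
                weights[c * weightsPerChannel + k] *= factor;
            bias[c] = shift[c] + (bias[c] - mean[c]) * factor;
        }
    }

Quick sanity check: with scale = 1, shift = 0, mean = 0 and var = 1, each weight is scaled by 1/sqrt(1 + epsilon), i.e. left essentially unchanged for a small epsilon.
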
diff --git a/src/recipies/FuseMulAdd.cpp b/src/recipies/FuseMulAdd.cpp
index 528d57e31a5ecf3f5a633a20205e79f7926a1f61..d37f4749635b2bf76d10f7f8de3a44e254c56347 100644
--- a/src/recipies/FuseMulAdd.cpp
+++ b/src/recipies/FuseMulAdd.cpp
@@ -15,47 +15,34 @@
 #include <string>
 
 #include "aidge/operator/FC.hpp"
-#include "aidge/utils/Recipies.hpp"
+#include "aidge/recipies/Recipies.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/operator/MatMul.hpp"
 
-// Graph Regex
-#include "aidge/graphmatching/GRegex.hpp"
-#include "aidge/graphmatching/NodeRegex.hpp"
-using namespace Aidge;
+//Graph Regex
+#include "aidge/graphRegex/GraphRegex.hpp"
 
-void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
+
+void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<Aidge::Node> addNode) {
     // Fuse MatMul & Add into FC
     // Inputs : old nodes (pointers to MatMul & Add)
 
-    assert(nodes.size() == 2 && "Wrong number of nodes to replace\n");
-    // Too bad we lose information on the type after matching, how to keep the information after matching (not only for the type) ?
+    assert((matmulNode->type() == "MatMul" && addNode->type() == "Add") && "Wrong type for the nodes to replace");
 
-    // Step 0 : Assert the nodes types are correct to be fused
-    std::shared_ptr<Node> add;
-    std::shared_ptr<Node> matmul;
-    for (const auto& element : nodes) {
-        assert((element->type() == "MatMul" || element->type() == "Add") && "Wrong type for the nodes to replace");
-        if (element->type() == "MatMul"){
-            matmul = element;
-        }
-        else if (element->type() == "Add") {
-            add = element;
-        }
-    }
 
     // Step 1 : Create FC
     // Fetch the output dimension through the bias size
-    std::shared_ptr<Node> bias = (add->getParent(1)) ? add->getParent(1)->cloneSharedOperators() : nullptr;
+    std::shared_ptr<Node> bias = (addNode->getParent(1)) ? addNode->getParent(1)->cloneSharedOperators() : nullptr;
 
-    if (!(matmul->getParent(1))) {
+    if (!(matmulNode->getParent(1))) {
         AIDGE_INTERNAL_ASSERT("No weight detected to produce the fuseMulAdd recipe.");
     }
-    std::shared_ptr<Node> weight = matmul->getParent(1)->cloneSharedOperators();
-    DimSize_t outSize = weight->getOperator()->output(0).dims<2>()[1];
+    std::shared_ptr<Node> weight = matmulNode->getParent(1)->cloneSharedOperators();
+    const DimSize_t outSize = std::dynamic_pointer_cast<MatMul_Op>(matmulNode->getOperator())->getAttr<DimSize_t>("OutChannels");
 
     // Instantiate FC
     //std::shared_ptr<Node> fc = FC(dim[0], false, "Fc");
@@ -74,21 +61,37 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
         // Case 2 : If not all nodes are in a graph view : only delete the nodes from the graphview
         // Maybe create a central mechanism to update automatically all graph views rather than each node have graphview presence memory?
     auto newNodes = std::set<std::shared_ptr<Node>>({fc, weight, fc->getParent(2)});
-    GraphView::replace({matmul, add, add->getParent(1), matmul->getParent(1)}, newNodes);
+    GraphView::replace({matmulNode, addNode, addNode->getParent(1), matmulNode->getParent(1)}, newNodes);
 
 }
 
-void Aidge::fuseMulAdd(std::shared_ptr<GraphView> graphView){
-
-    std::map<std::string,NodeRegex*> nodesRegex ;
-    nodesRegex["MatMul"] = new NodeRegex("MatMul");
-    nodesRegex["Add"] = new NodeRegex("Add");
-    std::vector<std::string> seqRegex;
-    seqRegex.push_back("MatMul -> Add;");
-    GRegex GReg(nodesRegex, seqRegex);
-    Match matches = GReg.match(graphView);
-    std::vector<std::set<std::shared_ptr<Node>>> matchNodes = matches.getMatchNodes();
-    for (size_t i = 0; i < matches.getNbMatch(); ++i) {
-        fuseMulAdd(matchNodes[i]);
+
+void Aidge::fuseMulAdd(std::shared_ptr<Aidge::MatchSolution> solution){
+
+    assert(solution->at("MatMul").size() == 1 && "Wrong number of MatMul nodes to replace\n");
+    assert(solution->at("Add").size() == 1 && "Wrong number of Add nodes to replace\n");
+
+    for (const auto& matmulNode : solution->at("MatMul")) {
+        for (const auto& addNode : solution->at("Add")) {
+            fuseMulAdd(matmulNode, addNode);
+        }
     }
 }
+
+void Aidge::fuseMulAdd(std::shared_ptr<Aidge::GraphView> graphView){
+
+    std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
+    regex->setNodeKey("Add","getType($) =='Add'");
+    regex->setNodeKey("MatMul","getType($) =='MatMul'");
+    regex->addQuery("MatMul -> Add ;");
+
+    for (const auto& solution : regex->match(graphView)) {
+        fuseMulAdd(solution);
+    }
+}
\ No newline at end of file
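
Both fuseBatchNorm and fuseMulAdd now share the same GraphRegex-driven structure: declare node keys, register a query, then hand each MatchSolution to the node-level overload. A hedged sketch of that pattern, restricted to the calls that appear in this patch (the wrapper name applyMatMulAddQuery is hypothetical):

    #include <memory>
    #include "aidge/graph/GraphView.hpp"
    #include "aidge/graphRegex/GraphRegex.hpp"
    #include "aidge/recipies/Recipies.hpp"

    // Illustrative driver following the pattern used by the recipes above.
    void applyMatMulAddQuery(std::shared_ptr<Aidge::GraphView> graphView) {
        auto regex = std::make_shared<Aidge::GraphRegex>();
        regex->setNodeKey("MatMul", "getType($) =='MatMul'");
        regex->setNodeKey("Add", "getType($) =='Add'");
        regex->addQuery("MatMul -> Add ;");

        for (const auto& solution : regex->match(graphView)) {
            // Each MatchSolution maps a node key to the set of matched nodes.
            for (const auto& matmulNode : solution->at("MatMul")) {
                for (const auto& addNode : solution->at("Add")) {
                    Aidge::fuseMulAdd(matmulNode, addNode);
                }
            }
        }
    }
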
diff --git a/src/recipies/LabelGraph.cpp b/src/recipies/LabelGraph.cpp
index 369336f7981198f962d8ab949309005be9ac5eb9..6966bb81d000b62d904f800233048fa58998c6fb 100644
--- a/src/recipies/LabelGraph.cpp
+++ b/src/recipies/LabelGraph.cpp
@@ -22,7 +22,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == Conv_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<Conv_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<ConvAttr::KernelDims>(), op->getAttr<ConvAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvAttr::KernelDims>(), op->template getAttr<ConvAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -30,7 +30,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == ConvDepthWise_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<ConvDepthWise_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<ConvDepthWiseAttr::KernelDims>(), op->getAttr<ConvDepthWiseAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvDepthWiseAttr::KernelDims>(), op->template getAttr<ConvDepthWiseAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -38,7 +38,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == AvgPooling_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<AvgPooling_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<AvgPoolingAttr::KernelDims>(), op->getAttr<AvgPoolingAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<AvgPoolingAttr::KernelDims>(), op->template getAttr<AvgPoolingAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
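
The only change in LabelGraph.cpp is the template disambiguator on the getAttr calls. In C++, when a member template is invoked through an expression whose type depends on a template parameter, the keyword is required so that the following "<" parses as a template-argument list rather than a less-than operator; it is also legal, and harmless, where the type is not dependent. A standalone illustration, independent of the Aidge types:

    #include <iostream>

    struct Attrs {
        template <typename T> T getAttr() const { return T{42}; }
    };

    // "holder" has a dependent type, so without "template" the "<" of
    // getAttr<int> would be parsed as a comparison operator.
    template <typename T>
    int readAttr(const T* holder) {
        return holder->template getAttr<int>();
    }

    int main() {
        Attrs a;
        std::cout << readAttr(&a) << '\n'; // prints 42
    }
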
diff --git a/src/recipies/RemoveFlatten.cpp b/src/recipies/RemoveFlatten.cpp
index fdfdbfd4aea7543dde31d5f5d4845e54e930feac..d571b53023b7665c25aedc869628045b3b13d509 100644
--- a/src/recipies/RemoveFlatten.cpp
+++ b/src/recipies/RemoveFlatten.cpp
@@ -13,38 +13,43 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
-#include "aidge/utils/Recipies.hpp"
+#include "aidge/recipies/Recipies.hpp"
 
-// Graph Regex
-#include "aidge/graphmatching/GRegex.hpp"
-#include "aidge/graphmatching/NodeRegex.hpp"
 
+//Graph Regex
+#include "aidge/graphRegex/GraphRegex.hpp"
 
-namespace Aidge {
-    void removeFlatten(std::set<std::shared_ptr<Node>> nodes) {
-        assert(nodes.size() == 2 && "Wrong number of nodes to replace\n");
-        std::shared_ptr<Node> flatten;
-        for (const auto& element : nodes) {
-            assert((element->type() == "FC" || element->type() == "Flatten") && "Wrong type for the nodes to replace");
-            if (element->type() == "Flatten"){
-                flatten = element;
-            }
-        }
 
+namespace Aidge {
+    void removeFlatten(std::shared_ptr<Node> flatten) {
+
         GraphView::replace({flatten}, {});
     }
 
+    void removeFlatten(std::shared_ptr<MatchSolution> solution){
+
+        assert(solution->at("FC").size() == 1 && "Wrong number of FC nodes to replace\n");
+        assert(solution->at("Flatten").size() == 1 && "Wrong number of Flatten nodes to replace\n");
+
+        for (const auto& flatten : solution->at("Flatten")) {
+            removeFlatten(flatten);
+        }
+    }
+
     void removeFlatten(std::shared_ptr<GraphView> graphView){
-        std::map<std::string,NodeRegex*> nodesRegex ;
-        nodesRegex["Flatten"] = new NodeRegex("Flatten");
-        nodesRegex["FC"] = new NodeRegex("FC");
-        std::vector<std::string> seqRegex;
-        seqRegex.push_back("Flatten->FC;");
-        GRegex GReg(nodesRegex, seqRegex);
-        Match matches = GReg.match(graphView);
-        std::vector<std::set<std::shared_ptr<Node>>> matchNodes = matches.getMatchNodes();
-        for (size_t i = 0; i < matches.getNbMatch(); ++i) {
-            removeFlatten(matchNodes[i]);
+
+        std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
+        regex->setNodeKey("Flatten","getType($) =='Flatten'");
+        regex->setNodeKey("FC","getType($) =='FC'");
+        regex->addQuery("Flatten->FC");
+
+        for (const auto& solution : regex->match(graphView)) {
+            removeFlatten(solution);
         }
     }
 }
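
Taken together, the graphView-level overloads in this patch compose into a simple cleanup pass. The chaining below is a hypothetical usage sketch; the wrapper name and the ordering of the recipes are not defined by this patch:

    #include <memory>
    #include "aidge/graph/GraphView.hpp"
    #include "aidge/recipies/Recipies.hpp"

    // Hypothetical post-import cleanup chaining the three recipes above.
    void simplifyForInference(std::shared_ptr<Aidge::GraphView> graphView) {
        Aidge::removeFlatten(graphView);  // drop Flatten nodes feeding FC
        Aidge::fuseMulAdd(graphView);     // MatMul -> Add becomes FC
        Aidge::fuseBatchNorm(graphView);  // fold BatchNorm into the preceding Conv
    }
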
diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt
index 9d9f81516b0cd2611484ee9e3e06e838833200db..5ccfa3832a8ce2522f18ab07e11a78cf8b462a40 100644
--- a/unit_tests/CMakeLists.txt
+++ b/unit_tests/CMakeLists.txt
@@ -10,6 +10,8 @@ FetchContent_MakeAvailable(Catch2)
 
 file(GLOB_RECURSE src_files "*.cpp")
 
+#file(GLOB_RECURSE src_files "graphRegex/Test_GraphRegex.cpp")
+
 add_executable(tests${module_name} ${src_files})
 
 target_link_libraries(tests${module_name} PUBLIC ${module_name})
diff --git a/unit_tests/graph/Test_Connector.cpp b/unit_tests/graph/Test_Connector.cpp
index 4c78fba6f26ac02633bcc64c9d992af2dd39f6ac..a3bcc5783bd1de89ebf82d2b6078a26bdd49eaa7 100644
--- a/unit_tests/graph/Test_Connector.cpp
+++ b/unit_tests/graph/Test_Connector.cpp
@@ -27,19 +27,19 @@ TEST_CASE("[core/graph] Connector(Constructor)") {
         REQUIRE(x.node() == nullptr);
     }
     SECTION("0 output") {
-        std::shared_ptr<Node> node = GenericOperator("Producer",1,1,0);
+        std::shared_ptr<Node> node = GenericOperator("Producer", 1, 0, 0);
         Connector x = Connector(node);
         REQUIRE(x.index() == gk_IODefaultIndex);
         REQUIRE(x.node() == node);
     }
     SECTION("1 output") {
-        std::shared_ptr<Node> node = GenericOperator("ReLU",1,1,1);
+        std::shared_ptr<Node> node = GenericOperator("ReLU", 1, 0, 1);
         Connector x = Connector(node);
         REQUIRE(x.index() == 0);
         REQUIRE(x.node() == node);
     }
     SECTION("Several outputs") {
-        std::shared_ptr<Node> node = GenericOperator("Split",1,1,2);
+        std::shared_ptr<Node> node = GenericOperator("Split", 1, 0, 2);
         Connector x = Connector(node);
         REQUIRE(x.index() == gk_IODefaultIndex);
         REQUIRE(x.node() == node);
@@ -48,30 +48,30 @@ TEST_CASE("[core/graph] Connector(Constructor)") {
 
 TEST_CASE("Connector connections Node", "[Connector]") {
     SECTION("0 input / 0 output") {
-        std::shared_ptr<Node> fic = GenericOperator("Display",0,0,0);
+        std::shared_ptr<Node> fic = GenericOperator("Display", 0, 0, 0);
         Connector x;
         x = (*fic)({});
         REQUIRE(x.node() == fic);
     }
     SECTION("1 input / 0 output") {
-        std::shared_ptr<Node> fic = GenericOperator("Loss",1,1,0);
+        std::shared_ptr<Node> fic = GenericOperator("Loss", 1, 0, 0);
         Connector x;
         x = (*fic)({x});
         REQUIRE(x.node() == fic);
     }
     SECTION("0 input / 1 output") { // Producers
-        std::shared_ptr<Node> fic = GenericOperator("Producer",0,0,1);
+        std::shared_ptr<Node> fic = GenericOperator("Producer", 0, 0, 1);
         Connector x = (*fic)({});
         REQUIRE(x.node() == fic);
     }
     SECTION("1 input / 1 output") {
-        std::shared_ptr<Node> fic = GenericOperator("Conv",1,1,1);
+        std::shared_ptr<Node> fic = GenericOperator("Conv", 1, 0, 1);
         Connector x(GenericOperator("Producer",0,0,1));
         x = (*fic)({x});
         REQUIRE(x.node() ==fic);
     }
     SECTION("2+ inputs / 1 output") { // ElemWise
-        std::shared_ptr<Node> fic = GenericOperator("fictive",3,3,1);
+        std::shared_ptr<Node> fic = GenericOperator("fictive", 3, 0, 1);
         Connector x1(GenericOperator("fictive",0,0,1));
         Connector x2(GenericOperator("fictive",0,0,1));
         Connector x3(GenericOperator("fictive",0,0,1));
@@ -79,9 +79,9 @@ TEST_CASE("Connector connections Node", "[Connector]") {
         REQUIRE(x.node() ==fic);
     }
     SECTION("1 input / 2+ outputs") { // Slice
-        std::shared_ptr<Node> fic = GenericOperator("fictive",1,1,3);
+        std::shared_ptr<Node> fic = GenericOperator("fictive", 1, 0, 3);
 
-        Connector x(GenericOperator("fictive2",0,0,1));
+        Connector x(GenericOperator("fictive2", 0, 0, 1));
         Connector y;
         REQUIRE_NOTHROW(y = (*fic)({x}));
         REQUIRE(y[0].node() == fic);
@@ -92,16 +92,16 @@ TEST_CASE("Connector connections Node", "[Connector]") {
 
 TEST_CASE("GraphGeneration from Connector", "[GraphView]") {
 
-    auto node01 = GenericOperator("Conv",0,0,1,"g_conv1");
-    auto node02 = GenericOperator("ReLU",1,1,1,"g_relu");
-    auto node03 = GenericOperator("g_maxpool1", 1,1,1);
-    auto node04 = GenericOperator("g_conv2_par1",1,1,1);
-    auto node05 = GenericOperator("g_relu2_par1", 1,1,1);
-    auto node06 = GenericOperator("g_conv2_par2", 1,1,1);
-    auto node07 = GenericOperator("g_relu2_par2", 1,1,1);
-    auto node08 = GenericOperator("g_concat", 2,2,1);
-    auto node09 = GenericOperator("g_conv3", 1, 1,1);
-    auto node10 = GenericOperator("g_matmul1", 2,2,1);
+    auto node01 = GenericOperator("Conv", 0, 0, 1,"g_conv1");
+    auto node02 = GenericOperator("ReLU", 1, 0, 1,"g_relu");
+    auto node03 = GenericOperator("g_maxpool1", 1, 0, 1);
+    auto node04 = GenericOperator("g_conv2_par1", 1, 0, 1);
+    auto node05 = GenericOperator("g_relu2_par1", 1, 0, 1);
+    auto node06 = GenericOperator("g_conv2_par2", 1, 0, 1);
+    auto node07 = GenericOperator("g_relu2_par2", 1, 0, 1);
+    auto node08 = GenericOperator("g_concat", 2, 0, 1);
+    auto node09 = GenericOperator("g_conv3", 1, 0, 1);
+    auto node10 = GenericOperator("g_matmul1", 2, 0, 1);
     Connector a = (*node01)({});
     Connector x = (*node02)({a});
     x = (*node03)({x});
@@ -121,18 +121,18 @@ TEST_CASE("GraphGeneration from Connector", "[GraphView]") {
 TEST_CASE("Connector connection GraphView", "[Connector]") {
     SECTION("1 input") {
         Connector x = Connector();
-        auto prod = GenericOperator("Producer",0,0,1);
+        auto prod = GenericOperator("Producer", 0, 0, 1);
         auto g = Residual({
-            GenericOperator("g_conv1", 1,1,1),
-            GenericOperator("g_relu", 1,1,1),
-            GenericOperator("g_maxpool1", 1,1,1),
+            GenericOperator("g_conv1", 1, 0, 1),
+            GenericOperator("g_relu", 1, 0, 1),
+            GenericOperator("g_maxpool1", 1, 0, 1),
             Parallel({
-                Sequential({GenericOperator("g_conv2_par1",1,1,1), GenericOperator("g_relu2_par1", 1,1,1)}),
-                Sequential({GenericOperator("g_conv2_par2", 1,1,1), GenericOperator("g_relu2_par2", 1,1,1)})
+                Sequential({GenericOperator("g_conv2_par1", 1, 0, 1), GenericOperator("g_relu2_par1", 1, 0, 1)}),
+                Sequential({GenericOperator("g_conv2_par2", 1, 0, 1), GenericOperator("g_relu2_par2", 1, 0, 1)})
             }),
-            GenericOperator("g_concat", 2,2,1),
-            GenericOperator("g_conv3", 1, 1,1),
-            GenericOperator("g_matmul1", 2,2,1)
+            GenericOperator("g_concat", 2, 0, 1),
+            GenericOperator("g_conv3", 1, 0, 1),
+            GenericOperator("g_matmul1", 2, 0, 1)
         });
         REQUIRE(nodePtrTo(g->getOrderedInputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"g_conv1", 0}}));
         REQUIRE(nodePtrTo(g->getOrderedOutputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"g_matmul1", 0}}));
@@ -142,20 +142,20 @@ TEST_CASE("Connector connection GraphView", "[Connector]") {
         std::shared_ptr<GraphView> g2 = generateGraph({x});
         std::shared_ptr<GraphView> g3 = g;
         g3->add(prod);
-        REQUIRE(*g3== *g2);
+        REQUIRE(*g3 == *g2);
     }
     SECTION("2+ inputs") {
-        Connector x = (*GenericOperator("Producer",0,0,1))({});
-        Connector y = (*GenericOperator("Producer",0,0,1))({});
-        Connector z = (*GenericOperator("Producer",0,0,1))({});
-        auto g = Sequential({GenericOperator("ElemWise", 3,3,1),
+        Connector x = (*GenericOperator("Producer", 0, 0, 1))({});
+        Connector y = (*GenericOperator("Producer", 0, 0, 1))({});
+        Connector z = (*GenericOperator("Producer", 0, 0, 1))({});
+        auto g = Sequential({GenericOperator("ElemWise", 3, 0, 1),
             Parallel({
-                Sequential({GenericOperator("g_conv2_par1",1,1,1), GenericOperator("g_relu2_par1", 1,1,1)}),
-                Sequential({GenericOperator("g_conv2_par2", 1,1,1), GenericOperator("g_relu2_par2", 1,1,1)}),
-                Sequential({GenericOperator("g_conv2_par3", 1,1,1), GenericOperator("g_relu2_par3", 1,1,1)})
+                Sequential({GenericOperator("g_conv2_par1", 1, 0, 1), GenericOperator("g_relu2_par1", 1, 0, 1)}),
+                Sequential({GenericOperator("g_conv2_par2", 1, 0, 1), GenericOperator("g_relu2_par2", 1, 0, 1)}),
+                Sequential({GenericOperator("g_conv2_par3", 1, 0, 1), GenericOperator("g_relu2_par3", 1, 0, 1)})
             }),
-            GenericOperator("g_concat", 3,3,1),
-            GenericOperator("g_conv3", 1, 1,1)
+            GenericOperator("g_concat", 3, 0, 1),
+            GenericOperator("g_conv3", 1, 0, 1)
         });
         REQUIRE(nodePtrTo(g->getOrderedInputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"ElemWise", 0}, {"ElemWise", 1}, {"ElemWise", 2}}));
         REQUIRE(nodePtrTo(g->getOrderedOutputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"g_conv3", 0}}));
@@ -172,12 +172,12 @@ TEST_CASE("Connector connection GraphView", "[Connector]") {
 TEST_CASE("Connector Mini-graph", "[Connector]") {
     Connector x = Connector();
     Connector y = Connector();
-    x = (*GenericOperator("Producer",0,0,1))({});
-    y = (*GenericOperator("Producer",0,0,1))({});
+    x = (*GenericOperator("Producer", 0, 0, 1))({});
+    y = (*GenericOperator("Producer", 0, 0, 1))({});
     for (int i = 0; i<5; ++i) {
-        x = (*GenericOperator("Conv",1,1,1))({x});
+        x = (*GenericOperator("Conv", 1, 0, 1))({x});
     }
-    y = (*GenericOperator("ElemWise",2,2,1))({y, x});
+    y = (*GenericOperator("ElemWise", 2, 0, 1))({y, x});
     std::shared_ptr<GraphView> g = generateGraph({y});
     REQUIRE(nodePtrTo(g->getOrderedInputs()) == std::vector<std::pair<std::string, IOIndex_t>>({}));
     REQUIRE(nodePtrTo(g->getOrderedOutputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"ElemWise", 0}}));
@@ -192,16 +192,16 @@ TEST_CASE("Structural description - Sequential", "[GraphView]") {
     //     REQUIRE(g1->outputNodes() == std::set<std::shared_ptr<Node>>());
     // }
     SECTION("1-element Sequence") {
-        std::shared_ptr<Node> fic = GenericOperator("node1", 1,1,1);
+        std::shared_ptr<Node> fic = GenericOperator("node1", 1, 0, 1);
         std::shared_ptr<GraphView> g2 = Sequential({fic});
         REQUIRE(g2->getNodes() == std::set<std::shared_ptr<Node>>({fic}));
         REQUIRE(g2->inputNodes() == std::set<std::shared_ptr<Node>>({fic}));
         REQUIRE(g2->outputNodes() == std::set<std::shared_ptr<Node>>({fic}));
     }
     SECTION("several-elements simple Sequence") {
-        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1,1,1);
+        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1, 0, 1);
         std::shared_ptr<GraphView> g2 = Sequential({fic1, fic2, fic3});
         REQUIRE(g2->getNodes() == std::set<std::shared_ptr<Node>>({fic1, fic2, fic3}));
         REQUIRE(g2->inputNodes() == std::set<std::shared_ptr<Node>>({fic1}));
@@ -218,37 +218,37 @@ TEST_CASE("Structural description - Parallel", "[GraphView]") {
     //     REQUIRE(g1->outputNodes() == std::set<std::shared_ptr<Node>>());
     // }
     SECTION("1-element Parallel") {
-        std::shared_ptr<Node> fic = GenericOperator("node1", 1,1,1);
+        std::shared_ptr<Node> fic = GenericOperator("node1", 1, 0, 1);
         std::shared_ptr<GraphView> g2 = Parallel({fic});
         REQUIRE(g2->getNodes() == std::set<std::shared_ptr<Node>>({fic}));
         REQUIRE(g2->inputNodes() == std::set<std::shared_ptr<Node>>({fic}));
         REQUIRE(g2->outputNodes() == std::set<std::shared_ptr<Node>>({fic}));
     }
     SECTION("several-elements simple Parallel") {
-        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1,1,1);
+        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1, 0, 1);
         std::shared_ptr<GraphView> g2 = Parallel({fic1, fic2, fic3});
         REQUIRE(g2->getNodes() == std::set<std::shared_ptr<Node>>({fic1, fic2, fic3}));
         REQUIRE(g2->inputNodes() == std::set<std::shared_ptr<Node>>({fic1, fic2, fic3}));
         REQUIRE(g2->outputNodes() == std::set<std::shared_ptr<Node>>({fic1, fic2, fic3}));
     }
     SECTION("1 Graph in Parallel") {
-        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1,1,1);
+        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1, 0, 1);
         std::shared_ptr<GraphView> g2 = Parallel({Sequential({fic1, fic2, fic3})});
         REQUIRE(g2->getNodes() == std::set<std::shared_ptr<Node>>({fic1, fic2, fic3}));
         REQUIRE(g2->inputNodes() == std::set<std::shared_ptr<Node>>({fic1}));
         REQUIRE(g2->outputNodes() == std::set<std::shared_ptr<Node>>({fic3}));
     }
     SECTION("several Sequential in Parallel") {
-        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic4 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic5 = GenericOperator("node1", 1,1,1);
-        std::shared_ptr<Node> fic6 = GenericOperator("node1", 1,1,1);
+        std::shared_ptr<Node> fic1 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic2 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic3 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic4 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic5 = GenericOperator("node1", 1, 0, 1);
+        std::shared_ptr<Node> fic6 = GenericOperator("node1", 1, 0, 1);
         std::shared_ptr<GraphView> g2 = Parallel({Sequential({fic1, fic2, fic3}),Sequential({fic4, fic5, fic6})});
         REQUIRE(g2->getNodes() == std::set<std::shared_ptr<Node>>({fic1, fic2, fic3, fic4, fic5, fic6}));
         REQUIRE(g2->inputNodes() == std::set<std::shared_ptr<Node>>({fic1, fic4}));
@@ -257,13 +257,13 @@ TEST_CASE("Structural description - Parallel", "[GraphView]") {
 }
 
 TEST_CASE("Structural Description - Complex Graph", "[GraphView]") {
-    std::shared_ptr<Node> firstLayer = GenericOperator("first", 1,1,1);
+    std::shared_ptr<Node> firstLayer = GenericOperator("first", 1, 0, 1);
     auto g = Sequential({firstLayer,
-                    GenericOperator("l2",1,1,1),
-                    Parallel({Sequential({GenericOperator("conv1",1,1,1), GenericOperator("relu1",1,1,1)}),
-                            Sequential({GenericOperator("conv2",1,1,1), GenericOperator("relu2",1,1,1)})}),
-                    GenericOperator("concat",2,2,1),
-                    GenericOperator("lastLayer",1,1,1)});
+                    GenericOperator("l2", 1, 0, 1),
+                    Parallel({Sequential({GenericOperator("conv1",1, 0, 1), GenericOperator("relu1", 1, 0, 1)}),
+                            Sequential({GenericOperator("conv2", 1, 0, 1), GenericOperator("relu2", 1, 0, 1)})}),
+                    GenericOperator("concat", 2, 0, 1),
+                    GenericOperator("lastLayer", 1, 0, 1)});
     REQUIRE(g->getNodes().size() == 8U);
     REQUIRE(g->inputNodes() == std::set<std::shared_ptr<Node>>({firstLayer}));
 }
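
The systematic rewrites in these tests, e.g. GenericOperator("ReLU",1,1,1) becoming GenericOperator("ReLU", 1, 0, 1), together with nbDataInputs() becoming nbData() in Test_GraphView.cpp, suggest that the middle arguments now count data inputs and parameter (Producer) inputs separately, instead of data inputs and total inputs. A few instantiations under that assumed reading of GenericOperator(type, nbData, nbParam, nbOut):

    #include <memory>
    #include "aidge/graph/Node.hpp"
    #include "aidge/operator/GenericOperator.hpp"

    // Assumed argument order after this change: (type, nbData, nbParam, nbOut).
    void arityExamples() {
        std::shared_ptr<Aidge::Node> prod = Aidge::GenericOperator("Producer", 0, 0, 1); // pure source
        std::shared_ptr<Aidge::Node> relu = Aidge::GenericOperator("ReLU", 1, 0, 1);     // 1 data input, no parameters
        std::shared_ptr<Aidge::Node> fc   = Aidge::GenericOperator("FC", 1, 2, 1);       // data + weight + bias
        prod->addChild(relu); // data edge, as in the tests above
        relu->addChild(fc);
    }
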
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 5acab4c44f2df8d20fa9b308e07170b6a6764522..36203532ed4fefca1421acab5e5827dbdc4ad9f7 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -133,13 +133,13 @@ TEST_CASE("[core/graph] GraphView(add)") {
         g->add(GOp1);
         std::shared_ptr<Node> GOp2 = GenericOperator("Fictive", 0, 0, 1, "Gop2");
         g->add(GOp2);
-        std::shared_ptr<Node> GOp3 = GenericOperator("Fictive", 1, 1, 0, "Gop3");
+        std::shared_ptr<Node> GOp3 = GenericOperator("Fictive", 1, 0, 0, "Gop3");
         g->add(GOp3);
         std::shared_ptr<Node> GOp4 = GenericOperator("Fictive", 0, 1, 0, "Gop4");
         g->add(GOp4);
-        std::shared_ptr<Node> GOp5 = GenericOperator("Fictive", 1, 1, 1, "Gop5");
+        std::shared_ptr<Node> GOp5 = GenericOperator("Fictive", 1, 0, 1, "Gop5");
         g->add(GOp5);
-        std::shared_ptr<Node> GOp6 = GenericOperator("Fictive", 1, 2, 1, "Gop6");
+        std::shared_ptr<Node> GOp6 = GenericOperator("Fictive", 1, 1, 1, "Gop6");
         g->add(GOp6);
         g->save("node_alone");
         REQUIRE(nodePtrTo(g->getOrderedInputs(), nodePtrToName) == std::vector<std::pair<std::string, IOIndex_t>>({{"Gop3", 0}, {"Gop4", 0}, {"Gop5", 0}, {"Gop6", 0}, {"Gop6", 1}}));
@@ -175,11 +175,11 @@ TEST_CASE("[core/graph] GraphView(add)") {
     SECTION("another GraphView") {
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph-1");
         std::shared_ptr<GraphView> g2 = std::make_shared<GraphView>("TestGraph-2");
-        auto conv = GenericOperator("Conv", 1, 1, 1, "c");
-        auto conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
-        auto conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
-        auto conv3 = GenericOperator("Conv", 1, 1, 1, "c3");
-        auto conv4 = GenericOperator("Conv", 1, 1, 1, "c4");
+        auto conv = GenericOperator("Conv", 1, 0, 1, "c");
+        auto conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+        auto conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+        auto conv3 = GenericOperator("Conv", 1, 0, 1, "c3");
+        auto conv4 = GenericOperator("Conv", 1, 0, 1, "c4");
         conv->addChild(conv1);
         conv1->addChild(conv2);
         conv2->addChild(conv3);
@@ -196,13 +196,13 @@ TEST_CASE("[core/graph] GraphView(add)") {
 
 TEST_CASE("[core/graph] GraphView(addChild)") {
     std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-    std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-    std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
-    std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
-    std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 1, 1, "c3");
-    std::shared_ptr<Node> conv3_5 = GenericOperator("Conv", 1, 1, 1, "c3.5");
-    std::shared_ptr<Node> conv4 = GenericOperator("Conv", 1, 1, 1, "c4");
-    std::shared_ptr<Node> conv5 = GenericOperator("Conv", 1, 1, 1, "c5");
+    std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+    std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+    std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+    std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 0, 1, "c3");
+    std::shared_ptr<Node> conv3_5 = GenericOperator("Conv", 1, 0, 1, "c3.5");
+    std::shared_ptr<Node> conv4 = GenericOperator("Conv", 1, 0, 1, "c4");
+    std::shared_ptr<Node> conv5 = GenericOperator("Conv", 1, 0, 1, "c5");
 
     g1->add(conv);
     SECTION("add(node)") {
@@ -277,12 +277,12 @@ TEST_CASE("[core/graph] GraphView(outputs)") {
 
 TEST_CASE("[core/graph] GraphView(save)") {
     std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-    std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-    std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
-    std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
-    std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 1, 1, "c3");
-    std::shared_ptr<Node> conv4 = GenericOperator("Conv", 1, 1, 1, "c4");
-    std::shared_ptr<Node> conv5 = GenericOperator("Conv", 1, 1, 1, "c5");
+    std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+    std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+    std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+    std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 0, 1, "c3");
+    std::shared_ptr<Node> conv4 = GenericOperator("Conv", 1, 0, 1, "c4");
+    std::shared_ptr<Node> conv5 = GenericOperator("Conv", 1, 0, 1, "c5");
 
     g1->add(conv);
     g1->addChild(conv1, "c");
@@ -297,9 +297,9 @@ TEST_CASE("[core/graph] GraphView(save)") {
 
 TEST_CASE("[core/graph] GraphView(resetConnections)") {
     SECTION("disconnect data input") {
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 3, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 2, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
         std::shared_ptr<Node> prod1 = GenericOperator("Prod", 0, 0, 1, "p1");
         std::shared_ptr<Node> prod2 = GenericOperator("Prod", 0, 0, 1, "p2");
         conv->addChild(conv1);
@@ -310,7 +310,7 @@ TEST_CASE("[core/graph] GraphView(resetConnections)") {
         conv1->resetConnections(false);
 
         REQUIRE(conv->output(0).size() == 0);
-        for (std::size_t i = 0; i < conv1->nbDataInputs(); ++i) {
+        for (std::size_t i = 0; i < conv1->nbData(); ++i) {
         REQUIRE((conv1->input(i) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
         }
         REQUIRE((conv1->input(1) == std::pair<std::shared_ptr<Node>, IOIndex_t>(prod1, 0)));
@@ -322,9 +322,9 @@ TEST_CASE("[core/graph] GraphView(resetConnections)") {
     }
 
     SECTION("disconnect data input + learnable parameters") {
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 3, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 2, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
         std::shared_ptr<Node> prod1 = GenericOperator("Prod", 0, 0, 1, "p1");
         std::shared_ptr<Node> prod2 = GenericOperator("Prod", 0, 0, 1, "p2");
         conv->addChild(conv1);
@@ -345,7 +345,7 @@ TEST_CASE("[core/graph] GraphView(resetConnections)") {
     }
 }
 
-TEST_CASE("Graph Forward dims", "[GraphView]") {
+TEST_CASE("[core/graph] GraphView(forwardDims)", "[GraphView][forwardDims]") {
     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
     auto conv1 = Conv(3, 32, {3, 3}, "conv1");
     auto conv2 = Conv(32, 64, {3, 3}, "conv2");
@@ -359,21 +359,21 @@ TEST_CASE("Graph Forward dims", "[GraphView]") {
     g->forwardDims();
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) == conv1->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getInput(1) == g->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getInput(2) == g->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
-        REQUIRE(conv2->getOperator()->getInput(1) == g->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getInput(2) == g->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
-        REQUIRE(conv3->getOperator()->getInput(1) == g->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(conv3->getOperator()->getInput(2) == g->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) == conv1->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(1) == g->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(2) == g->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == conv2->getOperator()->getRawInput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(1) == g->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(2) == g->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawOutput(0) == conv3->getOperator()->getRawInput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(1) == g->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(2) == g->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 
     SECTION("Check forwarded dims") {
-        REQUIRE(std::static_pointer_cast<Tensor>(conv1->getOperator()->getOutput(0))
+        REQUIRE(std::static_pointer_cast<Tensor>(conv1->getOperator()->getRawOutput(0))
                     ->dims() == std::vector<DimSize_t>({16, 32, 222, 222}));
-        REQUIRE(std::static_pointer_cast<Tensor>(conv2->getOperator()->getOutput(0))
+        REQUIRE(std::static_pointer_cast<Tensor>(conv2->getOperator()->getRawOutput(0))
                     ->dims() == std::vector<DimSize_t>({16, 64, 220, 220}));
     }
 }
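
The assertions above switch from getInput/getOutput to getRawInput/getRawOutput and only cast to Tensor where dims() is needed, which suggests the raw accessors return the untyped shared data pointer and are the right handles for identity comparison. A hedged helper in that spirit (the Operator header path is an assumption):

    #include <memory>
    #include "aidge/operator/Operator.hpp" // assumed header for Aidge::Operator

    // Connection check via the untyped raw accessors, as in the REQUIRE lines.
    bool isConnected(const std::shared_ptr<Aidge::Operator>& producer,
                     const std::shared_ptr<Aidge::Operator>& consumer) {
        return producer->getRawOutput(0) == consumer->getRawInput(0);
    }
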
@@ -386,10 +386,10 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
         auto otherInput = GenericOperator("Producer", 0, 0, 1, "other_input");
         auto matmulWeight = GenericOperator("Producer", 0, 0, 1, "matmul_w");
         auto addBias = GenericOperator("Producer", 0, 0, 1, "add_b");
-        auto other1 = GenericOperator("Other", 1, 1, 1, "other1");
-        auto other2 = GenericOperator("Other", 1, 1, 1, "other2");
-        auto matmul = GenericOperator("MatMul", 1, 2, 1, "matmul");
-        auto add = GenericOperator("Add", 1, 2, 1, "add");
+        auto other1 = GenericOperator("Other", 1, 0, 1, "other1");
+        auto other2 = GenericOperator("Other", 1, 0, 1, "other2");
+        auto matmul = GenericOperator("MatMul", 1, 1, 1, "matmul");
+        auto add = GenericOperator("Add", 1, 1, 1, "add");
         otherInput->addChild(other1);
         other1->addChild(matmul);
         matmul->addChild(add);
@@ -403,7 +403,7 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
         std::set<std::shared_ptr<Node>> nodeToReplace = std::set<std::shared_ptr<Node>>({matmulWeight, addBias, matmul, add});
 
         // create replacing graph
-        std::shared_ptr<Node> myFC = GenericOperator("FC", 1, 3, 1, "fc");
+        std::shared_ptr<Node> myFC = GenericOperator("FC", 1, 2, 1, "fc");
         auto newMatmulWeight = matmulWeight->cloneSharedOperators();
         newMatmulWeight->addChild(myFC, 0, 1);
         auto newAddBias = addBias->cloneSharedOperators();
@@ -419,9 +419,9 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
     SECTION("replace with nothing") {
         std::shared_ptr<GraphView> g = std::make_shared<GraphView>("TestGraph");
         auto r1 = GenericOperator("relu", 0, 0, 1);
-        auto r2 = GenericOperator("relu", 1, 1, 1);
-        auto r3 = GenericOperator("relu", 1, 1, 1);
-        auto r4 = GenericOperator("relu", 1, 1, 0);
+        auto r2 = GenericOperator("relu", 1, 0, 1);
+        auto r3 = GenericOperator("relu", 1, 0, 1);
+        auto r4 = GenericOperator("relu", 1, 0, 0);
         r1->addChild(r2);
         r2->addChild(r3);
         r3->addChild(r4);
@@ -437,20 +437,20 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
     SECTION("replace for tiling") {
         std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
         auto otherInput = GenericOperator("Producer", 0, 0, 1, "other_input");
-        auto other1 = GenericOperator("Other", 1, 1, 1, "other1");
-        auto myConv = GenericOperator("Conv", 1, 1, 1, "myConv");
-        auto other2 = GenericOperator("Other", 1, 1, 1, "other2");
+        auto other1 = GenericOperator("Other", 1, 0, 1, "other1");
+        auto myConv = GenericOperator("Conv", 1, 0, 1, "myConv");
+        auto other2 = GenericOperator("Other", 1, 0, 1, "other2");
         otherInput->addChild(other1);
         other1->addChild(myConv);
         myConv->addChild(other2);
         g->add({other1, myConv, other2});
 
         // create tiled Conv
-        auto conv1 =  GenericOperator("Conv", 1, 1, 1, "myConv1");
-        auto conv2 =  GenericOperator("Conv", 1, 1, 1, "myConv2");
-        auto conv3 =  GenericOperator("Conv", 1, 1, 1, "myConv3");
-        auto conv4 =  GenericOperator("Conv", 1, 1, 1, "myConv4");
-        auto concat = GenericOperator("Concat", 4, 4, 1, "myConcat");
+        auto conv1 =  GenericOperator("Conv", 1, 0, 1, "myConv1");
+        auto conv2 =  GenericOperator("Conv", 1, 0, 1, "myConv2");
+        auto conv3 =  GenericOperator("Conv", 1, 0, 1, "myConv3");
+        auto conv4 =  GenericOperator("Conv", 1, 0, 1, "myConv4");
+        auto concat = GenericOperator("Concat", 4, 0, 1, "myConcat");
         conv1->addChild(concat);
         conv2->addChild(concat);
         conv3->addChild(concat);
@@ -468,12 +468,12 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
     SECTION("Change every Nodes in a GraphView") {
         auto matmulWeight0 = GenericOperator("Producer", 0, 0, 1, "matmul_w0");
         auto addBias0 = GenericOperator("Producer", 0, 0, 1, "add_b0");
-        auto matmul0 = GenericOperator("MatMul", 1, 2, 1, "matmul0");
-        auto add0 = GenericOperator("Add", 1, 2, 1, "add0");
+        auto matmul0 = GenericOperator("MatMul", 1, 1, 1, "matmul0");
+        auto add0 = GenericOperator("Add", 1, 1, 1, "add0");
         auto matmulWeight1 = GenericOperator("Producer", 0, 0, 1, "matmul_w1");
         auto addBias1 = GenericOperator("Producer", 0, 0, 1, "add_b1");
-        auto matmul1 = GenericOperator("MatMul", 1, 2, 1, "matmul1");
-        auto add1 = GenericOperator("Add", 1, 2, 1, "add1");
+        auto matmul1 = GenericOperator("MatMul", 1, 1, 1, "matmul1");
+        auto add1 = GenericOperator("Add", 1, 1, 1, "add1");
 
         matmulWeight0 -> addChild(matmul0, 0, 1);
         addBias0 -> addChild(add0, 0, 1);
@@ -489,8 +489,8 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
         auto newAddBias0 = addBias0->cloneSharedOperators();
         auto newMatmulWeight1 = matmulWeight1->cloneSharedOperators();
         auto newAddBias1 = addBias1->cloneSharedOperators();
-        auto fc0 = GenericOperator("FC", 1, 3, 1, "fc0");
-        auto fc1 = GenericOperator("FC", 1, 3, 1, "fc1");
+        auto fc0 = GenericOperator("FC", 1, 2, 1, "fc0");
+        auto fc1 = GenericOperator("FC", 1, 2, 1, "fc1");
 
         newMatmulWeight0 -> addChild(fc0, 0, 1);
         newAddBias0 -> addChild(fc0, 0, 2);
@@ -517,15 +517,15 @@ TEST_CASE("[GraphView] clone") {
     g1->save("clone_g1");
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) == conv1->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getInput(1) == g1->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getInput(2) == g1->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
-        REQUIRE(conv2->getOperator()->getInput(1) == g1->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getInput(2) == g1->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
-        REQUIRE(conv3->getOperator()->getInput(1) == g1->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(conv3->getOperator()->getInput(2) == g1->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) == conv1->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(1) == g1->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(2) == g1->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == conv2->getOperator()->getRawInput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(1) == g1->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(2) == g1->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawOutput(0) == conv3->getOperator()->getRawInput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(1) == g1->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(2) == g1->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 
     auto g2 = g1->clone();
@@ -561,27 +561,27 @@ TEST_CASE("[GraphView] clone") {
     }
 
     SECTION("Check new connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) != g2->getNode("conv1")->getOperator()->getInput(0));
-        REQUIRE(g1->getNode("conv1")->getOperator()->getInput(1) != g2->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv1")->getOperator()->getInput(2) != g2->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv1")->getOperator()->getOutput(0) != g2->getNode("conv2")->getOperator()->getInput(0));
-        REQUIRE(g1->getNode("conv2")->getOperator()->getInput(1) != g2->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv2")->getOperator()->getInput(2) != g2->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv2")->getOperator()->getOutput(0) != g2->getNode("conv3")->getOperator()->getInput(0));
-        REQUIRE(g1->getNode("conv3")->getOperator()->getInput(1) != g2->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv3")->getOperator()->getInput(2) != g2->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) != g2->getNode("conv1")->getOperator()->getRawInput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getRawInput(1) != g2->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getRawInput(2) != g2->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getRawOutput(0) != g2->getNode("conv2")->getOperator()->getRawInput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getRawInput(1) != g2->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getRawInput(2) != g2->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getRawOutput(0) != g2->getNode("conv3")->getOperator()->getRawInput(0));
+        REQUIRE(g1->getNode("conv3")->getOperator()->getRawInput(1) != g2->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv3")->getOperator()->getRawInput(2) != g2->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider2->getOperator()->getOutput(0) == g2->getNode("conv1")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(1) == g2->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(2) == g2->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(1) == g2->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(2) == g2->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(1) == g2->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(2) == g2->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider2->getOperator()->getRawOutput(0) == g2->getNode("conv1")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawInput(1) == g2->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawInput(2) == g2->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawInput(1) == g2->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawInput(2) == g2->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getRawInput(1) == g2->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getRawInput(2) == g2->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 }
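
This test and the next contrast the two cloning modes: clone() duplicates every operator, so raw inputs and outputs compare unequal across the two graphs, while cloneSharedProducers() rebuilds the graph but appears to keep the Producer operators shared, so weights and biases compare equal. A sketch of the difference:

    #include <memory>
    #include "aidge/graph/GraphView.hpp"

    // Hedged sketch of the two cloning modes exercised by these tests.
    void cloneModes(std::shared_ptr<Aidge::GraphView> g1) {
        auto deepCopy   = g1->clone();                // new operators everywhere: raw I/O pointers differ
        auto sharedProd = g1->cloneSharedProducers(); // new graph, shared Producer operators
        static_cast<void>(deepCopy);
        static_cast<void>(sharedProd);
    }
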
 
@@ -598,15 +598,15 @@ TEST_CASE("[GraphView] cloneSharedProducers") {
     g1->save("cloneSharedProducers_g1");
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) == conv1->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getInput(1) == g1->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getInput(2) == g1->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
-        REQUIRE(conv2->getOperator()->getInput(1) == g1->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getInput(2) == g1->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
-        REQUIRE(conv3->getOperator()->getInput(1) == g1->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(conv3->getOperator()->getInput(2) == g1->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) == conv1->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(1) == g1->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(2) == g1->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == conv2->getOperator()->getRawInput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(1) == g1->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(2) == g1->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawOutput(0) == conv3->getOperator()->getRawInput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(1) == g1->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(2) == g1->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 
     auto g2 = g1->cloneSharedProducers();
@@ -642,27 +642,27 @@ TEST_CASE("[GraphView] cloneSharedProducers") {
     }
 
     SECTION("Check new connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) != g2->getNode("conv1")->getOperator()->getInput(0));
-        REQUIRE(g1->getNode("conv1")->getOperator()->getInput(1) == g2->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv1")->getOperator()->getInput(2) == g2->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv1")->getOperator()->getOutput(0) != g2->getNode("conv2")->getOperator()->getInput(0));
-        REQUIRE(g1->getNode("conv2")->getOperator()->getInput(1) == g2->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv2")->getOperator()->getInput(2) == g2->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv2")->getOperator()->getOutput(0) != g2->getNode("conv3")->getOperator()->getInput(0));
-        REQUIRE(g1->getNode("conv3")->getOperator()->getInput(1) == g2->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(g1->getNode("conv3")->getOperator()->getInput(2) == g2->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) != g2->getNode("conv1")->getOperator()->getRawInput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getRawInput(1) == g2->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getRawInput(2) == g2->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getRawOutput(0) != g2->getNode("conv2")->getOperator()->getRawInput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getRawInput(1) == g2->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getRawInput(2) == g2->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getRawOutput(0) != g2->getNode("conv3")->getOperator()->getRawInput(0));
+        REQUIRE(g1->getNode("conv3")->getOperator()->getRawInput(1) == g2->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g1->getNode("conv3")->getOperator()->getRawInput(2) == g2->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider2->getOperator()->getOutput(0) == g2->getNode("conv1")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(1) == g2->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(2) == g2->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(1) == g2->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(2) == g2->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(1) == g2->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(2) == g2->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider2->getOperator()->getRawOutput(0) == g2->getNode("conv1")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawInput(1) == g2->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawInput(2) == g2->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawInput(1) == g2->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawInput(2) == g2->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getRawInput(1) == g2->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getRawInput(2) == g2->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 }
 
@@ -679,15 +679,15 @@ TEST_CASE("[GraphView] cloneSharedOperators") {
     g1->save("cloneSharedOperators_g1");
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) == conv1->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getInput(1) == g1->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getInput(2) == g1->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
-        REQUIRE(conv2->getOperator()->getInput(1) == g1->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getInput(2) == g1->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(conv2->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
-        REQUIRE(conv3->getOperator()->getInput(1) == g1->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(conv3->getOperator()->getInput(2) == g1->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) == conv1->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(1) == g1->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawInput(2) == g1->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == conv2->getOperator()->getRawInput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(1) == g1->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawInput(2) == g1->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(conv2->getOperator()->getRawOutput(0) == conv3->getOperator()->getRawInput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(1) == g1->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(conv3->getOperator()->getRawInput(2) == g1->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 
     auto g2 = g1->cloneSharedOperators();
@@ -719,15 +719,15 @@ TEST_CASE("[GraphView] cloneSharedOperators") {
     }
 
     SECTION("Check input-output connections") {
-        REQUIRE(dataProvider->getOperator()->getOutput(0) == g2->getNode("conv1")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(1) == g2->getNode("conv1_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(2) == g2->getNode("conv1_b")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(1) == g2->getNode("conv2_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(2) == g2->getNode("conv2_b")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(1) == g2->getNode("conv3_w")->getOperator()->getOutput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(2) == g2->getNode("conv3_b")->getOperator()->getOutput(0));
+        REQUIRE(dataProvider->getOperator()->getRawOutput(0) == g2->getNode("conv1")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawInput(1) == g2->getNode("conv1_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawInput(2) == g2->getNode("conv1_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawInput(1) == g2->getNode("conv2_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawInput(2) == g2->getNode("conv2_b")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getRawInput(1) == g2->getNode("conv3_w")->getOperator()->getRawOutput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getRawInput(2) == g2->getNode("conv3_b")->getOperator()->getRawOutput(0));
     }
 }
 
@@ -753,10 +753,10 @@ TEST_CASE("[core/graph] GraphView(insertParent)") {
         std::set<NodePtr> expectedConv1Children = {conv3, newConv};
         std::set<NodePtr> expectedNewConvChildren = {conv2};
 
-        REQUIRE(conv1->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) == newConv->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) != conv2->getOperator()->getInput(0));
-        REQUIRE(newConv->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == conv3->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == newConv->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) != conv2->getOperator()->getRawInput(0));
+        REQUIRE(newConv->getOperator()->getRawOutput(0) == conv2->getOperator()->getRawInput(0));
         REQUIRE((newConv->getChildren()) == expectedNewConvChildren);
         REQUIRE((conv1->getChildren()) == expectedConv1Children);
 
@@ -765,11 +765,11 @@ TEST_CASE("[core/graph] GraphView(insertParent)") {
         std::set<NodePtr> expectedConv1Children2 = {newConv};
         std::set<NodePtr> expectedNewConvChildren2 = {conv2, conv3};
 
-        REQUIRE(conv1->getOperator()->getOutput(0) != conv3->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) == newConv->getOperator()->getInput(0));
-        REQUIRE(conv1->getOperator()->getOutput(0) != conv2->getOperator()->getInput(0));
-        REQUIRE(newConv->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
-        REQUIRE(newConv->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) != conv3->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) == newConv->getOperator()->getRawInput(0));
+        REQUIRE(conv1->getOperator()->getRawOutput(0) != conv2->getOperator()->getRawInput(0));
+        REQUIRE(newConv->getOperator()->getRawOutput(0) == conv2->getOperator()->getRawInput(0));
+        REQUIRE(newConv->getOperator()->getRawOutput(0) == conv3->getOperator()->getRawInput(0));
         REQUIRE((newConv->getChildren()) == expectedNewConvChildren2);
         REQUIRE((conv1->getChildren()) == expectedConv1Children2);
 
diff --git a/unit_tests/graph/Test_get.cpp b/unit_tests/graph/Test_get.cpp
index afd1f42ee9f5d6cd668dd5cab82172cdc298e149..7b396f22bbdedf3ae54b1e1cb78644de6e4a8056 100644
--- a/unit_tests/graph/Test_get.cpp
+++ b/unit_tests/graph/Test_get.cpp
@@ -23,15 +23,15 @@
 using namespace Aidge;
 TEST_CASE("get Delta") {
 
-    
+
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 1, 1, "c2");
-        std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 1, 1, "c3");
-        std::shared_ptr<Node> conv3_5 = GenericOperator("Conv", 1, 1, 1, "c3.5");
-        std::shared_ptr<Node> conv4 = GenericOperator("Conv", 1, 1, 1, "c4");
-        std::shared_ptr<Node> conv5 = GenericOperator("Conv", 1, 1, 1, "c5");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+        std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 0, 1, "c3");
+        std::shared_ptr<Node> conv3_5 = GenericOperator("Conv", 1, 0, 1, "c3.5");
+        std::shared_ptr<Node> conv4 = GenericOperator("Conv", 1, 0, 1, "c4");
+        std::shared_ptr<Node> conv5 = GenericOperator("Conv", 1, 0, 1, "c5");
 
         g1->add(conv);
         g1->addChild(conv1, "c");
diff --git a/unit_tests/graphMatching/Test_GRegex.cpp b/unit_tests/graphMatching/Test_GRegex.cpp
deleted file mode 100644
index 2c5907d82e7c5b1d32f1fb38493c7333b68f8731..0000000000000000000000000000000000000000
--- a/unit_tests/graphMatching/Test_GRegex.cpp
+++ /dev/null
@@ -1,318 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <iostream>
-#include <map>
-#include <memory>
-#include <vector>
-#include <utility>
-#include <cassert>
-
-#include <catch2/catch_test_macros.hpp>
-//test
-#include "aidge/graphmatching/GRegex.hpp"
-#include "aidge/graphmatching/StmFactory.hpp"
-#include "aidge/graphmatching/SeqStm.hpp"
-#include "aidge/graphmatching/NodeRegex.hpp"
-#include "aidge/graphmatching/Match.hpp"
-//use
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/GenericOperator.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/graph/GraphView.hpp"
-
-using namespace Aidge;
-
-TEST_CASE("Create good init GRegex", "[GRegex]") {
-    // init all input for GRegex
-    // Nodes Regex map : std::map<std::string,NodeRegex*>& nodesRegex
-    // Sequential Regex vector : std::vector<std::string>& seqRegexps
-
-    // init the Nodes Regex map 
-    std::map<std::string,NodeRegex*> nodesRegex ;
-    std::vector<std::string> nodeTypeKey {"A","B","C"};
-    for (const std::string& key : nodeTypeKey) {
-        nodesRegex[key] = new NodeRegex(key);
-    }
-
-    // init the Sequential Regex vector 
-    std::vector<std::string> seqRegex;
-    seqRegex.push_back("A->B;");
-
-    // Instanciate a GRegex
-    GRegex GReg(nodesRegex, seqRegex);
-
-    // Perform tests
-    REQUIRE(GReg.getStmInit().size() == 1);
-    REQUIRE(GReg.getStmFab().getNumberOfStm() == 1);
-
-    for (const std::string& key : nodeTypeKey) {
-        delete nodesRegex[key];
-    }
-}
-
-
-TEST_CASE("Function matchFromStartNodes | One Match of Nodes sequence", "[GRegex]") {
-    // init all input for GRegex
-    // Nodes Regex map : std::map<std::string,NodeRegex*>& nodesRegex
-    // Sequential Regex vector : std::vector<std::string>& seqRegexps
-
-    // init the Nodes Regex map 
-    std::map<std::string,NodeRegex*> nodesRegex ;
-    std::vector<std::string> nodeTypeKey {"Conv","BN","ReLU"};
-    for (const std::string& key : nodeTypeKey) {
-        nodesRegex[key] = new NodeRegex(key); 
-    }
-    // init the Sequential Regex vector 
-    std::vector<std::string> seqRegex;
-    seqRegex.push_back("Conv->BN->ReLU;");
-
-    // Instanciate a GRegex
-    GRegex GReg(nodesRegex, seqRegex);
-
-    std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-    std::shared_ptr<Node> Conv1 = GenericOperator("Conv", 1, 1, 1);
-    std::shared_ptr<Node> BN1 = GenericOperator("BN", 1, 1, 1);
-    std::shared_ptr<Node> ReLU1 = GenericOperator("ReLU", 1, 1, 1);
-    std::shared_ptr<Node> Random = GenericOperator("Random", 1, 1, 1);
-    std::shared_ptr<Node> Random2 = GenericOperator("Random2", 1, 1, 1);
-
-
-    g1->add(Conv1);
-    g1->addChild(BN1, Conv1);
-    g1->addChild(ReLU1, BN1);
-    g1->addChild(Random, ReLU1);
-    //g1->addChild(BN1, Random2);
-
-    std::vector<std::shared_ptr<Node>> startNodes1;
-    std::set<std::shared_ptr<Node>> result;
-
-    startNodes1.push_back(Conv1);
-    result = GReg.matchFromStartNodes(startNodes1, g1);
-
-    std::set<std::shared_ptr<Node>> true_result;
-    true_result.insert(Conv1);
-    true_result.insert(BN1);
-    true_result.insert(ReLU1);
-
-    // Perform tests
-    REQUIRE(result == true_result);
-
-    for (const std::string& key : nodeTypeKey) {
-        delete nodesRegex[key];
-    }
-}
-
-TEST_CASE("Function matchFromStartNodes | One Match of parallel branches ", "[GRegex]") {
-    // init all input for GRegex
-    // Nodes Regex map : std::map<std::string,NodeRegex*>& nodesRegex
-    // Sequential Regex vector : std::vector<std::string>& seqRegexps
-
-    // init the Nodes Regex map 
-    std::map<std::string,NodeRegex*> nodesRegex ;
-    std::vector<std::string> nodeTypeKey {"Add","FC","Conv"};
-    for (const std::string& key : nodeTypeKey) {
-        nodesRegex[key] = new NodeRegex(key); 
-    }
-
-    // init the Sequential Regex vector 
-    std::vector<std::string> seqRegex;
-    seqRegex.push_back("Add#->Conv;");
-    seqRegex.push_back("Add#->FC;");
-
-    // Instanciate a GRegex
-    GRegex GReg(nodesRegex, seqRegex);
-
-    // Instanciate a graphView
-    std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-    std::shared_ptr<Node> Random0 = GenericOperator("Random", 1, 1, 1);
-    std::shared_ptr<Node> Add1 = GenericOperator("Add", 1, 1, 1);
-    std::shared_ptr<Node> Conv1 = GenericOperator("Conv", 1, 1, 1);
-    std::shared_ptr<Node> BN1 = GenericOperator("BN", 1, 1, 1);
-    std::shared_ptr<Node> ReLU1 = GenericOperator("ReLU", 1, 1, 1);
-    std::shared_ptr<Node> FC1 = GenericOperator("FC", 1, 1, 1);
-    std::shared_ptr<Node> Random = GenericOperator("Random", 1, 1, 1);
-
-    g1->add(Random0);
-    g1->addChild(Add1, Random0);
-    g1->addChild(Conv1, Add1);
-    g1->addChild(BN1, Conv1);
-    g1->addChild(ReLU1, BN1);
-    g1->addChild(FC1, Add1);
-    g1->addChild(Random, FC1);
-
-    // Test 1 : Find the match
-    std::vector<std::shared_ptr<Node>> startNodes;
-    std::set<std::shared_ptr<Node>> result;
-
-    startNodes.push_back(Add1);
-    startNodes.push_back(Add1);
-    result = GReg.matchFromStartNodes(startNodes, g1);
-
-    std::set<std::shared_ptr<Node>> true_result;
-    true_result.insert(Add1);
-    true_result.insert(Conv1);
-    true_result.insert(FC1);
-
-    // Test 2 : Return an empty set when the start nodes are wrong
-    std::vector<std::shared_ptr<Node>> wrong_startNodes;
-    std::set<std::shared_ptr<Node>> wrong_start_result;
-    std::set<std::shared_ptr<Node>> empty_result;
-
-    wrong_startNodes.push_back(Random0);
-    wrong_startNodes.push_back(Random0);
-    wrong_start_result = GReg.matchFromStartNodes(wrong_startNodes, g1);
-
-    // Perform tests
-    REQUIRE(result == true_result);
-    REQUIRE(wrong_start_result == empty_result);
-
-    for (const std::string& key : nodeTypeKey) {
-        delete nodesRegex[key];
-    }
-}
-
-/*
-TEST_CASE("Function matchFromStartNodes | Match a sequence with quantifier ", "[GRegex]") {
-    // init all input for GRegex
-    // Nodes Regex map : std::map<std::string,NodeRegex*>& nodesRegex
-    // Sequential Regex vector : std::vector<std::string>& seqRegexps
-
-    // init the Nodes Regex map 
-    std::map<std::string,NodeRegex*> nodesRegex ;
-    std::vector<std::string> nodeTypeKey {"FC"};
-    for (const std::string& key : nodeTypeKey) {
-        nodesRegex[key] = new NodeRegex(key); 
-    }
-    
-    // init the Sequential Regex vector 
-    std::vector<std::string> seqRegex;
-    seqRegex.push_back("FC+;");
-
-    // Instanciate a GRegex
-    GRegex GReg(nodesRegex, seqRegex);
-
-    
-    // Instanciate a graphView
-    std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-    std::shared_ptr<Node> Random0 = GenericOperator("Random", 1, 1, 1);
-    std::shared_ptr<Node> FC1 = GenericOperator("FC", 1, 1, 1);
-    std::shared_ptr<Node> FC2 = GenericOperator("FC", 1, 1, 1);
-    std::shared_ptr<Node> FC3 = GenericOperator("FC", 1, 1, 1);
-    std::shared_ptr<Node> ReLU1 = GenericOperator("ReLU", 1, 1, 1);
-
-    g1->add(Random0);
-    g1->addChild(FC1, Random0);
-    g1->addChild(FC2, FC1);
-    g1->addChild(FC3, FC2);
-    g1->addChild(ReLU1, FC3);
-
-    // Test 1 : Find the match
-    std::vector<std::shared_ptr<Node>> startNodes;
-    std::set<std::shared_ptr<Node>> result;
-
-    startNodes.push_back(FC1);
-    result = GReg.matchFromStartNodes(startNodes, g1);
-
-    std::set<std::shared_ptr<Node>> true_result;
-    true_result.insert(FC1);
-    true_result.insert(FC2);
-    true_result.insert(FC3);
-
-    // Test 2 : Return an empty set when the start nodes are wrong
-    std::vector<std::shared_ptr<Node>> wrong_startNodes;
-    std::set<std::shared_ptr<Node>> wrong_start_result;
-    std::set<std::shared_ptr<Node>> empty_result;
-
-    wrong_startNodes.push_back(Random0);
-    wrong_start_result = GReg.matchFromStartNodes(wrong_startNodes, g1);
-
-    // Perform tests
-    REQUIRE(result == true_result);
-    REQUIRE(wrong_start_result == empty_result);
-}
-*/
-
-TEST_CASE("Function match | ALL matches of Nodes sequence", "[GRegex]") {
-    // init all input for GRegex
-    // Nodes Regex map : std::map<std::string,NodeRegex*>& nodesRegex
-    // Sequential Regex vector : std::vector<std::string>& seqRegexps
-
-    // init the Nodes Regex map 
-    std::map<std::string,NodeRegex*> nodesRegex ;
-    std::vector<std::string> nodeTypeKey {"GEMM"};
-    for (const std::string& key : nodeTypeKey) {
-        nodesRegex[key] = new NodeRegex(key); 
-    }
-
-    // init the Sequential Regex vector
-    std::vector<std::string> seqRegex;
-    seqRegex.push_back("GEMM;");
-
-    // Instanciate a GRegex
-    GRegex GReg(nodesRegex, seqRegex);
-
-    //init the input graph
-    std::shared_ptr<GraphView> graphToMatch = std::make_shared<GraphView>("TestGraph");
-    std::shared_ptr<Node> Random0 = GenericOperator("Random", 1, 1, 1);
-    std::shared_ptr<Node> GEMM1 = GenericOperator("GEMM", 1, 1, 1);
-    std::shared_ptr<Node> ReLU1 = GenericOperator("ReLU", 1, 1, 1);
-    std::shared_ptr<Node> GEMM2 = GenericOperator("GEMM", 1, 1, 1);
-    std::shared_ptr<Node> GEMM3 = GenericOperator("GEMM", 1, 1, 1);
-    std::shared_ptr<Node> ReLU2 = GenericOperator("ReLU", 1, 1, 1);
-    std::shared_ptr<Node> Random = GenericOperator("Random", 1, 1, 1);
-
-    graphToMatch->add(Random0);
-    graphToMatch->addChild(GEMM1, Random0);
-    graphToMatch->addChild(ReLU1, GEMM1);
-    graphToMatch->addChild(GEMM2, ReLU1);
-    graphToMatch->addChild(GEMM3, GEMM2);
-    graphToMatch->addChild(ReLU2, GEMM3);
-    graphToMatch->addChild(Random, ReLU2);
-
-    
-    //std::vector<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>> matchs = GReg.match(graphToMatch);
-    //std::set<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>> matchs = GReg.match(graphToMatch);
-    Match matches = GReg.match(graphToMatch);
-
-    size_t nb = matches.getNbMatch();
-    std::vector<std::vector<NodeTmp>> gm_startnodes = matches.getStartNodes();
-    std::vector<std::set<NodeTmp>> gm_matchnodes = matches.getMatchNodes();
-
-    std::set<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>> matchs;
-
-    for (size_t i = 0; i < nb; ++i) {
-        matchs.insert(std::make_pair(gm_startnodes[i], gm_matchnodes[i]));
-    }
-
-    //std::vector<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>> toMatchs ;
-    std::set<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>> toMatchs ;
-    // Carefull : as the assert is on a vector, the Order of match matters
-    std::vector<NodeTmp> startNode = {GEMM1};
-    std::set<NodeTmp> matchNode = {GEMM1};
-    //toMatchs.push_back(std::make_pair(startNode,matchNode));
-    toMatchs.insert(std::make_pair(startNode,matchNode));
-    
-    std::vector<NodeTmp> startNode2 = {GEMM2};
-    std::set<NodeTmp> matchNode2 = {GEMM2};
-    //toMatchs.push_back(std::make_pair(startNode2,matchNode2));
-    toMatchs.insert(std::make_pair(startNode2,matchNode2));
-    
-    std::vector<NodeTmp> startNode3 = {GEMM3};
-    std::set<NodeTmp> matchNode3 = {GEMM3};
-    //toMatchs.push_back(std::make_pair(startNode3,matchNode3));
-    toMatchs.insert(std::make_pair(startNode3,matchNode3));
-
-    REQUIRE(matchs == toMatchs);
-    REQUIRE(nb == 3);
-}
-
-
diff --git a/unit_tests/graphMatching/Test_NodeRegex.cpp b/unit_tests/graphMatching/Test_NodeRegex.cpp
deleted file mode 100644
index 2866642bf1355f49a451edffec9e1b62c802ae1f..0000000000000000000000000000000000000000
--- a/unit_tests/graphMatching/Test_NodeRegex.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <iostream>
-#include <map>
-#include <memory>
-#include <cassert>
-
-#include <catch2/catch_test_macros.hpp>
-
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/graphmatching/NodeRegex.hpp"
-#include "aidge/operator/GenericOperator.hpp"
-
-
-using namespace Aidge;
-
-TEST_CASE("Create Noderegex", "[Noderegex]") {
-    std::shared_ptr<NodeRegex> nr = std::make_shared<NodeRegex>("conv");
-}
-
-TEST_CASE("Test _is function", "[Noderegex]") {
-    // Create Noderegex with only condition on the name of the Node
-    // Create several operators to pass into Noderegex _is function
-    // Assert Noderegex._is(operators) are correct
-    std::shared_ptr<NodeRegex> nr = std::make_shared<NodeRegex>("Conv");
-
-    std::shared_ptr<Node> Conv = GenericOperator("Conv", 1, 1, 1);
-    std::shared_ptr<Node> FC = GenericOperator("FC", 1, 1, 1);
-
-    REQUIRE(nr->_is(Conv) == true);
-    REQUIRE(nr->_is(FC) == false);
-    REQUIRE(nr->isA("Conv") == true);
-    REQUIRE(nr->isA("FC") == false);
-    
-}
\ No newline at end of file
diff --git a/unit_tests/graphMatching/Test_SeqStm.cpp b/unit_tests/graphMatching/Test_SeqStm.cpp
deleted file mode 100644
index db8662e3329abe153d4a0fb2b3c46b950208d6bc..0000000000000000000000000000000000000000
--- a/unit_tests/graphMatching/Test_SeqStm.cpp
+++ /dev/null
@@ -1,167 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <iostream>
-#include <map>
-#include <memory>
-#include <vector>
-#include <utility>
-#include <cassert>
-
-#include <catch2/catch_test_macros.hpp>
-//test
-#include "aidge/graphmatching/SeqStm.hpp"
-#include "aidge/graphmatching/NodeRegex.hpp"
-//use
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/GenericOperator.hpp"
-#include "aidge/operator/Producer.hpp"
-
-using namespace Aidge;
-
-TEST_CASE("Create good init SeqStm", "[SeqStm]") {
-    //init all iniput for SeqStm
-
-
-    int stmIdx = 0;
-    //matrix that in B->C
-    std::vector<std::vector<int>> transitionMatrix { 
-        { -1, 1, -1 },
-        { -1, -1, 2 },
-        { -1, -1, -1 } };
-
-    //std::cout << transitionMatrix.size() << "\n";
-    // init the nodes Regex map 
-    std::map<std::string,NodeRegex*> nodesRegex ;
-    std::vector<std::string> nodeTypeKey {"A","B","C"};
-    for (const std::string& key : nodeTypeKey) {
-        nodesRegex[key] = new NodeRegex(key);
-    }
-    //
-
-    std::map<NodeTypeKey,int> typeToIdxTransition;
-    std::vector<NodeTypeKey> nodeTypeCommonTag {{"A",""},{"B",""},{"C",""}};
-    //init nodeTypeCommonTag
-    int idx = 0;
-    for (const NodeTypeKey& key : nodeTypeCommonTag) {
-        typeToIdxTransition[key] = idx;
-        idx += 1;
-    }
-
-    int actSt = 0;
-    std::set<NodeTmp> allNodeValidated;
-    std::set<NodeTmp> allNodeTested;
-    std::set<std::pair<NodeTmp,std::string>> allCommonNode;
-    bool stmIsValid =false;
-
-
-    SeqStm stm( 
-        stmIdx,
-        transitionMatrix,
-        nodesRegex,
-        typeToIdxTransition,
-        actSt, 
-        allNodeValidated,
-        allNodeTested,
-        allCommonNode,
-        stmIsValid);
-
-    REQUIRE(stm.getStmIdx() == 0);
-    REQUIRE(stm.isValid() == false);
-    REQUIRE(stm.getAllCommonNode().size()     == 0);
-    REQUIRE(stm.getAllNodeTested().size()     == 0);
-    REQUIRE(stm.getAllNodeValidated().size()  == 0);
-
-    for (const std::string& key : nodeTypeKey) {
-        delete nodesRegex[key];
-    }
-}
-
-TEST_CASE("Test testNode function", "[SeqStm]") {
-
-    int stmIdx = 0;
-    std::map<NodeTypeKey,int> typeToIdxTransition;
-    std::vector<NodeTypeKey> nodeTypeCommonTag {{"A",""},{"B",""},{"C",""}};
-    //init nodeTypeCommonTag
-    int idx = 0;
-    for (const NodeTypeKey& key : nodeTypeCommonTag) {
-        typeToIdxTransition[key] = idx;
-        idx += 1;
-    }
-    //matrix that in B->C
-    std::vector<std::vector<int>> transitionMatrix { 
-        { -1, 1, -1 },
-        { -1, -1, 2 },
-        { -1, -1, -1 } };
-
-    //std::cout << transitionMatrix.size() << "\n";
-    // init the nodes Regex map 
-    std::map<std::string,NodeRegex*> nodesRegex ;
-    std::vector<std::string> nodeTypeKey {"A","B","C"};
-    for (const std::string& key : nodeTypeKey) {
-        nodesRegex[key] = new NodeRegex(key);
-    }
-    //
-    int actSt = 0;
-    std::set<NodeTmp> allNodeValidated;
-    std::set<NodeTmp> allNodeTested;
-    std::set<std::pair<NodeTmp,std::string>> allCommonNode;
-    bool stmIsValid =false;
-
-    SeqStm stm( 
-        stmIdx,
-        transitionMatrix,
-        nodesRegex,
-        typeToIdxTransition,
-        actSt, 
-        allNodeValidated,
-        allNodeTested,
-        allCommonNode,
-        stmIsValid);
-    REQUIRE(stm.getStmIdx() == 0);
-    //test a node 
-    std::shared_ptr<Node> nodeB = GenericOperator("B", 1, 1, 1);
-    std::shared_ptr<Node> nodeC = GenericOperator("C", 1, 1, 1);
-
-    //set use to test the state of the smt
-    std::set<NodeTmp> testAllNodeTested;
-    std::set<NodeTmp> testAllNodeValidated;
-
-    stm.testNode(nodeB);
-    REQUIRE(stm.isValid() == false);
-    REQUIRE(stm.getState() == 1);
-    REQUIRE(stm.isStmBlocked() == false);
-    testAllNodeTested.insert(nodeB);
-    testAllNodeValidated.insert(nodeB);
-    REQUIRE(stm.getAllNodeTested() == testAllNodeTested);
-    REQUIRE(stm.getAllNodeValidated() == testAllNodeValidated);
-
-
-    stm.testNode(nodeC);
-    REQUIRE(stm.isValid() == true);
-    REQUIRE(stm.getState() == 2);
-    REQUIRE(stm.isStmBlocked() == false);
-    testAllNodeTested.insert(nodeC);
-    testAllNodeValidated.insert(nodeC);
-    REQUIRE(stm.getAllNodeTested() == testAllNodeTested);
-    REQUIRE(stm.getAllNodeValidated() == testAllNodeValidated);
-
-    stm.testNode(nodeC);
-    REQUIRE(stm.isValid() == true);
-    REQUIRE(stm.getState() == -1);
-    REQUIRE(stm.isStmBlocked() == true);
-    REQUIRE(stm.getAllNodeTested() == testAllNodeTested);
-    REQUIRE(stm.getAllNodeValidated() == testAllNodeValidated);
-
-    for (const std::string& key : nodeTypeKey) {
-        delete nodesRegex[key];
-    }
-}
\ No newline at end of file
diff --git a/unit_tests/graphMatching/Test_StmFactory.cpp b/unit_tests/graphMatching/Test_StmFactory.cpp
deleted file mode 100644
index 3c66d0fa817cea674de5ab849091290c976e5735..0000000000000000000000000000000000000000
--- a/unit_tests/graphMatching/Test_StmFactory.cpp
+++ /dev/null
@@ -1,204 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <iostream>
-#include <map>
-#include <memory>
-#include <vector>
-#include <utility>
-#include <cassert>
-
-#include <catch2/catch_test_macros.hpp>
-//test
-#include "aidge/graphmatching/StmFactory.hpp"
-#include "aidge/graphmatching/NodeRegex.hpp"
-//use
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/GenericOperator.hpp"
-#include "aidge/operator/Producer.hpp"
-
-using namespace Aidge;
-
-TEST_CASE("Create good init StmFactory", "[StmFactory]") {
-    // init the nodes Regex map 
-    std::map<std::string,NodeRegex*> nodesRegex ;
-    std::vector<std::string> nodeTypeKey {"A","B","C"};
-    for (const std::string& key : nodeTypeKey) {
-        nodesRegex[key] = new NodeRegex(key);
-    }
-    StmFactory stmF(nodesRegex);
-    REQUIRE(stmF.getNumberOfStm() == 0);
-
-    for (const std::string& key : nodeTypeKey) {
-        delete nodesRegex[key];
-    }
-}
-
-TEST_CASE("Test in makeNewStm the getStmIdx StmFactory", "[SeqStm]") {
-
-    std::map<std::string,NodeRegex*> nodesRegex ;
-    std::vector<std::string> nodeTypeKey {"A","B","C"};
-    for (const std::string& key : nodeTypeKey) {
-        nodesRegex[key] = new NodeRegex(key);
-    }
-
-    StmFactory stmF(nodesRegex);
-
-    std::string seq1 = "A->B+->A#;";
-    SeqStm* stm = stmF.makeNewStm(seq1);
-    REQUIRE(stm->getStmIdx() == 0);
-    REQUIRE(stm->isValid() == false);
-    REQUIRE(stm->getAllCommonNode().size()     == 0);
-    REQUIRE(stm->getAllNodeTested().size()     == 0);
-    REQUIRE(stm->getAllNodeValidated().size()  == 0);
-
-    std::string seq2 = "A->B;";
-    SeqStm* stm2 = stmF.makeNewStm(seq2);
-    REQUIRE(stm2->getStmIdx() == 1);
-    REQUIRE(stm2->isValid() == false);
-    REQUIRE(stm2->getAllCommonNode().size()     == 0);
-    REQUIRE(stm2->getAllNodeTested().size()     == 0);
-    REQUIRE(stm2->getAllNodeValidated().size()  == 0);
-
-    //test the number of stm
-    REQUIRE(stmF.getNumberOfStm() == 2);
-
-    for (const std::string& key : nodeTypeKey) {
-        delete nodesRegex[key];
-    }
-}
-
-TEST_CASE("Test in makeNewStm the stm StmFactory", "[SeqStm]") {
-
-    std::map<std::string,NodeRegex*> nodesRegex ;
-    std::vector<std::string> nodeTypeKey {"A","B","C"};
-    for (const std::string& key : nodeTypeKey) {
-        nodesRegex[key] = new NodeRegex(key);
-    }
-
-
-    StmFactory stmF(nodesRegex);
-    std::string seq1 = "B->C;";
-    SeqStm* stm = stmF.makeNewStm(seq1);
-    //test the number of stm
-    REQUIRE(stmF.getNumberOfStm() == 1);
-
-    //std::shared_ptr<Node> nodeB = GenericOperator("B",1,1,1);
-    //std::shared_ptr<Node> nodeC = GenericiOperator("C",1,1,1);
-    std::shared_ptr<Node> nodeB = GenericOperator("B", 1, 1, 1);
-    std::shared_ptr<Node> nodeC = GenericOperator("C", 1, 1, 1);
-    //set use to test the state of the smt
-    std::set<NodeTmp> testAllNodeTested;
-    std::set<NodeTmp> testAllNodeValidated;
-
-    REQUIRE(stm->isValid() == false);
-    REQUIRE(stm->getState() == 0);
-    REQUIRE(stm->isStmBlocked() == false);
-    REQUIRE(stm->getAllNodeTested() == testAllNodeTested);
-    REQUIRE(stm->getAllNodeValidated() == testAllNodeValidated);
-
-    stm->testNode(nodeB);
-    REQUIRE(stm->isValid() == false);
-    REQUIRE(stm->getState() == 1);
-    REQUIRE(stm->isStmBlocked() == false);
-    testAllNodeTested.insert(nodeB);
-    testAllNodeValidated.insert(nodeB);
-    REQUIRE(stm->getAllNodeTested() == testAllNodeTested);
-    REQUIRE(stm->getAllNodeValidated() == testAllNodeValidated);
-
-
-    stm->testNode(nodeC);
-    REQUIRE(stm->isValid() == true);
-    REQUIRE(stm->getState() == 2);
-    REQUIRE(stm->isStmBlocked() == false);
-    testAllNodeTested.insert(nodeC);
-    testAllNodeValidated.insert(nodeC);
-    REQUIRE(stm->getAllNodeTested() == testAllNodeTested);
-    REQUIRE(stm->getAllNodeValidated() == testAllNodeValidated);
-
-    stm->testNode(nodeC);
-    REQUIRE(stm->isValid() == true);
-    REQUIRE(stm->getState() == -1);
-    REQUIRE(stm->isStmBlocked() == true);
-    REQUIRE(stm->getAllNodeTested() == testAllNodeTested);
-    REQUIRE(stm->getAllNodeValidated() == testAllNodeValidated);
-
-    for (const std::string& key : nodeTypeKey) {
-        delete nodesRegex[key];
-    }
-
-}
-
-TEST_CASE("Test in duplicateStm StmFactory", "[SeqStm]") {
-
-    std::map<std::string,NodeRegex*> nodesRegex ;
-    std::vector<std::string> nodeTypeKey {"A","B","C"};
-    for (const std::string& key : nodeTypeKey) {
-        nodesRegex[key] = new NodeRegex(key);
-    }
-
-
-    StmFactory stmF(nodesRegex);
-    std::string seq1 = "B->C;";
-    SeqStm* stm = stmF.makeNewStm(seq1);
-    SeqStm* stmD =  stmF.duplicateStm(stm);
-
-    std::shared_ptr<Node> nodeB = GenericOperator("B", 1, 1, 1);
-    std::shared_ptr<Node> nodeC = GenericOperator("C", 1, 1, 1);
-    //set use to test the state of the smt
-    std::set<NodeTmp> testAllNodeTested;
-    std::set<NodeTmp> testAllNodeValidated;
-
-    //run the stm
-    REQUIRE(stm->isValid() == false);
-    REQUIRE(stm->getState() == 0);
-    REQUIRE(stm->isStmBlocked() == false);
-    REQUIRE(stm->getAllNodeTested() == testAllNodeTested);
-    REQUIRE(stm->getAllNodeValidated() == testAllNodeValidated);
-
-    stm->testNode(nodeB);
-    REQUIRE(stm->isValid() == false);
-    REQUIRE(stm->getState() == 1);
-    REQUIRE(stm->isStmBlocked() == false);
-    testAllNodeTested.insert(nodeB);
-    testAllNodeValidated.insert(nodeB);
-    REQUIRE(stm->getAllNodeTested() == testAllNodeTested);
-    REQUIRE(stm->getAllNodeValidated() == testAllNodeValidated);
-
-
-    stm->testNode(nodeC);
-    REQUIRE(stm->isValid() == true);
-    REQUIRE(stm->getState() == 2);
-    REQUIRE(stm->isStmBlocked() == false);
-    testAllNodeTested.insert(nodeC);
-    testAllNodeValidated.insert(nodeC);
-    REQUIRE(stm->getAllNodeTested() == testAllNodeTested);
-    REQUIRE(stm->getAllNodeValidated() == testAllNodeValidated);
-
-    stm->testNode(nodeC);
-    REQUIRE(stm->isValid() == true);
-    REQUIRE(stm->getState() == -1);
-    REQUIRE(stm->isStmBlocked() == true);
-    REQUIRE(stm->getAllNodeTested() == testAllNodeTested);
-    REQUIRE(stm->getAllNodeValidated() == testAllNodeValidated);
-
-    //check if stmD not move
-    REQUIRE(stmD->isValid() == false);
-    REQUIRE(stmD->getState() == 0);
-    REQUIRE(stmD->isStmBlocked() == false);
-    REQUIRE(stmD->getAllNodeTested().size() == 0);
-    REQUIRE(stmD->getAllNodeValidated().size() == 0);
-
-    for (const std::string& key : nodeTypeKey) {
-        delete nodesRegex[key];
-    }
-}
-
diff --git a/unit_tests/graphRegex/Test_Fsm.cpp b/unit_tests/graphRegex/Test_Fsm.cpp
index e5950f21b323f07b380ae95f70637ca48a173481..c011a50455e9e21f3df66c3ed46a835bed5346b3 100644
--- a/unit_tests/graphRegex/Test_Fsm.cpp
+++ b/unit_tests/graphRegex/Test_Fsm.cpp
@@ -14,10 +14,12 @@ using namespace Aidge;
 TEST_CASE("matchFSM", "FsmEdge") {
 
    
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
-        FsmEdgeUnique EdgeToTest(nodeA,nodeB,toTest);
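+    // Note: ConditionalInterpreter now takes the node-test key ("A") as its
+    // first argument, ahead of the condition expression.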
+    std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
+    std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
+    std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("A","true==true");
+    FsmEdgeUnique EdgeToTest(nodeA,nodeB,toTest);
 
     SECTION("FsmEdgeUnique constructor") {
         REQUIRE(EdgeToTest.getSourceNode() == nodeA);
@@ -28,7 +28,7 @@ TEST_CASE("matchFSM", "FsmEdge") {
     SECTION("FsmEdgeCommon constructor") {
         std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
         std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
+        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("A","true==true");
 
         FsmEdgeCommon EdgeToTest(nodeA,nodeB,toTest,"A");
 
@@ -40,7 +40,7 @@ TEST_CASE("matchFSM", "FsmEdge") {
     SECTION("FsmEdgeRef constructor") {
         std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
         std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
+        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("A","true==true");
 
         FsmEdgeRef EdgeToTest(nodeA,nodeB,0,-1);
 
@@ -52,7 +52,7 @@ TEST_CASE("matchFSM", "FsmEdge") {
     SECTION("FsmEdgeEmpty constructor") {
         std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
         std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
+        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("A","true==true");
 
         FsmEdgeEmpty EdgeToTest(nodeA,nodeB);
 
@@ -65,9 +65,9 @@ TEST_CASE("matchFSM", "FsmEdge") {
     SECTION("FsmEdgeFactory"){
 
     std::map<std::string, std::shared_ptr<ConditionalInterpreter>> allTest = {
-        {"A",std::make_shared<ConditionalInterpreter>("true==true")},
-        {"B",std::make_shared<ConditionalInterpreter>("true==true")},
-        {"C",std::make_shared<ConditionalInterpreter>("true==true")}
+        {"A",std::make_shared<ConditionalInterpreter>("A","true==true")},
+        {"B",std::make_shared<ConditionalInterpreter>("B","true==true")},
+        {"C",std::make_shared<ConditionalInterpreter>("C","true==true")}
     };
 
 // make(std::shared_ptr<FsmNode> source, std::shared_ptr<FsmNode> dest, 
@@ -103,11 +103,11 @@ TEST_CASE("matchFSM", "FsmEdge") {
         std::shared_ptr<FsmNode>  nodeC = std::make_shared<FsmNode>(false,true);
 
         //make the edges
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
+        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("A","true==true");
         std::shared_ptr<FsmEdge> edgeAB =  std::make_shared<FsmEdgeUnique>(nodeA,nodeB,toTest);
         std::shared_ptr<FsmEdge> edgeBC =  std::make_shared<FsmEdgeUnique>(nodeB,nodeC,toTest);
  
-        std::shared_ptr<FsmGraph> graph =  std::make_shared<FsmGraph>();
+        std::shared_ptr<FsmGraph> graph =  std::make_shared<FsmGraph>("");
 
         graph->addEdge(edgeAB);
         graph->addEdge(edgeBC);
@@ -120,7 +120,7 @@ TEST_CASE("matchFSM", "FsmEdge") {
 
     SECTION("graph merge") {
 
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
+        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("A","true==true");
 
         //make the nodes 
         std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(false,true);
@@ -132,7 +132,7 @@ TEST_CASE("matchFSM", "FsmEdge") {
         std::shared_ptr<FsmEdge> edgeAB =  std::make_shared<FsmEdgeUnique>(nodeA,nodeB,toTest);
         std::shared_ptr<FsmEdge> edgeBC =  std::make_shared<FsmEdgeUnique>(nodeB,nodeC,toTest);
  
-        std::shared_ptr<FsmGraph> graph =  std::make_shared<FsmGraph>();
+        std::shared_ptr<FsmGraph> graph =  std::make_shared<FsmGraph>("");
         graph->addEdge(edgeAB);
         graph->addEdge(edgeBC);
 
@@ -149,7 +149,7 @@ TEST_CASE("matchFSM", "FsmEdge") {
         std::shared_ptr<FsmEdge> edge2AB =  std::make_shared<FsmEdgeUnique>(node2A,node2B,toTest);
         std::shared_ptr<FsmEdge> edge2BC =  std::make_shared<FsmEdgeUnique>(node2B,node2C,toTest);
  
-        std::shared_ptr<FsmGraph> graph2 =  std::make_shared<FsmGraph>();
+        std::shared_ptr<FsmGraph> graph2 =  std::make_shared<FsmGraph>("");
 
 
         graph2->addEdge(edge2AB);
@@ -184,7 +184,7 @@ TEST_CASE("matchFSM", "FsmEdge") {
 //         std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
 //         std::shared_ptr<FsmEdgeUnique> edge =  std::make_shared<FsmEdgeUnique>(nodeA,nodeB,toTest);
  
-//         std::shared_ptr<FsmGraph> graph =  std::make_shared<FsmGraph>();
+//         std::shared_ptr<FsmGraph> graph =  std::make_shared<FsmGraph>("");
 
 //         graph->addEdge(edge);
         
diff --git a/unit_tests/graphRegex/Test_FsmMatch.cpp b/unit_tests/graphRegex/Test_FsmMatch.cpp
index 1fe75be1a47033f75af7ccc4dc5202774444cd10..008251feaac9d2dbe21aae3dfc7ebaa69e828ae7 100644
--- a/unit_tests/graphRegex/Test_FsmMatch.cpp
+++ b/unit_tests/graphRegex/Test_FsmMatch.cpp
@@ -14,14 +14,14 @@ using namespace Aidge;
 TEST_CASE("FsmMatch") {
 
     SECTION("Construction") {
-        std::map<std::string,std::shared_ptr<ConditionalInterpreter>> allTest = {
-            {"A",std::make_shared<ConditionalInterpreter>("isConv($)==true")},
-            {"B",std::make_shared<ConditionalInterpreter>("isConv($)==true")},
-            {"C",std::make_shared<ConditionalInterpreter>("true==true")}
+        std::vector<std::shared_ptr<ConditionalInterpreter>> allTest = {
+            std::make_shared<ConditionalInterpreter>("A","isConv($)==true"),
+            std::make_shared<ConditionalInterpreter>("B","isConv($)==true"),
+            std::make_shared<ConditionalInterpreter>("C","true==true")
         };
 
-        allTest["A"]->insertLambda("isConv",+[](NodePtr NodeOp){return NodeOp->type() == "Conv";});
-        allTest["B"]->insertLambda("isConv",+[](NodePtr NodeOp){return NodeOp->type() == "Conv";});
+        allTest[0]->insertLambda("isConv",+[](NodePtr NodeOp){return NodeOp->type() == "Conv";});
+        allTest[1]->insertLambda("isConv",+[](NodePtr NodeOp){return NodeOp->type() == "Conv";});
 
         std::shared_ptr<GraphFsmInterpreter>  fsmGenerator = std::make_shared<GraphFsmInterpreter>("A->A",allTest);
         std::shared_ptr<FsmGraph> fsm = fsmGenerator->interpret();
@@ -34,30 +34,32 @@ TEST_CASE("FsmMatch") {
 
 
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
 
         g1->add(conv);
         g1->addChild(conv1, "c");
 
 
-        REQUIRE(allTest["A"]->test(conv) == true);
-        REQUIRE(allTest["B"]->test(conv) == true);
+        REQUIRE(allTest[0]->test(conv) == true);
+        REQUIRE(allTest[1]->test(conv) == true);
 
         std::vector<std::shared_ptr<Node>> startNodes = {conv};
 
         auto result = fsm->test(startNodes);
 
-        REQUIRE( result->getBiggerSolution() == std::set<NodePtr>{conv,conv1});
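+        // fsm->test() now returns a vector of solutions; result[0]->getAll()
+        // replaces the previous result->getBiggerSolution() accessor.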
+        REQUIRE( result[0]->getAll() == std::set<NodePtr>{conv,conv1});
     }
 
 
     SECTION("2 branche graph"){
 
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
-        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
-        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
-        std::shared_ptr<Node> conv2 = GenericOperator("Fc", 1, 1, 1, "c2");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Fc", 1, 0, 1, "c2");
 
         g1->add(conv);
         g1->addChild(conv1,conv);
@@ -70,19 +70,20 @@ TEST_CASE("FsmMatch") {
 
         /////////////
 
-        std::map<std::string,std::shared_ptr<ConditionalInterpreter>> allTest = {
-            {"A",std::make_shared<ConditionalInterpreter>("isConv($)==true")},
-            {"B",std::make_shared<ConditionalInterpreter>("isFc($)==true")}
+        std::vector<std::shared_ptr<ConditionalInterpreter>> allTest = {
+            std::make_shared<ConditionalInterpreter>("A","isConv($)==true"),
+            std::make_shared<ConditionalInterpreter>("B","isFc($)==true")
         };
-        allTest["A"]->insertLambda("isConv",+[](NodePtr NodeOp){return NodeOp->type() == "Conv";});
-        allTest["B"]->insertLambda("isFc",+[](NodePtr NodeOp){return NodeOp->type() == "Fc";});
+        allTest[0]->insertLambda("isConv",+[](NodePtr NodeOp){return NodeOp->type() == "Conv";});
+        allTest[1]->insertLambda("isFc",+[](NodePtr NodeOp){return NodeOp->type() == "Fc";});
 
         std::shared_ptr<GraphFsmInterpreter>  fsmGenerator = std::make_shared<GraphFsmInterpreter>("A#->A; A#->B",allTest);
         std::shared_ptr<FsmGraph> fsm = fsmGenerator->interpret();
 
         std::vector<std::shared_ptr<Node>> startNodes = {conv,conv};
         auto result = fsm->test(startNodes);
-        REQUIRE( result->getBiggerSolution() == std::set<NodePtr>{conv,conv1,conv2});
+
+        REQUIRE( result[0]->getAll() == std::set<NodePtr>{conv,conv1,conv2});
 
     }
 
diff --git a/unit_tests/graphRegex/Test_GraphFsmInterpreter.cpp b/unit_tests/graphRegex/Test_GraphFsmInterpreter.cpp
index 9ce090506c9a61abd928b3ae590ee838afb05999..e789677d44efa68071017a9832fa01b5ed340f75 100644
--- a/unit_tests/graphRegex/Test_GraphFsmInterpreter.cpp
+++ b/unit_tests/graphRegex/Test_GraphFsmInterpreter.cpp
@@ -8,10 +8,10 @@ using namespace Aidge;
 TEST_CASE("GraphFsmInterpreter", "GraphFsmInterpreter") {
 
     SECTION("Construction") {
-        std::map<std::string,std::shared_ptr<ConditionalInterpreter>> allTest = {
-            {"A",std::make_shared<ConditionalInterpreter>("true==true")},
-            {"B",std::make_shared<ConditionalInterpreter>("true==true")},
-            {"C",std::make_shared<ConditionalInterpreter>("true==true")}
+        std::vector<std::shared_ptr<ConditionalInterpreter>> allTest = {
+            std::make_shared<ConditionalInterpreter>("A","true==true"),
+            std::make_shared<ConditionalInterpreter>("B","true==true"),
+            std::make_shared<ConditionalInterpreter>("C","true==true")
         };
 
         //GraphFsmInterpreter("A->B",allTest);
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..924aac79ea8492f6ea0f2cd4d93676876c5a8331
--- /dev/null
+++ b/unit_tests/graphRegex/Test_GraphRegex.cpp
@@ -0,0 +1,185 @@
+
+#include <catch2/catch_test_macros.hpp>
+#include "aidge/graphRegex/GraphRegex.hpp"
+
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/recipies/Recipies.hpp"
+
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("GraphRegexUser") {
+
+    SECTION("INIT") {
+
+        const std::string query = "Conv->FC";
+
+        std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
+
+        std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> fc = GenericOperator("FC", 1, 0, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+        std::shared_ptr<Node> fc2 = GenericOperator("FC", 1, 0, 1, "c3");
+
+        g1->add(conv);
+        g1->addChild(fc, "c");
+        g1->addChild(conv2, "c1");
+        g1->addChild(fc2, "c2");
+
+
+        sut->setKeyFromGraph(g1);
+        sut->addQuery(query);
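+        // Each MatchSolution is expected to carry the query it satisfied, its
+        // start nodes, and the nodes bound to each key; both Conv->FC chains
+        // in g1 should therefore be matched below.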
+
+        for (const auto& solution : sut->match(g1)) {
+
+            REQUIRE(solution->getQuery() == query);
+            if (solution->getStartNode() == std::vector<NodePtr>{conv}) {
+                REQUIRE(solution->at("Conv") == std::set<NodePtr>{conv});
+                REQUIRE(solution->at("FC") == std::set<NodePtr>{fc});
+            } else if (solution->getStartNode() == std::vector<NodePtr>{conv2}) {
+                REQUIRE(solution->at("Conv") == std::set<NodePtr>{conv2});
+                REQUIRE(solution->at("FC") == std::set<NodePtr>{fc2});
+            }
+        }
+        //REQUIRE( sut->match(g1)[1]->getAll() == std::set<NodePtr>{conv,fc});
+
+    }
+
+   SECTION("2 query") {
+        std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
+
+        std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+        std::shared_ptr<Node> conv3 = GenericOperator("Conv", 1, 0, 1, "c3");
+
+        g1->add(conv);
+        g1->addChild(conv1, "c");
+        g1->addChild(conv2, "c1");
+        g1->addChild(conv3, "c2");
+
+
+        sut->setKeyFromGraph(g1);
+
+        const std::string query = "Conv->Conv";
+        const std::string query2 = "Conv->FC";
+
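+        // setNodeKey binds the "FC" key to a conditional expression evaluated
+        // against each node, here matching on the node type.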
+        sut->setNodeKey("FC","getType($) =='FC'");
+
+        sut->addQuery(query);
+        sut->addQuery(query2);
+
+
+        for (const auto& solution : sut->match(g1)) {
+            REQUIRE(solution->getQuery() == query);
+        }
+
+    }
+
+
+   SECTION("Not define node Test") {
+
+        //test if the FC is not define only match query not query2
+        std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
+
+        std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
+        std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
+        std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 0, 1, "c1");
+        std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
+        std::shared_ptr<Node> conv3 = GenericOperator("FC", 1, 0, 1, "c3");
+
+        g1->add(conv);
+        g1->addChild(conv1, "c");
+        g1->addChild(conv2, "c1");
+        g1->addChild(conv3, "c2");
+
+
+        //sut->setKeyFromGraph(g1);
+
+        const std::string query = "Conv->Conv";
+        const std::string query2 = "Conv->FC";
+
+        sut->setNodeKey("Conv","getType($) =='Conv'");
+
+        sut->addQuery(query);
+        sut->addQuery(query2);
+
+
+        for (const auto& solution : sut->match(g1)) {
+            REQUIRE(solution->getQuery() == query);
+        }
+
+    }
+
+
+    SECTION("Applied Recipes"){
+
+      // generate the original GraphView
+        auto matmul0 = MatMul(5, 5, "matmul0");
+        auto add0 = Add(2, "add0");
+        auto matmul1 = MatMul(5, 5, "matmul1");
+        auto add1 = Add(2, "add1");
+
+        auto b0 = Producer({5}, "B0");
+        auto w0 = Producer({5, 5}, "W0");
+        auto b1 = Producer({5}, "B1");
+        auto w1 = Producer({5,5},"W1");
+        auto input = Producer({2,5}, "input");
+
+        input->addChild(matmul0, 0, 0);
+        w0->addChild(matmul0, 0, 1);
+
+        matmul0->addChild(add0, 0, 0);
+        b0->addChild(add0, 0, 1);
+
+        add0->addChild(matmul1, 0, 0);
+        w1->addChild(matmul1, 0, 1);
+
+        matmul1->addChild(add1, 0, 0);
+        b1->addChild(add1, 0, 1);
+
+        auto fc = GenericOperator("FC", 1, 0, 1, "c");
+        auto fl = GenericOperator("Flatten", 1, 0, 1, "c");
+
+
+        auto g = std::make_shared<GraphView>();
+        g->add({matmul0, add0, matmul1, add1, b0, b1, fl, fc});
+
+        std::shared_ptr<GraphRegex> kitchenBook = std::make_shared<GraphRegex>();
+
+        kitchenBook->setNodeKey("Add","getType($) =='Add'");
+        kitchenBook->setNodeKey("MatMul","getType($) =='MatMul'");
+        kitchenBook->setNodeKey("Flatten","getType($) =='Flatten'");
+        kitchenBook->setNodeKey("FC","getType($) =='FC'");
+
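+        // Each query is registered with a recipe callback: matches of
+        // "MatMul->Add" are rewritten by fuseMulAdd and matches of
+        // "Flatten->FC" by removeFlatten when appliedRecipes(g) runs.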
+        kitchenBook->addQuery("MatMul->Add",static_cast<void(*)(std::shared_ptr<MatchSolution>)>(fuseMulAdd));
+        kitchenBook->addQuery("Flatten->FC",static_cast<void(*)(std::shared_ptr<MatchSolution>)>(removeFlatten));
+
+        kitchenBook->appliedRecipes(g);
+
+        std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
+        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1, fc}));
+        // REQUIRE(newNodes.size() == 6);
+
+
+    }
+
+}
\ No newline at end of file
diff --git a/unit_tests/nodeTester/Test_ConditionalInterpreter.cpp b/unit_tests/nodeTester/Test_ConditionalInterpreter.cpp
index 8b502fb546e2f1396b629ebc78bc1bd4d67842e2..ec068358a34567e57c417a664284bd1db76d7a69 100644
--- a/unit_tests/nodeTester/Test_ConditionalInterpreter.cpp
+++ b/unit_tests/nodeTester/Test_ConditionalInterpreter.cpp
@@ -12,19 +12,44 @@ TEST_CASE("ConditionalInterpreter", "ConditionalInterpreter") {
 
     SECTION("custom Lambda") {
 
-        const std::string test = " !toto($) == true " ;
-        ConditionalInterpreter conditionalParser = ConditionalInterpreter(test);
-        conditionalParser.insertLambda("toto",+[](NodePtr NodeOp){return false;});
+
+        ConditionalInterpreter conditionalParserB = ConditionalInterpreter("A"," bad($) == false ");
+        ConditionalInterpreter conditionalParserG = ConditionalInterpreter("A"," good($) == true ");
+
+
+        conditionalParserB.insertLambda("bad",+[](NodePtr NodeOp){return NodeOp->name() == "ZZ";});
+        conditionalParserG.insertLambda("good",+[](NodePtr NodeOp){return NodeOp->name() == "Gop1";});
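+        // For the "Gop1" node below, bad() returns false and good() returns true, so both tests pass.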
         std::shared_ptr<Node> nodeOp = GenericOperator("conv", 0, 0, 0, "Gop1");
 
-        bool result = conditionalParser.test(nodeOp);
-        REQUIRE(result == true);
+        REQUIRE(conditionalParserB.test(nodeOp) == true);
+        REQUIRE(conditionalParserG.test(nodeOp) == true);
+    }
+
+
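+    // This block lives at TEST_CASE scope, so Catch2 re-runs it for every SECTION below.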
+    ConditionalInterpreter conditionalParserT = ConditionalInterpreter("A","isConv($)==true");
+    conditionalParserT.insertLambda("isConv",+[](NodePtr NodeOp){return NodeOp->type() == "Conv";});
+    std::shared_ptr<Node> zz = GenericOperator("conv", 0, 0, 0, "Gop1");
+    conditionalParserT.test(zz);
+
+    SECTION("Lambdas") {
+        ConditionalInterpreter conditionalParser = ConditionalInterpreter("OP_test","getType($) =='Conv' || getType($) =='FC' ");
+
+        std::shared_ptr<Node> A = GenericOperator("Conv", 0, 0, 0, "A");
+        REQUIRE(conditionalParser.test(A) == true);
+
+        std::shared_ptr<Node> B = GenericOperator("FC", 0, 0, 0, "B");
+        REQUIRE(conditionalParser.test(B) == true);
+
+        std::shared_ptr<Node> C = GenericOperator("A", 0, 0, 0, "C");
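+        // Evaluate twice: repeated calls on the same node must return a stable result.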
+        REQUIRE(conditionalParser.test(C) == false);
+        REQUIRE(conditionalParser.test(C) == false);
     }
 
     SECTION("syntax error") {
 
         const std::string test = "'A' == 'A' ,&& ";
-        REQUIRE_THROWS_AS( ConditionalInterpreter(test), std::runtime_error);
+        REQUIRE_THROWS_AS( ConditionalInterpreter("A",test), std::runtime_error);
   
     }
 
@@ -32,7 +57,7 @@ TEST_CASE("ConditionalInterpreter", "ConditionalInterpreter") {
     SECTION("test false int ") {
 
         const std::string test = " 10 == 11 " ;
-        ConditionalInterpreter conditionalParser = ConditionalInterpreter(test);
+        ConditionalInterpreter conditionalParser = ConditionalInterpreter("A",test);
         std::shared_ptr<Node> nodeOp = GenericOperator("conv", 0, 0, 0, "Gop1");
         bool result = conditionalParser.test(nodeOp);
         REQUIRE(result == false);
@@ -40,7 +65,7 @@ TEST_CASE("ConditionalInterpreter", "ConditionalInterpreter") {
 
     SECTION("test true int ") {
         const std::string test = " 42 == 42 " ;
-        ConditionalInterpreter conditionalParser = ConditionalInterpreter(test);
+        ConditionalInterpreter conditionalParser = ConditionalInterpreter("A",test);
         std::shared_ptr<Node> nodeOp = GenericOperator("conv", 0, 0, 0, "Gop1");
         bool result = conditionalParser.test(nodeOp);
         REQUIRE(result == true);
@@ -48,7 +73,7 @@ TEST_CASE("ConditionalInterpreter", "ConditionalInterpreter") {
     
     SECTION("test false str ") {
         const std::string test = " 'toto' == 'Corgi' " ;
-        ConditionalInterpreter conditionalParser = ConditionalInterpreter(test);
+        ConditionalInterpreter conditionalParser = ConditionalInterpreter("A",test);
         std::shared_ptr<Node> nodeOp = GenericOperator("conv", 0, 0, 0, "Gop1");
         bool result = conditionalParser.test(nodeOp);
         REQUIRE(result == false);
@@ -57,7 +82,7 @@ TEST_CASE("ConditionalInterpreter", "ConditionalInterpreter") {
     SECTION("test true str ") {
 
         const std::string test = " 'Corgi' == 'Corgi' " ;
-        ConditionalInterpreter conditionalParser = ConditionalInterpreter(test);
+        ConditionalInterpreter conditionalParser = ConditionalInterpreter("A",test);
         std::shared_ptr<Node> nodeOp = GenericOperator("conv", 0, 0, 0, "Gop1");
         bool result = conditionalParser.test(nodeOp);
         REQUIRE(result == true);
diff --git a/unit_tests/operator/Test_ConvDepthWise_Op.cpp b/unit_tests/operator/Test_ConvDepthWise_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ef68c439d3a3cdf95b7122c1b41bc9fc97311f2d
--- /dev/null
+++ b/unit_tests/operator/Test_ConvDepthWise_Op.cpp
@@ -0,0 +1,68 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+// TEST_CASE("[core/operator] ConvDepthWise_Op(computeReceptiveField)", "[Operator][computeReceptiveFiled][ConvDepthWise]") {
+//     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
+//     auto conv1 = ConvDepthWise({5, 5}, "conv1");         // output dims: {16, 3, 220, 220}
+//     auto conv2 = ConvDepthWise({3, 3}, "conv2");         // output dims: {16, 3, 218, 218}
+//     auto conv3 = ConvDepthWise({2, 2}, "conv3", {2,2});  // output dims: {16, 3, 109, 109}
+//     auto conv4 = ConvDepthWise({1, 1}, "conv4");         // output dims: {16, 3, 109, 109}
+
+//     auto g = std::make_shared<GraphView>("TestGraph");
+
+//     dataProvider->addChild(conv1, 0);
+//     g->add(conv1);
+//     g->addChild(conv2, conv1, 0);
+//     g->addChild(conv3, conv2, 0);
+//     g->addChild(conv4, conv3, 0);
+
+//     g->forwardDims();
+
+//     SECTION("Check individual receptive fields") {
+//         auto res1 = conv1->getOperator()->computeReceptiveField(0, {16,3,10,10});
+//         auto res2 = conv2->getOperator()->computeReceptiveField(conv2->getOperator()->output(0).getIdx({3,1,100,28}), {4,2,30,40});
+//         auto res3 = conv3->getOperator()->computeReceptiveField(0, {1,1,109,109});
+//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->input(0).getIdx({5,0,108,108}), {10,1,1,1});
+
+//         REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
+//         REQUIRE(((res2[0].first == conv2->getOperator()->input(0).getIdx({3,1,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 2, 32, 42}))));
+//         REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 1, 218, 218}))));
+//         REQUIRE(((res4[0].first == conv4->getOperator()->input(0).getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 1, 1, 1}))));
+//     }
+
+//     SECTION("Check receptive field propagation") {
+//         // input:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
+//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->input(0).getIdx({5,0,50,50}), {1,1,1,1});
+//         // conv4 RF:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
+//         auto res3 = conv3->getOperator()->computeReceptiveField(res4[0].first, res4[0].second);
+//         // conv3 RF:  first-{5, 0, 100, 100} dims-{1, 1, 2, 2}
+//         auto res2 = conv2->getOperator()->computeReceptiveField(res3[0].first, res3[0].second);
+//         // conv2 RF:  first-{5, 0, 100, 100} dims-{1, 1, 4, 4}
+//         auto res1 = conv1->getOperator()->computeReceptiveField(res2[0].first, res2[0].second);
+//         // conv1 RF:  first-{5, 0, 100, 100} dims-{1, 1, 8, 8}
+
+//         REQUIRE(((res1[0].first == conv1->getOperator()->input(0).getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 1, 8, 8}))));
+//     }
+// }
+}  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/operator/Test_Conv_Op.cpp b/unit_tests/operator/Test_Conv_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ac667ec5af69dccc3e421530a17aca88018aab09
--- /dev/null
+++ b/unit_tests/operator/Test_Conv_Op.cpp
@@ -0,0 +1,79 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+// TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeReceptiveField][Conv]") {
+//     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
+//     auto conv1 = Conv(3, 32, {5, 5}, "conv1");          // output dims: {16, 32, 220, 220}
+//     auto conv2 = Conv(32, 64, {3, 3}, "conv2");         // output dims: {16, 64, 218, 218}
+//     auto conv3 = Conv(64, 10, {2, 2}, "conv3", {2,2});  // output dims: {16, 10, 109, 109}
+//     auto conv4 = Conv(10, 10, {1, 1}, "conv4");         // output dims: {16, 10, 109, 109}
+
+//     auto g = std::make_shared<GraphView>("TestGraph");
+
+//     dataProvider->addChild(conv1, 0);
+//     g->add(conv1);
+//     g->addChild(conv2, conv1, 0);
+//     g->addChild(conv3, conv2, 0);
+//     g->addChild(conv4, conv3, 0);
+
+//     g->forwardDims();
+
+//     SECTION("Check individual receptive fields") {
+//         auto res1 = conv1->getOperator()->computeReceptiveField(0, {16,32,10,10});
+//         auto res2 = conv2->getOperator()->computeReceptiveField(conv2->getOperator()->output(0).getIdx({3,20,100,28}), {4,20,30,40});
+//         auto res3 = conv3->getOperator()->computeReceptiveField(0, {1,1,109,109});
+//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->output(0).getIdx({5,0,108,108}), {10,10,1,1});
+
+//         REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
+//         REQUIRE(((res2[0].first == conv2->getOperator()->input(0).getIdx({3,0,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 32, 32, 42}))));
+//         REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 64, 218, 218}))));
+//         REQUIRE(((res4[0].first == conv4->getOperator()->input(0).getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 10, 1, 1}))));
+//     }
+
+//     SECTION("Check receptive field propagation") {
+//         // input:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
+//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->output(0).getIdx({5,0,50,50}), {1,1,1,1});
+//         // conv4 RF:  first-{5, 0, 50, 50}  dims-{1, 10, 1, 1}
+//         auto res3 = conv3->getOperator()->computeReceptiveField(res4[0].first, res4[0].second);
+//         // conv3 RF:  first-{5, 0, 100, 100} dims-{1, 64, 2, 2}
+//         auto res2 = conv2->getOperator()->computeReceptiveField(res3[0].first, res3[0].second);
+//         // conv2 RF:  first-{5, 0, 100, 100} dims-{1, 32, 4, 4}
+//         auto res1 = conv1->getOperator()->computeReceptiveField(res2[0].first, res2[0].second);
+//         // conv1 RF:  first-{5, 0, 100, 100} dims-{1, 3, 8, 8}
+
+//         REQUIRE(((res1[0].first == conv1->getOperator()->input(0).getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 3, 8, 8}))));
+
+
+//         // std::cout << "conv1: {";
+//         // std::cout << conv1->getOperator()->input(0).getCoord(res1[0].first)[0] << ", "
+//         //           << conv1->getOperator()->input(0).getCoord(res1[0].first)[1] << ", "
+//         //           << conv1->getOperator()->input(0).getCoord(res1[0].first)[2] << ", "
+//         //           << conv1->getOperator()->input(0).getCoord(res1[0].first)[3] << "} - {";
+//         // std::cout << res1[0].second[0] << ", "
+//         //           << res1[0].second[1] << ", "
+//         //           << res1[0].second[2] << ", "
+//         //           << res1[0].second[3] << "}" << std::endl;
+//     }
+// }
+}  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index aac66429adffc3e5a034325234eff0a010067294..68e2d4d4d5b4fe1b40f83c087eb61c7865d3db75 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -32,20 +32,21 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator]") {
         REQUIRE(microGraph->outputNodes().size() == 1);
         REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv");
         REQUIRE(op->nbInputs() == 3);
-        REQUIRE(op->nbDataInputs() == 1);
+        REQUIRE(op->nbData() == 1);
         REQUIRE(op->nbOutputs() == 1);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>();
         myInput->resize({2,3,5,5});
-        op->getOperator()->associateInput(0,myInput);
-        op->getOperator()->computeOutputDims();
-
-        REQUIRE(op->getOperator()->outputDimsForwarded());
-        REQUIRE(op->getOperator()->getOutput(0)->dims() == std::vector<size_t>({2,3,5,5}));
-        REQUIRE(op->getOperator()->getInput(0) == myInput);
-        REQUIRE(microGraph->getOrderedInputs()[0].first->getOperator()->getInput(0) == myInput);
-        REQUIRE(op->getOperator()->getOutput(0) == (*microGraph->outputNodes().begin())->getOperator()->getOutput(0));
-        
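+        // The tensor accessors now live on OperatorTensor, so downcast the generic Operator first.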
+        std::shared_ptr<OperatorTensor> opTensor = std::static_pointer_cast<OperatorTensor>(op->getOperator());
+        opTensor->associateInput(0,myInput);
+        opTensor->computeOutputDims();
+
+        REQUIRE(opTensor->outputDimsForwarded());
+        REQUIRE(std::static_pointer_cast<Tensor>(opTensor->getRawOutput(0))->dims() == std::vector<size_t>({2,3,5,5}));
+        REQUIRE(std::static_pointer_cast<Tensor>(opTensor->getRawInput(0)) == myInput);
+        REQUIRE(microGraph->getOrderedInputs()[0].first->getOperator()->getRawInput(0) == myInput);
+        REQUIRE(opTensor->getRawOutput(0) == (*microGraph->outputNodes().begin())->getOperator()->getRawOutput(0));
+
         //op->getOperator()->updateConsummerProducer();  // require implementation
         //auto microGraphScheduler = std::dynamic_pointer_cast<MetaOperator_Op>(op->getOperator())->getMicroGraphScheduler();
         //REQUIRE(microGraphScheduler->getStaticScheduling().size() == 2);
diff --git a/unit_tests/operator/Test_Operator.cpp b/unit_tests/operator/Test_Operator.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a050bbc4021b0c70a0d8faf6478eb2bd13ebdb58
--- /dev/null
+++ b/unit_tests/operator/Test_Operator.cpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/Producer.hpp"
+
+namespace Aidge {
+// TEST_CASE("[core/operator] Operator(computeReceptiveField)", "[Operator][computeReceptiveFiled]") {
+//     auto dataProvider1 = Producer({16, 3, 224, 224}, "dataProvider1");
+//     auto dataProvider2 = Producer({16, 3, 224, 224}, "dataProvider2");
+//     auto gen1 = Add(2);
+//     auto gen2 = ReLU();
+
+//     auto g = std::make_shared<GraphView>("TestGraph");
+
+//     dataProvider1->addChild(gen1, 0);
+//     dataProvider2->addChild(gen1, 0);
+//     g->add(gen1);
+//     g->addChild(gen2, gen1, 0);
+
+//     g->forwardDims();
+
+//     SECTION("Check individual receptive fields") {
+//         auto res1 = gen1->getOperator()->computeReceptiveField(0, {16,3,10,10});
+//         auto res2 = gen2->getOperator()->computeReceptiveField(gen2->getOperator()->output(0).getIdx({3,2,100,28}), {1,1,30,40});
+
+//         REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 10, 10}))));
+//         REQUIRE(((res1[1].first == 0) && (res1[1].second == std::vector<DimSize_t>({16, 3, 10, 10}))));
+//         REQUIRE(((res2[0].first == gen2->getOperator()->input(0).getIdx({3,2,100,28})) && (res2[0].second == std::vector<DimSize_t>({1, 1, 30, 40}))));
+//     }
+// }
+}  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/recipies/Test_FuseBatchNorm.cpp b/unit_tests/recipies/Test_FuseBatchNorm.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5d9c02d5582e3c56aba9d374d7087946c7d94bde
--- /dev/null
+++ b/unit_tests/recipies/Test_FuseBatchNorm.cpp
@@ -0,0 +1,70 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+/*
+#include <catch2/catch_test_macros.hpp>
+#include <set>
+
+
+//#include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
+//#include "aidge/backend/cpu/operator/ConvImpl.hpp"
+
+
+
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/BatchNorm.hpp"
+#include "aidge/utils/Recipies.hpp"
+
+//#include "aidge/backend/TensorImpl.hpp"
+//#include "aidge/backend/cpu.hpp"
+//#include "aidge/"
+
+#include <cstddef>
+
+
+namespace Aidge {
+
+
+    TEST_CASE("[FuseBatchNorm] conv") {
+        auto g1 = Sequential({
+            Producer({16, 3, 224, 224}, "dataProvider"),
+            Conv(3, 32, {3, 3}, "conv1"),
+            BatchNorm<2>()
+        });
+
+        g1->setDataType(DataType::Float32);
+        g1->setBackend("cpu");
+        g1->forwardDims();
+
+        // std::set<std::string> availableBackends = Tensor::getAvailableBackends();
+        // if (availableBackends.find("cpu") != availableBackends.end()){
+        //     g1->setBackend("cpu");
+        //     newTensor->getImpl()->setRawPtr(static_cast<T*>(info.ptr));
+        // }else{
+        //     printf("Warning : Could not use aidge_cpu backend, verify you have `import aidge_cpu`\n");
+        // }
+
+        fuseBatchNorm(g1);
+
+        SECTION("Check resulting nodes") {
+            // REQUIRE(g1->getNodes().size() == 2);
+            // REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
+            // REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
+            // REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
+            // REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
+            // REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
+        }
+    }
+
+}
+*/
\ No newline at end of file
diff --git a/unit_tests/recipies/Test_FuseMulAdd.cpp b/unit_tests/recipies/Test_FuseMulAdd.cpp
index da53642055a3146c71a211ad7816f21c9b92d6cd..0c65db98917e33a11f4b7bac678b271b1a10fb94 100644
--- a/unit_tests/recipies/Test_FuseMulAdd.cpp
+++ b/unit_tests/recipies/Test_FuseMulAdd.cpp
@@ -12,26 +12,23 @@
 #include <catch2/catch_test_macros.hpp>
 #include <set>
 
-// #include "aidge/backend/cpu/operator/AddImpl.hpp"
-// #include "aidge/backend/cpu/operator/ConvImpl.hpp"
-// #include "aidge/backend/cpu/operator/FCImpl.hpp"
-// #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Recipies.hpp"
+#include "aidge/recipies/Recipies.hpp"
 
 namespace Aidge {
 
+
 TEST_CASE("[cpu/recipies] FuseMulAdd", "[FuseMulAdd][recipies]") {
     // generate the original GraphView
-    auto matmul0 = MatMul(5, "matmul0");
-    auto add0 = Add<2>("add0");
-    auto matmul1 = MatMul(5, "matmul1");
-    auto add1 = Add<2>("add1");
+    auto matmul0 = MatMul(5, 5, "matmul0");
+    auto add0 = Add(2, "add0");
+    auto matmul1 = MatMul(5, 5, "matmul1");
+    auto add1 = Add(2, "add1");
 
     auto b0 = Producer({5}, "B0");
     auto w0 = Producer({5, 5}, "W0");
@@ -74,4 +71,5 @@ TEST_CASE("[cpu/recipies] FuseMulAdd", "[FuseMulAdd][recipies]") {
 		REQUIRE(((node->type() == "Producer") || (node->type() == "FC")));
 	}
 }
+
 }  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/recipies/Test_HorizontalTiling.cpp b/unit_tests/recipies/Test_HorizontalTiling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c9fb5ed6dc8a5d994ce2d3434a8176c29e418f95
--- /dev/null
+++ b/unit_tests/recipies/Test_HorizontalTiling.cpp
@@ -0,0 +1,200 @@
+// /********************************************************************************
+//  * Copyright (c) 2023 CEA-List
+//  *
+//  * This program and the accompanying materials are made available under the
+//  * terms of the Eclipse Public License 2.0 which is available at
+//  * http://www.eclipse.org/legal/epl-2.0.
+//  *
+//  * SPDX-License-Identifier: EPL-2.0
+//  *
+//  ********************************************************************************/
+
+// #include <catch2/catch_test_macros.hpp>
+// #include <set>
+
+// #include "aidge/graph/GraphView.hpp"
+// #include "aidge/graph/OpArgs.hpp"
+// #include "aidge/operator/Conv.hpp"
+// #include "aidge/operator/ReLU.hpp"
+// #include "aidge/recipies/Recipies.hpp"
+
+
+// namespace Aidge {
+
+// TEST_CASE("[core/recipies] Tiling(transformation)", "[Tiling][Recipies]") {
+
+//     SECTION("Transform a pre-generated GraphView") {
+
+//         SECTION("Simple Node: Conv") {
+//             std::shared_ptr<Node> myConv = Conv(3,4,{3,3}, "myconv");
+//             myConv->getOperator()->setDatatype(DataType::Int32);
+//             myConv->getOperator()->setBackend("cpu");
+//             std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
+//                 {
+//                     {
+//                         {{  0,   1,   2},
+//                          {  3,   4,   5},
+//                          {  6,   7,   8}},
+//                         {{  9,  10,  11},
+//                          { 12,  13,  14},
+//                          { 15,  16,  17}},
+//                         {{ 18,  19,  20},
+//                          { 21,  22,  23},
+//                          { 24,  25,  26}}
+//                     },
+//                     {
+//                         {{ 27,  28,  29},
+//                         { 30,  31,  32},
+//                         { 33,  34,  35}},
+//                         {{ 36,  37,  38},
+//                         { 39,  40,  41},
+//                         { 42,  43,  44}},
+//                         {{ 45,  46,  47},
+//                         { 48,  49,  50},
+//                         { 51,  52,  53}}
+//                     },
+//                     {
+//                         {{ 54,  55,  56},
+//                         { 57,  58,  59},
+//                         { 60,  61,  62}},
+//                         {{ 63,  64,  65},
+//                         { 66,  67,  68},
+//                         { 69,  70,  71}},
+//                         {{ 72,  73,  74},
+//                         { 75,  76,  77},
+//                         { 78,  79,  80}}
+//                     },
+//                     {
+//                         {{ 81,  82,  83},
+//                         { 84,  85,  86},
+//                         { 87,  88,  89}},
+//                         {{ 90,  91,  92},
+//                         { 93,  94,  95},
+//                         { 96,  97,  98}},
+//                         {{ 99, 100, 101},
+//                         {102, 103, 104},
+//                         {105, 106, 107}}
+//                     }
+//                 }
+//             });
+//             std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
+//             std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
+//                 {
+//                     {
+//                         {{  0,   1,   2,   3,   4},
+//                         {  5,   6,   7,   8,   9},
+//                         { 10,  11,  12,  13,  14},
+//                         { 15,  16,  17,  18,  19},
+//                         { 20,  21,  22,  23,  24}},
+
+//                         {{ 25,  26,  27,  28,  29},
+//                         { 30,  31,  32,  33,  34},
+//                         { 35,  36,  37,  38,  39},
+//                         { 40,  41,  42,  43,  44},
+//                         { 45,  46,  47,  48,  49}},
+
+//                         {{ 50,  51,  52,  53,  54},
+//                         { 55,  56,  57,  58,  59},
+//                         { 60,  61,  62,  63,  64},
+//                         { 65,  66,  67,  68,  69},
+//                         { 70,  71,  72,  73,  74}}
+//                     },
+//                     {
+//                         {{ 75,  76,  77,  78,  79},
+//                         { 80,  81,  82,  83,  84},
+//                         { 85,  86,  87,  88,  89},
+//                         { 90,  91,  92,  93,  94},
+//                         { 95,  96,  97,  98,  99}},
+
+//                         {{100, 101, 102, 103, 104},
+//                         {105, 106, 107, 108, 109},
+//                         {110, 111, 112, 113, 114},
+//                         {115, 116, 117, 118, 119},
+//                         {120, 121, 122, 123, 124}},
+
+//                         {{125, 126, 127, 128, 129},
+//                         {130, 131, 132, 133, 134},
+//                         {135, 136, 137, 138, 139},
+//                         {140, 141, 142, 143, 144},
+//                         {145, 146, 147, 148, 149}}
+//                     }
+//                 }
+//             });
+//             std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> {
+//                 {
+//                     {
+//                         {{ 15226,  15577,  15928},
+//                          { 16981,  17332,  17683},
+//                          { 18736,  19087,  19438}},
+
+//                         {{ 37818,  38898,  39978},
+//                          { 43218,  44298,  45378},
+//                          { 48618,  49698,  50778}},
+
+//                         {{ 60426,  62235,  64044},
+//                          { 69471,  71280,  73089},
+//                          { 78516,  80325,  82134}},
+
+//                         {{ 83016,  85554,  88092},
+//                          { 95706,  98244, 100782},
+//                          {108396, 110934, 113472}}
+//                     },
+//                     {
+//                         {{ 41551,  41902,  42253},
+//                          { 43306,  43657,  44008},
+//                          { 45061,  45412,  45763}},
+
+//                         {{118818, 119898, 120978},
+//                          {124218, 125298, 126378},
+//                          {129618, 130698, 131778}},
+
+//                         {{196101, 197910, 199719},
+//                          {205146, 206955, 208764},
+//                          {214191, 216000, 217809}},
+
+//                         {{273366, 275904, 278442},
+//                          {286056, 288594, 291132},
+//                          {298746, 301284, 303822}}
+//                     }
+//                 }
+//             });
+//             myConv->getOperator()->associateInput(0,myInput);
+//             myConv->getOperator()->associateInput(1,myWeights);
+//             myConv->getOperator()->associateInput(2,myBias);
+//             myConv->getOperator()->computeOutputDims();
+
+//             auto g = std::make_shared<GraphView>();
+//             g->add(myConv);
+//             horizontalTiling({myConv}, 3);
+
+//             SequentialScheduler s(g);
+//             s.forward();
+
+//             // myConv->getOperator()->getOutput(0)->print();
+//             REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
+//         }
+//     }
+// }
+// }
+//         // std::shared_ptr<GraphView> g = Sequential({
+//         //     Conv(3, 16, {3,3}, "conv1"),
+//         //     ReLU("relu1"),
+//         //     Conv(16, 32, {1,1}, "conv2"),
+//         //     Conv(32, 16, {1,1}, "conv3"),
+//         //     Conv(16, 10, {3,3}, "conv4"),
+//         //     ReLU("relu2")
+//         // });
+
+//     //     for (auto& individualConv : g->match("Conv")) {
+//     //         auto tiledConv = horizontalTiling(individualConv);
+//     //         g->replace(individualConv, tiledConv);
+//     //     }
+//     // }
+
+//     // SECTION("Create the GraphView with tiled layers") {
+//     //     std::shared_ptr<GraphView> g;
+//     //     g->addChild(horizontalTiling(Conv()))
+//     // }
+
+// // }
+// // } // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/recipies/Test_LabelGraph.cpp b/unit_tests/recipies/Test_LabelGraph.cpp
index 873ad68f3198c6b6adf44d8c7ae31e667c63a18d..e0ba9be6c80ef6109b59458bf52a23120efc7584 100644
--- a/unit_tests/recipies/Test_LabelGraph.cpp
+++ b/unit_tests/recipies/Test_LabelGraph.cpp
@@ -45,9 +45,9 @@ TEST_CASE("[LabelGraph] conv") {
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
         REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
     }
 }
@@ -56,7 +56,7 @@ TEST_CASE("[LabelGraph] deleted node") {
     auto g1 = Sequential({
         Producer({16, 3, 224, 224}, "dataProvider"),
         Conv(3, 32, {3, 3}, "conv1"),
-        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
+        GenericOperator("Dummy_to_be_removed", 1, 0, 1),
         Conv(32, 64, {3, 3}, "conv2"),
         Conv(64, 10, {1, 1}, "conv3", {2, 2})
     });
@@ -74,16 +74,16 @@ TEST_CASE("[LabelGraph] deleted node") {
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
         REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
     }
 
     SECTION("Check dimensions") {
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 222, 222}));
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 220, 220}));
-        REQUIRE(g2->getNode("conv3")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 110, 110}));
+        REQUIRE(std::static_pointer_cast<Tensor>(g2->getNode("conv1")->getOperator()->getRawOutput(0))->dims() == std::vector<DimSize_t>({16, 1, 222, 222}));
+        REQUIRE(std::static_pointer_cast<Tensor>(g2->getNode("conv2")->getOperator()->getRawOutput(0))->dims() == std::vector<DimSize_t>({16, 1, 220, 220}));
+        REQUIRE(std::static_pointer_cast<Tensor>(g2->getNode("conv3")->getOperator()->getRawOutput(0))->dims() == std::vector<DimSize_t>({16, 1, 110, 110}));
     }
 }
 
@@ -91,11 +91,11 @@ TEST_CASE("[LabelGraph] deleted nodes") {
     auto g1 = Sequential({
         Producer({16, 3, 224, 224}, "dataProvider"),
         Conv(3, 32, {3, 3}, "conv1"),
-        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
-        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
-        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
+        GenericOperator("Dummy_to_be_removed", 1, 0, 1),
+        GenericOperator("Dummy_to_be_removed", 1, 0, 1),
+        GenericOperator("Dummy_to_be_removed", 1, 0, 1),
         Conv(32, 64, {3, 3}, "conv2"),
-        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
+        GenericOperator("Dummy_to_be_removed", 1, 0, 1),
         Conv(64, 10, {1, 1}, "conv3")
     });
 
@@ -112,9 +112,9 @@ TEST_CASE("[LabelGraph] deleted nodes") {
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
         REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
     }
 }
@@ -140,15 +140,15 @@ TEST_CASE("[LabelGraph] pooling") {
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
         REQUIRE(g2->getNode("pool1")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("pool1")->getOperator()->getOutput(0) == g2->getNode("pool2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("pool1")->getOperator()->getRawOutput(0) == g2->getNode("pool2")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("pool2")->getOperator()->type() == "MaxPooling");
-        REQUIRE(g2->getNode("pool2")->getOperator()->getOutput(0) == g2->getNode("pool3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("pool2")->getOperator()->getRawOutput(0) == g2->getNode("pool3")->getOperator()->getRawInput(0));
         REQUIRE(g2->getNode("pool3")->getOperator()->type() == "MaxPooling");
     }
 
     SECTION("Check dimensions") {
-        REQUIRE(g2->getNode("pool1")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 223, 223}));
-        REQUIRE(g2->getNode("pool2")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 222, 222}));
-        REQUIRE(g2->getNode("pool3")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 111, 111}));
+        REQUIRE(std::static_pointer_cast<Tensor>(g2->getNode("pool1")->getOperator()->getRawOutput(0))->dims() == std::vector<DimSize_t>({16, 1, 223, 223}));
+        REQUIRE(std::static_pointer_cast<Tensor>(g2->getNode("pool2")->getOperator()->getRawOutput(0))->dims() == std::vector<DimSize_t>({16, 1, 222, 222}));
+        REQUIRE(std::static_pointer_cast<Tensor>(g2->getNode("pool3")->getOperator()->getRawOutput(0))->dims() == std::vector<DimSize_t>({16, 1, 111, 111}));
     }
 }