diff --git a/.gitlab/ci/_global.gitlab-ci.yml b/.gitlab/ci/_global.gitlab-ci.yml
index aab5d745367d22052f82c6e3ef144680a822cd45..94e5658ff6adc8e07036d3d59ea39a68fbddc4bf 100644
--- a/.gitlab/ci/_global.gitlab-ci.yml
+++ b/.gitlab/ci/_global.gitlab-ci.yml
@@ -9,6 +9,14 @@ variables:
   GIT_SSL_NO_VERIFY: 1
   DEBIAN_FRONTEND: noninteractive
 
+# See https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
+workflow:
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+    - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
+      when: never
+    - if: $CI_COMMIT_BRANCH
+
 default:
   image: nvidia/cuda:12.2.0-devel-ubuntu22.04
   before_script:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 40d8837f41bdc0d8dfd7eac1c5960064967f1efb..f8dbe375e217020a4c4570bd67c1b466e6593130 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -6,7 +6,7 @@ file(READ "${CMAKE_SOURCE_DIR}/project_name.txt" project)
 message(STATUS "Project name: ${project}")
 message(STATUS "Project version: ${version}")
 
-# Note : project name is {project} and python module name is also {project} 
+# Note : project name is {project} and python module name is also {project}
 set(module_name _${project}) # target name
 
 
@@ -57,7 +57,7 @@ if (PYBIND)
 
     # Handles Python + pybind11 headers dependencies
     target_link_libraries(${module_name}
-        PUBLIC 
+        PUBLIC
             pybind11::pybind11
         PRIVATE
             Python::Python
@@ -101,8 +101,8 @@ install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
 install(EXPORT ${project}-targets
  FILE "${project}-targets.cmake"
  DESTINATION ${INSTALL_CONFIGDIR}
-#  COMPONENT ${module_name} 
-)  
+#  COMPONENT ${module_name}
+)
 
 #Create a ConfigVersion.cmake file
 include(CMakePackageConfigHelpers)
@@ -136,4 +136,4 @@ export(EXPORT ${project}-targets
 if(TEST)
     enable_testing()
     add_subdirectory(unit_tests)
-endif()
\ No newline at end of file
+endif()
diff --git a/aidge_core/__init__.py b/aidge_core/__init__.py
index ad18a8ef1b23625dcb52951f52c43adc4222c997..c65dcc6cfc4be8825d1213854014718fb7170854 100644
--- a/aidge_core/__init__.py
+++ b/aidge_core/__init__.py
@@ -8,3 +8,4 @@ http://www.eclipse.org/legal/epl-2.0.
 SPDX-License-Identifier: EPL-2.0
 """
 from aidge_core.aidge_core import * # import so generated by PyBind
+from aidge_core.export import ExportNode
diff --git a/aidge_core/export/__init__.py b/aidge_core/export/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..00b44121d68af06171525fdf953bf50e53328421
--- /dev/null
+++ b/aidge_core/export/__init__.py
@@ -0,0 +1 @@
+from .node_export import *
diff --git a/aidge_core/export/node_export.py b/aidge_core/export/node_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..980cb05a5814b7476d64757353e393ad6130218b
--- /dev/null
+++ b/aidge_core/export/node_export.py
@@ -0,0 +1,60 @@
+import aidge_core
+
+from abc import ABC, abstractmethod
+
+
+class ExportNode(ABC):
+    """Abstract class to interface node with export generation.
+    """
+
+    @abstractmethod
+    def __init__(self, aidge_node: aidge_core.Node) -> None:
+        """Create ExportNode and retieve attirubtes from ``aidge_node``:
+
+        - name: aidge Node name
+        - attributes: dictionary of attributes of the aidge Operator linked to the node; attribute names follow the aidge naming convention
+        - parameters: list of parameter nodes, in the same order as the one defined by the aidge operator
+
+        """
+        super().__init__()
+        self.node = aidge_node
+        self.operator = aidge_node.get_operator()
+        self.name = self.node.name()
+        self.attributes = {} # Attributes are automatically fetched from aidge operators
+        if isinstance(self.operator, aidge_core.Attributes):
+            for attr_name in self.operator.get_attrs_name():
+                self.attributes[attr_name] = self.operator.get_attr(attr_name)
+
+        # rename is_leaf ?
+        self.is_last = len(self.node.get_children()) == 0
+
+        self.inputs = []
+        self.outputs = []
+        self.inputs_dims = []
+        self.outputs_dims = []
+
+        for idx, parent_node in enumerate(self.node.get_parents()):
+            self.inputs.append(parent_node)
+            if parent_node is not None:
+                self.inputs_dims.append(self.operator.input(idx).dims())
+            else:
+                self.inputs_dims.append(None)
+
+        for idx, child_node in enumerate(self.node.get_children()):
+            self.outputs.append(child_node)
+
+        # FIXME: dirty hot fix, only output 0 is handled; change it quickly
+        self.outputs_dims.append(self.operator.output(0).dims())
+
+    @abstractmethod
+    def export(self, export_folder:str, list_configs:list):
+        """Define how to export the node definition.
+        """
+        pass
+
+    @abstractmethod
+    def forward(self, list_actions:list):
+        """Define how to generate code to perform a forward pass.
+        """
+        pass
+
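
For context, a concrete exporter subclasses `ExportNode` roughly as sketched below; the subclass name, emitted file name, and generated strings are illustrative assumptions, not part of this patch.

```python
import aidge_core
from aidge_core.export import ExportNode

class ReLUExportNode(ExportNode):
    """Illustrative subclass: the emitted names are hypothetical."""
    def __init__(self, aidge_node: aidge_core.Node) -> None:
        super().__init__(aidge_node)  # fetches name, attributes and dims

    def export(self, export_folder: str, list_configs: list):
        # Emit a per-node configuration header (layout is an assumption).
        list_configs.append(f"{self.name}_config.h")
        return list_configs

    def forward(self, list_actions: list):
        # Emit the call performing this node's forward pass.
        list_actions.append(f"relu_forward({self.name});")
        return list_actions
```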
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index fc60f52274162155f8f891bf86c22c9a13b241f4..7bd1e730a973810db89aa786b52fa05c53c43590 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -102,5 +102,29 @@ class test_operator_binding(unittest.TestCase):
         genOp.get_operator().compute_output_dims()
         self.assertListEqual(genOp.get_operator().output(0).dims(), in_dims)
 
+    def test_set_impl(self):
+
+        class PythonCustomImpl(aidge_core.OperatorImpl):
+            """Dummy implementation to test that C++ call python code
+            """
+            def __init__(self, op: aidge_core.Operator):
+                aidge_core.OperatorImpl.__init__(self, op) # Required to avoid a type error!
+                self.idx = 0
+
+            def forward(self):
+                """Increment idx attribute on forward.
+                """
+                self.idx += 1
+
+        generic_node = aidge_core.GenericOperator("Relu", 1, 1, 1, name="myReLu")
+        generic_op = generic_node.get_operator()
+        customImpl = PythonCustomImpl(generic_op)
+
+        generic_op.forward() # Do nothing, no implementation set
+        generic_op.set_impl(customImpl)
+        generic_op.forward() # Increment idx
+        self.assertEqual(customImpl.idx, 1)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index a44757468eb31a3f6a0ef894298110a81aa798a1..0bbe4edd3899a1cfe243358fb226922a1b350b2f 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -36,19 +36,24 @@
 #include "aidge/operator/Concat.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/Div.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/MetaOperatorDefs.hpp"
+#include "aidge/operator/Mul.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Pad.hpp"
 #include "aidge/operator/Producer.hpp"
+#include "aidge/operator/Pow.hpp"
 #include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/Scaling.hpp"
 #include "aidge/operator/Slice.hpp"
 #include "aidge/operator/Softmax.hpp"
-#include "aidge/operator/Scaling.hpp"
+#include "aidge/operator/Sqrt.hpp"
+#include "aidge/operator/Sub.hpp"
 
 #include "aidge/scheduler/Scheduler.hpp"
 
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 453e30a8636d86794c96723350bff615af090e3e..19f0837504016f38ae96dd852bc6fa41b5ab53ba 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -18,11 +18,13 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Operator;
+
 class OperatorImpl {
 public:
-
-    virtual void forward(){};
-    virtual void backward(){};
+    OperatorImpl(const Operator& op);
+    virtual void forward();
+    virtual void backward();
 
     /**
      * @brief Minimum amount of data from a specific input required by the
@@ -31,13 +33,13 @@ public:
      * @param inputIdx Index of the input analysed.
      * @return std::size_t
      */
-    virtual NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const = 0;
+    virtual NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
     // Amount of input data that cannot be overwritten during the execution.
-    virtual NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const = 0;
+    virtual NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
 
     // Memory required at an output for a given input size.
-    virtual NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const = 0;
+    virtual NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
 
     /**
      * @brief Total amount of consumed data from a specific input.
@@ -45,7 +47,7 @@ public:
      * @param inputIdx Index of the input analysed.
      * @return DimSize_t
      */
-    virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const = 0;
+    virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
 
     /**
      * @brief Total amount of produced data ready to be used on a specific output.
@@ -53,15 +55,20 @@ public:
      * @param outputIdx Index of the output analysed.
      * @return DimSize_t
      */
-    virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const = 0;
+    virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
 
     /**
      * @brief Update the Consummer Producer system by simulating the consumption and production of i/o
      *
      */
-    virtual void updateConsummerProducer() = 0;
+    virtual void updateConsummerProducer();
 
     virtual ~OperatorImpl() = default;
+
+protected:
+    const Operator &mOp;
+    std::vector<NbElts_t> mNbConsumedData;
+    std::vector<NbElts_t> mNbProducedData;
 };
 } // namespace Aidge
 
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index e87f6a3e88c996ecd53aa5ad98bd7733f02f67a9..45c22227006f539ad6778c6bdf56746040fcecdd 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -350,13 +350,20 @@ public:
                         IOIndex_t newParentInputTensorIdx,
                         IOIndex_t newParentOutputTensorIdx);
 
+
     /**
-     * @brief Replace the current GraphView with the set of given Nodes if possible
-     * @param newNodes Set of Nodes.
+     * @brief Replace a set of Nodes in every available GraphView with a new set of Nodes if possible.
+     * Both sets should include all the necessary Producers.
+     * @details Replaced Nodes are removed from every GraphView pointing at them.
+     * The oldNodes set should have a single input/output
+     * Tensor so that the newNodes set can be connected automatically.
+     * @param oldNodes current set of shared_ptr<Node> to replace.
+     * @param newNodes new set of shared_ptr<Node>.
      * @return true
      * @return false
      */
-    bool replaceWith(std::set<NodePtr> newNodes);
+    static bool replace(const std::set<NodePtr>& oldNodes, const std::set<NodePtr>& newNodes);
+
     void updateInputNodes();
     /**
      * @brief Process from zero the set of output Nodes.
@@ -394,6 +401,12 @@ public:
      */
     std::shared_ptr<GraphView> cloneCallback(NodePtr(*cloneNode)(NodePtr)) const;
 
+    /**
+     * @brief Get the sum of the number of free dataInput connections for all inputNodes of the GraphView object.
+     * @return IOIndex_t
+     */
+    IOIndex_t getNbFreeDataInputs() const;
+
 private:
 ///////////////////////////////////////////////////////
 //        TENSOR MANAGEMENT
@@ -405,12 +418,6 @@ private:
      */
     IOIndex_t getNbDataInputs() const;
 
-    /**
-     * @brief Get the sum of the number of free dataInput connection for all inputNodes of the GraphView object.
-     * @return IOIndex_t
-     */
-    IOIndex_t getNbFreeDataInputs() const;
-
     /**
      * @brief Update the set of inputNodes with a new Node, checking if it can be
      * added and removing any Node not part of mInputNode anymore.
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 1d8449ac25cf8c31192da0c350c14cbfa50a48f4..f1d0a39d4bd7dba6990a46d61f7456c03244e44e 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -258,9 +258,7 @@ public:
   }
 
   inline void removeView(const std::shared_ptr<GraphView> &graphPtr) {
-    std::set<std::weak_ptr<GraphView>, weakCompare>::const_iterator viewIt = mViews.cbegin();
-    for (; (viewIt != mViews.cend()) && ((*viewIt).lock() != graphPtr) ; ++viewIt) {}
-    mViews.erase(*viewIt);
+    mViews.erase(graphPtr);
   }
 
   /**
@@ -402,7 +400,7 @@ public:
 
   /**
    * @brief  Get the set of pointers to connected node at a distance of a delta.
-   * @details the recution are cut 
+   * @details the recursion is cut
   * Returns nullptr if nothing is found.
    * @param delta Input delta.
    * @return std::shared_ptr<Node>
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4213f979cf9d675f523a228095edc5606f9412ee
--- /dev/null
+++ b/include/aidge/operator/Div.hpp
@@ -0,0 +1,146 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_DIV_H_
+#define AIDGE_CORE_OPERATOR_DIV_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Div_Op : public Operator,
+    public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> {
+public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Div";
+
+    Div_Op()
+            : Operator(Type)
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Div_Op(const Div_Op& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Div_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Div_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty())
+            mOutput->resize(mInputs[0]->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Div operator.");
+        return *(mInputs[inputIdx].get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx < 2) && "Div Operator has 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Div Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Div_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setBackend(name);
+        mInputs[1]->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Div(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Div_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_DIV_H_ */
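
Mul, Pow, Sqrt and Sub below follow the same template as Div: two data inputs (one for Sqrt), one output, and a factory helper returning a Node. A hedged sketch of composing them from Python, assuming the factories are bound under their C++ names and that `Node.add_child(node, out_id, in_id)` is exposed:

```python
import aidge_core

# (a - b) / sqrt(c), built from the new arithmetic nodes.
sub = aidge_core.Sub("sub")     # computes input0 - input1
sqrt = aidge_core.Sqrt("sqrt")  # single data input
div = aidge_core.Div("div")     # computes input0 / input1

sub.add_child(div, 0, 0)   # sub output -> div input 0
sqrt.add_child(div, 0, 1)  # sqrt output -> div input 1
```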
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 83b9a932633deb822ad86c24b96e6e928b5e2be2..55ccbf1516fa79663d57e1e44bc4017bc5c8b843 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -168,9 +168,20 @@ class GenericOperator_Op
 
     void setBackend(const std::string & /*name*/) override { printf("setBackend: not available yet.\n"); }
     void setDatatype(const DataType & /*datatype*/) override { printf("setDatatype: not available yet.\n"); }
-    void forward() override final { printf("forward: not available yet.\n"); }
-    void backward() override final { printf("backward: not available yet.\n"); }
-
+    void forward() override final {
+        if (mImpl) {
+            mImpl->forward();
+        } else {
+            printf("forward: No implementation is linked.\n");
+        }
+    }
+    void backward() override final {
+        if (mImpl) {
+            mImpl->backward();
+        } else {
+            printf("backward: No implementation is linked.\n");
+        }
+    }
     inline IOIndex_t nbInputs() const noexcept override final { return mNbIn; };
     inline IOIndex_t nbDataInputs() const noexcept override final { return mNbDataIn; };
     inline IOIndex_t nbOutputs() const noexcept override final { return mNbOut; };
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 874ea81778e0b357a4890b6bb052e85fa266216e..bcf47f13cc34132f668ea1ffcb2c91ed6f06f44d 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -26,14 +26,15 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class MaxPoolingAttr { StrideDims, KernelDims };
+enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public Operator,
                 public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
                 public StaticAttributes<MaxPoolingAttr,
                                        std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>> {
+                                       std::array<DimSize_t, DIM>,
+                                       bool> {
 private:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -46,15 +47,18 @@ public:
 
     using Attributes_ = StaticAttributes<MaxPoolingAttr,
                                              std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, DIM>>;
+                                             std::array<DimSize_t, DIM>,
+                                             bool>;
     template <MaxPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
 
     constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
-                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
+                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                            bool ceil_mode = false)
         : Operator(Type),
           Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
-                      attr<MaxPoolingAttr::KernelDims>(kernel_dims)),
+                      attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+                      attr<MaxPoolingAttr::CeilMode>(ceil_mode)),
           mOutput(std::make_shared<Tensor>()) {
         setDatatype(DataType::Float32);
     }
@@ -93,9 +97,16 @@ public:
         if (!mInput->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
+            std::function<float(float)> roundingFunction;
+            if (this->template getAttr<MaxPoolingAttr::CeilMode>()) {
+                roundingFunction = [](float x) { return std::ceil(x); };
+            } else {
+                roundingFunction = [](float x) { return std::floor(x); };
+            }
+
             for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                            std::floor(static_cast<float>(mInput->dims()[dim+2] -
+                                            roundingFunction(static_cast<float>(mInput->dims()[dim+2] -
                                                                     this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
                                             static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
             }
@@ -169,9 +180,10 @@ public:
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                            const std::string& name = "",
-                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                           bool ceil_mode = false) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
-    return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
+    return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, ceil_mode), name);
 }
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
@@ -179,15 +191,16 @@ template <DimSize_t DIM>
 inline std::shared_ptr<Node> MaxPooling(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    bool ceil_mode = false) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
-    return MaxPooling(to_array(kernel_dims), name, stride_dims);
+    return MaxPooling(to_array(kernel_dims), name, stride_dims, ceil_mode);
 }
 }  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"StrideDims", "KernelDims"};
+const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"StrideDims", "KernelDims", "CeilMode"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
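
The new CeilMode attribute only changes the rounding step of `computeOutputDims()`. The short sketch below mirrors that formula in Python to show its effect on an odd-sized input:

```python
import math

def maxpool_out_dim(in_dim: int, kernel: int, stride: int, ceil_mode: bool) -> int:
    # Mirrors MaxPooling_Op::computeOutputDims() for one spatial dimension.
    rounding = math.ceil if ceil_mode else math.floor
    return 1 + int(rounding((in_dim - kernel) / stride))

assert maxpool_out_dim(5, 2, 2, ceil_mode=False) == 2  # 1 + floor(1.5)
assert maxpool_out_dim(5, 2, 2, ceil_mode=True) == 3   # 1 + ceil(1.5)
```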
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 6da76c930a3f08358c8c09ce75e66109370e292a..73feb134837787ae8d0d280dd723182c9d21438b 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -115,11 +115,12 @@ template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
+                                  bool ceil_mode = false)
 {
     auto graph = Sequential({
         Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
-        MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims)
+        MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, ceil_mode)
     });
 
     return MetaOperator("PaddedMaxPooling", graph, name);
@@ -131,9 +132,10 @@ inline std::shared_ptr<Node> PaddedMaxPooling(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
+    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
+    bool ceil_mode = false)
 {
-    return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+    return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims, ceil_mode);
 }
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4ea79fe52622b22f8ea8fbd9191d50d45e26acac
--- /dev/null
+++ b/include/aidge/operator/Mul.hpp
@@ -0,0 +1,146 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_MUL_H_
+#define AIDGE_CORE_OPERATOR_MUL_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Mul_Op : public Operator,
+    public Registrable<Mul_Op, std::string, std::unique_ptr<OperatorImpl>(const Mul_Op&)> {
+public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Mul";
+
+    Mul_Op()
+            : Operator(Type)
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Mul_Op(const Mul_Op& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Mul_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Mul_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty())
+            mOutput->resize(mInputs[0]->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Mul operator.");
+        return *(mInputs[inputIdx].get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx < 2) && "Mul Operator has 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Mul Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Mul_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setBackend(name);
+        mInputs[1]->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Mul(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 1aa64a1626ce4f3b45b2bf5ed84c810d150ed6e2..0f682297f9f6f4a115279db99ec6141b88fb38f9 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
 
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
-  std::unique_ptr<OperatorImpl> mImpl; // implementation of the operator
+  std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
   std::map<std::string, std::shared_ptr<Hook>> mHooks;
 
 private:
@@ -87,6 +87,14 @@ public:
     virtual void setBackend(const std::string& name) = 0;
     virtual void setDatatype(const DataType& datatype) = 0;
 
+    /**
+     * @brief Set a new OperatorImpl for the Operator.
+     *
+     */
+    void setImpl(std::shared_ptr<OperatorImpl> impl) {
+        mImpl = impl;
+    }
+
     /**
      * @brief Minimum amount of data from a specific input for one computation pass.
      * @param inputIdx Index of the input analysed.
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..732cf36b4ef7e7640648c542191acd02d0875a4f
--- /dev/null
+++ b/include/aidge/operator/Pow.hpp
@@ -0,0 +1,146 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_POW_H_
+#define AIDGE_CORE_OPERATOR_POW_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Pow_Op : public Operator,
+    public Registrable<Pow_Op, std::string, std::unique_ptr<OperatorImpl>(const Pow_Op&)> {
+public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Pow";
+
+    Pow_Op()
+            : Operator(Type)
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Pow_Op(const Pow_Op& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Pow_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Pow_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty())
+            mOutput->resize(mInputs[0]->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Pow operator.");
+        return *(mInputs[inputIdx].get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx < 2) && "Pow Operator has 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Pow Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Pow_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setBackend(name);
+        mInputs[1]->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Pow(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..90b2ae6a8ae1311aef14e4eba4d3563a28a3d18e
--- /dev/null
+++ b/include/aidge/operator/Sqrt.hpp
@@ -0,0 +1,141 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SQRT_H_
+#define AIDGE_CORE_OPERATOR_SQRT_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Sqrt_Op : public Operator,
+    public Registrable<Sqrt_Op, std::string, std::unique_ptr<OperatorImpl>(const Sqrt_Op&)> {
+public:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Sqrt";
+
+    Sqrt_Op()
+            : Operator(Type)
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Sqrt_Op(const Sqrt_Op& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Sqrt_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Sqrt_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInput->empty())
+            mOutput->resize(mInput->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx == 0) && "Sqrt Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Sqrt Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Sqrt_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Sqrt(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Sqrt_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SQRT_H_ */
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..451cba08f58e7a580576531ce2a97c92fb9be3ae
--- /dev/null
+++ b/include/aidge/operator/Sub.hpp
@@ -0,0 +1,146 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SUB_H_
+#define AIDGE_CORE_OPERATOR_SUB_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Sub_Op : public Operator,
+    public Registrable<Sub_Op, std::string, std::unique_ptr<OperatorImpl>(const Sub_Op&)> {
+public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Sub";
+
+    Sub_Op()
+            : Operator(Type)
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Sub_Op(const Sub_Op& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Sub_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Sub_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Sub_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty())
+            mOutput->resize(mInputs[0]->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Sub operator.");
+        return *(mInputs[inputIdx].get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx < 2) && "Sub Operator has 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Sub Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Sub_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setBackend(name);
+        mInputs[1]->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Sub(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
diff --git a/include/aidge/recipies/Recipies.hpp b/include/aidge/recipies/Recipies.hpp
index 7d2539f7a87a37c6857092de442cf54e29415e86..38b190e68778cfa8b72b39902066e450d1351960 100644
--- a/include/aidge/recipies/Recipies.hpp
+++ b/include/aidge/recipies/Recipies.hpp
@@ -12,10 +12,13 @@
 #ifndef AIDGE_CORE_UTILS_RECIPIES_H_
 #define AIDGE_CORE_UTILS_RECIPIES_H_
 
+#include <memory>
+#include <set>
+
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
 
-namespace Aidge{
+namespace Aidge {
 
 // FUSE MATMUL + ADD -> FC
 
@@ -65,8 +68,10 @@ void fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes);
  */
 void fuseBatchNorm(std::shared_ptr<GraphView> graphView);
 
-std::set<std::shared_ptr<Node>> horizontalTiling(std::shared_ptr<Node> node);
-std::set<std::shared_ptr<Node>> horizontalTiling(std::set<std::shared_ptr<Node>> setOfNodes);
+std::set<std::shared_ptr<Node>> getHorizontalTiling(const std::shared_ptr<Node>& node, const DimIdx_t axis, const std::size_t nbSlices);
+void horizontalTiling(std::shared_ptr<Node> node, DimIdx_t dim, std::size_t nbSlices);
+std::set<std::shared_ptr<Node>> getHorizontalTiling(std::set<std::shared_ptr<Node>> setOfNodes, DimIdx_t dim, std::size_t nbSlices);
+void horizontalTiling(std::set<std::shared_ptr<Node>> setOfNodes, DimIdx_t dim, std::size_t nbSlices);
 
 }
 
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 3b29c472b3a540c9ef3b8ed46520e3e718e8cbfb..ece74509d466800c870d73d1e0bbe1d639f8bf54 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -35,7 +35,7 @@ public:
     {
         #ifdef PYBIND
         #define _CRT_SECURE_NO_WARNINGS
-        if (std::getenv("AIDGE_CORE_WITH_PYBIND")){
+        if (Py_IsInitialized()) {
             std::string name = std::string("registrar_")+typeid(Registrable<DerivedClass, Key, Func>).name();
             static auto shared_data = reinterpret_cast<std::map<Key, std::function<Func>> *>(py::get_shared_data(name));
             if (!shared_data)
@@ -78,4 +78,4 @@ struct Registrar {
 };
 }
 
-#endif //AIDGE_CORE_UTILS_REGISTRAR_H_
\ No newline at end of file
+#endif //AIDGE_CORE_UTILS_REGISTRAR_H_
diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp
index 11189f2f3c4a46b31d8e08d73bea17f27df07765..34610069079ee792ebbe4b261b57177b3bbe2997 100644
--- a/python_binding/backend/pybind_OperatorImpl.cpp
+++ b/python_binding/backend/pybind_OperatorImpl.cpp
@@ -10,11 +10,112 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include "aidge/operator/Operator.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
+
+/**
+ * @brief Trampoline class for binding
+ *
+ */
+class pyOperatorImpl: public OperatorImpl {
+public:
+    using OperatorImpl::OperatorImpl; // Inherit constructors
+
+    void forward() override {
+        PYBIND11_OVERRIDE(
+            void,
+            OperatorImpl,
+            forward,
+
+        );
+    }
+    void backward() override {
+        PYBIND11_OVERRIDE(
+            void,
+            OperatorImpl,
+            backward,
+
+        );
+    }
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            NbElts_t,
+            OperatorImpl,
+            "get_nb_required_data",
+            getNbRequiredData,
+            inputIdx
+        );
+    }
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            NbElts_t,
+            OperatorImpl,
+            "get_nb_required_protected",
+            getNbRequiredProtected,
+            inputIdx
+
+        );
+    }
+    NbElts_t getRequiredMemory(const IOIndex_t outputIdx,
+    const std::vector<DimSize_t> &inputsSize) const override {
+        PYBIND11_OVERRIDE_NAME(
+            NbElts_t,
+            OperatorImpl,
+            "get_required_memory",
+            getRequiredMemory,
+            outputIdx,
+            inputsSize
+
+        );
+    }
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            NbElts_t,
+            OperatorImpl,
+            "get_nb_consumed_data",
+            getNbConsumedData,
+            inputIdx
+
+        );
+    }
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            NbElts_t,
+            OperatorImpl,
+            "get_nb_produced_data",
+            getNbProducedData,
+            outputIdx
+
+        );
+    }
+    void updateConsummerProducer() override {
+        PYBIND11_OVERRIDE_NAME(
+            void,
+            OperatorImpl,
+            "update_consummer_producer",
+            updateConsummerProducer,
+
+        );
+    }
+};
+
 void init_OperatorImpl(py::module& m){
-    py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>>(m, "OperatorImpl");
+
+    py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr())
+    .def(py::init<const Operator&>())
+    .def("forward", &OperatorImpl::forward)
+    .def("backward", &OperatorImpl::backward)
+    .def("get_nb_required_data", &OperatorImpl::getNbRequiredData)
+    .def("get_nb_required_protected", &OperatorImpl::getNbRequiredProtected)
+    .def("get_required_memory", &OperatorImpl::getRequiredMemory)
+    .def("get_nb_consumed_data", &OperatorImpl::getNbConsumedData)
+    .def("get_nb_produced_data", &OperatorImpl::getNbProducedData)
+    .def("update_consummer_producer", &OperatorImpl::updateConsummerProducer)
+    ;
 }
 }
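
With this trampoline, Python subclasses can override not only `forward`/`backward` (as the unit test above does) but also the scheduler hooks through their snake_case names. A minimal sketch with purely illustrative return values:

```python
import aidge_core

class StreamingImpl(aidge_core.OperatorImpl):
    def __init__(self, op: aidge_core.Operator):
        aidge_core.OperatorImpl.__init__(self, op)  # required, see unit test

    def get_nb_required_data(self, input_idx) -> int:
        return 1  # illustrative: request one element per pass

    def forward(self):
        pass  # kernel code would run here
```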
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 555540045d01aebfe121422ea9e7a367065b9996..6ac2199b4ba59faba16c9815277ad134c6f183f4 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -26,7 +26,7 @@ void init_GraphView(py::module& m) {
           .def("save", &GraphView::save, py::arg("path"), py::arg("verbose") = false,
           R"mydelimiter(
           Save the GraphView as a Mermaid graph in a .md file at the specified location.
-          
+
           :param path: save location
           :type path: str
           )mydelimiter")
@@ -34,14 +34,14 @@ void init_GraphView(py::module& m) {
           .def("get_output_nodes", &GraphView::outputNodes,
           R"mydelimiter(
           Get set of output Nodes.
-          
+
           :rtype: list[Node]
           )mydelimiter")
 
           .def("get_input_nodes", &GraphView::inputNodes,
           R"mydelimiter(
           Get set of input Nodes.
-          
+
           :rtype: list[Node]
           )mydelimiter")
 
@@ -49,7 +49,7 @@ void init_GraphView(py::module& m) {
                py::arg("other_node"), py::arg("include_learnable_parameters") = true,
           R"mydelimiter(
           Include a Node to the current GraphView object.
-          
+
           :param other_node: Node to add
           :type oth_Node: Node
           :param includeLearnableParameter: include non-data inputs, like weights and biases. Default True.
@@ -66,18 +66,20 @@ void init_GraphView(py::module& m) {
                py::arg("fromTensor") = 0U, py::arg("toTensor") = gk_IODefaultIndex,
           R"mydelimiter(
           Include a Node to the current GraphView object.
-          
+
           :param other_node: Node to add
           :type oth_Node: Node
           :param includeLearnableParameter: include non-data inputs, like weights and biases. Default True.
           :type includeLearnableParameter
           )mydelimiter")
-          
-          .def("replace_with", &GraphView::replaceWith, py::arg("new_nodes"),
+
+          .def_static("replace", &GraphView::replace, py::arg("old_nodes"), py::arg("new_nodes"),
           R"mydelimiter(
-          Replace the current GraphView with the set of given Nodes if possible.
-          
-          :param new_nodes: Nodes with connections already taken care of.
+          Replace the old set of Nodes in every GraphView with the new set of Nodes, if the replacement is possible.
+
+          :param old_nodes: Nodes currently connected in one or more GraphViews.
+          :type old_nodes: Node
+          :param new_nodes: Nodes whose inner connections are already taken care of.
           :type new_nodes: Node
           :return: Whether any replacement has been made.
           :rtype: bool
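
A sketch of the new static binding from Python, using GenericOperator nodes and assuming Node is bound with add_child as in the unit tests below; replace patches every GraphView containing the old Nodes:

    import aidge_core

    matmul = aidge_core.GenericOperator("MatMul", 1, 2, 1, "matmul0")
    add = aidge_core.GenericOperator("Add", 1, 2, 1, "add0")
    matmul.add_child(add)
    fc = aidge_core.GenericOperator("FC", 1, 3, 1, "fc0")

    # returns whether the replacement has been made
    ok = aidge_core.GraphView.replace({matmul, add}, {fc})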
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 3801fac8a8ca8461fe6ec74cf75313fc362d15d4..f4f7946c6ecc180f83e4bf58eee16102752f0c6e 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -11,7 +11,7 @@
 
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
-
+#include <iostream>
 #include <string>
 #include <vector>
 #include <array>
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3492bf244952ba6ed0d77cb16de758e61fb26383
--- /dev/null
+++ b/python_binding/operator/pybind_Div.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Div.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Div(py::module& m) {
+    py::class_<Div_Op, std::shared_ptr<Div_Op>, Operator>(m, "DivOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Div_Op::getInputsName)
+    .def("get_outputs_name", &Div_Op::getOutputsName);
+
+    m.def("Div", &Div, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 4cf4dae2234900722058d6555582c5b78900ab7d..241fc7f4a003f53de15a42859b078c54cc98b63a 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -27,7 +27,7 @@ void init_GenericOperator(py::module& m) {
     .def("compute_output_dims", &GenericOperator_Op::computeOutputDims)
     .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function"));
 
-    m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nbDataIn"), py::arg("nbIn"), py::arg("nbOut"),
+    m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nb_data_in"), py::arg("nb_in"), py::arg("nb_out"),
           py::arg("name") = "");
 }
 }  // namespace Aidge
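
With the renamed keyword arguments, the factory now follows the Python naming convention; positional calls are unaffected. A sketch:

    import aidge_core

    op = aidge_core.GenericOperator("Transpose", nb_data_in=1, nb_in=1,
                                    nb_out=1, name="transpose0")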
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index c83dfaa3639f05af345bd9214460f95fd661cd31..907e8cfaa6cde2451677b72beab38bd9a3938735 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -30,22 +30,26 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &>(),
+                const std::array<DimSize_t, DIM> &,
+                bool>(),
         py::arg("kernel_dims"),
-        py::arg("stride_dims"))
+        py::arg("stride_dims"),
+        py::arg("ceil_mode"))
   .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
   .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
 
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
-                                                                  const std::vector<DimSize_t> &stride_dims) {
+                                                                  const std::vector<DimSize_t> &stride_dims,
+                                                                  bool ceil_mode) {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
 
-        return MaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()));
+        return MaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), ceil_mode);
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
-       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("ceil_mode") = false);
 
 }
 
@@ -55,8 +59,5 @@ void init_MaxPooling(py::module &m) {
   declare_MaxPoolingOp<2>(m);
   declare_MaxPoolingOp<3>(m);
 
-  // FIXME:
-  // m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
-  // (&)[1])>(&MaxPooling));
 }
 } // namespace Aidge
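
A sketch of the extended factory; when ceil_mode is true, the output spatial dimensions are rounded up instead of down:

    import aidge_core

    pool = aidge_core.MaxPooling2D(kernel_dims=[3, 3],
                                   name="pool0",
                                   stride_dims=[2, 2],
                                   ceil_mode=True)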
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index 3372d50e14be9e0d24ba5d9171766255ab49f23b..aa9f3c50e6b8c6ab9e7be46776d5fba30d775be2 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -28,7 +28,7 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
   m.def(("PaddedConv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
                                                          DimSize_t out_channels,
                                                          const std::vector<DimSize_t>& kernel_dims,
-                                                         const std::string& name, 
+                                                         const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
                                                          const std::vector<DimSize_t> &dilation_dims)
@@ -50,7 +50,7 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
 
 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
   m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
-                                                         const std::string& name, 
+                                                         const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
                                                          const std::vector<DimSize_t> &dilation_dims)
@@ -66,12 +66,12 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
-  
+
 }
 
 template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
   m.def(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
-                                                         const std::string& name, 
+                                                         const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims)
     {
@@ -84,25 +84,27 @@ template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0));
-  
+
 }
 
 template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
   m.def(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
-                                                         const std::string& name, 
+                                                         const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
-                                                         const std::vector<DimSize_t> &padding_dims)
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         bool ceil_mode)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
 
-        return PaddedMaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()));
+        return PaddedMaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), ceil_mode);
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0));
-  
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
+       py::arg("ceil_mode") = false);
+
 }
 
 void init_MetaOperatorDefs(py::module &m) {
@@ -118,9 +120,7 @@ void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedMaxPoolingOp<1>(m);
   declare_PaddedMaxPoolingOp<2>(m);
   declare_PaddedMaxPoolingOp<3>(m);
- 
-  // FIXME:
-  // m.def("Conv1D", static_cast<NodeAPI(*)(const char*, int, int, int const
-  // (&)[1])>(&Conv));
+
+
 }
 } // namespace Aidge
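
The padded pooling variant gains the same flag; note that padding_dims takes 2*DIM values (begin and end padding per spatial dimension). A sketch:

    import aidge_core

    pool = aidge_core.PaddedMaxPooling2D(kernel_dims=[2, 2],
                                         name="padded_pool0",
                                         stride_dims=[2, 2],
                                         padding_dims=[1, 1, 1, 1],
                                         ceil_mode=False)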
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2627c99005b009769e8fbb97b1f5d79e2424c997
--- /dev/null
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Mul(py::module& m) {
+    py::class_<Mul_Op, std::shared_ptr<Mul_Op>, Operator>(m, "MulOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Mul_Op::getInputsName)
+    .def("get_outputs_name", &Mul_Op::getOutputsName);
+
+    m.def("Mul", &Mul, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index d945b212ff6fb643302ca7512e91c7a778a39419..6b535e8cf3293b26aaa64f95ca2f9a394768935f 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -24,6 +24,9 @@ void init_Operator(py::module& m){
     .def("associate_input", &Operator::associateInput, py::arg("inputIdx"), py::arg("data"))
     .def("set_datatype", &Operator::setDatatype, py::arg("datatype"))
     .def("set_backend", &Operator::setBackend, py::arg("name"))
+    .def("forward", &Operator::forward)
+    // py::keep_alive prevents Python from garbage collecting the implementation while the Operator is still alive
+    .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>())
     ;
 }
 }
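
A sketch of wiring a Python implementation into an Operator, reusing the MyImpl trampoline subclass sketched earlier and assuming Node exposes get_operator():

    import aidge_core

    node = aidge_core.GenericOperator("MyOp", 1, 1, 1, "my_op")
    op = node.get_operator()
    impl = MyImpl(op)
    op.set_impl(impl)

    # py::keep_alive<1, 2> ties the implementation's lifetime to the
    # Operator: dropping the Python reference does not destroy it
    del impl
    op.forward()  # still dispatches to MyImpl.forward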
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..22866c5460381b6f494948c7410bcd67e7e46edb
--- /dev/null
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Pow.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Pow(py::module& m) {
+    py::class_<Pow_Op, std::shared_ptr<Pow_Op>, Operator>(m, "PowOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Pow_Op::getInputsName)
+    .def("get_outputs_name", &Pow_Op::getOutputsName);
+
+    m.def("Pow", &Pow, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b70171814662c861f19b3048b018260170d37491
--- /dev/null
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Sqrt.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Sqrt(py::module& m) {
+    py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, Operator>(m, "SqrtOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Sqrt_Op::getInputsName)
+    .def("get_outputs_name", &Sqrt_Op::getOutputsName);
+
+    m.def("Sqrt", &Sqrt, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..10c95939646a6b605f23c42618bfbdd00ceb6e2e
--- /dev/null
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Sub.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Sub(py::module& m) {
+    py::class_<Sub_Op, std::shared_ptr<Sub_Op>, Operator>(m, "SubOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Sub_Op::getInputsName)
+    .def("get_outputs_name", &Sub_Op::getOutputsName);
+
+    m.def("Sub", &Sub, py::arg("name") = "");
+}
+}  // namespace Aidge
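
Div, Mul, Pow, Sqrt and Sub all follow the same binding pattern: a *_Op class exposing get_inputs_name/get_outputs_name plus a factory returning a Node. A sketch, assuming Node exposes get_operator():

    import aidge_core

    div = aidge_core.Div(name="div0")
    mul = aidge_core.Mul(name="mul0")
    pw = aidge_core.Pow(name="pow0")
    sqrt = aidge_core.Sqrt(name="sqrt0")
    sub = aidge_core.Sub(name="sub0")

    print(sub.get_operator().get_inputs_name())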
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index e9777a220f7dc8d491a8cd8220f3d99f673a8e8d..a482191c78ff56b000e043cd7350ca1c150d1d6e 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -25,15 +25,20 @@ void init_AvgPooling(py::module&);
 void init_BatchNorm(py::module&);
 void init_Conv(py::module&);
 void init_ConvDepthWise(py::module&);
+void init_Div(py::module&);
 void init_FC(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
 void init_MatMul(py::module&);
 void init_MaxPooling(py::module&);
 void init_MetaOperatorDefs(py::module&);
+void init_Mul(py::module&);
 void init_Producer(py::module&);
+void init_Pow(py::module&);
 void init_ReLU(py::module&);
 void init_Softmax(py::module&);
+void init_Sqrt(py::module&);
+void init_Sub(py::module&);
 
 void init_Node(py::module&);
 void init_GraphView(py::module&);
@@ -49,14 +54,8 @@ void init_Recipies(py::module&);
 void init_Scheduler(py::module&);
 void init_TensorUtils(py::module&);
 
-void set_python_flag(){
-    // Set an env variable to know if we run with ypthon or cpp
-    py::module os_module = py::module::import("os");
-    os_module.attr("environ")["AIDGE_CORE_WITH_PYBIND"] = "1";
-}
 
 void init_Aidge(py::module& m){
-    set_python_flag();
     init_Data(m);
     init_Tensor(m);
 
@@ -73,13 +72,19 @@ void init_Aidge(py::module& m){
     init_BatchNorm(m);
     init_Conv(m);
     init_ConvDepthWise(m);
+    init_Div(m);
     init_FC(m);
     init_GenericOperator(m);
     init_LeakyReLU(m);
     init_MatMul(m);
     init_MaxPooling(m);
+    init_MetaOperatorDefs(m);
+    init_Mul(m);
+    init_Pow(m);
     init_ReLU(m);
     init_Softmax(m);
+    init_Sqrt(m);
+    init_Sub(m);
 
     init_Producer(m);
     init_Match(m);
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..166754cc9fe9774d922ef523ab35f569673701fd
--- /dev/null
+++ b/src/backend/OperatorImpl.cpp
@@ -0,0 +1,77 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+Aidge::OperatorImpl::OperatorImpl(const Operator& op):
+    mOp(op),
+    mNbConsumedData(mOp.nbInputs(), 0),
+    mNbProducedData(mOp.nbOutputs(), 0)
+{
+    //ctor
+}
+
+Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+    assert(mOp.getInput(inputIdx) && "requires valid input");
+
+    // Requires the whole tensor by default
+    return std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->size();
+}
+
+Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
+    assert(mOp.getInput(inputIdx) && "requires valid input");
+
+    // Protect the whole tensor by default
+    return std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->size();
+}
+
+Aidge::NbElts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
+    assert(mOp.getOutput(outputIdx) && "requires valid output");
+
+    // Requires the whole tensor by default, regardless of available data on inputs
+    return std::static_pointer_cast<Tensor>(mOp.getOutput(outputIdx))->size();
+}
+
+Aidge::NbElts_t Aidge::OperatorImpl::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
+    assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
+    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
+}
+
+Aidge::NbElts_t Aidge::OperatorImpl::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
+    assert(static_cast<std::size_t>(outputIdx) < mNbProducedData.size());
+    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
+}
+
+void Aidge::OperatorImpl::updateConsummerProducer(){
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx) {
+        // each input is consumed by the minimum amount for a forward pass
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));
+    }
+
+    for (std::size_t outputIdx = 0; outputIdx < mNbProducedData.size(); ++outputIdx) {
+        mNbProducedData[outputIdx] += getRequiredMemory(static_cast<IOIndex_t>(outputIdx), {});
+    }
+}
+
+void Aidge::OperatorImpl::forward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "forward() not implemented");
+}
+
+void Aidge::OperatorImpl::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented");
+}
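
The defaults are conservative: a plain OperatorImpl requires, protects and produces whole tensors, and forward()/backward() fail unless overridden. Assuming AIDGE_THROW_OR_ABORT surfaces in Python as a RuntimeError, a sketch:

    import aidge_core

    node = aidge_core.GenericOperator("NoKernel", 1, 1, 1, "no_kernel")
    impl = aidge_core.OperatorImpl(node.get_operator())  # no override

    try:
        impl.forward()
    except RuntimeError as err:
        print(err)  # forward() not implemented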
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 1ca54c9c194a6b0a1fcf932a1f0f92d3b251d312..406fae8829a2135ee9d080a0b8a7ad7174dba798 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -17,6 +17,7 @@
 #include "aidge/utils/Types.h"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 ///////////////////////////////////////////////////////
 //        FUNCTIONAL DESCRIPTION
@@ -542,38 +543,72 @@ void Aidge::GraphView::insertParent(NodePtr childNode,
 }
 
 
-bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
-  // TODO : only supports one input/output node for now
-  assert(mNodes.size()>0 && "There must be at least one Node to replace");
+bool Aidge::GraphView::replace(const std::set<Aidge::NodePtr>& oldNodes, const std::set<Aidge::NodePtr>& newNodes) {
 
-  bool replacable;
-  std::shared_ptr<Node> previousInputNode = (*inputNodes().begin());
-  std::shared_ptr<Node> previousOutputNode = (*outputNodes().begin());
-  std::shared_ptr<Node> newOutputNode;
+    // TODO: handle the case where an oldNodes parameter comes not from a Producer but from another Node (not included in oldNodes)
+    // How to distinguish it from a data input?
+    // TODO: Parameter Tensors could be identified by their dimensions
+    // TODO: take GraphViews as input parameters, since the new Nodes have to be connected in any case.
+    // It would also avoid specifying each Producer, since they would be included automatically
 
-  auto gNew = std::make_shared<GraphView>();
-  gNew->add(newNodes, false);
+    auto oldG = std::make_shared<GraphView>("oldG");
+    oldG->add(oldNodes, false);
+    auto newG = std::make_shared<GraphView>("newG");
+    newG->add(newNodes, false);
 
-  if (newNodes.empty()) {
-    replacable = (outputNodes().size() == 1) &&
-                 (inputNodes().size() == 1) &&
-                 ((*outputNodes().begin())->nbOutputs() == 1) &&
-                 ((*inputNodes().begin())->nbDataInputs() == 1);
-    newOutputNode = previousInputNode->input(0).first;
-  } else {
-    newOutputNode = (*gNew->outputNodes().begin());
-    replacable = (outputNodes().size() == gNew->outputNodes().size()) &&
-                 (outputNodes().size() == 1) &&
-                 (previousOutputNode->nbOutputs() == newOutputNode->nbOutputs());
-  }
+    if ((oldG->inputNodes().size() == 0) || (oldG->outputNodes().size() != 1)) {
+        return false;
+    }
+    if (!(newNodes.empty()) && ((newG->inputNodes().size() == 0) ||
+                                (newG->outputNodes().size() != 1))) {
+        return false;
+    }
+
+    // there is at least one inputNode in the old/new GraphView
+    std::shared_ptr<Node> firstPreviousInputNode = (*(oldG->inputNodes()).begin());
+    std::shared_ptr<Node> firstPreviousOutputNode = (*(oldG->outputNodes()).begin());
+
+    // find Node to link to new input Node
+    // compute the number of inputs of firstPreviousInputNode that come from outside the oldNodes set
+    std::size_t nbExternalInputs = 0;
+    std::shared_ptr<Node> externalInput = nullptr;
+    IOIndex_t externalInputId = gk_IODefaultIndex;
+    for (const auto& input : firstPreviousInputNode->inputs()) {
+        if (oldNodes.find(input.first) == oldNodes.end()) { // Node connected to another Node outside of oldG
+            nbExternalInputs++;
+            externalInput = input.first;
+            externalInputId = input.second;
+        }
+    }
+    if (nbExternalInputs > 1) {
+        AIDGE_INTERNAL_ASSERT("To many input to link for oldNodes set");
+    }
+
+    if (oldG->inputNodes().size() > 1) {
+        // at most one external input has been identified; check that every input Node points to the same external source
+        for (const auto& previousInputNode : oldG->inputNodes()) {
+            for (const auto& input : previousInputNode->inputs()) {
+                if (oldNodes.find(input.first) == oldNodes.end()) {
+                    if ( (externalInput != input.first) || (externalInputId != input.second) ) {
+                        return false; // an inputNode points to an external Node different from the registered one
+                    }
+                }
+            }
+        }
+    }
+
+    if (firstPreviousOutputNode->nbOutputs() != 1) {
+        return false;
+    }
 
-  if (replacable) {
-    auto copyOutputs = previousOutputNode->outputs();
+    // find Node to replicate output connections
+    std::shared_ptr<Node> newOutputNode = newNodes.empty() ? externalInput : *(newG->outputNodes().begin());
 
+    auto copyOutputs = firstPreviousOutputNode->outputs();
     // manage Views for newNodes
     // only keep common views to each node for the new set
-    std::set<std::shared_ptr<GraphView>> commonGraphViews =  (*mNodes.begin())->views();
-    for (const auto& nodePtr : mNodes) {
+    std::set<std::shared_ptr<GraphView>> commonGraphViews =  (*oldNodes.begin())->views();
+    for (const auto& nodePtr : oldNodes) {
       const auto nodeView = nodePtr->views();
       std::set<std::shared_ptr<GraphView>> intersection;
       std::set_intersection(commonGraphViews.begin(), commonGraphViews.end(),
@@ -581,32 +616,59 @@ bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
                           std::inserter(intersection, intersection.begin()));
       commonGraphViews = intersection;
     }
+    commonGraphViews.erase(oldG);
+    commonGraphViews.erase(newG);
 
     // clean Nodes to replace
-    std::set<std::shared_ptr<Node>> copyNode = mNodes;
-    for (auto& nodePtr : copyNode) { nodePtr->resetConnections(true); }
+    // Do not include common Nodes to avoid cleaning Producers linked to newNodes
+    std::set<std::shared_ptr<Node>> nodesToClean;
+    std::set_difference(oldNodes.begin(), oldNodes.end(),
+                          newNodes.begin(), newNodes.end(),
+                          std::inserter(nodesToClean, nodesToClean.begin()));
+    for (auto& nodePtr : nodesToClean) { nodePtr->resetConnections(true); }
 
     // copy output connections
     if (newOutputNode) {
-      for (IOIndex_t o = 0; o < previousOutputNode->nbOutputs(); ++o) {
-        auto outputPairs = copyOutputs[o];
-        for (const auto& onePair : outputPairs) {
-          newOutputNode->addChild(onePair.first, o, onePair.second);
+        for (IOIndex_t o = 0; o < firstPreviousOutputNode->nbOutputs(); ++o) {
+            auto outputPairs = copyOutputs[o];
+            for (const auto& onePair : outputPairs) {
+                newOutputNode->addChild(onePair.first, o, onePair.second);
+            }
         }
-      }
     }
+
+    // copy input connections
+    if (!newNodes.empty() && externalInput) {
+        for (const auto& newInputNode : newG->inputNodes()) {
+            IOIndex_t inputId = 0;
+            for (const auto& input : newInputNode->inputs()) {
+                if (newNodes.find(input.first) == newNodes.end()) {
+                    externalInput->addChild(newInputNode, externalInputId, inputId);
+                }
+                inputId++;
+            }
+        }
+    }
+
     // insert new Nodes in the right GraphViews
-    for (auto& graphPtr : commonGraphViews) {
-      graphPtr->add(newNodes, false);
-      if (newNodes.empty()) {
-        graphPtr->updateInputNodes();
-        graphPtr->updateOutputNodes();
-      }
+    for (const auto& graphPtr : commonGraphViews) {
+        graphPtr->add(newNodes, false);
+        if (newNodes.empty()) {
+            graphPtr->updateInputNodes();
+            graphPtr->updateOutputNodes();
+        }
     }
-  }
-  return replacable;
+
+    for (const auto& node : oldNodes) {
+      node->removeView(oldG);
+    }
+    for (const auto& node : newNodes) {
+      node->removeView(newG);
+    }
+    return true;
 }
 
+
 void Aidge::GraphView::updateInputNodes() {
   mInputNodes.clear();
   for (const std::shared_ptr<Node>& go_ptr : mNodes) {
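
The early-exit checks make replace() refuse patterns it cannot rewire: the old set must have exactly one output Node with a single output port, and all of its external inputs must come from the same source. A sketch of a refused call, under the same binding assumptions as above:

    import aidge_core

    n1 = aidge_core.GenericOperator("A", 1, 1, 1, "n1")
    n2 = aidge_core.GenericOperator("B", 1, 1, 2, "n2")  # two output ports
    n1.add_child(n2)

    # refused: the single output Node has more than one output port
    assert not aidge_core.GraphView.replace({n1, n2}, set())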
diff --git a/src/recipies/FuseBatchNorm.cpp b/src/recipies/FuseBatchNorm.cpp
index f06e88d3d76166696ca15c7ed8eec962ada74592..5d1a50fdf8d9e11e9ac6672bc93053bdde71851a 100644
--- a/src/recipies/FuseBatchNorm.cpp
+++ b/src/recipies/FuseBatchNorm.cpp
@@ -116,15 +116,14 @@ void Aidge::fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes){
         bias->set<float>(output, biasValue);
 
     }
-    auto g = std::make_shared<GraphView>();
-    g->add(std::set<std::shared_ptr<Node>>({
+
+    GraphView::replace(std::set<std::shared_ptr<Node>>({
         batchnorm,
         batchnorm->input(1).first,
         batchnorm->input(2).first,
         batchnorm->input(3).first,
         batchnorm->input(4).first
-    }));
-    g->replaceWith({});
+        }), {});
 
 }
 
diff --git a/src/recipies/FuseMulAdd.cpp b/src/recipies/FuseMulAdd.cpp
index 75abd1fb675e2e7280bbda295d3097bbc5f29528..09bbd3903189a37237f8b06fda8d15d8aafcb053 100644
--- a/src/recipies/FuseMulAdd.cpp
+++ b/src/recipies/FuseMulAdd.cpp
@@ -20,6 +20,8 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/GenericOperator.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
 // Graph Regex
 #include "aidge/graphmatching/GRegex.hpp"
 #include "aidge/graphmatching/NodeRegex.hpp"
@@ -47,34 +49,32 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
 
     // Step 1 : Create FC
     // Fetch the output dimension throught the bias size
-    auto producer_add_bias = add->input(1);
-    Tensor& bias_tensor = (producer_add_bias.first)->getOperator()->output(0);
+    std::shared_ptr<Node> bias = (add->getParent(1)) ? add->getParent(1)->cloneSharedOperators() : nullptr;
+
+    if (!(matmul->getParent(1))) {
+        AIDGE_INTERNAL_ASSERT("No weight detected to produce the fuseMulAdd recipe.");
+    }
+    std::shared_ptr<Node> weight = matmul->getParent(1)->cloneSharedOperators();
+    DimSize_t outSize = weight->getOperator()->output(0).dims<2>()[1];
 
     // Instanciate FC
     //std::shared_ptr<Node> fc = FC(dim[0], false, "Fc");
-    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(bias_tensor.dims()[0], false));
+    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(outSize, !bias));
 
     // Step 2 : Branch existing producers & create the others
     // link weights & bias
-    if (matmul->getParent(1)==nullptr) {
-        matmul->getParent(0)->addChild(fc, 0, 1);
-        printf("MatMul out[1] == nullptr !\n");
-    } else {
-        printf("MatMul out[1] != nullptr !\n");
-        if (matmul->getParent(0)!=nullptr)
-            matmul->getParent(0)->addChild(fc, 0, 0);
-        matmul->input(1).first->addChild(fc, 0, 1);
+    weight->addChild(fc, 0, 1);
+    if (bias) {
+        bias->addChild(fc, 0, 2);
     }
-    (producer_add_bias.first)->addChild(fc,0,2);
 
 
     // Step 3 : Update all graphviews that contains at least one node to replace
         // Case 1 : If all nodes are in a graph view : delete old nodes & branch input & output
         // Case 2 : If not all nodes are in a graph view : only delete the nodes from the graphview
-        // Maybe create a central mechanism to update automatically all graph views rather than each node have graphview presence memory ?
-    auto nodeToReplace = std::make_shared<GraphView>();
-    nodeToReplace->add(nodes, false);
-    nodeToReplace->replaceWith({fc});
+        // Maybe create a central mechanism to update all GraphViews automatically, rather than having each Node keep track of the GraphViews it belongs to?
+    auto newNodes = std::set<std::shared_ptr<Node>>({fc, weight, fc->getParent(2)});
+    GraphView::replace({matmul, add, add->getParent(1), matmul->getParent(1)}, newNodes);
 
 }
 
diff --git a/src/recipies/RemoveFlatten.cpp b/src/recipies/RemoveFlatten.cpp
index 2dfa10ce2e1d2ba9feedb4e7d13bad660bc530fb..e5f8977e2ed0dc4d4b327351970f08c76972c101 100644
--- a/src/recipies/RemoveFlatten.cpp
+++ b/src/recipies/RemoveFlatten.cpp
@@ -30,10 +30,8 @@ namespace Aidge {
                 flatten = element;
             }
         }
-        auto g = std::make_shared<GraphView>();
-        // TODO : avoid using replace_with and use a remove method instead
-        g->add(std::set<std::shared_ptr<Node>>({flatten}));
-        g->replaceWith({});
+
+        GraphView::replace({flatten}, {});
     }
 
     void removeFlatten(std::shared_ptr<GraphView> graphView){
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 0811f4abfe5504e5210f09f66b6774ba8362e28b..a07993463eb6597be304d092ae1e0fa059ceb59c 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -12,6 +12,7 @@
 #include <cassert>
 #include <map>
 #include <memory>
+#include <set>
 #include <string>
 
 #include <catch2/catch_test_macros.hpp>
@@ -277,7 +278,8 @@ TEST_CASE("[core/graph] GraphView(forwardDims)", "[GraphView][forwardDims]") {
     }
 }
 
-TEST_CASE("[core/graph] GraphView(replaceWith)") {
+
+TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
     SECTION("replace small pattern") {
         // create original graph
         std::shared_ptr<GraphView> g = std::make_shared<GraphView>("TestGraph");
@@ -298,19 +300,21 @@ TEST_CASE("[core/graph] GraphView(replaceWith)") {
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({matmulWeight, addBias, other1, other2, matmul, add}));
 
         // create graph to replace
-        std::shared_ptr<GraphView> nodeToReplace = std::make_shared<GraphView>();
-        nodeToReplace->add({matmul, add}, false);
+        std::set<std::shared_ptr<Node>> nodeToReplace = std::set<std::shared_ptr<Node>>({matmulWeight, addBias, matmul, add});
 
         // create replacing graph
-        std::shared_ptr<Node> newNode = GenericOperator("FC", 1, 3, 1, "fc");
-        other1->addChild(newNode);
-        matmulWeight->addChild(newNode, 0, 1);
-        addBias->addChild(newNode, 0, 2);
+        std::shared_ptr<Node> myFC = GenericOperator("FC", 1, 3, 1, "fc");
+        auto newMatmulWeight = matmulWeight->cloneSharedOperators();
+        newMatmulWeight->addChild(myFC, 0, 1);
+        auto newAddBias = addBias->cloneSharedOperators();
+        newAddBias->addChild(myFC, 0, 2);
+        std::set<std::shared_ptr<Node>> newNodes = std::set<std::shared_ptr<Node>>({myFC, newMatmulWeight, newAddBias});
 
         // replace
-        nodeToReplace->replaceWith({newNode});
+        GraphView::replace(nodeToReplace, newNodes);
 
-        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({matmulWeight, addBias, other1, other2, newNode}));
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({newMatmulWeight, newAddBias, other1, other2, myFC}));
+        REQUIRE(((myFC->getParent(0) == other1) && (myFC->getParent(1) == newMatmulWeight) && (myFC->getParent(2) == newAddBias)));
     }
     SECTION("replace with nothing") {
         std::shared_ptr<GraphView> g = std::make_shared<GraphView>("TestGraph");
@@ -323,13 +327,81 @@ TEST_CASE("[core/graph] GraphView(replaceWith)") {
         r3->addChild(r4);
         g->add({r1, r2, r3, r4});
         auto nodesToReplace = std::set<std::shared_ptr<Node>>({r2, r3});
-        auto graphToReplace = std::make_shared<GraphView>();
-        graphToReplace->add(nodesToReplace);
-        graphToReplace->replaceWith({});
+        auto newNodes = std::set<std::shared_ptr<Node>>({});
+        GraphView::replace(nodesToReplace, newNodes);
 
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({r1, r4}));
         REQUIRE((r1->output(0))[0].first == r4);
     }
+
+    SECTION("replace for tiling") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", 0, 0, 1, "other_input");
+        auto other1 = GenericOperator("Other", 1, 1, 1, "other1");
+        auto myConv = GenericOperator("Conv", 1, 1, 1, "myConv");
+        auto other2 = GenericOperator("Other", 1, 1, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myConv);
+        myConv->addChild(other2);
+        g->add({other1, myConv, other2});
+
+        // create tiled Conv
+        auto conv1 =  GenericOperator("Conv", 1, 1, 1, "myConv1");
+        auto conv2 =  GenericOperator("Conv", 1, 1, 1, "myConv2");
+        auto conv3 =  GenericOperator("Conv", 1, 1, 1, "myConv3");
+        auto conv4 =  GenericOperator("Conv", 1, 1, 1, "myConv4");
+        auto concat = GenericOperator("Concat", 4, 4, 1, "myConcat");
+        conv1->addChild(concat);
+        conv2->addChild(concat);
+        conv3->addChild(concat);
+        conv4->addChild(concat);
+
+        GraphView::replace({myConv}, {conv1, conv2, conv3, conv4, concat});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, conv1, conv2, conv3, conv4, concat, other2}));
+
+        GraphView::replace({conv1, conv2, conv3, conv4, concat}, {myConv});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myConv, other2}));
+    }
+
+    SECTION("Change every Nodes in a GraphView") {
+        auto matmulWeight0 = GenericOperator("Producer", 0, 0, 1, "matmul_w0");
+        auto addBias0 = GenericOperator("Producer", 0, 0, 1, "add_b0");
+        auto matmul0 = GenericOperator("MatMul", 1, 2, 1, "matmul0");
+        auto add0 = GenericOperator("Add", 1, 2, 1, "add0");
+        auto matmulWeight1 = GenericOperator("Producer", 0, 0, 1, "matmul_w1");
+        auto addBias1 = GenericOperator("Producer", 0, 0, 1, "add_b1");
+        auto matmul1 = GenericOperator("MatMul", 1, 2, 1, "matmul1");
+        auto add1 = GenericOperator("Add", 1, 2, 1, "add1");
+
+        matmulWeight0->addChild(matmul0, 0, 1);
+        addBias0->addChild(add0, 0, 1);
+        matmulWeight1->addChild(matmul1, 0, 1);
+        addBias1->addChild(add1, 0, 1);
+        matmul0->addChild(add0, 0, 0);
+        add0->addChild(matmul1, 0, 0);
+        matmul1->addChild(add1, 0, 0);
+
+        auto g = std::make_shared<GraphView>("TestGraph");
+        g->add({matmulWeight0, addBias0, matmulWeight1, addBias1, matmul0, add0, matmul1, add1});
+        auto newMatmulWeight0 = matmulWeight0->cloneSharedOperators();
+        auto newAddBias0 = addBias0->cloneSharedOperators();
+        auto newMatmulWeight1 = matmulWeight1->cloneSharedOperators();
+        auto newAddBias1 = addBias1->cloneSharedOperators();
+        auto fc0 = GenericOperator("FC", 1, 3, 1, "fc0");
+        auto fc1 = GenericOperator("FC", 1, 3, 1, "fc1");
+
+        newMatmulWeight0->addChild(fc0, 0, 1);
+        newAddBias0->addChild(fc0, 0, 2);
+        newMatmulWeight1->addChild(fc1, 0, 1);
+        newAddBias1->addChild(fc1, 0, 2);
+
+        GraphView::replace({matmul0, add0, matmulWeight0, addBias0}, {newMatmulWeight0, newAddBias0, fc0});
+        GraphView::replace({matmul1, add1, matmulWeight1, addBias1}, {newMatmulWeight1, newAddBias1, fc1});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({newMatmulWeight0, newAddBias0, newAddBias1, newMatmulWeight1, fc1, fc0}));
+    }
 }
 
 TEST_CASE("[GraphView] clone") {