diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index e708c168421216fa249f26eee1f2b2eb80b588fd..cc8763580076957d550c7c0702468a593e218569 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -31,18 +31,23 @@
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/Div.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/MetaOperatorDefs.hpp"
+#include "aidge/operator/Mul.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Pad.hpp"
 #include "aidge/operator/Producer.hpp"
+#include "aidge/operator/Pow.hpp"
 #include "aidge/operator/ReLU.hpp"
-#include "aidge/operator/Softmax.hpp"
 #include "aidge/operator/Scaling.hpp"
+#include "aidge/operator/Softmax.hpp"
+#include "aidge/operator/Sqrt.hpp"
+#include "aidge/operator/Sub.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 89ba148497709f0af475bbf953ff285c88036102..481099726843146173a37fcddc3bf69723b1a70e 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -322,26 +322,33 @@ public:
 
     /**
      * @brief Insert a node (newParentNode) as a parent of the passed node (childNode).
-     * 
+     *
      * @param childNode Node that gets a new parent.
      * @param newParentNode Inserted Node.
      * @param childInputTensorIdx Index of the input Tensor for the childNode linked to the inserted Node output.
      * @param newParentInputTensorIdx Index of the input Tensor for the newParentNode linked to the former parent of childNode.
      * @param newParentOutputTensorIdx Index of the output Tensor for the newParentNode linked to the childNode's input Tensor.
      */
-    void insertParent(NodePtr childNode, 
-                        NodePtr newParentNode, 
-                        IOIndex_t childInputTensorIdx, 
-                        IOIndex_t newParentInputTensorIdx, 
+    void insertParent(NodePtr childNode,
+                        NodePtr newParentNode,
+                        IOIndex_t childInputTensorIdx,
+                        IOIndex_t newParentInputTensorIdx,
                         IOIndex_t newParentOutputTensorIdx);
 
+
     /**
-     * @brief Replace the current GraphView with the set of given Nodes if possible
-     * @param newNodes Set of Nodes.
+     * @brief Replace a set of Nodes in every available GraphView with a new set of Nodes if possible.
+     * Both sets should include all the necessary Producers.
+     * @details Replaced Nodes are removed from every GraphView that references them.
+     * For the connections of the newNodes set to be made automatically, the
+     * oldNodes set should have exactly one input and one output Tensor.
+     * @param oldNodes actual set of shared_ptr<Node> to replace.
+     * @param newNodes new set of shared_ptr<Node>.
      * @return true
      * @return false
      */
-    bool replaceWith(std::set<NodePtr> newNodes);
+    static bool replace(const std::set<NodePtr>& oldNodes, const std::set<NodePtr>& newNodes);
+
     void updateInputNodes();
     /**
      * @brief Process from zero the set of output Nodes.
@@ -379,6 +386,12 @@ public:
      */
     std::shared_ptr<GraphView> cloneCallback(NodePtr(*cloneNode)(NodePtr)) const;
 
+    /**
+     * @brief Get the sum of the number of free dataInput connection for all inputNodes of the GraphView object.
+     * @return IOIndex_t
+     */
+    IOIndex_t getNbFreeDataInputs() const;
+
 private:
 ///////////////////////////////////////////////////////
 //        TENSOR MANAGEMENT
@@ -390,12 +403,6 @@ private:
      */
     IOIndex_t getNbDataInputs() const;
 
-    /**
-     * @brief Get the sum of the number of free dataInput connection for all inputNodes of the GraphView object.
-     * @return IOIndex_t
-     */
-    IOIndex_t getNbFreeDataInputs() const;
-
     /**
      * @brief Update the set of inputNodes with a new Node, checking if it can be
      * added and removing any Node not part of mInputNode anymore.
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 1d8449ac25cf8c31192da0c350c14cbfa50a48f4..f1d0a39d4bd7dba6990a46d61f7456c03244e44e 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -258,9 +258,7 @@ public:
   }
 
   inline void removeView(const std::shared_ptr<GraphView> &graphPtr) {
-    std::set<std::weak_ptr<GraphView>, weakCompare>::const_iterator viewIt = mViews.cbegin();
-    for (; (viewIt != mViews.cend()) && ((*viewIt).lock() != graphPtr) ; ++viewIt) {}
-    mViews.erase(*viewIt);
+    mViews.erase(graphPtr);
   }
 
   /**
@@ -402,7 +400,7 @@ public:
 
   /**
    * @brief  Get the set of pointers to connected node at a distance of a delta.
-   * @details the recution are cut 
+   * @details the recursion is cut
    * Return a nullptr is nofing found.
    * @param delta Input delta.
    * @return std::shared_ptr<Node>
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4213f979cf9d675f523a228095edc5606f9412ee
--- /dev/null
+++ b/include/aidge/operator/Div.hpp
@@ -0,0 +1,146 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_DIV_H_
+#define AIDGE_CORE_OPERATOR_DIV_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Div_Op : public Operator,
+    public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> {
+public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Div";
+
+    Div_Op()
+            : Operator(Type)
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Div_Op(const Div_Op& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Div_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Div_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty())
+            mOutput->resize(mInputs[0]->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
+        return *(mInputs[inputIdx].get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx < 2) && "Div Operator has 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Div Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Div_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setBackend(name);
+        mInputs[1]->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Div(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Div_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_DIV_H_ */
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4ea79fe52622b22f8ea8fbd9191d50d45e26acac
--- /dev/null
+++ b/include/aidge/operator/Mul.hpp
@@ -0,0 +1,146 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_MUL_H_
+#define AIDGE_CORE_OPERATOR_MUL_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Mul_Op : public Operator,
+    public Registrable<Mul_Op, std::string, std::unique_ptr<OperatorImpl>(const Mul_Op&)> {
+public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Mul";
+
+    Mul_Op()
+            : Operator(Type)
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Mul_Op(const Mul_Op& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Mul_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Mul_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty())
+            mOutput->resize(mInputs[0]->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
+        return *(mInputs[inputIdx].get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx < 2) && "Mul Operator has 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Mul Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Mul_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setBackend(name);
+        mInputs[1]->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Mul(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..732cf36b4ef7e7640648c542191acd02d0875a4f
--- /dev/null
+++ b/include/aidge/operator/Pow.hpp
@@ -0,0 +1,146 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_POW_H_
+#define AIDGE_CORE_OPERATOR_POW_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Pow_Op : public Operator,
+    public Registrable<Pow_Op, std::string, std::unique_ptr<OperatorImpl>(const Pow_Op&)> {
+public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Pow";
+
+    Pow_Op()
+            : Operator(Type)
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Pow_Op(const Pow_Op& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Pow_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Pow_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty())
+            mOutput->resize(mInputs[0]->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
+        return *(mInputs[inputIdx].get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx < 2) && "Pow Operator has 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Pow Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Pow_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setBackend(name);
+        mInputs[1]->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Pow(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..90b2ae6a8ae1311aef14e4eba4d3563a28a3d18e
--- /dev/null
+++ b/include/aidge/operator/Sqrt.hpp
@@ -0,0 +1,141 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SQRT_H_
+#define AIDGE_CORE_OPERATOR_SQRT_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Sqrt_Op : public Operator,
+    public Registrable<Sqrt_Op, std::string, std::unique_ptr<OperatorImpl>(const Sqrt_Op&)> {
+public:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Sqrt";
+
+    Sqrt_Op()
+            : Operator(Type)
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Sqrt_Op(const Sqrt_Op& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Sqrt_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Sqrt_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInput->empty())
+            mOutput->resize(mInput->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx == 0) && "Sqrt Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Sqrt Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Sqrt_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Sqrt(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Sqrt_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SQRT_H_ */
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..451cba08f58e7a580576531ce2a97c92fb9be3ae
--- /dev/null
+++ b/include/aidge/operator/Sub.hpp
@@ -0,0 +1,146 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SUB_H_
+#define AIDGE_CORE_OPERATOR_SUB_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Sub_Op : public Operator,
+    public Registrable<Sub_Op, std::string, std::unique_ptr<OperatorImpl>(const Sub_Op&)> {
+public:
+    // FIXME: change accessibility
+    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Sub";
+
+    Sub_Op()
+            : Operator(Type)
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Sub_Op(const Sub_Op& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Sub_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Sub_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Sub_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInputs[0]->empty())
+            mOutput->resize(mInputs[0]->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(static_cast<std::size_t>(inputIdx) < 2 && "wrong inputIdx for Add operator.");
+        return *(mInputs[inputIdx].get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx < 2) && "Sub Operator has 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInputs[inputIdx];
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Sub Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < 2 && "operator supports only 2 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Sub_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setBackend(name);
+        mInputs[1]->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) override {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInputs[0]->setDatatype(datatype);
+        mInputs[1]->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Sub(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
diff --git a/include/aidge/utils/Recipies.hpp b/include/aidge/utils/Recipies.hpp
index 894e56fae2e9c2f6bcf11e4e76a433f5c8058080..c110c9cf8e2ccc84112f7ac48b438f470ee21465 100644
--- a/include/aidge/utils/Recipies.hpp
+++ b/include/aidge/utils/Recipies.hpp
@@ -12,6 +12,9 @@
 #ifndef AIDGE_CORE_UTILS_RECIPIES_H_
 #define AIDGE_CORE_UTILS_RECIPIES_H_
 
+#include <memory>
+#include <set>
+
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
 
@@ -47,7 +50,7 @@ void removeFlatten(std::set<std::shared_ptr<Node>> nodes);
  * @param graphView Graph view to use graph matching on, in order to apply transfomrations.
  */
 void removeFlatten(std::shared_ptr<GraphView> graphView);
- 
+
 // FUSE BN + FC || CONV -> FC || CONV
 
 /**
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 555540045d01aebfe121422ea9e7a367065b9996..6ac2199b4ba59faba16c9815277ad134c6f183f4 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -26,7 +26,7 @@ void init_GraphView(py::module& m) {
           .def("save", &GraphView::save, py::arg("path"), py::arg("verbose") = false,
           R"mydelimiter(
           Save the GraphView as a Mermaid graph in a .md file at the specified location.
-          
+
           :param path: save location
           :type path: str
           )mydelimiter")
@@ -34,14 +34,14 @@ void init_GraphView(py::module& m) {
           .def("get_output_nodes", &GraphView::outputNodes,
           R"mydelimiter(
           Get set of output Nodes.
-          
+
           :rtype: list[Node]
           )mydelimiter")
 
           .def("get_input_nodes", &GraphView::inputNodes,
           R"mydelimiter(
           Get set of input Nodes.
-          
+
           :rtype: list[Node]
           )mydelimiter")
 
@@ -49,7 +49,7 @@ void init_GraphView(py::module& m) {
                py::arg("other_node"), py::arg("include_learnable_parameters") = true,
           R"mydelimiter(
           Include a Node to the current GraphView object.
-          
+
           :param other_node: Node to add
           :type oth_Node: Node
           :param includeLearnableParameter: include non-data inputs, like weights and biases. Default True.
@@ -66,18 +66,20 @@ void init_GraphView(py::module& m) {
                py::arg("fromTensor") = 0U, py::arg("toTensor") = gk_IODefaultIndex,
           R"mydelimiter(
           Include a Node to the current GraphView object.
-          
+
           :param other_node: Node to add
           :type oth_Node: Node
           :param includeLearnableParameter: include non-data inputs, like weights and biases. Default True.
           :type includeLearnableParameter
           )mydelimiter")
-          
-          .def("replace_with", &GraphView::replaceWith, py::arg("new_nodes"),
+
+          .def_static("replace", &GraphView::replace, py::arg("old_nodes"), py::arg("new_nodes"),
           R"mydelimiter(
-          Replace the current GraphView with the set of given Nodes if possible.
-          
-          :param new_nodes: Nodes with connections already taken care of.
+          Replace the old set of Nodes with the new set of given Nodes if possible in every GraphView.
+
+          :param old_nodes: Nodes actually connected in GraphViews.
+          :type old_nodes: Node
+          :param new_nodes: Nodes with inner connections already taken care of.
           :type new_nodes: Node
           :return: Whether any replacement has been made.
           :rtype: bool
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3492bf244952ba6ed0d77cb16de758e61fb26383
--- /dev/null
+++ b/python_binding/operator/pybind_Div.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Div.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Div(py::module& m) {
+    py::class_<Div_Op, std::shared_ptr<Div_Op>, Operator>(m, "DivOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Div_Op::getInputsName)
+    .def("get_outputs_name", &Div_Op::getOutputsName);
+
+    m.def("Div", &Div, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2627c99005b009769e8fbb97b1f5d79e2424c997
--- /dev/null
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Mul(py::module& m) {
+    py::class_<Mul_Op, std::shared_ptr<Mul_Op>, Operator>(m, "MulOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Mul_Op::getInputsName)
+    .def("get_outputs_name", &Mul_Op::getOutputsName);
+
+    m.def("Mul", &Mul, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..22866c5460381b6f494948c7410bcd67e7e46edb
--- /dev/null
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Pow.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Pow(py::module& m) {
+    py::class_<Pow_Op, std::shared_ptr<Pow_Op>, Operator>(m, "PowOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Pow_Op::getInputsName)
+    .def("get_outputs_name", &Pow_Op::getOutputsName);
+
+    m.def("Pow", &Pow, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b70171814662c861f19b3048b018260170d37491
--- /dev/null
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Sqrt.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Sqrt(py::module& m) {
+    py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, Operator>(m, "SqrtOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Sqrt_Op::getInputsName)
+    .def("get_outputs_name", &Sqrt_Op::getOutputsName);
+
+    m.def("Sqrt", &Sqrt, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..10c95939646a6b605f23c42618bfbdd00ceb6e2e
--- /dev/null
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Sub.hpp"
+#include "aidge/operator/Operator.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Sub(py::module& m) {
+    py::class_<Sub_Op, std::shared_ptr<Sub_Op>, Operator>(m, "SubOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Sub_Op::getInputsName)
+    .def("get_outputs_name", &Sub_Op::getOutputsName);
+
+    m.def("Sub", &Sub, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 04e39b11e58718dfcc5f9faef24b140132367700..a482191c78ff56b000e043cd7350ca1c150d1d6e 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -25,15 +25,20 @@ void init_AvgPooling(py::module&);
 void init_BatchNorm(py::module&);
 void init_Conv(py::module&);
 void init_ConvDepthWise(py::module&);
+void init_Div(py::module&);
 void init_FC(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
 void init_MatMul(py::module&);
 void init_MaxPooling(py::module&);
 void init_MetaOperatorDefs(py::module&);
+void init_Mul(py::module&);
 void init_Producer(py::module&);
+void init_Pow(py::module&);
 void init_ReLU(py::module&);
 void init_Softmax(py::module&);
+void init_Sqrt(py::module&);
+void init_Sub(py::module&);
 
 void init_Node(py::module&);
 void init_GraphView(py::module&);
@@ -67,14 +72,19 @@ void init_Aidge(py::module& m){
     init_BatchNorm(m);
     init_Conv(m);
     init_ConvDepthWise(m);
+    init_Div(m);
     init_FC(m);
     init_GenericOperator(m);
     init_LeakyReLU(m);
     init_MatMul(m);
     init_MaxPooling(m);
     init_MetaOperatorDefs(m);
+    init_Mul(m);
+    init_Pow(m);
     init_ReLU(m);
     init_Softmax(m);
+    init_Sqrt(m);
+    init_Sub(m);
 
     init_Producer(m);
     init_Match(m);
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 8f8f51c89bbcc380963f355f781e8fda940dcffc..367c9f10dffc0116ffa6cdcfd1841015441af8a1 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -17,6 +17,7 @@
 #include "aidge/utils/Types.h"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 ///////////////////////////////////////////////////////
 //        FUNCTIONAL DESCRIPTION
@@ -529,38 +530,72 @@ void Aidge::GraphView::insertParent(NodePtr childNode,
 }
 
 
-bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
-  // TODO : only supports one input/output node for now
-  assert(mNodes.size()>0 && "There must be at least one Node to replace");
+bool Aidge::GraphView::replace(const std::set<Aidge::NodePtr>& oldNodes, const std::set<Aidge::NodePtr>& newNodes) {
 
-  bool replacable;
-  std::shared_ptr<Node> previousInputNode = (*inputNodes().begin());
-  std::shared_ptr<Node> previousOutputNode = (*outputNodes().begin());
-  std::shared_ptr<Node> newOutputNode;
+    // TODO: handle the case where a node in oldNodes receives an input from another Node (not included in oldNodes) instead of a Producer
+    // How to distinguish it from data input?
+    // TODO: Parameter Tensors could be identified with their dimensions
+    // TODO: Take GraphView as input parameters since new Nodes should be connected whatever.
+    // It also avoids specifying each producer since they are automatically included
 
-  auto gNew = std::make_shared<GraphView>();
-  gNew->add(newNodes, false);
+    auto oldG = std::make_shared<GraphView>("oldG");
+    oldG->add(oldNodes, false);
+    auto newG = std::make_shared<GraphView>("newG");
+    newG->add(newNodes, false);
 
-  if (newNodes.empty()) {
-    replacable = (outputNodes().size() == 1) &&
-                 (inputNodes().size() == 1) &&
-                 ((*outputNodes().begin())->nbOutputs() == 1) &&
-                 ((*inputNodes().begin())->nbDataInputs() == 1);
-    newOutputNode = previousInputNode->input(0).first;
-  } else {
-    newOutputNode = (*gNew->outputNodes().begin());
-    replacable = (outputNodes().size() == gNew->outputNodes().size()) &&
-                 (outputNodes().size() == 1) &&
-                 (previousOutputNode->nbOutputs() == newOutputNode->nbOutputs());
-  }
+    if ((oldG->inputNodes().size() == 0) || (oldG->outputNodes().size() != 1)) {
+        return false;
+    }
+    if (!(newNodes.empty()) && ((newG->inputNodes().size() == 0) ||
+                                (newG->outputNodes().size() != 1))) {
+        return false;
+    }
+
+    // there is at least one inputNode in the old/new GraphView
+    std::shared_ptr<Node> firstPreviousInputNode = (*(oldG->inputNodes()).begin());
+    std::shared_ptr<Node> firstPreviousOutputNode = (*(oldG->outputNodes()).begin());
+
+    // find Node to link to new input Node
+    // Compute the number of inputs of firstPreviousInputNode that are not in the oldNodes set
+    std::size_t nbExternalInputs = 0;
+    std::shared_ptr<Node> externalInput = nullptr;
+    IOIndex_t externalInputId = gk_IODefaultIndex;
+    for (const auto& input : firstPreviousInputNode->inputs()) {
+        if (oldNodes.find(input.first) == oldNodes.end()) { // Node connected to another Node outside of oldG
+            nbExternalInputs++;
+            externalInput = input.first;
+            externalInputId = input.second;
+        }
+    }
+    if (nbExternalInputs > 1) {
+        AIDGE_INTERNAL_ASSERT("Too many inputs to link for oldNodes set");
+    }
+
+    if (oldG->inputNodes().size() > 1){
+        // One or no external input has been identified; check that every remaining input points to the same source
+        for (const auto& previousInputNode : oldG->inputNodes()) {
+            for (const auto& input : previousInputNode->inputs()) {
+                if (oldNodes.find(input.first) == oldNodes.end()) {
+                    if ( (externalInput != input.first) || (externalInputId != input.second) ) {
+                        return false; // an inputNode points to an external Node different from the registered one
+                    }
+                }
+            }
+        }
+    }
+
+    if (firstPreviousOutputNode->nbOutputs() != 1) {
+        return false;
+    }
 
-  if (replacable) {
-    auto copyOutputs = previousOutputNode->outputs();
+    // find Node to replicate output connections
+    std::shared_ptr<Node> newOutputNode = newNodes.empty() ? externalInput : *(newG->outputNodes().begin());
 
+    auto copyOutputs = firstPreviousOutputNode->outputs();
     // manage Views for newNodes
     // only keep common views to each node for the new set
-    std::set<std::shared_ptr<GraphView>> commonGraphViews =  (*mNodes.begin())->views();
-    for (const auto& nodePtr : mNodes) {
+    std::set<std::shared_ptr<GraphView>> commonGraphViews =  (*oldNodes.begin())->views();
+    for (const auto& nodePtr : oldNodes) {
       const auto nodeView = nodePtr->views();
       std::set<std::shared_ptr<GraphView>> intersection;
       std::set_intersection(commonGraphViews.begin(), commonGraphViews.end(),
@@ -568,32 +603,59 @@ bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
                           std::inserter(intersection, intersection.begin()));
       commonGraphViews = intersection;
     }
+    commonGraphViews.erase(oldG);
+    commonGraphViews.erase(newG);
 
     // clean Nodes to replace
-    std::set<std::shared_ptr<Node>> copyNode = mNodes;
-    for (auto& nodePtr : copyNode) { nodePtr->resetConnections(true); }
+    // Do not include common Nodes to avoid cleaning Producers linked to newNodes
+    std::set<std::shared_ptr<Node>> nodesToClean;
+    std::set_difference(oldNodes.begin(), oldNodes.end(),
+                          newNodes.begin(), newNodes.end(),
+                          std::inserter(nodesToClean, nodesToClean.begin()));
+    for (auto& nodePtr : nodesToClean) { nodePtr->resetConnections(true); }
 
     // copy output connections
     if (newOutputNode) {
-      for (IOIndex_t o = 0; o < previousOutputNode->nbOutputs(); ++o) {
-        auto outputPairs = copyOutputs[o];
-        for (const auto& onePair : outputPairs) {
-          newOutputNode->addChild(onePair.first, o, onePair.second);
+        for (IOIndex_t o = 0; o < firstPreviousOutputNode->nbOutputs(); ++o) {
+            auto outputPairs = copyOutputs[o];
+            for (const auto& onePair : outputPairs) {
+                newOutputNode->addChild(onePair.first, o, onePair.second);
+            }
         }
-      }
     }
+
+    // copy input connections
+    if (!newNodes.empty() && externalInput) {
+        for (const auto& newInputNode : newG->inputNodes()) {
+            IOIndex_t inputId = 0;
+            for (const auto& input : newInputNode->inputs()) {
+                if (newNodes.find(input.first) == newNodes.end()) {
+                    externalInput->addChild(newInputNode, externalInputId, inputId);
+                }
+                inputId++;
+            }
+        }
+    }
+
     // insert new Nodes in the right GraphViews
-    for (auto& graphPtr : commonGraphViews) {
-      graphPtr->add(newNodes, false);
-      if (newNodes.empty()) {
-        graphPtr->updateInputNodes();
-        graphPtr->updateOutputNodes();
-      }
+    for (const auto& graphPtr : commonGraphViews) {
+        graphPtr->add(newNodes, false);
+        if (newNodes.empty()) {
+            graphPtr->updateInputNodes();
+            graphPtr->updateOutputNodes();
+        }
     }
-  }
-  return replacable;
+
+    for (const auto& node : oldNodes) {
+      node->removeView(oldG);
+    }
+    for (const auto& node : newNodes) {
+      node->removeView(newG);
+    }
+    return true;
 }
 
+
 void Aidge::GraphView::updateInputNodes() {
   mInputNodes.clear();
   for (const std::shared_ptr<Node>& go_ptr : mNodes) {
diff --git a/src/recipies/FuseBatchNorm.cpp b/src/recipies/FuseBatchNorm.cpp
index e5e59582af68f66e6c54d09fac4cb1cc028493dd..4b2f7a811c022ee80eec98548049853d56951edb 100644
--- a/src/recipies/FuseBatchNorm.cpp
+++ b/src/recipies/FuseBatchNorm.cpp
@@ -116,15 +116,14 @@ void Aidge::fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes){
         bias->set<float>(output, biasValue);
 
     }
-    auto g = std::make_shared<GraphView>();
-    g->add(std::set<std::shared_ptr<Node>>({
+
+    GraphView::replace(std::set<std::shared_ptr<Node>>({
         batchnorm,
         batchnorm->input(1).first,
         batchnorm->input(2).first,
         batchnorm->input(3).first,
         batchnorm->input(4).first
-    }));
-    g->replaceWith({});
+        }), {});
 
 }
 
diff --git a/src/recipies/FuseMulAdd.cpp b/src/recipies/FuseMulAdd.cpp
index 1de79890f9b597c4baff7427e01d7217f9695a44..528d57e31a5ecf3f5a633a20205e79f7926a1f61 100644
--- a/src/recipies/FuseMulAdd.cpp
+++ b/src/recipies/FuseMulAdd.cpp
@@ -20,6 +20,8 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/GenericOperator.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
 // Graph Regex
 #include "aidge/graphmatching/GRegex.hpp"
 #include "aidge/graphmatching/NodeRegex.hpp"
@@ -47,34 +49,32 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
 
     // Step 1 : Create FC
     // Fetch the output dimension throught the bias size
-    auto producer_add_bias = add->input(1);
-    Tensor& bias_tensor = (producer_add_bias.first)->getOperator()->output(0);
+    std::shared_ptr<Node> bias = (add->getParent(1)) ? add->getParent(1)->cloneSharedOperators() : nullptr;
+
+    if (!(matmul->getParent(1))) {
+        AIDGE_INTERNAL_ASSERT("No weight detected to produce the fuseMulAdd recipe.");
+    }
+    std::shared_ptr<Node> weight = matmul->getParent(1)->cloneSharedOperators();
+    DimSize_t outSize = weight->getOperator()->output(0).dims<2>()[1];
 
     // Instanciate FC
     //std::shared_ptr<Node> fc = FC(dim[0], false, "Fc");
-    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(bias_tensor.dims()[0], false));
+    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(outSize, bias ? false : true));
 
     // Step 2 : Branch existing producers & create the others
     // link weights & bias
-    if (matmul->getParent(1)==nullptr) {
-        matmul->getParent(0)->addChild(fc, 0, 1);
-        printf("MatMul out[1] == nullptr !\n");
-    } else {
-        printf("MatMul out[1] != nullptr !\n");
-        if (matmul->getParent(0)!=nullptr)
-            matmul->getParent(0)->addChild(fc, 0, 0);
-        matmul->input(1).first->addChild(fc, 0, 1);
+    weight->addChild(fc, 0, 1);
+    if (bias) {
+        bias->addChild(fc, 0, 2);
     }
-    (producer_add_bias.first)->addChild(fc,0,2);
 
 
     // Step 3 : Update all graphviews that contains at least one node to replace
         // Case 1 : If all nodes are in a graph view : delete old nodes & branch input & output
         // Case 2 : If not all nodes are in a graph view : only delete the nodes from the graphview
-        // Maybe create a central mechanism to update automatically all graph views rather than each node have graphview presence memory ?
-    auto nodeToReplace = std::make_shared<GraphView>();
-    nodeToReplace->add(nodes, false);
-    nodeToReplace->replaceWith({fc});
+        // Maybe create a central mechanism to update automatically all graph views rather than each node have graphview presence memory?
+    auto newNodes = std::set<std::shared_ptr<Node>>({fc, weight, fc->getParent(2)});
+    GraphView::replace({matmul, add, add->getParent(1), matmul->getParent(1)}, newNodes);
 
 }
 
diff --git a/src/recipies/RemoveFlatten.cpp b/src/recipies/RemoveFlatten.cpp
index 9096c107ba505f5f18993a761273552408db721b..fdfdbfd4aea7543dde31d5f5d4845e54e930feac 100644
--- a/src/recipies/RemoveFlatten.cpp
+++ b/src/recipies/RemoveFlatten.cpp
@@ -30,10 +30,8 @@ namespace Aidge {
                 flatten = element;
             }
         }
-        auto g = std::make_shared<GraphView>();
-        // TODO : avoid using replace_with and use a remove method instead
-        g->add(std::set<std::shared_ptr<Node>>({flatten}));
-        g->replaceWith({});
+
+        GraphView::replace({flatten}, {});
     }
 
     void removeFlatten(std::shared_ptr<GraphView> graphView){
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 9f014364636c70031b522b09c893e1144af3f133..dbba1a7d698641d0858f6c3d2f15c4c7ff610261 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -12,6 +12,7 @@
 #include <cassert>
 #include <map>
 #include <memory>
+#include <set>
 #include <string>
 
 #include <catch2/catch_test_macros.hpp>
@@ -277,7 +278,8 @@ TEST_CASE("Graph Forward dims", "[GraphView]") {
     }
 }
 
-TEST_CASE("[core/graph] GraphView(replaceWith)") {
+
+TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
     SECTION("replace small pattern") {
         // create original graph
         std::shared_ptr<GraphView> g = std::make_shared<GraphView>("TestGraph");
@@ -298,19 +300,21 @@ TEST_CASE("[core/graph] GraphView(replaceWith)") {
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({matmulWeight, addBias, other1, other2, matmul, add}));
 
         // create graph to replace
-        std::shared_ptr<GraphView> nodeToReplace = std::make_shared<GraphView>();
-        nodeToReplace->add({matmul, add}, false);
+        std::set<std::shared_ptr<Node>> nodeToReplace = std::set<std::shared_ptr<Node>>({matmulWeight, addBias, matmul, add});
 
         // create replacing graph
-        std::shared_ptr<Node> newNode = GenericOperator("FC", 1, 3, 1, "fc");
-        other1->addChild(newNode);
-        matmulWeight->addChild(newNode, 0, 1);
-        addBias->addChild(newNode, 0, 2);
+        std::shared_ptr<Node> myFC = GenericOperator("FC", 1, 3, 1, "fc");
+        auto newMatmulWeight = matmulWeight->cloneSharedOperators();
+        newMatmulWeight->addChild(myFC, 0, 1);
+        auto newAddBias = addBias->cloneSharedOperators();
+        newAddBias->addChild(myFC, 0, 2);
+        std::set<std::shared_ptr<Node>> newNodes = std::set<std::shared_ptr<Node>>({myFC, newMatmulWeight, newAddBias});
 
         // replace
-        nodeToReplace->replaceWith({newNode});
+        GraphView::replace(nodeToReplace, newNodes);
 
-        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({matmulWeight, addBias, other1, other2, newNode}));
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({newMatmulWeight, newAddBias, other1, other2, myFC}));
+        REQUIRE(((myFC->getParent(0) == other1) && (myFC->getParent(1) == newMatmulWeight) && (myFC->getParent(2) == newAddBias)));
     }
     SECTION("replace with nothing") {
         std::shared_ptr<GraphView> g = std::make_shared<GraphView>("TestGraph");
@@ -323,13 +327,81 @@ TEST_CASE("[core/graph] GraphView(replaceWith)") {
         r3->addChild(r4);
         g->add({r1, r2, r3, r4});
         auto nodesToReplace = std::set<std::shared_ptr<Node>>({r2, r3});
-        auto graphToReplace = std::make_shared<GraphView>();
-        graphToReplace->add(nodesToReplace);
-        graphToReplace->replaceWith({});
+        auto newNodes = std::set<std::shared_ptr<Node>>({});
+        GraphView::replace(nodesToReplace, newNodes);
 
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({r1, r4}));
         REQUIRE((r1->output(0))[0].first == r4);
     }
+
+    SECTION("replace for tiling") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", 0, 0, 1, "other_input");
+        auto other1 = GenericOperator("Other", 1, 1, 1, "other1");
+        auto myConv = GenericOperator("Conv", 1, 1, 1, "myConv");
+        auto other2 = GenericOperator("Other", 1, 1, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myConv);
+        myConv->addChild(other2);
+        g->add({other1, myConv, other2});
+
+        // create tiled Conv
+        auto conv1 =  GenericOperator("Conv", 1, 1, 1, "myConv1");
+        auto conv2 =  GenericOperator("Conv", 1, 1, 1, "myConv2");
+        auto conv3 =  GenericOperator("Conv", 1, 1, 1, "myConv3");
+        auto conv4 =  GenericOperator("Conv", 1, 1, 1, "myConv4");
+        auto concat = GenericOperator("Concat", 4, 4, 1, "myConcat");
+        conv1->addChild(concat);
+        conv2->addChild(concat);
+        conv3->addChild(concat);
+        conv4->addChild(concat);
+
+        GraphView::replace({myConv}, {conv1, conv2, conv3, conv4, concat});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, conv1, conv2, conv3, conv4, concat, other2}));
+
+        GraphView::replace({conv1, conv2, conv3, conv4, concat}, {myConv});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myConv, other2}));
+    }
+
+    SECTION("Change every Nodes in a GraphView") {
+        auto matmulWeight0 = GenericOperator("Producer", 0, 0, 1, "matmul_w0");
+        auto addBias0 = GenericOperator("Producer", 0, 0, 1, "add_b0");
+        auto matmul0 = GenericOperator("MatMul", 1, 2, 1, "matmul0");
+        auto add0 = GenericOperator("Add", 1, 2, 1, "add0");
+        auto matmulWeight1 = GenericOperator("Producer", 0, 0, 1, "matmul_w1");
+        auto addBias1 = GenericOperator("Producer", 0, 0, 1, "add_b1");
+        auto matmul1 = GenericOperator("MatMul", 1, 2, 1, "matmul1");
+        auto add1 = GenericOperator("Add", 1, 2, 1, "add1");
+
+        matmulWeight0 -> addChild(matmul0, 0, 1);
+        addBias0 -> addChild(add0, 0, 1);
+        matmulWeight1 -> addChild(matmul1, 0, 1);
+        addBias1 -> addChild(add1, 0, 1);
+        matmul0 -> addChild(add0, 0, 0);
+        add0 -> addChild(matmul1, 0, 0);
+        matmul1 -> addChild(add1, 0, 0);
+
+        auto g = std::make_shared<GraphView>("TestGraph");
+        g -> add({matmulWeight0, addBias0, matmulWeight1, addBias1, matmul0, add0, matmul1, add1});
+        auto newMatmulWeight0 = matmulWeight0->cloneSharedOperators();
+        auto newAddBias0 = addBias0->cloneSharedOperators();
+        auto newMatmulWeight1 = matmulWeight1->cloneSharedOperators();
+        auto newAddBias1 = addBias1->cloneSharedOperators();
+        auto fc0 = GenericOperator("FC", 1, 3, 1, "fc0");
+        auto fc1 = GenericOperator("FC", 1, 3, 1, "fc1");
+
+        newMatmulWeight0 -> addChild(fc0, 0, 1);
+        newAddBias0 -> addChild(fc0, 0, 2);
+        newMatmulWeight1 -> addChild(fc1, 0, 1);
+        newAddBias1 -> addChild(fc1, 0, 2);
+
+        GraphView::replace({matmul0, add0, matmulWeight0, addBias0}, {newMatmulWeight0, newAddBias0, fc0});
+        GraphView::replace({matmul1, add1, matmulWeight1, addBias1}, {newMatmulWeight1, newAddBias1, fc1});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({newMatmulWeight0, newAddBias0, newAddBias1, newMatmulWeight1, fc1, fc0}));
+    }
 }
 
 TEST_CASE("[GraphView] clone") {