diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index dda3d8ee459e9f089f817f7222d717bf75ede0f5..766c6ba72c44293834f130c76b7c21881ef10752 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -257,7 +257,7 @@ public:
      * @brief Get the operator with the corresponding name if it is in the
      * GraphView.
      * @param nodeName Name of the node.
-     * @return NodePtr returns a new empty node if the one asked for
+     * @return NodePtr returns nullptr if the node asked for
      * was not found.
      */
     NodePtr getNode(const std::string& nodeName) const;
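
Callers of getNode() must now test the returned pointer instead of relying on the old printf-and-exit behaviour. A minimal sketch of the new contract (the graph and node names are placeholders):

    auto node = graphView->getNode("conv1");
    if (node == nullptr) {
        // no node with this name in the GraphView: handle the miss explicitly
    }
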
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 469d8485afe39692847ad88726ebca5926708c84..f9bd2c619ed2ca35400f340751f4502b1e862a5e 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -94,22 +94,24 @@ public:
     }
 
 
-    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
-    computeReceptiveField(const std::size_t firstIdx,
+    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
+    computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
                             const std::vector<DimSize_t>& outputDims,
                             const IOIndex_t outputIdx = 0) const override final
     {
         if (outputIdx != 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "AvgPooling_Op Operator has only one output Tensor.");
         }
+        if (firstEltDims.size() != outputDims.size()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "firstEltDims and outputDims should have as many elements as the output Tensor has dimensions.");
+        }
         if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
             // Offset
-            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
-            std::vector<DimSize_t> inputIdxDims = outputIdxDims;
+            std::vector<DimSize_t> inputIdxDims = firstEltDims;
 
             for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
                 }
             }
 
@@ -126,8 +128,8 @@ public:
                             + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
                 inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
             }
-            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
+            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
             return res;
         }
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
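
With the new signature, the receptive field is both requested and returned as coordinates instead of flat indices. A usage sketch, assuming op points to a forwarded 4-D (N,C,H,W) AvgPooling operator:

    const std::vector<Aidge::DimSize_t> firstEltDims{0, 0, 0, 0};  // coordinates of the first output element
    const std::vector<Aidge::DimSize_t> outputDims{1, 3, 8, 8};    // extent of the requested output patch
    const auto field = op->computeReceptiveField(firstEltDims, outputDims);
    // field[0].first  : coordinates of the first input element of the patch
    // field[0].second : dimensions of the input region feeding that patch
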
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 194ac313dd7f9b22c55fdbe7e0e30d37d816bcb8..7fa99b08461593d0149e7cb472cb607025a3b6fd 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -119,19 +119,21 @@ public:
     }
 
 
-std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
         if (outputIdx != 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has only one output Tensor.");
         }
+        if (firstEltDims.size() != outputDims.size()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "firstEltDims and outputDims should have as many elements as the output Tensor has dimensions.");
+        }
         if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
             // Offset
-            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
-            auto inputIdxDims = outputIdxDims; // batch idx is the same
+            auto inputIdxDims = firstEltDims; // batch idx is the same
             inputIdxDims[1] = 0; // each channel is used so start with the first one
 
             for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
                 }
             }
 
@@ -155,17 +157,17 @@ std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveFiel
                 weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
             }
             std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
-            weightIdxDims[0] = outputIdxDims[1];
+            weightIdxDims[0] = firstEltDims[1];
 
             // Bias
             const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-            const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
+            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
 
             // Result
-            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(0)->getIdx(inputIdxDims), inputDims));
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(1)->getIdx(weightIdxDims), weightDims));
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(2)->getIdx(biasIdxDims), biasDims));
+            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
             return res;
         }
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
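
For Conv, the result now carries one (coordinates, dimensions) pair per input: data, weight and bias. A sketch of unpacking them, assuming op points to a forwarded Conv operator:

    const auto field = op->computeReceptiveField({0, 0, 0, 0}, {1, 4, 8, 8});
    const auto& dataField   = field[0];  // .first: input coordinates, .second: input patch dims
    const auto& weightField = field[1];  // weight slice matching the requested output channels
    const auto& biasField   = field[2];  // corresponding bias slice
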
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 6f1f3f7ffbaf8dd750f374f2b391ccc90fad8254..cc687622916b0fd27fc2cb777bd50cbfbb7d3949 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -115,18 +115,20 @@ public:
         }
     }
 
-    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
         if (outputIdx != 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "ConvDepthWise_Op Operator has only one output Tensor.");
         }
+        if (firstEltDims.size() != outputDims.size()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "firstEltDims and outputDims should have as many elements as the output Tensor has dimensions.");
+        }
         if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
             // Offset
-            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
-            auto inputIdxDims = outputIdxDims; // batch idx is the same
+            auto inputIdxDims = firstEltDims; // batch idx is the same
 
             for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
                 }
             }
 
@@ -149,17 +151,17 @@ public:
                 weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
             }
             std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
-            weightIdxDims[0] = outputIdxDims[1];
+            weightIdxDims[0] = firstEltDims[1];
 
             // Bias
             const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-            const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
+            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
 
             // Result
-            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(0)->getIdx(inputIdxDims), inputDims));
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(1)->getIdx(weightIdxDims), weightDims));
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(2)->getIdx(biasIdxDims), biasDims));
+            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
             return res;
         }
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 652cecd44537963c3ee4743729d2e98c569e7de6..025b7278c2cb48f859e7e5401288ecdbff3c1525 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -99,6 +99,8 @@ public:
         assert(false && "not implemented");
     }
 
+    inline bool isAtomic() const noexcept override final { return false; }
+
 };
 
 inline std::shared_ptr<Node> MetaOperator(const char *type,
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 5cd35be72aa4ecf880818aaf10dddbb11735e53e..32293eaa548498fac16bdfa526d5c0f8c4bcd199 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -157,6 +157,8 @@ public:
         return mOperatorType;
     }
 
+    virtual inline bool isAtomic() const noexcept { return true; }
+
     inline IOIndex_t nbInputs() const noexcept { return mNbData+mNbParam; };
     inline IOIndex_t nbData() const noexcept { return mNbData; };
     inline IOIndex_t nbParam() const noexcept { return mNbParam; };
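
isAtomic() defaults to true on Operator and is overridden to false by the meta-operator class, so graph passes can detect composite operators without a dynamic_cast. A hypothetical traversal sketch (not part of this patch):

    for (const auto& nodePtr : graphView->getNodes()) {
        if (!nodePtr->getOperator()->isAtomic()) {
            // composite operator: expand its inner micro-graph before scheduling
        }
    }
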
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index b956da474311b5863690f5a5e40329e443f1345a..504a416488651d43126a60981cd8afe0f95821f2 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -100,7 +100,7 @@ public:
-     * @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
-     * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
+     * @return std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
+     * For each dataInput Tensor of the Operator, the coordinates of the first element and the dimensions of the feature area.
      */
-    virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
+    virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
     virtual void computeOutputDims();
     virtual bool outputDimsForwarded() const;
     ///////////////////////////////////////////////////
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index a80bac8d9097a91088ead24af2651548e09a8b75..9b93b9448fb04be616497b6961c4692a5a846303 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -29,17 +29,17 @@ enum class SliceAttr { Starts, Ends, Axes };
 class Slice_Op
     : public OperatorTensor,
       public Registrable<Slice_Op, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op &)>,
-      public StaticAttributes<SliceAttr, std::vector<std::int32_t>, std::vector<std::int32_t>, std::vector<std::int32_t>> {
+      public StaticAttributes<SliceAttr, std::vector<std::size_t>, std::vector<std::size_t>, std::vector<std::size_t>> {
 public:
     static const std::string Type;
 
     Slice_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int32_t>, std::vector<std::int32_t>, std::vector<std::int32_t>>;
+    using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::size_t>, std::vector<std::size_t>, std::vector<std::size_t>>;
     template <SliceAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    Slice_Op(const std::vector<std::int32_t>& starts, const std::vector<std::int32_t>&  ends, const std::vector<std::int32_t>& axes)
+    Slice_Op(const std::vector<std::size_t>& starts, const std::vector<std::size_t>&  ends, const std::vector<std::size_t>& axes)
         : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<SliceAttr::Starts>(starts),
                       attr<SliceAttr::Ends>(ends),
@@ -85,9 +85,9 @@ public:
 };
 
 
-inline std::shared_ptr<Node> Slice(const std::vector<std::int32_t> starts,
-                                   const std::vector<std::int32_t> ends,
-                                   const std::vector<std::int32_t> axes, 
+inline std::shared_ptr<Node> Slice(const std::vector<std::size_t> starts,
+                                   const std::vector<std::size_t> ends,
+                                   const std::vector<std::size_t> axes,
                                    const std::string &name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes), name);
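
With the attributes switched to std::size_t, the factory takes unsigned coordinates directly. A usage sketch with illustrative values; given computeOutputDims() in Slice.cpp below, ends are treated as inclusive:

    auto mySlice = Aidge::Slice({0, 0, 10, 10},  // starts, one entry per listed axis
                                {0, 2, 19, 19},  // ends (inclusive)
                                {0, 1, 2, 3},    // axes the slice applies to
                                "slice1");
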
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 554c535f229af0ab5b59fa6f57607c7bacd872fa..377f991a7bb0d6c7c2e8a63198218f878da64f13 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -616,11 +616,11 @@ std::shared_ptr<Aidge::Node>
 Aidge::GraphView::getNode(const std::string& nodeName) const {
   std::map<std::string, std::shared_ptr<Node>>::const_iterator it =
       mNodeRegistry.find(nodeName);
-  if (it != mNodeRegistry.end()) {
+  if (it != mNodeRegistry.cend()) {
     return it->second;
   } else {
     printf("No Node named %s in the current GraphView.\n", nodeName.c_str());
-    exit(-1);
+    return nullptr;
   }
 }
 
@@ -760,14 +760,6 @@ bool Aidge::GraphView::replace(const std::set<Aidge::NodePtr>& oldNodes, const s
         return false;
     }
 
-    for (const auto& nodePtr : oldNodes) {
-        for (const auto& g : commonGraphViews) {
-            g -> remove(nodePtr, false);
-            g -> updateInputsOutputsDelete(nodePtr);
-        }
-        nodePtr -> resetConnections(true);
-    }
-
     if ((oldOI.size() == newOI.size()) &&
         (oldOO.size() == newOO.size())) {
         // Case 1
@@ -793,7 +785,7 @@ bool Aidge::GraphView::replace(const std::set<Aidge::NodePtr>& oldNodes, const s
                         inputParents[i].first -> addChild(outputChildren[i].first, inputParents[i].second, outputChildren[i].second);
                 }
             }
-            else if (oldOI.size() == 1) {
+            else if ((oldOI.size() == 1) && (inputParents[0].first)) {
                 for (std::size_t i = 0; i < oldOI.size(); ++i) {
                     inputParents[0].first -> addChild(outputChildren[i].first, inputParents[0].second, outputChildren[i].second);
                 }
@@ -804,13 +796,15 @@ bool Aidge::GraphView::replace(const std::set<Aidge::NodePtr>& oldNodes, const s
             ((oldOO.size() == newOO.size()))
         ) {
             // Case 2
-            if ((oldOI.size() == 1)) {
+            if ((oldOI.size() == 1) && (inputParents[0].first)) {
                 for (std::size_t i = 0; i < newOI.size(); ++i) {
                     inputParents[0].first -> addChild(newOI[i].first, inputParents[0].second, newOI[i].second);
                 }
             } else {
                 for (std::size_t i = 0; i < oldOI.size(); ++i) {
-                    inputParents[i].first -> addChild(newOI[0].first, inputParents[i].second, newOI[0].second);
+                    if (inputParents[i].first) {
+                        inputParents[i].first -> addChild(newOI[0].first, inputParents[i].second, newOI[0].second);
+                    }
                 }
             }
             for (std::size_t o = 0; o < oldOO.size(); ++o) {
@@ -829,6 +823,26 @@ bool Aidge::GraphView::replace(const std::set<Aidge::NodePtr>& oldNodes, const s
             return false;
         }
     }
+
+    auto oldGOutputs = oldG->outputNodes();
+    for (const auto& nodePtr : oldNodes) {
+        bool removeFromGraphs = true;
+        if (std::find(oldGOutputs.cbegin(), oldGOutputs.cend(), nodePtr) == oldGOutputs.cend()) {
+            for (const auto& chPtr : nodePtr->getChildren()) {
+                if (oldNodes.find(chPtr) == oldNodes.cend()) {
+                    removeFromGraphs = false;
+                }
+            }
+        }
+        if (removeFromGraphs) {
+            for (const auto& g : commonGraphViews) {
+                g -> remove(nodePtr, false);
+                g -> updateInputsOutputsDelete(nodePtr);
+            }
+            nodePtr -> resetConnections(true);
+        }
+    }
+
     for (const auto& nodePtr : newNodes) {
         for (const auto& g : commonGraphViews) {
             g -> add(nodePtr);
@@ -934,10 +949,10 @@ void Aidge::GraphView::updateInputsOutputsNew(std::shared_ptr<Node> newNode) {
 
   // Check if node outputs are outputs for the GraphView and add them to the output list if so
   IOIndex_t outputIdx = 0;
-  for (auto orderedChilds : newNode->getOrderedChildren()) {
+  for (const auto& orderedChilds : newNode->getOrderedChildren()) {
     bool noInsideConnection = true;
-    for (auto ch_ptr : orderedChilds) {
-      if (mNodes.find(ch_ptr) != mNodes.end()) {
+    for (const auto& ch_ptr : orderedChilds) {
+      if (mNodes.find(ch_ptr) != mNodes.cend()) {
         noInsideConnection = false;
         break;
       }
@@ -946,7 +961,7 @@ void Aidge::GraphView::updateInputsOutputsNew(std::shared_ptr<Node> newNode) {
     if (noInsideConnection) {
       const auto val = std::make_pair(newNode, outputIdx);
       // Output may be already be present (see addChild() with a node already in GraphView)
-      if (std::find(mOutputNodes.begin(), mOutputNodes.end(), val) == mOutputNodes.end()) {
+      if (std::find(mOutputNodes.cbegin(), mOutputNodes.cend(), val) == mOutputNodes.cend()) {
         newOutputsInsertionPoint = mOutputNodes.insert(newOutputsInsertionPoint, val);
         newOutputsInsertionPoint = std::next(newOutputsInsertionPoint);
       }
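
The deferred removal matters when a Producer feeds several consumers: it must survive a replace() that removes only one of them. A reduced sketch of the scenario covered by the new unit test below (names and shapes illustrative):

    auto conv1 = Conv(5, 5, {1, 1}, "conv1");
    auto conv2 = Conv(5, 5, {1, 1}, "conv2");
    auto w = Producer(std::make_shared<Tensor>(), "sharedWeight");
    w->addChild(conv1, 0, 1);  // the same weight feeds both convolutions
    w->addChild(conv2, 0, 1);
    // "sharedWeight" keeps a child (conv2) outside the replaced set,
    // so replace() now leaves it in the surrounding GraphViews
    GraphView::replace({conv1, conv1->getParent(1)}, {ReLU("relu1")});
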
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 1237fdc0b5565681ab1a6af6d88f74a48cbd5b57..7a9d89dae2bb3029daa0f266056ea83b981d5087 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -88,8 +88,8 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aid
 }
 
 
-std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::OperatorTensor::computeReceptiveField(
-        const std::size_t firstIdx,
+std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>> Aidge::OperatorTensor::computeReceptiveField(
+        const std::vector<DimSize_t>& firstEltDims,
         const std::vector<Aidge::DimSize_t>& outputDims,
         const Aidge::IOIndex_t outputIdx) const
 {
@@ -103,14 +103,13 @@ std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::Operat
     if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
     }
-    const auto outputIdxDims = getOutput(0)->getCoord(firstIdx);
     for (DimIdx_t i = 0; i < outputDims.size(); ++i) {
-        if (((outputDims[i] + outputIdxDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+        if (((outputDims[i] + firstEltDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
         }
     }
     // return the same Tensor description as given in function parameter for each data input
-    return std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::size_t, std::vector<Aidge::DimSize_t>>(firstIdx, outputDims));
+    return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims));
 }
 
 void Aidge::OperatorTensor::computeOutputDims() {
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 3849f2a17edef1166a3a1ff56679785f354abb2c..880dd8f9ee6acbd6a19ee3cafafbabf441168c72 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -8,16 +8,17 @@
  * SPDX-License-Identifier: EPL-2.0
  *
  ********************************************************************************/
+#include "aidge/operator/Slice.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
 #include <cassert>
 #include <cstddef>
 #include <string>
-#include <vector>
 #include <utility>
+#include <vector>
 
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/Slice.hpp"
-#include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
 
 const std::string Aidge::Slice_Op::Type = "Slice";
 
@@ -29,19 +32,18 @@ void Aidge::Slice_Op::computeOutputDims() {
 
     DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
     std::vector<DimSize_t> outDims = getInput(0)->dims();
-    for(std::size_t i=0; i<nbAxes;++i)
-    {
+    for (std::size_t i = 0; i < nbAxes; ++i) {
         // For each slice operation get the params and cast them to size_t
         std::int64_t axis_ = this->template getAttr<SliceAttr::Axes>()[i];
         std::int64_t start_ = this->template getAttr<SliceAttr::Starts>()[i];
         std::int64_t end_ = this->template getAttr<SliceAttr::Ends>()[i];
-        std::size_t axis = axis_>=0?axis_:axis_+getInput(0)->nbDims();
-        std::size_t start = start_>=0?start_:start_+getInput(0)->dims()[axis];
-        std::size_t end = end_>=0?end_:end_+getInput(0)->dims()[axis];
+        std::size_t axis = axis_ >= 0 ? axis_ : axis_ + getInput(0)->nbDims();
+        std::size_t start = start_ >= 0 ? start_ : start_ + getInput(0)->dims()[axis];
+        std::size_t end = end_ >= 0 ? end_ : end_ + getInput(0)->dims()[axis];
 
         std::size_t sliceLength = end - start + 1;
         // Check if slice length is valid
-        if(sliceLength>getInput(0)->dims()[axis])
+        if (sliceLength > getInput(0)->dims()[axis])
             AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds");
         outDims[axis] = sliceLength;
     }
diff --git a/src/recipies/FuseMulAdd.cpp b/src/recipies/FuseMulAdd.cpp
index d37f4749635b2bf76d10f7f8de3a44e254c56347..322b1d9a0632b893a912c6225ac5b13d63278f8d 100644
--- a/src/recipies/FuseMulAdd.cpp
+++ b/src/recipies/FuseMulAdd.cpp
@@ -38,9 +38,8 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     // Fetch the output dimension throught the bias size
     std::shared_ptr<Node> bias = (addNode->getParent(1)) ? addNode->getParent(1)->cloneSharedOperators() : nullptr;
 
-    if (!(matmulNode->getParent(1))) {
-        AIDGE_INTERNAL_ASSERT("No weight detected to produce the fuseMulAdd recipe.");
-    }
+    AIDGE_ASSERT(matmulNode->getParent(1), "No weight detected to produce the fuseMulAdd recipe.");
+
     std::shared_ptr<Node> weight = matmulNode->getParent(1)->cloneSharedOperators();
     const DimSize_t outSize = std::dynamic_pointer_cast<MatMul_Op>(matmulNode->getOperator()) -> getAttr<DimSize_t>("OutChannels");
 
diff --git a/src/recipies/HorizontalTiling.cpp b/src/recipies/HorizontalTiling.cpp
index f1d8d3bdd2acfddc26f664913b2fa6c3217b8919..48d8cfc0b9011e88dea0c1d605cb8c72bfe18d96 100644
--- a/src/recipies/HorizontalTiling.cpp
+++ b/src/recipies/HorizontalTiling.cpp
@@ -11,6 +11,7 @@
 
 #include <set>
 #include <memory>
+#include <numeric>   // std::iota
 #include <vector>
 #include <utility>
 
@@ -75,13 +76,19 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
     }
 
     for (IOIndex_t i = 0; currentFirstDims[axis] < outTensor->dims()[axis]; currentFirstDims[axis] += outputDims[axis], ++i) {
-        const auto inputDims = op->computeReceptiveField(outTensor->getIdx(currentFirstDims), outputDims, 0);
+        const auto inputDims = op->computeReceptiveField(currentFirstDims, outputDims, 0);
         auto newNode = node -> clone(); // no input associated to clones
         newNode -> setName(node->name() + "_" + std::to_string(currentFirstDims[axis]));
         clonedInputs[1] -> addChild(newNode, 0, 1);
         clonedInputs[2] -> addChild(newNode, 0, 2);
         // Slice for input and each parameter
-        auto slice = Slice(inputDims[0].first, inputDims[0].second, "Slice_" + std::to_string(currentFirstDims[axis]));
+        auto inputDimsEnd = inputDims[0].first;
+        for (std::size_t dim = 0; dim < inputDimsEnd.size(); ++dim) {
+            inputDimsEnd[dim] += inputDims[0].second[dim];
+        }
+        std::vector<std::size_t> usedDims(inputDimsEnd.size());
+        std::iota(usedDims.begin(), usedDims.end(), static_cast<std::size_t>(0));
+        auto slice = Slice(inputDims[0].first, inputDimsEnd, usedDims, "Slice_" + std::to_string(currentFirstDims[axis]));
         slice -> addChild(newNode, 0, 0);
         newNode -> addChild(concat, 0, i);
 
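
A standalone sketch of the coordinate arithmetic introduced above: the slice end is the element-wise sum of the patch origin and its extent, and the axes vector simply enumerates every dimension:

    #include <cstddef>
    #include <numeric>
    #include <vector>

    std::vector<std::size_t> makeSliceEnd(const std::vector<std::size_t>& start,
                                          const std::vector<std::size_t>& extent) {
        std::vector<std::size_t> end = start;
        for (std::size_t d = 0; d < end.size(); ++d) {
            end[d] += extent[d];  // end = start + extent on each axis
        }
        return end;
    }
    // axes covering every dimension of a rank-4 tensor:
    //   std::vector<std::size_t> axes(4);
    //   std::iota(axes.begin(), axes.end(), static_cast<std::size_t>(0));  // {0, 1, 2, 3}
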
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index acbea04a27a0b6be22105bb73fda53fedf621235..ebbfb3ad89721eb4f1390c3efca475acbb0b6f46 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -23,6 +23,8 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Testing.hpp"
 #include "aidge/operator/Conv.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/graph/OpArgs.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/Producer.hpp"
 
@@ -589,6 +591,55 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
 
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({newMatmulWeight0, newAddBias0, newAddBias1, newMatmulWeight1, fc1, fc0}));
     }
+
+    SECTION("Nodes with shared parameters") {
+
+        auto myConv1 = Conv(1, 5, {1,1}, "conv1");
+        auto myConv2 = Conv(5, 5, {1,1}, "conv2");
+        auto myConv3 = Conv(5, 5, {1,1}, "conv3");
+        auto myConv4 = Conv(5, 5, {1,1}, "conv4");
+        auto myConv5 = Conv(5, 5, {1,1}, "conv5");
+
+        auto sharedWeightTensor = std::make_shared<Tensor>();
+        sharedWeightTensor->resize({5,5,1,1});
+        auto sharedWeight = Producer(sharedWeightTensor, "sharedWeight");
+        sharedWeight -> addChild(myConv2, 0, 1);
+        sharedWeight -> addChild(myConv3, 0, 1);
+        sharedWeight -> addChild(myConv4, 0, 1);
+
+        auto sharedBiasTensor = std::make_shared<Tensor>();
+        sharedBiasTensor->resize({5});
+        auto sharedBias = Producer(sharedBiasTensor, "sharedBias");
+        sharedBias -> addChild(myConv2, 0, 2);
+        sharedBias -> addChild(myConv3, 0, 2);
+        sharedBias -> addChild(myConv4, 0, 2);
+
+        auto g = Sequential({
+            myConv1,
+            myConv2,
+            myConv3,
+            myConv4,
+            myConv5
+        });
+
+        REQUIRE(g->getNode("sharedWeight") != nullptr);
+        REQUIRE(g->getNode("sharedBias") != nullptr);
+
+        auto newReLU4 = ReLU("relu4");
+        GraphView::replace({myConv4, myConv4->getParent(1), myConv4->getParent(2)}, {newReLU4});
+        REQUIRE(g->getNode("sharedWeight") != nullptr);
+        REQUIRE(g->getNode("sharedBias") != nullptr);
+
+        auto newReLU3 = ReLU("relu3");
+        GraphView::replace({myConv3, myConv3->getParent(1), myConv3->getParent(2)}, {newReLU3});
+        REQUIRE(g->getNode("sharedWeight") != nullptr);
+        REQUIRE(g->getNode("sharedBias") != nullptr);
+
+        auto newReLU2 = ReLU("relu2");
+        GraphView::replace({myConv2, myConv2->getParent(1), myConv2->getParent(2)}, {newReLU2});
+        REQUIRE(g->getNode("sharedWeight") == nullptr);
+        REQUIRE(g->getNode("sharedBias") == nullptr);
+    }
 }
 
 TEST_CASE("[GraphView] clone") {
diff --git a/unit_tests/operator/Test_ConvDepthWise_Op.cpp b/unit_tests/operator/Test_ConvDepthWise_Op.cpp
index 14d4dc537f527b32414151ee7f93e601f5a4bd8a..6008e3bfac346725935d5d8ffe87f392c49a3409 100644
--- a/unit_tests/operator/Test_ConvDepthWise_Op.cpp
+++ b/unit_tests/operator/Test_ConvDepthWise_Op.cpp
@@ -45,20 +45,20 @@ TEST_CASE("[core/operator] ConvDepthWise_Op(computeReceptiveField)", "[Operator]
     auto op4 = std::dynamic_pointer_cast<OperatorTensor>(cdw4 -> getOperator());
 
     SECTION("Check individual receptive fields") {
-        auto res1 = op1->computeReceptiveField(0, {16,3,10,10});
-        auto res2 = op2->computeReceptiveField(op2->getOutput(0)->getIdx({3,1,100,28}), {4,2,30,40});
-        auto res3 = op3->computeReceptiveField(0, {1,1,109,109});
-        auto res4 = op4->computeReceptiveField(op4->getInput(0)->getIdx({5,0,108,108}), {10,1,1,1});
+        auto res1 = op1->computeReceptiveField({0,0,0,0}, {16,3,10,10});
+        auto res2 = op2->computeReceptiveField({3,1,100,28}, {4,2,30,40});
+        auto res3 = op3->computeReceptiveField({0,0,0,0}, {1,1,109,109});
+        auto res4 = op4->computeReceptiveField({5,0,108,108}, {10,1,1,1});
 
-        REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
-        REQUIRE(((res2[0].first == op2->getInput(0)->getIdx({3,1,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 2, 32, 42}))));
-        REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 1, 218, 218}))));
-        REQUIRE(((res4[0].first == op4->getInput(0)->getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 1, 1, 1}))));
+        REQUIRE(((res1[0].first == std::vector<DimSize_t>({0,0,0,0})) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
+        REQUIRE(((res2[0].first == std::vector<DimSize_t>({3,1,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 2, 32, 42}))));
+        REQUIRE(((res3[0].first == std::vector<DimSize_t>({0,0,0,0})) && (res3[0].second == std::vector<DimSize_t>({1, 1, 218, 218}))));
+        REQUIRE(((res4[0].first == std::vector<DimSize_t>({5,0,108,108})) && (res4[0].second == std::vector<DimSize_t>({10, 1, 1, 1}))));
     }
 
     SECTION("Check receptive field propagation") {
         // input:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
-        auto res4 = op4->computeReceptiveField(op4->getInput(0)->getIdx({5,0,50,50}), {1,1,1,1});
+        auto res4 = op4->computeReceptiveField({5,0,50,50}, {1,1,1,1});
         // cdw4 RF:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
         auto res3 = op3->computeReceptiveField(res4[0].first, res4[0].second);
         // cdw3 RF:  first-{5, 0, 100, 100} dims-{1, 1, 2, 2}
@@ -67,7 +67,7 @@ TEST_CASE("[core/operator] ConvDepthWise_Op(computeReceptiveField)", "[Operator]
         auto res1 = op1->computeReceptiveField(res2[0].first, res2[0].second);
         // cdw1 RF:  first-{5, 0, 100, 100} dims-{1, 1, 8, 8}
 
-        REQUIRE(((res1[0].first == op1->getInput(0)->getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 1, 8, 8}))));
+        REQUIRE(((res1[0].first == std::vector<DimSize_t>({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 1, 8, 8}))));
     }
 }
 }  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/operator/Test_Conv_Op.cpp b/unit_tests/operator/Test_Conv_Op.cpp
index a3e84999eb2e2a31f1217330ac9718f35b0ca396..bc24fc8081d78dedf853450ff648b6d91b47c1dc 100644
--- a/unit_tests/operator/Test_Conv_Op.cpp
+++ b/unit_tests/operator/Test_Conv_Op.cpp
@@ -45,22 +45,22 @@ TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeR
     auto op4 = std::dynamic_pointer_cast<OperatorTensor>(conv4 -> getOperator());
 
     SECTION("Check individual receptive fields") {
-        auto res1 = op1 -> computeReceptiveField(0, {16,32,10,10});
-        auto res2 = op2 -> computeReceptiveField(op2 -> getOutput(0)->getIdx({3,20,100,28}), {4,20,30,40});
-        auto res3 = op3 -> computeReceptiveField(0, {1,1,109,109});
-        auto res4 = op4 -> computeReceptiveField(op4 -> getOutput(0)->getIdx({5,0,108,108}), {10,10,1,1});
+        auto res1 = op1 -> computeReceptiveField({0,0,0,0}, {16,32,10,10});
+        auto res2 = op2 -> computeReceptiveField({3,20,100,28}, {4,20,30,40});
+        auto res3 = op3 -> computeReceptiveField({0,0,0,0}, {1,1,109,109});
+        auto res4 = op4 -> computeReceptiveField({5,0,108,108}, {10,10,1,1});
 
-        REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
-        REQUIRE(((res1[1].first == 0) && (res1[1].second == std::vector<DimSize_t>({32, 3, 5, 5}))));
-        REQUIRE(((res1[2].first == 0) && (res1[2].second == std::vector<DimSize_t>({32}))));
-        REQUIRE(((res2[0].first == op2->getInput(0)->getIdx({3,0,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 32, 32, 42}))));
-        REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 64, 218, 218}))));
-        REQUIRE(((res4[0].first == op4->getInput(0)->getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 10, 1, 1}))));
+        REQUIRE(((res1[0].first == std::vector<DimSize_t>({0,0,0,0})) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
+        REQUIRE(((res1[1].first == std::vector<DimSize_t>({0,0,0,0})) && (res1[1].second == std::vector<DimSize_t>({32, 3, 5, 5}))));
+        REQUIRE(((res1[2].first == std::vector<DimSize_t>({0})) && (res1[2].second == std::vector<DimSize_t>({32}))));
+        REQUIRE(((res2[0].first == std::vector<DimSize_t>({3,0,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 32, 32, 42}))));
+        REQUIRE(((res3[0].first == std::vector<DimSize_t>({0,0,0,0})) && (res3[0].second == std::vector<DimSize_t>({1, 64, 218, 218}))));
+        REQUIRE(((res4[0].first == std::vector<DimSize_t>({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 10, 1, 1}))));
     }
 
     SECTION("Check receptive field propagation") {
         // input:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
-        auto res4 = op4->computeReceptiveField(op4->getOutput(0)->getIdx({5,0,50,50}), {1,1,1,1});
+        auto res4 = op4->computeReceptiveField({5,0,50,50}, {1,1,1,1});
         // conv4 RF:  first-{5, 0, 50, 50}  dims-{1, 10, 1, 1}
         auto res3 = op3->computeReceptiveField(res4[0].first, res4[0].second);
         // conv3 RF:  first-{5, 0, 100, 100} dims-{1, 64, 2, 2}
@@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeR
         auto res1 = op1->computeReceptiveField(res2[0].first, res2[0].second);
         // conv1 RF:  first-{5, 0, 100, 100} dims-{1, 3, 8, 8}
 
-        REQUIRE(((res1[0].first == op1->getInput(0)->getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 3, 8, 8}))));
+        REQUIRE(((res1[0].first == std::vector<DimSize_t>({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 3, 8, 8}))));
 
 
         // std::cout << "conv1: {";