diff --git a/include/aidge/graphRegex/GraphParser.hpp b/include/aidge/graphRegex/GraphParser.hpp
index cfe25c22709a3516b4f55ba774a616e3b94a055c..2c25ac0b76368242891e6e5ba92c2c5fc913a23c 100644
--- a/include/aidge/graphRegex/GraphParser.hpp
+++ b/include/aidge/graphRegex/GraphParser.hpp
@@ -12,15 +12,17 @@ namespace Aidge{
 /**
  * @brief this class uses the lexer to create an AST according to a set of gramer rules
  */
-class GraphParser{
+class GraphParser {
 
-    public:
+public:
     /**
      * @brief AST graph creation function
      * @param gRegexExpressions String representing the logical fuction to be performed
      */
     GraphParser(const std::string gRegexExpressions);
 
+    ~GraphParser() noexcept;
+
     /**
      * @brief AST graph creation function
      * @return The AST tree
@@ -35,7 +37,7 @@ class GraphParser{
     const std::string getQuery();
 
 
-    private:
+private:
     /**
      * @brief restart at the start of the ConditionalExpressions for LEXER and restart  mCurrentToken
      */
diff --git a/include/aidge/nodeTester/ConditionalParser.hpp b/include/aidge/nodeTester/ConditionalParser.hpp
index c21eca0407b77808287138fd39e33c00d241fb70..1f3671ea5b68008a67be5d6a63d09051d49939d5 100644
--- a/include/aidge/nodeTester/ConditionalParser.hpp
+++ b/include/aidge/nodeTester/ConditionalParser.hpp
@@ -29,7 +29,7 @@ using ASTNodeCh = std::vector<std::shared_ptr<AstNode<ConditionalTokenTypes>>>;
 /**
  * @brief this class uses the lexer to create an AST according to a set of gramer rules
  */
-class ConditionalParser{
+class ConditionalParser {
 
     public:
     /**
@@ -38,6 +38,8 @@ class ConditionalParser{
      */
     ConditionalParser(const std::string ConditionalExpressions);
 
+    ~ConditionalParser() noexcept;
+
     /**
      * @brief AST graph creation function
      * @return The AST tree
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index c53cd7a5383bf6075e8cb5f4fa7869958dedee54..28f89cf09f41ff6225c8c9e7248d106f8a0c1428 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -47,7 +47,7 @@ public:
     Add_Op(const Add_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Add_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Add_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
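Note on the recurring copy-constructor fix: reading the backend from op.mOutputs[0] rather than the copy's own mOutputs[0] repeats in every operator below, and the reason is the OperatorTensor.hpp change further down, where the copied object now starts with fresh, implementation-less output Tensors. A standalone sketch of the hazard (plain C++, not aidge code; every name here is illustrative):

    #include <cassert>
    #include <memory>
    #include <string>

    struct Impl { std::string backend() const { return "cpu"; } };
    struct Output { std::shared_ptr<Impl> impl; };

    struct Op {
        std::shared_ptr<Impl> mImpl;
        Output mOutput;  // stands in for mOutputs[0]
        Op() : mImpl(std::make_shared<Impl>()), mOutput{mImpl} {}
        Op(const Op& op) : mOutput{nullptr} {  // the copy gets an empty output
            // this->mOutput.impl->backend() would dereference a null pointer here;
            // op.mOutput.impl is the only one that still knows the backend name.
            mImpl = op.mImpl ? std::make_shared<Impl>(*op.mImpl) : nullptr;
            assert(!op.mImpl || op.mOutput.impl->backend() == "cpu");
        }
    };

    int main() { Op a; Op b(a); return 0; }
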
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 64f583ac243d2e9d28b49aa3e61406917870d82f..469d8485afe39692847ad88726ebca5926708c84 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -60,7 +60,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -94,40 +94,44 @@ public:
     }
 
 
-    // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
-    //     if (outputIdx != 0) {
-    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-    //     }
-    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
-    //         // Offset
-    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
-    //         std::vector<DimSize_t> inputIdxDims = outputIdxDims;
-
-    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
-    //             }
-    //         }
-
-    //         // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-    //         // Width
-    //         std::vector<DimSize_t> inputDims;
-    //         inputDims.push_back(outputDims[0]); // same batch value
-    //         inputDims.push_back(outputDims[1]); // same channel value
-
-    //         for (DimIdx_t i = 0; i < DIM; ++i) {
-    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
-    //                         + 1
-    //                         + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
-    //             inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
-    //         }
-    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInput->getIdx(inputIdxDims), inputDims));
-    //         return res;
-    //     }
-    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-    // }
+    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
+    computeReceptiveField(const std::size_t firstIdx,
+                            const std::vector<DimSize_t>& outputDims,
+                            const IOIndex_t outputIdx = 0) const override final
+    {
+        if (outputIdx != 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
+        }
+        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+            // Offset
+            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
+            std::vector<DimSize_t> inputIdxDims = outputIdxDims;
+
+            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                }
+            }
+
+            // padding is not a parameter of AvgPooling_Op. It is handled by the Pad_Op Operator
+            // Input
+            std::vector<DimSize_t> inputDims;
+            inputDims.push_back(outputDims[0]); // same batch value
+            inputDims.push_back(outputDims[1]); // same channel value
+
+            for (DimIdx_t i = 0; i < DIM; ++i) {
+                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                            + 1
+                            + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
+                inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
+            }
+            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
+            return res;
+        }
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    }
 
 
     void setBackend(const std::string &name) override {
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 3613aa12b9623652555ed3411b3d89e104721623..6dc0455bd78d8f196d28bb03b26630f46eabd95b 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -54,7 +54,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 391191f668ca99c705d22434d5bb798863b35430..080f763cb176c463f3e03a672de4a13cf05a497b 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -55,7 +55,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Concat_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Concat_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 8129e49f083032f5781f169605b02dc3140bff12..194ac313dd7f9b22c55fdbe7e0e30d37d816bcb8 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -65,7 +65,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -77,9 +77,9 @@ public:
     }
 
     // Data operator[](const char* inputName) override final {
-    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
-    //         (strcmp(inputName, "weight") ? mInputs[1] :
-    //         (strcmp(inputName, "bias") ? mInputs[2] :
+    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? getInput(0) :
+    //         (strcmp(inputName, "weight") ? getInput(1) :
+    //         (strcmp(inputName, "bias") ? getInput(2) :
     //         nullptr));
     //     assert((in!=nullptr) && "No such parameter");
     //     return *in;
@@ -119,55 +119,57 @@ public:
     }
 
 
-// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
-    //     if (outputIdx != 0) {
-    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-    //     }
-    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
-    //         // Offset
-    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
-    //         auto inputIdxDims = outputIdxDims; // batch idx is the same
-    //         inputIdxDims[1] = 0; // each channel is used so start with the first one
-
-    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
-    //             }
-    //         }
-
-    //         // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-    //         // Input
-    //         // same batch value, every input channel is used
-    //         std::vector<DimSize_t> inputDims{outputDims[0], mInputs[0]->dims()[1]};
-    //                     for (DimIdx_t i = 0; i < DIM; ++i) {
-    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
-    //                         + 1
-    //                         + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-    //             inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
-    //         }
-
-    //         // Weight
-    //         // same output value, every input channel is used
-    //         std::vector<DimSize_t> weightDims{outputDims[0], mInputs[0]->dims()[1]};
-    //         weightDims.insert(weightDims.end(), this->template getAttr<ConvAttr::KernelDims>()[0], this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(DIM)]);
-    //         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
-    //         weightIdxDims[0] = outputIdxDims[1];
-
-    //         // Bias
-    //         const std::vector<DimSize_t> biasDims{outputDims[0]};
-    //         const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
-
-    //         // Result
-    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[1]->getIdx(weightIdxDims), weightDims));
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[2]->getIdx(biasIdxDims), biasDims));
-    //         return res;
-    //     }
-    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-    // }
+    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+        if (outputIdx != 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
+        }
+        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+            // Offset
+            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
+            auto inputIdxDims = outputIdxDims; // batch idx is the same
+            inputIdxDims[1] = 0; // each channel is used so start with the first one
+
+            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                }
+            }
+
+            // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
+            // Input
+            // same batch value, every input channel is used
+            std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
+            for (DimIdx_t i = 0; i < DIM; ++i) {
+                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                            + 1
+                            + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+                inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
+            }
+
+            // Weight
+            // same output value, every input channel is used
+            std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
+            for (std::size_t i = 0; i < DIM; ++i) {
+                weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
+            }
+            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+            weightIdxDims[0] = outputIdxDims[1];
+
+            // Bias
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channels
+            const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
+
+            // Result
+            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(0)->getIdx(inputIdxDims), inputDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(1)->getIdx(weightIdxDims), weightDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(2)->getIdx(biasIdxDims), biasDims));
+            return res;
+        }
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    }
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 98ecfa8c8a85e60d8136af6cd5cc04a24c601124..6f1f3f7ffbaf8dd750f374f2b391ccc90fad8254 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -67,7 +67,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
@@ -115,41 +115,55 @@ public:
         }
     }
 
-    // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
-    //     if (outputIdx != 0) {
-    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-    //     }
-    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
-    //         // Offset
-    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
-    //         auto inputIdxDims = outputIdxDims; // batch idx is the same
-
-    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
-    //             }
-    //         }
-
-    //         // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-    //         // Width
-    //         std::vector<DimSize_t> inputDims;
-    //         inputDims.push_back(outputDims[0]); // same batch value
-    //         inputDims.push_back(outputDims[1]); // same channel value
-
-    //         for (DimIdx_t i = 0; i < DIM; ++i) {
-    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
-    //                         + 1
-    //                         + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-    //             inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
-    //         }
-    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
-    //         return res;
-    //     }
-    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-    // }
+    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+        if (outputIdx != 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
+        }
+        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+            // Offset
+            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
+            auto inputIdxDims = outputIdxDims; // batch idx is the same
+
+            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                }
+            }
+
+            // padding is not a parameter of ConvDepthWise_Op. It is handled by the Pad_Op Operator
+            // Input
+            // same batch and channel values
+            std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
+            for (DimIdx_t i = 0; i < DIM; ++i) {
+                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                            + 1
+                            + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+                inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
+            }
+
+            // Weight
+            std::vector<DimSize_t> weightDims{outputDims[1], 1};
+            for (std::size_t i = 0; i < DIM; ++i) {
+                weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
+            }
+            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+            weightIdxDims[0] = outputIdxDims[1];
+
+            // Bias
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channels
+            const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
+
+            // Result
+            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(0)->getIdx(inputIdxDims), inputDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(1)->getIdx(weightIdxDims), weightDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(2)->getIdx(biasIdxDims), biasDims));
+            return res;
+        }
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    }
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index a3c0eca6721ad5a46c4d6618b0692f5452e9ea44..84de3308efcc07fa14bb3663ee7b66fde3f22123 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -40,7 +40,7 @@ public:
     Div_Op(const Div_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Div_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 764642f4916319f4073aa5bb1317ceb646c6dbc0..ecd2b97ea8a524736e6dc3a44819df29bbf4e3d8 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -57,7 +57,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<FC_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index eee5733326efbd8b9cbb475e2b45a779df7a0cdf..f9bbef46283ba8b9b480c1eba0a11c6caf954897 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -54,7 +54,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index cf9305d0df9433a189f14608fbe36141f251adec..1014488a77a6ffe5b6048cfc23da669416710c92 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -56,7 +56,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 4235aee0ba944b68413e00558dc2c77cea77f04a..0a292449385807a4deb8b7d0458720c9d9a8e99f 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -64,7 +64,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 01b199e4ecce3508f58447419f35d23bed5fcf5c..47da898829f9581d4907ddad97bf847c6746a536 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -43,7 +43,7 @@ public:
     Mul_Op(const Mul_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Mul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 1fd8172b5a5fcb4efc8cb1c7db5941a548f94d00..5cd35be72aa4ecf880818aaf10dddbb11735e53e 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -74,15 +74,6 @@ public:
     virtual std::shared_ptr<Operator> clone() const = 0;
 
     virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
-    /**
-     * @brief For a given output feature area, compute the associated receptive
-     * field for each data input.
-     * @param firstIdx First index of the output feature.
-     * @param outputDims Size of output feature.
-     * @param outputIdx Index of the output. Default 0.
-     * @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
-     */
-    // virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
 
     /**
      * @brief Set the specified input by performing a deep copy of the given data.
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 218a380f2b5b3014fba4053363f94fda13b6e47a..b956da474311b5863690f5a5e40329e443f1345a 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -56,7 +56,8 @@ public:
           mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)),
           mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) {
         for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
-            mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i)));
+            mOutputs[i] = std::make_shared<Tensor>();
+            // mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i)));
             // datatype already copied
         }
     }
@@ -90,6 +91,16 @@ public:
 
     ///////////////////////////////////////////////////
     // Tensor dimensions
+    /**
+     * @brief For a given output feature area, compute the associated receptive
+     * field for each data input.
+     * @param firstIdx First index of the output feature.
+     * @param outputDims Size of output feature.
+     * @param outputIdx Index of the output. Default 0.
+     * @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
+     * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
+     */
+    virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
     virtual void computeOutputDims();
     virtual bool outputDimsForwarded() const;
     ///////////////////////////////////////////////////
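With computeReceptiveField now declared on OperatorTensor, a hedged usage sketch follows (it assumes the usual aidge headers, a 4-D N,C,H,W output, and a graph whose dimensions were already forwarded, e.g. through GraphView::forwardDims()):

    #include <memory>
    #include <utility>
    #include <vector>

    #include "aidge/graph/Node.hpp"
    #include "aidge/operator/OperatorTensor.hpp"
    #include "aidge/utils/Types.h"

    // For a convolution node, ask which input regions are needed to produce the
    // first 1x1x3x3 patch of output 0: one entry per data/weight/bias input.
    std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>>
    firstPatchFields(const std::shared_ptr<Aidge::Node>& convNode) {
        auto op = std::static_pointer_cast<Aidge::OperatorTensor>(convNode->getOperator());
        // Each returned pair holds the flat index of the region's first element
        // in the corresponding input Tensor and the dimensions of that region.
        return op->computeReceptiveField(/*firstIdx=*/0, /*outputDims=*/{1, 1, 3, 3});
    }
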
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index 97bad75dc9ca46d274def78ed79e56fab0c46b89..ee22bd9aec908a66d2ca6cbac0b9a8dcd5dec409 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -40,7 +40,7 @@ public:
     Pow_Op(const Pow_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Pow_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index d6060dd6c036d515611d62fbd071c0a7430ae5e8..1440a939f13da54dcae2cebedb0d4d807d8244d7 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -51,7 +51,10 @@ public:
     Producer_Op(const Producer_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
+            mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i)));
+        }
+        mImpl = op.mImpl ? Registrar<Producer_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 825e86f47bdbf016a43afbca89d140e0e170fc7c..e72db011795639c5231e6afe5fbd24bbbc71b8c5 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -39,7 +39,7 @@ public:
     ReLU_Op(const ReLU_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 2c4a5c5dc4e8bf2683acba9b57ff45a2ad590fa4..b64c9f9b9513a97295ca5aa75db3f6e2979b2eef 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -55,7 +55,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index b449f91af3473f5dc70015f9e1c5e86bd375dbc8..5968fdeb40ba864802fbdc5a164f4e8837ee788b 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -26,21 +26,20 @@
 namespace Aidge {
 enum class SliceAttr { Beginning, SliceDims };
 
-template <DimIdx_t DIM>
 class Slice_Op
     : public OperatorTensor,
-      public Registrable<Slice_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op<DIM> &)>,
-      public StaticAttributes<SliceAttr, std::size_t, std::array<DimSize_t, DIM>> {
+      public Registrable<Slice_Op, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op &)>,
+      public StaticAttributes<SliceAttr, std::size_t, std::vector<DimSize_t>> {
 public:
     static const std::string Type;
 
     Slice_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SliceAttr, std::size_t, std::array<DimSize_t, DIM>>;
+    using Attributes_ = StaticAttributes<SliceAttr, std::size_t, std::vector<DimSize_t>>;
     template <SliceAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    Slice_Op(std::size_t beginningPos, std::array<DimSize_t, DIM> sliceDims)
+    Slice_Op(const std::size_t beginningPos, const std::vector<DimSize_t> sliceDims)
         : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<SliceAttr::Beginning>(beginningPos),
                       attr<SliceAttr::SliceDims>(sliceDims))
@@ -55,7 +54,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Slice_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this)
+        mImpl = op.mImpl ? Registrar<Slice_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this)
                          : nullptr;
     }
 
@@ -70,12 +69,8 @@ public:
         if (!getInput(0) || (getInput(0)->empty())) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
         }
-        // Check input dimensions is compatible with slice dimensions
-        if (getInput(0)->nbDims() != DIM) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Error: input and slice dimensions are not the same size.");
-        }
-        std::array<DimSize_t, DIM> outputDims;
-        const std::array<DimSize_t, DIM> inputDims = getInput(0)->template dims<DIM>();
+        std::vector<DimSize_t> outputDims = std::vector<DimSize_t>(getInput(0)->nbDims());
+        const std::vector<DimSize_t> inputDims = getInput(0)->dims();
 
         // Check that the sliced Tensor is actually part of the input Tensor
         // For a 5*5 tensor ('x') and a 3*3 slice kernel ('o'):
@@ -85,7 +80,7 @@ public:
         // xxooo               xxxoo
         // xxooo               xxxoo
         std::vector<std::size_t> beginningCoords = mInputs[0]->getCoord(this->template getAttr<SliceAttr::Beginning>());
-        for (std::size_t i = 0; i < DIM; ++i) {
+        for (std::size_t i = 0; i < getInput(0)->nbDims(); ++i) {
             if (beginningCoords[i] + this->template getAttr<SliceAttr::SliceDims>()[i] > inputDims[i]) {
                 AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds");
             } else {
@@ -95,7 +90,7 @@ public:
         mOutputs[0]->resize(outputDims);
     }
 
-    void setBackend(const std::string &name) {
+    void setBackend(const std::string &name) override {
         mImpl = Registrar<Slice_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name);
 
@@ -111,19 +106,11 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string Slice_Op<DIM>::Type = "Slice";
 
-template <std::size_t DIM>
-inline std::shared_ptr<Node> Slice(std::size_t beginningPos, std::array<DimSize_t, DIM> sliceDims,
+inline std::shared_ptr<Node> Slice(const std::size_t beginningPos, const std::vector<DimSize_t> sliceDims,
                                    const std::string &name = "") {
     // FIXME: properly handle default w&b initialization in every cases
-    return std::make_shared<Node>(std::make_shared<Slice_Op<DIM>>( beginningPos, sliceDims), name);
-}
-
-template <DimIdx_t DIM>
-inline std::shared_ptr<Node> Slice(std::size_t beginningPos, DimSize_t const (&sliceDims)[DIM], const std::string& name = "") {
-  return Slice(beginningPos, to_array(sliceDims), name);
+    return std::make_shared<Node>(std::make_shared<Slice_Op>(beginningPos, sliceDims), name);
 }
 }  // namespace Aidge
 
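Since Slice_Op no longer carries a DIM template parameter, the slice rank is picked up at runtime from the vector handed to the factory. A minimal sketch of the new call site (the shape values and node name are illustrative):

    #include <memory>
    #include <vector>

    #include "aidge/operator/Slice.hpp"
    #include "aidge/utils/Types.h"

    int main() {
        // Extract a 1x3x2x2 region starting at flat position 0 of the input Tensor;
        // no template argument is needed any more, the rank comes from the vector size.
        const std::vector<Aidge::DimSize_t> sliceDims{1, 3, 2, 2};
        std::shared_ptr<Aidge::Node> slice = Aidge::Slice(/*beginningPos=*/0, sliceDims, "slice0");
        return 0;
    }
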
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index f7a1201f5fe5227fb87fed0a9a3e5c36eeb5eeb7..bcf9a5a66147b821a062cd6b93087cb1c45bca00 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -40,7 +40,7 @@ public:
     Softmax_Op(const Softmax_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Softmax_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Softmax_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index d4c95d2947b7436cdab49b8527eb7ed3be5f2644..5eb4d89308d9684811876588917ab53efd1bd069 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -45,7 +45,7 @@ public:
     Sqrt_Op(const Sqrt_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 5d52102d9bd1c214cd4fe9ece8e0e9d6bc4a23b7..fad65e00e973c6b0352de2bdf5e43a79b4f3d4e4 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -45,7 +45,7 @@ public:
     Sub_Op(const Sub_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Sub_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Sub_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/recipies/Recipies.hpp b/include/aidge/recipies/Recipies.hpp
index 8c5ba8d085482eaaba75dfd8716eda2aa58c3bb5..5ad08a6582aa886604d0068f75cab9fe1631b05e 100644
--- a/include/aidge/recipies/Recipies.hpp
+++ b/include/aidge/recipies/Recipies.hpp
@@ -84,7 +84,7 @@ void fuseBatchNorm(std::shared_ptr<MatchSolution> solution);
  */
 void fuseBatchNorm(std::shared_ptr<GraphView> graphView);
 
-// std::set<std::shared_ptr<Node>> getHorizontalTiling(const std::shared_ptr<Node>& node, const DimIdx_t axis, const std::size_t nbSlices);
+std::set<std::shared_ptr<Node>> getConvHorizontalTiling(const std::shared_ptr<Node>& node, const DimIdx_t axis, const std::size_t nbSlices);
 // void horizontalTiling(std::shared_ptr<Node> node, DimIdx_t dim, std::size_t nbSlices);
 // std::set<std::shared_ptr<Node>> getHorizontalTiling(std::set<std::shared_ptr<Node>> setOfNodes, DimIdx_t dim, std::size_t nbSlices);
 // void horizontalTiling(std::set<std::shared_ptr<Node>> setOfNodes, DimIdx_t dim, std::size_t nbSlices);
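The newly exposed getConvHorizontalTiling is the natural consumer of the pieces above (computeReceptiveField plus the runtime-rank Slice). A hedged sketch of a call site; the axis and slice count are assumptions, and how the returned nodes are re-wired into the GraphView is not specified by this patch:

    #include <memory>
    #include <set>

    #include "aidge/graph/Node.hpp"
    #include "aidge/recipies/Recipies.hpp"

    // Split a convolution node into 4 tiles along axis 2 (the height dimension of
    // an N,C,H,W tensor). The caller is responsible for inserting the returned
    // nodes back into the graph.
    std::set<std::shared_ptr<Aidge::Node>> tileConv(const std::shared_ptr<Aidge::Node>& convNode) {
        return Aidge::getConvHorizontalTiling(convNode, /*axis=*/2, /*nbSlices=*/4);
    }
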
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index faf6c49bdbe28e7214f06a4d116cf23a1739154f..6dcec5aaa4fa80aefebd538a1728445051ca080e 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -23,7 +23,7 @@ class Node;
 class GraphView;
 
 class SequentialScheduler {
-public:
+private:
     struct SchedulingElement {
         SchedulingElement(
             std::shared_ptr<Node> node_,
@@ -36,6 +36,7 @@ public:
         std::chrono::time_point<std::chrono::high_resolution_clock> end;
     };
 
+public:
     SequentialScheduler(std::shared_ptr<GraphView> graphView)
         : mGraphView(graphView)
     {
@@ -44,6 +45,10 @@ public:
     ~SequentialScheduler() = default;
 
     void generateScheduling(bool verbose = false);
+    inline void resetScheduling() {
+        mScheduling.clear();
+        mStaticSchedule.clear();
+    }
 
     /**
      * @brief Run the provided Computational Graph with a batch of data
@@ -58,13 +63,12 @@ public:
 
     /**
      * @brief Return a vector of Node ordered by the order they are called by the scheduler
-     *
      * @return std::vector<std::shared_ptr<Node>>
      */
-    std::vector<std::shared_ptr<Node>> getStaticScheduling(){
+    inline std::vector<std::shared_ptr<Node>> getStaticScheduling() const noexcept {
         return mStaticSchedule;
     }
-    std::shared_ptr<GraphView> getGraphView(){
+    inline std::shared_ptr<GraphView> getGraphView() const noexcept {
         return mGraphView;
     }
 
@@ -77,20 +81,11 @@ private:
      */
     std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const;
 
-    /**
-     * @brief Shared ptr to the scheduled graph view
-     *
-     */
+    /** @brief Shared ptr to the scheduled graph view */
     std::shared_ptr<GraphView> mGraphView;
-    /**
-     * @brief List of SchedulingElement (i.e: Nodes with their computation time)
-     *
-     */
+    /** @brief List of SchedulingElement (i.e: Nodes with their computation time) */
     std::vector<SchedulingElement> mScheduling;
-    /**
-     * @brief List of nodes ordered by their
-     *
-     */
+    /** @brief List of Nodes ordered by the order they are called by the scheduler */
     std::vector<std::shared_ptr<Node>> mStaticSchedule;
 };
 } // namespace Aidge
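resetScheduling() makes a SequentialScheduler reusable once the graph it wraps has changed (for instance after a tiling recipe). A short usage sketch, with the graph-modification step left abstract:

    #include <memory>

    #include "aidge/graph/GraphView.hpp"
    #include "aidge/scheduler/Scheduler.hpp"

    void reschedule(std::shared_ptr<Aidge::GraphView> graphView) {
        Aidge::SequentialScheduler scheduler(graphView);
        scheduler.generateScheduling();

        // ... the graph is transformed here (e.g. nodes replaced by a recipe) ...

        scheduler.resetScheduling();         // clears the timing log and the static schedule
        scheduler.generateScheduling(true);  // build a fresh static schedule, verbose
    }
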
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index ef027af7d81be938468da10728137e1aee62058d..f5c5145e0a86d939b96e6d2a579dfa2579f8b3a5 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -122,7 +122,8 @@ void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedMaxPoolingOp<2>(m);
   declare_PaddedMaxPoolingOp<3>(m);
 
-  py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, Operator>(m, "MetaOperator_Op", py::multiple_inheritance());
+  py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, Operator>(m, "MetaOperator_Op", py::multiple_inheritance())
+  .def("get_micro_graph", &MetaOperator_Op::getMicroGraph);
 
   m.def("meta_operator", &MetaOperator,
     py::arg("type"),
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0956d6260e50d3be2418b1cf4089df87e442e54a
--- /dev/null
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -0,0 +1,66 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <iostream>
+#include <string>
+#include <vector>
+#include <array>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
+  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Operator, Attributes>(
+    m, ("PadOp" + std::to_string(DIM) + "D").c_str(),
+    py::multiple_inheritance())
+  .def(py::init<const std::array<DimSize_t, 2*DIM> &,
+                const PadBorderType &,
+                double>(),
+        py::arg("beginEndTuples"),
+        py::arg("borderType") = PadBorderType::Constant,
+        py::arg("borderValue") = 0.0)
+    .def("get_inputs_name", &Pad_Op<DIM>::getInputsName)
+    .def("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
+    ;
+
+  m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
+                                                        const std::string& name,
+                                                        const PadBorderType &borderType = PadBorderType::Constant,
+                                                        double borderValue = 0.0) {
+        AIDGE_ASSERT(beginEndTuples.size() == 2*DIM, "begin_end_tuples size [%ld] does not match DIM [%d]", beginEndTuples.size(), 2*DIM);
+        return Pad<DIM>(to_array<2*DIM>(beginEndTuples.begin()), name, borderType, borderValue);
+    },
+       py::arg("begin_end_tuples"),
+       py::arg("name") = "",
+       py::arg("border_type") = PadBorderType::Constant,
+       py::arg("border_value") = 0.0);
+}
+
+
+void init_Pad(py::module &m) {
+  py::enum_<PadBorderType>(m, "pad_border_type")
+    .value("Constant", PadBorderType::Constant)
+    .value("Edge",     PadBorderType::Edge)
+    .value("Reflect",  PadBorderType::Reflect)
+    .value("Wrap",     PadBorderType::Wrap)
+    .export_values();
+  declare_PadOp<1>(m);
+  declare_PadOp<2>(m);
+  declare_PadOp<3>(m);
+}
+} // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 23b54e46b23a341add8ba7291551c0f84f705bea..b1e0e0d11fbbae61a6b853e866adc02e77f315dd 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -35,6 +35,7 @@ void init_MaxPooling(py::module&);
 void init_MetaOperatorDefs(py::module&);
 void init_Mul(py::module&);
 void init_Producer(py::module&);
+void init_Pad(py::module&);
 void init_Pow(py::module&);
 void init_ReLU(py::module&);
 void init_Softmax(py::module&);
@@ -81,6 +82,8 @@ void init_Aidge(py::module& m){
     init_MaxPooling(m);
     init_MetaOperatorDefs(m);
     init_Mul(m);
+    init_Pad(m);
+
     init_Pow(m);
     init_ReLU(m);
     init_Softmax(m);
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 0b64c518cd5aad7d4ae6841dea53d828c4c85923..2716de30040a7faff648bfb57878fbe7544c6bd6 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -269,18 +269,17 @@ void Aidge::GraphView::forwardDims() {
             // assess if the input was not already set and is a Tensor then link it to parent output
             std::pair<std::shared_ptr<Node>, IOIndex_t> inputI = nodePtr->input(i);
             if (inputI.first) {
-              if ( std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i)) != inputI.first->getOperator()->getRawOutput(inputI.second)) {
-                  if ((strcmp(nodePtr->getOperator()->getRawInput(i)->type(), Tensor::Type) == 0) && (strcmp(inputI.first->getOperator()->getRawOutput(inputI.second)->type(), Tensor::Type)==0)) {
-                    // assert provided Data is of "Tensor" type
-                    nodePtr->getOperator()->associateInput(i, inputI.first->getOperator()->getRawOutput(inputI.second));
-                  }
-                  else {
-                    assert(false && "Non-tensor entries not handled yet.\n");
-                  }
-              }
-            } else
-            {
-              assert(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty());
+                if ( std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i)) != inputI.first->getOperator()->getRawOutput(inputI.second)) {
+                    if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
+                        // assert provided Data is of "Tensor" type
+                        nodePtr->getOperator()->associateInput(i, inputI.first->getOperator()->getRawOutput(inputI.second));
+                    }
+                    else {
+                        assert(false && "Non-tensor entries not handled yet.\n");
+                    }
+                }
+            } else {
+                assert(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty());
             }
 
         }
diff --git a/src/graphRegex/GraphParser.cpp b/src/graphRegex/GraphParser.cpp
index 9c3d10114d777cf7755432a5723a3b70b81d37a1..9ad96a34bfbe36bdae65cae072eb4f1edcd3faaf 100644
--- a/src/graphRegex/GraphParser.cpp
+++ b/src/graphRegex/GraphParser.cpp
@@ -1,19 +1,23 @@
-#include "aidge/graphRegex/GraphParser.hpp"
+#include <memory>
+#include <string>
+#include <vector>
 
-using namespace Aidge; 
+#include "aidge/graphRegex/GraphParser.hpp"
 
-GraphParser::GraphParser(const std::string gRegexExpressions):
+Aidge::GraphParser::GraphParser(const std::string gRegexExpressions):
 mLexer(gRegexExpressions)
 {
     mCurrentToken = mLexer.getNextToken();
 }
 
+Aidge::GraphParser::~GraphParser() noexcept = default;
 
-const std::string GraphParser::getQuery(){
+
+const std::string Aidge::GraphParser::getQuery(){
     return mLexer.getQuery();
 }
 
-std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::parse(void){
+std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::parse(void){
 
     std::shared_ptr<AstNode<gRegexTokenTypes>> astTree = constructAstAllExpr();
     rstParser();
@@ -21,14 +25,14 @@ std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::parse(void){
 }
 
 
-void GraphParser::rstParser(void){
+void Aidge::GraphParser::rstParser(void){
     mLexer.rstPosition();
     mCurrentToken = mLexer.getNextToken();
 }
 
 
-void GraphParser::ackToken(gRegexTokenTypes  tokenType){
-    
+void Aidge::GraphParser::ackToken(gRegexTokenTypes  tokenType){
+
     if(mCurrentToken->getType() == tokenType ){
         try {
             mCurrentToken = mLexer.getNextToken();
@@ -48,7 +52,7 @@ void GraphParser::ackToken(gRegexTokenTypes  tokenType){
 /*
 exp : KEY(QOM | QZM)?  | CKEY | domain
 */
-std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstExp(void)
+std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::constructAstExp(void)
 {
 
     try{
@@ -86,15 +90,15 @@ std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstExp(void)
 }
 
 /*
-seq :exp (NEXT seq)* 
+seq :exp (NEXT seq)*
 */
-std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstSeq(void)
+std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::constructAstSeq(void)
 {
 
    try{
-   
+
         std::shared_ptr<AstNode<gRegexTokenTypes>> left = constructAstExp();
-        if(mCurrentToken->getType() == gRegexTokenTypes::NEXT ) 
+        if(mCurrentToken->getType() == gRegexTokenTypes::NEXT )
         {
             std::shared_ptr<ParsingToken<gRegexTokenTypes>> token = mCurrentToken->copy();
             ackToken(gRegexTokenTypes::NEXT);
@@ -114,15 +118,15 @@ std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstSeq(void)
 
 
 /*
-LPAREN seq RPAREN (QOM | QZM) 
+LPAREN seq RPAREN (QOM | QZM)
 */
-std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstDomain(void)
+std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::constructAstDomain(void)
 {
 
    try{
         std::shared_ptr<ParsingToken<gRegexTokenTypes>> token ;
         std::shared_ptr<AstNode<gRegexTokenTypes>> node ;
- 
+
         token = mCurrentToken->copy();
         ackToken(gRegexTokenTypes::LPAREN);
         node = std::make_shared<AstNode<gRegexTokenTypes>>(token,
@@ -144,7 +148,7 @@ std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstDomain(void)
             errorMessage << "Bad syntax constructAstDomain must have quantifier \n";
             throw std::runtime_error(errorMessage.str());
         }
-   
+
         return node;
 
     } catch (const std::runtime_error& e) {
@@ -157,12 +161,12 @@ std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstDomain(void)
 /*
         allExpr: seq (SEP allExpr)* | STOP
 */
-std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstAllExpr(void)
+std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::constructAstAllExpr(void)
 {
 
     try{
         std::shared_ptr<AstNode<gRegexTokenTypes>> left = constructAstSeq();
-        if(mCurrentToken->getType() == gRegexTokenTypes::SEP ) 
+        if(mCurrentToken->getType() == gRegexTokenTypes::SEP )
         {
             std::shared_ptr<ParsingToken<gRegexTokenTypes>> token = mCurrentToken->copy();
             ackToken(gRegexTokenTypes::SEP);
@@ -170,7 +174,7 @@ std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstAllExpr(void
             if(mCurrentToken->getType() == gRegexTokenTypes::STOP )
             {
                  return left;
-            } 
+            }
             std::shared_ptr<AstNode<gRegexTokenTypes>> newNode = std::make_shared<AstNode<gRegexTokenTypes>>(token,
             std::vector<std::shared_ptr<AstNode<gRegexTokenTypes>>>{left,constructAstAllExpr()});
             left = newNode;
diff --git a/src/nodeTester/ConditionalParser.cpp b/src/nodeTester/ConditionalParser.cpp
index 3ca2843aabefe9f98bc8ad46a36fe03883d0baef..ba40c561375e0c09eb86009d447a782ab99d5d0b 100644
--- a/src/nodeTester/ConditionalParser.cpp
+++ b/src/nodeTester/ConditionalParser.cpp
@@ -1,23 +1,27 @@
+#include <memory>
+#include <vector>
 
 #include "aidge/nodeTester/ConditionalParser.hpp"
 
-using namespace Aidge;
-
 
 //////////////////////////////
 //ConditionalParser
 //////////////////////////////
 
-ConditionalParser::ConditionalParser(const std::string ConditionalExpressions):mLexer(ConditionalExpressions){
+Aidge::ConditionalParser::ConditionalParser(const std::string ConditionalExpressions)
+    : mLexer(ConditionalExpressions)
+{
     mCurrentToken = mLexer.getNextToken();
 }
 
-void ConditionalParser::rstParser(void){
+Aidge::ConditionalParser::~ConditionalParser() noexcept = default;
+
+void Aidge::ConditionalParser::rstParser(void){
     mLexer.rstPosition();
     mCurrentToken = mLexer.getNextToken();
 }
 
-void ConditionalParser::ackToken(ConditionalTokenTypes  tokenType){
+void Aidge::ConditionalParser::ackToken(ConditionalTokenTypes  tokenType){
     if(mCurrentToken->getType() == tokenType ){
 
         try {
@@ -38,7 +42,7 @@ void ConditionalParser::ackToken(ConditionalTokenTypes  tokenType){
 
 
 
-std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstVal(void){
+std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::constructAstVal(void){
     /*
     val : (KEY|INTEGER|FOAT|STRING|LAMBDA)
     */
@@ -76,7 +80,7 @@ std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstV
 
 }
 
-std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstLambda(void){
+std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::constructAstLambda(void){
     /*
     AstLambda :  LAMBDA val (ARGSEP val)* RPAREN
     */
@@ -94,7 +98,7 @@ std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstL
     return std::make_shared<AstNode<ConditionalTokenTypes>>(tokenLdb,paramLambda);
 }
 
-std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstCmpr(void){
+std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::constructAstCmpr(void){
       /*
         cmpr   : val (EQ|NEQ) val | LPAREN expr RPAREN
         NOT ir ?
@@ -125,7 +129,7 @@ std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstC
      }
 }
 
-std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstExpr(std::size_t precLimit /*= 0*/){
+std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::constructAstExpr(std::size_t precLimit /*= 0*/){
     /*
         expr   : cmpr ((AND | OR) cmpr)*
         the NOT is not binary OP can be use in pratt
@@ -134,27 +138,27 @@ std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstE
         OR
     */
 
-   //the not 
+   //the not
     std::shared_ptr<AstNode<ConditionalTokenTypes>> left;
     std::shared_ptr<ParsingToken<ConditionalTokenTypes>> token = mCurrentToken->copy();
-    
+
     if (mCurrentToken->getType() == ConditionalTokenTypes::NOT  ){
         ackToken(ConditionalTokenTypes::NOT );
         left= std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{constructAstCmpr()});
     }else{
         left= constructAstCmpr();
     }
-    
+
     //pratt
-    while (mCurrentToken->getType() != ConditionalTokenTypes::STOP ) //security 
+    while (mCurrentToken->getType() != ConditionalTokenTypes::STOP ) // safety guard: stop at the end of the token stream
     {
         token = mCurrentToken->copy();
-        //if the token is not in the map is not a operator so we consider a prec of 0 
+        // a token absent from the precedence map is not an operator, so treat it as precedence 0 and stop
         if (ConditionalPrec.find(token->getType()) ==ConditionalPrec.end() ){
             return left;
         }
 
-        //if my actual operator have a prec <= of the last operator 
+        // if the current operator's precedence is <= that of the previous operator, stop climbing
         std::size_t prec = ConditionalPrec.at(token->getType());
         if (prec <= precLimit){
             return left;
@@ -165,7 +169,7 @@ std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstE
 
         std::shared_ptr<AstNode<ConditionalTokenTypes>> right = constructAstExpr(prec);
 
-        //i'm not sur what append to newNode 
+        // not entirely sure what should end up in newNode; the alternative construction is kept below
         //std::shared_ptr<AstNode<ConditionalTokenTypes>> newNode = std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{left,constructAstCmpr()});
         std::shared_ptr<AstNode<ConditionalTokenTypes>> newNode = std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{left,right});
         left = newNode;
@@ -174,10 +178,10 @@ std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstE
 }
 
 
-std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::parse(void){
+std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::parse(void){
     /*
         expr   : cmpr ((AND | OR) cmpr)*
-        cmpr   : val (EQ|NEQ) val | LPAREN expr RPAREN | BOOL | LAMBDA 
+        cmpr   : val (EQ|NEQ) val | LPAREN expr RPAREN | BOOL | LAMBDA
         val    : (KEY|INTEGER|FOAT|STRING|LAMBDA)
         lambda :  LAMBDA val (ARGSEP val)* RPAREN
     */
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index 2e3e77288bf1e0613f0aa572e3c50e94599a902f..bc268263e8a6e2ec7c9944faa31da84dc50c4f53 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -19,6 +19,8 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
+const std::string Aidge::Mul_Op::Type = "Mul";
+
 void Aidge::Mul_Op::computeOutputDims() {
     // check inputs have been associated
     if (!getInput(0) || !getInput(1)) {
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index eb94db87df250767967348c3adfed8a1e35b4c5f..4adc57f55f7531c28c0c0603ee01c176bdd59e96 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -31,29 +31,6 @@ Aidge::Operator::~Operator() noexcept = default;
 //        IMPLEMENTATION
 ///////////////////////////////////////////////////////
 
-// std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::Operator::computeReceptiveField(
-//         const std::size_t firstIdx, const std::vector<Aidge::DimSize_t>& outputDims, const Aidge::IOIndex_t outputIdx) const
-// {
-//     static_cast<void>(outputIdx);
-//     if (outputIdx >= nbOutputs()) {
-//         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range.");
-//     }
-//     if (nbInputs() != nbDataInputs()) {
-//         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overrided function.");
-//     }
-//     if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
-//         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-//     }
-//     const auto outputIdxDims = getOutput(0)->getCoord(firstIdx);
-//     for (DimIdx_t i = 0; i < outputDims.size(); ++i) {
-//         if (((outputDims[i] + outputIdxDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) {
-//             AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
-//         }
-//     }
-//     // return the same Tensor description as given in function parameter for each data input
-//     return std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>>(nbDataInputs(),std::pair<std::size_t, std::vector<Aidge::DimSize_t>>(firstIdx, outputDims));
-// }
-
 Aidge::NbElts_t Aidge::Operator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     return mImpl->getNbRequiredData(inputIdx);
 }
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 1d16e9064010269174501d3c824c705c36971641..1237fdc0b5565681ab1a6af6d88f74a48cbd5b57 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -88,6 +88,31 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aid
 }
 
 
+std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::OperatorTensor::computeReceptiveField(
+        const std::size_t firstIdx,
+        const std::vector<Aidge::DimSize_t>& outputDims,
+        const Aidge::IOIndex_t outputIdx) const
+{
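+    // Default behaviour: with no kernel/stride attributes, the receptive field of
+    // every data input is exactly the output region requested by the caller.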
+    static_cast<void>(outputIdx);
+    if (outputIdx >= nbOutputs()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range.");
+    }
+    if (nbInputs() != nbData()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overrided function.");
+    }
+    if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    }
+    const auto outputIdxDims = getOutput(0)->getCoord(firstIdx);
+    for (DimIdx_t i = 0; i < outputDims.size(); ++i) {
+        if (((outputDims[i] + outputIdxDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+        }
+    }
+    // return the same Tensor description as given in function parameter for each data input
+    return std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::size_t, std::vector<Aidge::DimSize_t>>(firstIdx, outputDims));
+}
+
 void Aidge::OperatorTensor::computeOutputDims() {
     // check inputs have been associated
     bool associated = (nbInputs() > 0); // do not compute anything if no input
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a25e290dff35e4257d486613a5fe06894119d367
--- /dev/null
+++ b/src/operator/Slice.cpp
@@ -0,0 +1,16 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <string>
+
+#include "aidge/operator/Slice.hpp"
+
+const std::string Aidge::Slice_Op::Type = "Slice";
\ No newline at end of file
diff --git a/src/recipies/HorizontalTiling.cpp b/src/recipies/HorizontalTiling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d8eb015939e7be19eb866b75e5a5601ba80631d0
--- /dev/null
+++ b/src/recipies/HorizontalTiling.cpp
@@ -0,0 +1,93 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <set>
+#include <memory>
+#include <vector>
+#include <utility>
+
+#include "aidge/recipies/Recipies.hpp"
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/utils/Types.h"
+
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Concat.hpp"
+#include "aidge/operator/Slice.hpp"
+
+// TODO: assert Operator uses Tensors when implemented
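+// Splits the output of a Conv node into nbSlices equal tiles along `axis`: each tile
+// is produced by a clone of the Conv fed through a Slice of the input, and a final
+// Concat node reassembles the full output.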
+std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std::shared_ptr<Aidge::Node>& node,
+                                                            const Aidge::DimIdx_t axis,
+                                                            const std::size_t nbSlices)
+{
+    if (node->getOperator()->type() != "Conv") {
+        AIDGE_INTERNAL_ASSERT("Operator should be a Convolution.");
+    }
+    const auto& op = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator());
+    if (op->nbOutputs() != 1 || op->nbData() > 1) {
+        AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now.");
+    }
+    if (!op->outputDimsForwarded()) {
+        AIDGE_INTERNAL_ASSERT("Dimensions must be forwarded before any tiling");
+    }
+    // start by doing a tiling with strict dimensions division
+    const auto& outTensor = op->getOutput(0);
+    if (outTensor->dims()[axis] % nbSlices != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Output dimension along the tiling axis should be a multiple of nbSlices.");
+    }
+
+    // dimensions of a Slice
+    std::vector<DimSize_t> outputDims = outTensor->dims();
+    outputDims[axis] /= nbSlices;
+
+    std::vector<DimSize_t> currentFirstDims = std::vector<DimSize_t>(outTensor->nbDims(), 0);
+
+    std::set<std::shared_ptr<Aidge::Node>> res;
+    auto concat = Concat(nbSlices, axis);
+    res.insert(concat);
+
+    // check slice sizes
+    // const auto inputDims = op->computeReceptiveField(currentFirstDims[axis], outputDims, 0);
+    // std::vector<bool> shareTensor(node->nbInputs(), false);
+    // for (DimSize_t inputID = 0; inputID < node->nbInputs(); ++inputID) {
+    //     const auto inTensor = std::dynamic_pointer_cast<Tensor>(node->getOperator()->getRawInput(inputID));
+    //     if (inTensor->dims() == inputDims[inputID].second)
+    //         shareTensor[inputID] = true;
+    // }
+
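+    // Parameter producers (weights, bias) are cloned once and shared by every tile.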
+    std::vector<std::shared_ptr<Node>> clonedInputs = std::vector<std::shared_ptr<Node>>(node->nbInputs(), nullptr);
+    for (std::size_t i = node->nbData(); i < node ->nbInputs(); ++i) {
+        clonedInputs[i] = node -> getParent(i) -> cloneSharedOperators();
+        clonedInputs[i] -> setName(node -> name() + "_0");
+        res.insert(clonedInputs[i]);
+    }
+
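+    // Walk the output along `axis` one tile at a time: compute the tile's receptive
+    // field, Slice the input accordingly, clone the Conv and route its result to Concat.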
+    for (; currentFirstDims[axis] < outTensor->dims()[axis]; currentFirstDims[axis] += outputDims[axis]) {
+        const auto inputDims = op->computeReceptiveField(outTensor->getIdx(currentFirstDims), outputDims, 0);
+        auto newNode = node -> clone(); // no input associated to clones
+        newNode -> setName(node->name() + "_" + std::to_string(currentFirstDims[axis]));
+        clonedInputs[1] -> addChild(newNode, 0, 1);
+        clonedInputs[2] -> addChild(newNode, 0, 2);
+        // Slice for input and each parameter
+        auto slice = Slice(inputDims[0].first, inputDims[0].second, "Slice_" + std::to_string(currentFirstDims[axis]));
+        slice -> addChild(newNode, 0, 0);
+        newNode -> addChild(concat, 0, currentFirstDims[axis] / outputDims[axis]); // Concat input index = tile number
+
+        res.insert(slice);
+        res.insert(newNode);
+    }
+
+    return res;
+}
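+
+// Hypothetical usage sketch (illustrative names, assuming dims were forwarded first):
+//     std::set<std::shared_ptr<Node>> tiles = getConvHorizontalTiling(myConv, /*axis*/ 2, /*nbSlices*/ 4);
+// The returned nodes (Slices, Conv clones and the Concat) can then be wired into the
+// surrounding GraphView in place of the original Conv node.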
\ No newline at end of file
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 1f34091e54c0f83dae6b60589c20fb8fdf1d5064..3afbcd0442fd40214687751d50bfc98809bba840 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -19,6 +19,7 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/operator/OperatorTensor.hpp"
 
 void drawProgressBar(double progress, int barWidth, const std::string& additionalInfo = "") {
     putchar('[');
diff --git a/unit_tests/operator/Test_ConvDepthWise_Op.cpp b/unit_tests/operator/Test_ConvDepthWise_Op.cpp
index ef68c439d3a3cdf95b7122c1b41bc9fc97311f2d..14d4dc537f527b32414151ee7f93e601f5a4bd8a 100644
--- a/unit_tests/operator/Test_ConvDepthWise_Op.cpp
+++ b/unit_tests/operator/Test_ConvDepthWise_Op.cpp
@@ -22,47 +22,52 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-// TEST_CASE("[core/operator] ConvDepthWise_Op(computeReceptiveField)", "[Operator][computeReceptiveFiled][ConvDepthWise]") {
-//     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
-//     auto conv1 = ConvDepthWise({5, 5}, "conv1");         // output dims: {16, 3, 220, 220}
-//     auto conv2 = ConvDepthWise({3, 3}, "conv2");         // output dims: {16, 3, 218, 218}
-//     auto conv3 = ConvDepthWise({2, 2}, "conv3", {2,2});  // output dims: {16, 3, 109, 109}
-//     auto conv4 = ConvDepthWise({1, 1}, "conv4");         // output dims: {16, 3, 109, 109}
+TEST_CASE("[core/operator] ConvDepthWise_Op(computeReceptiveField)", "[Operator][computeReceptiveFiled][ConvDepthWise]") {
+    auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
+    auto cdw1 = ConvDepthWise(3, {5, 5}, "cdw1");         // output dims: {16, 3, 220, 220}
+    auto cdw2 = ConvDepthWise(3, {3, 3}, "cdw2");         // output dims: {16, 3, 218, 218}
+    auto cdw3 = ConvDepthWise(3, {2, 2}, "cdw3", {2,2});  // output dims: {16, 3, 109, 109}
+    auto cdw4 = ConvDepthWise(3, {1, 1}, "cdw4");         // output dims: {16, 3, 109, 109}
 
-//     auto g = std::make_shared<GraphView>("TestGraph");
+    auto g = std::make_shared<GraphView>("TestGraph");
 
-//     dataProvider->addChild(conv1, 0);
-//     g->add(conv1);
-//     g->addChild(conv2, conv1, 0);
-//     g->addChild(conv3, conv2, 0);
-//     g->addChild(conv4, conv3, 0);
+    dataProvider->addChild(cdw1, 0);
+    g->add(cdw1);
+    g->addChild(cdw2, cdw1, 0);
+    g->addChild(cdw3, cdw2, 0);
+    g->addChild(cdw4, cdw3, 0);
 
-//     g->forwardDims();
+    g->forwardDims();
 
-//     SECTION("Check individual receptive fields") {
-//         auto res1 = conv1->getOperator()->computeReceptiveField(0, {16,3,10,10});
-//         auto res2 = conv2->getOperator()->computeReceptiveField(conv2->getOperator()->output(0).getIdx({3,1,100,28}), {4,2,30,40});
-//         auto res3 = conv3->getOperator()->computeReceptiveField(0, {1,1,109,109});
-//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->input(0).getIdx({5,0,108,108}), {10,1,1,1});
+    auto op1 = std::dynamic_pointer_cast<OperatorTensor>(cdw1 -> getOperator());
+    auto op2 = std::dynamic_pointer_cast<OperatorTensor>(cdw2 -> getOperator());
+    auto op3 = std::dynamic_pointer_cast<OperatorTensor>(cdw3 -> getOperator());
+    auto op4 = std::dynamic_pointer_cast<OperatorTensor>(cdw4 -> getOperator());
 
-//         REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
-//         REQUIRE(((res2[0].first == conv2->getOperator()->input(0).getIdx({3,1,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 2, 32, 42}))));
-//         REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 1, 218, 218}))));
-//         REQUIRE(((res4[0].first == conv4->getOperator()->input(0).getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 1, 1, 1}))));
-//     }
+    SECTION("Check individual receptive fields") {
+        auto res1 = op1->computeReceptiveField(0, {16,3,10,10});
+        auto res2 = op2->computeReceptiveField(op2->getOutput(0)->getIdx({3,1,100,28}), {4,2,30,40});
+        auto res3 = op3->computeReceptiveField(0, {1,1,109,109});
+        auto res4 = op4->computeReceptiveField(op4->getInput(0)->getIdx({5,0,108,108}), {10,1,1,1});
 
-//     SECTION("Check receptive field propagation") {
-//         // input:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
-//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->input(0).getIdx({5,0,50,50}), {1,1,1,1});
-//         // conv4 RF:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
-//         auto res3 = conv3->getOperator()->computeReceptiveField(res4[0].first, res4[0].second);
-//         // conv3 RF:  first-{5, 0, 100, 100} dims-{1, 1, 2, 2}
-//         auto res2 = conv2->getOperator()->computeReceptiveField(res3[0].first, res3[0].second);
-//         // conv2 RF:  first-{5, 0, 100, 100} dims-{1, 1, 4, 4}
-//         auto res1 = conv1->getOperator()->computeReceptiveField(res2[0].first, res2[0].second);
-//         // conv1 RF:  first-{5, 0, 100, 100} dims-{1, 1, 8, 8}
+        REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
+        REQUIRE(((res2[0].first == op2->getInput(0)->getIdx({3,1,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 2, 32, 42}))));
+        REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 1, 218, 218}))));
+        REQUIRE(((res4[0].first == op4->getInput(0)->getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 1, 1, 1}))));
+    }
 
-//         REQUIRE(((res1[0].first == conv1->getOperator()->input(0).getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 1, 8, 8}))));
-//     }
-// }
+    SECTION("Check receptive field propagation") {
+        // input:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
+        auto res4 = op4->computeReceptiveField(op4->getInput(0)->getIdx({5,0,50,50}), {1,1,1,1});
+        // cdw4 RF:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
+        auto res3 = op3->computeReceptiveField(res4[0].first, res4[0].second);
+        // cdw3 RF:  first-{5, 0, 100, 100} dims-{1, 1, 2, 2}
+        auto res2 = op2->computeReceptiveField(res3[0].first, res3[0].second);
+        // cdw2 RF:  first-{5, 0, 100, 100} dims-{1, 1, 4, 4}
+        auto res1 = op1->computeReceptiveField(res2[0].first, res2[0].second);
+        // cdw1 RF:  first-{5, 0, 100, 100} dims-{1, 1, 8, 8}
+
+        REQUIRE(((res1[0].first == op1->getInput(0)->getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 1, 8, 8}))));
+    }
+}
 }  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/operator/Test_Conv_Op.cpp b/unit_tests/operator/Test_Conv_Op.cpp
index ac667ec5af69dccc3e421530a17aca88018aab09..a3e84999eb2e2a31f1217330ac9718f35b0ca396 100644
--- a/unit_tests/operator/Test_Conv_Op.cpp
+++ b/unit_tests/operator/Test_Conv_Op.cpp
@@ -22,58 +22,65 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-// TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeReceptiveField][Conv]") {
-//     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
-//     auto conv1 = Conv(3, 32, {5, 5}, "conv1");          // output dims: {16, 32, 220, 220}
-//     auto conv2 = Conv(32, 64, {3, 3}, "conv2");         // output dims: {16, 64, 218, 218}
-//     auto conv3 = Conv(64, 10, {2, 2}, "conv3", {2,2});  // output dims: {16, 10, 109, 109}
-//     auto conv4 = Conv(10, 10, {1, 1}, "conv4");         // output dims: {16, 10, 109, 109}
+TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeReceptiveField][Conv]") {
+    auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
+    auto conv1 = Conv(3, 32, {5, 5}, "conv1");          // output dims: {16, 32, 220, 220}
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2");         // output dims: {16, 64, 218, 218}
+    auto conv3 = Conv(64, 10, {2, 2}, "conv3", {2,2});  // output dims: {16, 10, 109, 109}
+    auto conv4 = Conv(10, 10, {1, 1}, "conv4");         // output dims: {16, 10, 109, 109}
 
-//     auto g = std::make_shared<GraphView>("TestGraph");
+    auto g = std::make_shared<GraphView>("TestGraph");
 
-//     dataProvider->addChild(conv1, 0);
-//     g->add(conv1);
-//     g->addChild(conv2, conv1, 0);
-//     g->addChild(conv3, conv2, 0);
-//     g->addChild(conv4, conv3, 0);
+    dataProvider->addChild(conv1, 0);
+    g->add(conv1);
+    g->addChild(conv2, conv1, 0);
+    g->addChild(conv3, conv2, 0);
+    g->addChild(conv4, conv3, 0);
 
-//     g->forwardDims();
+    g->forwardDims();
 
-//     SECTION("Check individual receptive fields") {
-//         auto res1 = conv1->getOperator()->computeReceptiveField(0, {16,32,10,10});
-//         auto res2 = conv2->getOperator()->computeReceptiveField(conv2->getOperator()->output(0).getIdx({3,20,100,28}), {4,20,30,40});
-//         auto res3 = conv3->getOperator()->computeReceptiveField(0, {1,1,109,109});
-//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->output(0).getIdx({5,0,108,108}), {10,10,1,1});
+    auto op1 = std::dynamic_pointer_cast<OperatorTensor>(conv1 -> getOperator());
+    auto op2 = std::dynamic_pointer_cast<OperatorTensor>(conv2 -> getOperator());
+    auto op3 = std::dynamic_pointer_cast<OperatorTensor>(conv3 -> getOperator());
+    auto op4 = std::dynamic_pointer_cast<OperatorTensor>(conv4 -> getOperator());
 
-//         REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
-//         REQUIRE(((res2[0].first == conv2->getOperator()->input(0).getIdx({3,0,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 32, 32, 42}))));
-//         REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 64, 218, 218}))));
-//         REQUIRE(((res4[0].first == conv4->getOperator()->input(0).getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 10, 1, 1}))));
-//     }
+    SECTION("Check individual receptive fields") {
+        auto res1 = op1 -> computeReceptiveField(0, {16,32,10,10});
+        auto res2 = op2 -> computeReceptiveField(op2 -> getOutput(0)->getIdx({3,20,100,28}), {4,20,30,40});
+        auto res3 = op3 -> computeReceptiveField(0, {1,1,109,109});
+        auto res4 = op4 -> computeReceptiveField(op4 -> getOutput(0)->getIdx({5,0,108,108}), {10,10,1,1});
 
-//     SECTION("Check receptive field propagation") {
-//         // input:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
-//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->output(0).getIdx({5,0,50,50}), {1,1,1,1});
-//         // conv4 RF:  first-{5, 0, 50, 50}  dims-{1, 10, 1, 1}
-//         auto res3 = conv3->getOperator()->computeReceptiveField(res4[0].first, res4[0].second);
-//         // conv3 RF:  first-{5, 0, 100, 100} dims-{1, 64, 2, 2}
-//         auto res2 = conv2->getOperator()->computeReceptiveField(res3[0].first, res3[0].second);
-//         // conv2 RF:  first-{5, 0, 100, 100} dims-{1, 32, 4, 4}
-//         auto res1 = conv1->getOperator()->computeReceptiveField(res2[0].first, res2[0].second);
-//         // conv1 RF:  first-{5, 0, 100, 100} dims-{1, 3, 8, 8}
+        REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
+        REQUIRE(((res1[1].first == 0) && (res1[1].second == std::vector<DimSize_t>({32, 3, 5, 5}))));
+        REQUIRE(((res1[2].first == 0) && (res1[2].second == std::vector<DimSize_t>({32}))));
+        REQUIRE(((res2[0].first == op2->getInput(0)->getIdx({3,0,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 32, 32, 42}))));
+        REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 64, 218, 218}))));
+        REQUIRE(((res4[0].first == op4->getInput(0)->getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 10, 1, 1}))));
+    }
 
-//         REQUIRE(((res1[0].first == conv1->getOperator()->input(0).getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 3, 8, 8}))));
+    SECTION("Check receptive field propagation") {
+        // input:  first-{5, 0, 50, 50}  dims-{1, 1, 1, 1}
+        auto res4 = op4->computeReceptiveField(op4->getOutput(0)->getIdx({5,0,50,50}), {1,1,1,1});
+        // conv4 RF:  first-{5, 0, 50, 50}  dims-{1, 10, 1, 1}
+        auto res3 = op3->computeReceptiveField(res4[0].first, res4[0].second);
+        // conv3 RF:  first-{5, 0, 100, 100} dims-{1, 64, 2, 2}
+        auto res2 = op2->computeReceptiveField(res3[0].first, res3[0].second);
+        // conv2 RF:  first-{5, 0, 100, 100} dims-{1, 32, 4, 4}
+        auto res1 = op1->computeReceptiveField(res2[0].first, res2[0].second);
+        // conv1 RF:  first-{5, 0, 100, 100} dims-{1, 3, 8, 8}
 
+        REQUIRE(((res1[0].first == op1->getInput(0)->getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 3, 8, 8}))));
 
-//         // std::cout << "conv1: {";
-//         // std::cout << conv1->getOperator()->input(0).getCoord(res1[0].first)[0] << ", "
-//         //           << conv1->getOperator()->input(0).getCoord(res1[0].first)[1] << ", "
-//         //           << conv1->getOperator()->input(0).getCoord(res1[0].first)[2] << ", "
-//         //           << conv1->getOperator()->input(0).getCoord(res1[0].first)[3] << "} - {";
-//         // std::cout << res1[0].second[0] << ", "
-//         //           << res1[0].second[1] << ", "
-//         //           << res1[0].second[2] << ", "
-//         //           << res1[0].second[3] << "}" << std::endl;
-//     }
-// }
+
+        // std::cout << "conv1: {";
+        // std::cout << op1->input(0).getCoord(res1[0].first)[0] << ", "
+        //           << op1->input(0).getCoord(res1[0].first)[1] << ", "
+        //           << op1->input(0).getCoord(res1[0].first)[2] << ", "
+        //           << op1->input(0).getCoord(res1[0].first)[3] << "} - {";
+        // std::cout << res1[0].second[0] << ", "
+        //           << res1[0].second[1] << ", "
+        //           << res1[0].second[2] << ", "
+        //           << res1[0].second[3] << "}" << std::endl;
+    }
+}
 }  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/recipies/Test_HorizontalTiling.cpp b/unit_tests/recipies/Test_HorizontalTiling.cpp
deleted file mode 100644
index c9fb5ed6dc8a5d994ce2d3434a8176c29e418f95..0000000000000000000000000000000000000000
--- a/unit_tests/recipies/Test_HorizontalTiling.cpp
+++ /dev/null
@@ -1,200 +0,0 @@
-// /********************************************************************************
-//  * Copyright (c) 2023 CEA-List
-//  *
-//  * This program and the accompanying materials are made available under the
-//  * terms of the Eclipse Public License 2.0 which is available at
-//  * http://www.eclipse.org/legal/epl-2.0.
-//  *
-//  * SPDX-License-Identifier: EPL-2.0
-//  *
-//  ********************************************************************************/
-
-// #include <catch2/catch_test_macros.hpp>
-// #include <set>
-
-// #include "aidge/graph/GraphView.hpp"
-// #include "aidge/graph/OpArgs.hpp"
-// #include "aidge/operator/Conv.hpp"
-// #include "aidge/operator/ReLU.hpp"
-// #include "aidge/recipies/Recipies.hpp"
-
-
-// namespace Aidge {
-
-// TEST_CASE("[core/recipies] Tiling(transformation)", "[Tiling][Recipies]") {
-
-//     SECTION("Transform a pre-generated GraphView") {
-
-//         SECTION("Simple Node: Conv") {
-//             std::shared_ptr<Node> myConv = Conv(3,4,{3,3}, "myconv");
-//             myConv->getOperator()->setDatatype(DataType::Int32);
-//             myConv->getOperator()->setBackend("cpu");
-//             std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
-//                 {
-//                     {
-//                         {{  0,   1,   2},
-//                          {  3,   4,   5},
-//                          {  6,   7,   8}},
-//                         {{  9,  10,  11},
-//                          { 12,  13,  14},
-//                          { 15,  16,  17}},
-//                         {{ 18,  19,  20},
-//                          { 21,  22,  23},
-//                          { 24,  25,  26}}
-//                     },
-//                     {
-//                         {{ 27,  28,  29},
-//                         { 30,  31,  32},
-//                         { 33,  34,  35}},
-//                         {{ 36,  37,  38},
-//                         { 39,  40,  41},
-//                         { 42,  43,  44}},
-//                         {{ 45,  46,  47},
-//                         { 48,  49,  50},
-//                         { 51,  52,  53}}
-//                     },
-//                     {
-//                         {{ 54,  55,  56},
-//                         { 57,  58,  59},
-//                         { 60,  61,  62}},
-//                         {{ 63,  64,  65},
-//                         { 66,  67,  68},
-//                         { 69,  70,  71}},
-//                         {{ 72,  73,  74},
-//                         { 75,  76,  77},
-//                         { 78,  79,  80}}
-//                     },
-//                     {
-//                         {{ 81,  82,  83},
-//                         { 84,  85,  86},
-//                         { 87,  88,  89}},
-//                         {{ 90,  91,  92},
-//                         { 93,  94,  95},
-//                         { 96,  97,  98}},
-//                         {{ 99, 100, 101},
-//                         {102, 103, 104},
-//                         {105, 106, 107}}
-//                     }
-//                 }
-//             });
-//             std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
-//             std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
-//                 {
-//                     {
-//                         {{  0,   1,   2,   3,   4},
-//                         {  5,   6,   7,   8,   9},
-//                         { 10,  11,  12,  13,  14},
-//                         { 15,  16,  17,  18,  19},
-//                         { 20,  21,  22,  23,  24}},
-
-//                         {{ 25,  26,  27,  28,  29},
-//                         { 30,  31,  32,  33,  34},
-//                         { 35,  36,  37,  38,  39},
-//                         { 40,  41,  42,  43,  44},
-//                         { 45,  46,  47,  48,  49}},
-
-//                         {{ 50,  51,  52,  53,  54},
-//                         { 55,  56,  57,  58,  59},
-//                         { 60,  61,  62,  63,  64},
-//                         { 65,  66,  67,  68,  69},
-//                         { 70,  71,  72,  73,  74}}
-//                     },
-//                     {
-//                         {{ 75,  76,  77,  78,  79},
-//                         { 80,  81,  82,  83,  84},
-//                         { 85,  86,  87,  88,  89},
-//                         { 90,  91,  92,  93,  94},
-//                         { 95,  96,  97,  98,  99}},
-
-//                         {{100, 101, 102, 103, 104},
-//                         {105, 106, 107, 108, 109},
-//                         {110, 111, 112, 113, 114},
-//                         {115, 116, 117, 118, 119},
-//                         {120, 121, 122, 123, 124}},
-
-//                         {{125, 126, 127, 128, 129},
-//                         {130, 131, 132, 133, 134},
-//                         {135, 136, 137, 138, 139},
-//                         {140, 141, 142, 143, 144},
-//                         {145, 146, 147, 148, 149}}
-//                     }
-//                 }
-//             });
-//             std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> {
-//                 {
-//                     {
-//                         {{ 15226,  15577,  15928},
-//                          { 16981,  17332,  17683},
-//                          { 18736,  19087,  19438}},
-
-//                         {{ 37818,  38898,  39978},
-//                          { 43218,  44298,  45378},
-//                          { 48618,  49698,  50778}},
-
-//                         {{ 60426,  62235,  64044},
-//                          { 69471,  71280,  73089},
-//                          { 78516,  80325,  82134}},
-
-//                         {{ 83016,  85554,  88092},
-//                          { 95706,  98244, 100782},
-//                          {108396, 110934, 113472}}
-//                     },
-//                     {
-//                         {{ 41551,  41902,  42253},
-//                          { 43306,  43657,  44008},
-//                          { 45061,  45412,  45763}},
-
-//                         {{118818, 119898, 120978},
-//                          {124218, 125298, 126378},
-//                          {129618, 130698, 131778}},
-
-//                         {{196101, 197910, 199719},
-//                          {205146, 206955, 208764},
-//                          {214191, 216000, 217809}},
-
-//                         {{273366, 275904, 278442},
-//                          {286056, 288594, 291132},
-//                          {298746, 301284, 303822}}
-//                     }
-//                 }
-//             });
-//             myConv->getOperator()->associateInput(0,myInput);
-//             myConv->getOperator()->associateInput(1,myWeights);
-//             myConv->getOperator()->associateInput(2,myBias);
-//             myConv->getOperator()->computeOutputDims();
-
-//             std::shared_ptr<GraphView> g;
-//             g->add(myConv);
-//             horizontalTiling({myConv}, 3);
-
-//             SequentialScheduler s(g);
-//             s->forward();
-
-//             // myConv->getOperator()->getOutput(0)->print();
-//             REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
-//         }
-//     }
-// }
-// }
-//         // std::shared_ptr<GraphView> g = Sequential({
-//         //     Conv(3, 16, {3,3}, "conv1"),
-//         //     ReLU("relu1"),
-//         //     Conv(16, 32, {1,1}, "conv2"),
-//         //     Conv(32, 16, {1,1}, "conv3"),
-//         //     Conv(16, 10, {3,3}, "conv4"),
-//         //     ReLU("relu2")
-//         // });
-
-//     //     for (auto& individualConv : g->match("Conv")) {
-//     //         auto tiledConv = horizontalTiling(individualConv);
-//     //         g->replace(individualConv, tiledConv);
-//     //     }
-//     // }
-
-//     // SECTION("Create the GraphView with tiled layers") {
-//     //     std::shared_ptr<GraphView> g;
-//     //     g->addChild(horizontalTiling(Conv()))
-//     // }
-
-// // }
-// // } // namespace Aidge
\ No newline at end of file