diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
index 62878a57d7f00d36eff92f95e5a2efff484e42df..6bfae0be1e31a89d27413677fa4cdc4612561333 100644
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ b/.gitlab/ci/build.gitlab-ci.yml
@@ -130,16 +130,26 @@ build:windows_python:
   stage: build
   needs: []
   tags:
-    - docker
+    - windows
 
+  image: buildtools
+  before_script:
+    # Install Chocolatey
+    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install git -Y
+    - choco install python -Y
+    # Update PATH
+    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
   script:
-    - python3 -m pip install virtualenv
+    - python -m pip install virtualenv
     - virtualenv venv
-    - source venv/bin/activate
+    - venv\Scripts\Activate.ps1
     # Numpy dependancy for unit test
-    - python3 -m pip install numpy
-    - export AIDGE_INSTALL=`pwd`/install
-    - python3 -m pip install .
+    - python -m pip install numpy
+    - $env:AIDGE_INSTALL = "$pwd" + "\install"
+    - python -m pip install .
   artifacts:
     expire_in: 1 week
     paths:
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 47ded2a462477958320bfad3ad84e6b8f6ef6082..c3f97f96e6b797afca7a28928f717691ae998185 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -35,7 +35,8 @@
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
-//#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/MetaOperatorDefs.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/ReLU.hpp"
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index c354f76f59dc7f907c9216d8a0c063ca73a082ce..7342565a0228d0cc81445320b9e6bfa1c56e5e02 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -123,7 +123,7 @@ public:
     }
 
     /**
-     * @brief List dataInput connections of the GraphView object's inputNodes.
+     * @brief List the dataInput connections of the GraphView object's inputNodes that come from outside the GraphView.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
     std::vector<std::pair<NodePtr, IOIndex_t>> dataInputs() const;
@@ -136,7 +136,7 @@ public:
     inline auto dataInputs(const std::string name) const { return mNodeRegistry.at(name)->dataInputs(); }
 
     /**
-     * @brief List input connections of the GraphView object's inputNodes.
+     * @brief List the input connections of the GraphView object's inputNodes that come from outside the GraphView.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
     std::vector<std::pair<NodePtr, IOIndex_t>> inputs() const;
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 8e9fa3a04002501d599928a52262386cd92ef2cb..db6cbeabc85b066654b8622bb1fc2657cce38dd7 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -30,8 +30,7 @@ namespace Aidge
 enum class AvgPoolingAttr
 {
     StrideDims,
-    KernelDims,
-    PaddingDims
+    KernelDims
 };
 
 template<DimIdx_t DIM>
@@ -43,8 +42,7 @@ class AvgPooling_Op : public Operator,
                       public StaticAttributes<
                           AvgPoolingAttr,
                           std::array<DimSize_t, DIM>,
-                          std::array<DimSize_t, DIM>,
-                          std::array<DimSize_t, (DIM << 1)>>
+                          std::array<DimSize_t, DIM>>
 {
 private:
     // FIXME: change accessibility
@@ -59,20 +57,16 @@ public:
     using Attributes_ = StaticAttributes<
         AvgPoolingAttr,
         std::array<DimSize_t, DIM>,
-        std::array<DimSize_t, DIM>,
-        std::array<DimSize_t, (DIM << 1)>>;
+        std::array<DimSize_t, DIM>>;
     template<AvgPoolingAttr e> using attr = typename Attributes_::template attr<e>;
 
     constexpr AvgPooling_Op(
         const std::array<DimSize_t, DIM> &kernel_dims,
-        const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
-        const std::array<DimSize_t, (DIM << 1)> &padding_dims
-        = create_array<DimSize_t, (DIM << 1)>(0)) :
+        const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1)) :
         Operator(Type),
         Attributes_(
             attr<AvgPoolingAttr::StrideDims>(stride_dims),
-            attr<AvgPoolingAttr::KernelDims>(kernel_dims),
-            attr<AvgPoolingAttr::PaddingDims>(padding_dims))
+            attr<AvgPoolingAttr::KernelDims>(kernel_dims))
     {
         setDatatype(DataType::Float32);
     }
@@ -128,10 +122,7 @@ public:
                       + static_cast<DimSize_t>(std::floor(
                           static_cast<float>(
                               mInput->dims()[dim + 2]
-                              - this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]
-                              + this->template getAttr<AvgPoolingAttr::PaddingDims>()[dim]
-                              + this->template getAttr<
-                                  AvgPoolingAttr::PaddingDims>()[dim + DIM])
+                              - this->template getAttr<AvgPoolingAttr::KernelDims>()[dim])
                           / static_cast<float>(this->template getAttr<
                                                AvgPoolingAttr::StrideDims>()[dim])));
             }
@@ -219,9 +210,7 @@ template<std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> AvgPooling(
     const std::array<DimSize_t, DIM> &kernel_dims,
     const std::string &name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
-    const std::array<DimSize_t, (DIM << 1)> &padding_dims
-    = create_array<DimSize_t, (DIM << 1)>(0))
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))
 {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(
@@ -229,7 +218,7 @@ inline std::shared_ptr<Node> AvgPooling(
         "Too many kernel dimensions required by AvgPooling, not supported");
     auto avgPool = std::make_shared<Node>(
         std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(
-            kernel_dims, stride_dims, padding_dims),
+            kernel_dims, stride_dims),
         name);
     return avgPool;
 }
@@ -238,14 +227,12 @@ template<DimSize_t DIM>
 inline std::shared_ptr<Node> AvgPooling(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string &name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
-    const std::array<DimSize_t, (DIM << 1)> &padding_dims
-    = create_array<DimSize_t, (DIM << 1)>(0))
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))
 {
     static_assert(
         DIM <= MaxDim,
         "Too many kernel dimensions required by AvgPooling, not supported");
-    return AvgPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+    return AvgPooling(to_array(kernel_dims), name, stride_dims);
 }
 } // namespace Aidge
 
@@ -253,7 +240,7 @@ namespace
 {
 template<>
 const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[]
-    = {"StrideDims", "KernelDims", "PaddingDims"};
+    = {"StrideDims", "KernelDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index b291deb4a7a4515c47fa6e49e089b0757ff1fdf5..37dc16de422dddde9a316931e47369de1d832fa8 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -33,8 +33,7 @@ enum class ConvAttr
     DilationDims,
     InChannels,
     OutChannels,
-    KernelDims,
-    PaddingDims
+    KernelDims
 };
 
 template<DimIdx_t DIM>
@@ -49,8 +48,7 @@ class Conv_Op : public Operator,
                     std::array<DimSize_t, DIM>,
                     DimSize_t,
                     DimSize_t,
-                    std::array<DimSize_t, DIM>,
-                    std::array<DimSize_t, (DIM << 1)>>
+                    std::array<DimSize_t, DIM>>
 {
 public:
     // FIXME: change accessibility
@@ -71,8 +69,7 @@ public:
         std::array<DimSize_t, DIM>,
         DimSize_t,
         DimSize_t,
-        std::array<DimSize_t, DIM>,
-        std::array<DimSize_t, (DIM << 1)>>;
+        std::array<DimSize_t, DIM>>;
     template<ConvAttr e> using attr = typename Attributes_::template attr<e>;
 
     constexpr Conv_Op(
@@ -80,8 +77,6 @@ public:
         DimSize_t out_channels,
         const std::array<DimSize_t, DIM> &kernel_dims,
         const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
-        const std::array<DimSize_t, (DIM << 1)> &padding_dims
-        = create_array<DimSize_t, (DIM << 1)>(0),
         const std::array<DimSize_t, DIM> &dilation_dims
         = create_array<DimSize_t, DIM>(1)) :
         Operator(Type),
@@ -90,8 +85,7 @@ public:
             attr<ConvAttr::DilationDims>(dilation_dims),
             attr<ConvAttr::InChannels>(in_channels),
             attr<ConvAttr::OutChannels>(out_channels),
-            attr<ConvAttr::KernelDims>(kernel_dims),
-            attr<ConvAttr::PaddingDims>(padding_dims))
+            attr<ConvAttr::KernelDims>(kernel_dims))
     {
         setDatatype(DataType::Float32);
     }
@@ -162,11 +156,7 @@ public:
                 outputDims[dim + 2]
                     = 1
                       + static_cast<DimSize_t>(floor(
-                          static_cast<float>(
-                              mInputs[0]->dims()[dim + 2] - kernelExtent
-                              + this->template getAttr<ConvAttr::PaddingDims>()[dim]
-                              + this->template getAttr<
-                                  ConvAttr::PaddingDims>()[dim + DIM])
+                          static_cast<float>(mInputs[0]->dims()[dim + 2] - kernelExtent)
                           / static_cast<float>(
                               this->template getAttr<ConvAttr::StrideDims>()[dim])));
             }
@@ -258,8 +248,6 @@ inline std::shared_ptr<Node> Conv(
     const std::array<DimSize_t, DIM> &kernel_dims,
     const std::string &name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
-    const std::array<DimSize_t, (DIM << 1)> &padding_dims
-    = create_array<DimSize_t, (DIM << 1)>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t, DIM>(1))
 {
     // FIXME: properly handle default w&b initialization in every cases
@@ -267,16 +255,11 @@ inline std::shared_ptr<Node> Conv(
         DIM <= MaxDim, "Too many kernel dimensions required by Conv, not supported");
     auto conv = std::make_shared<Node>(
         std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(
-            in_channels,
-            out_channels,
-            kernel_dims,
-            stride_dims,
-            padding_dims,
-            dilation_dims),
+            in_channels, out_channels, kernel_dims, stride_dims, dilation_dims),
         name);
     // addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
     addProducer(conv, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
-    addProducer(conv, 2, {out_channels}, "b");
+    addProducer(conv, 2, std::array<DimSize_t, 1>({out_channels}), "b");
     return conv;
 }
 
@@ -287,8 +270,6 @@ inline std::shared_ptr<Node> Conv(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string &name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
-    const std::array<DimSize_t, (DIM << 1)> &padding_dims
-    = create_array<DimSize_t, (DIM << 1)>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t, DIM>(1))
 {
     static_assert(
@@ -299,7 +280,6 @@ inline std::shared_ptr<Node> Conv(
         to_array(kernel_dims),
         name,
         stride_dims,
-        padding_dims,
         dilation_dims);
 }
 } // namespace Aidge
@@ -308,12 +288,7 @@ namespace
 {
 template<>
 const char *const EnumStrings<Aidge::ConvAttr>::data[]
-    = {"StrideDims",
-       "DilationDims",
-       "InChannels",
-       "OutChannels",
-       "KernelDims",
-       "PaddingDims"};
+    = {"StrideDims", "DilationDims", "InChannels", "OutChannels", "KernelDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 9bf0ba23de27edb4ae005dfaba5e6e62485656da..f844ee9a80e9ae58a7b69e672fd3cc93459183a4 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -32,8 +32,7 @@ enum class ConvDepthWiseAttr
     StrideDims,
     DilationDims,
     Channels,
-    KernelDims,
-    PaddingDims
+    KernelDims
 };
 
 template<DimIdx_t DIM>
@@ -48,8 +47,7 @@ class ConvDepthWise_Op
           std::array<DimSize_t, DIM>,
           std::array<DimSize_t, DIM>,
           DimSize_t,
-          std::array<DimSize_t, DIM>,
-          std::array<DimSize_t, (DIM << 1)>>
+          std::array<DimSize_t, DIM>>
 {
 public:
     // FIXME: change accessibility
@@ -69,15 +67,12 @@ public:
         std::array<DimSize_t, DIM>,
         std::array<DimSize_t, DIM>,
         DimSize_t,
-        std::array<DimSize_t, DIM>,
-        std::array<DimSize_t, (DIM << 1)>>;
+        std::array<DimSize_t, DIM>>;
     template<ConvDepthWiseAttr e> using attr = typename Attributes_::template attr<e>;
 
     constexpr ConvDepthWise_Op(
         const std::array<DimSize_t, DIM> &kernel_dims,
         const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
-        const std::array<DimSize_t, (DIM << 1)> &padding_dims
-        = create_array<DimSize_t, (DIM << 1)>(0),
         const std::array<DimSize_t, DIM> &dilation_dims
         = create_array<DimSize_t, DIM>(1)) :
         Operator(Type),
@@ -85,8 +80,7 @@ public:
             attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
             attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
             attr<ConvDepthWiseAttr::Channels>(0),
-            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
-            attr<ConvDepthWiseAttr::PaddingDims>(padding_dims))
+            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims))
     {
         setDatatype(DataType::Float32);
     }
@@ -145,12 +139,7 @@ public:
                 outputDims[dim + 2]
                     = 1
                       + static_cast<DimSize_t>(floor(
-                          static_cast<float>(
-                              mInputs[0]->dims()[dim + 2] - kernelExtent
-                              + this->template getAttr<
-                                  ConvDepthWiseAttr::PaddingDims>()[dim]
-                              + this->template getAttr<
-                                  ConvDepthWiseAttr::PaddingDims>()[dim + DIM])
+                          static_cast<float>(mInputs[0]->dims()[dim + 2] - kernelExtent)
                           / static_cast<float>(this->template getAttr<
                                                ConvDepthWiseAttr::StrideDims>()[dim])));
             }
@@ -248,8 +237,6 @@ inline std::shared_ptr<Node> ConvDepthWise(
     const std::array<DimSize_t, DIM> &kernel_dims,
     const std::string &name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
-    const std::array<DimSize_t, (DIM << 1)> &padding_dims
-    = create_array<DimSize_t, (DIM << 1)>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t, DIM>(1))
 {
     // FIXME: properly handle default w&b initialization in every cases
@@ -258,7 +245,7 @@ inline std::shared_ptr<Node> ConvDepthWise(
         "Too many kernel dimensions required by ConvDepthWise, not supported");
     auto convDW = std::make_shared<Node>(
         std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(
-            kernel_dims, stride_dims, padding_dims, dilation_dims),
+            kernel_dims, stride_dims, dilation_dims),
         name);
     addProducer(convDW, 1, std::array<DimSize_t, 0>({}), "w");
     addProducer(convDW, 2, std::array<DimSize_t, 0>({}), "b");
@@ -270,15 +257,12 @@ inline std::shared_ptr<Node> ConvDepthWise(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string &name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
-    const std::array<DimSize_t, (DIM << 1)> &padding_dims
-    = create_array<DimSize_t, (DIM << 1)>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t, DIM>(1))
 {
     static_assert(
         DIM <= MaxDim,
         "Too many kernel dimensions required by ConvDepthWise, not supported");
-    return ConvDepthWise(
-        to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+    return ConvDepthWise(to_array(kernel_dims), name, stride_dims, dilation_dims);
 }
 } // namespace Aidge
 
@@ -286,7 +270,7 @@ namespace
 {
 template<>
 const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[]
-    = {"StrideDims", "DilationDims", "Channels", "KernelDims", "PaddingDims"};
+    = {"StrideDims", "DilationDims", "Channels", "KernelDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 02687ab81a4b30068510429f32e174815c9702e3..86511e270a996e4c4a52cfb6a2c1a1a2440e3fcb 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -210,8 +210,8 @@ FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "")
 {
     // FIXME: properly handle default w&b initialization in every cases
     auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
-    addProducer(fc, 1, {out_channels, 1}, "w");
-    addProducer(fc, 2, {(noBias ? 0 : out_channels)}, "b"); // already sets bias dims
+    addProducer(fc, 1, std::array<DimSize_t, 2>({out_channels, 1}), "w");
+    addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({out_channels})), "b"); // already sets bias dims
     return fc;
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index dda925648de4959cd1451b5201bbdc17c2892e16..5efc4a7f448509091dda6439b6efada69773380b 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -187,7 +187,7 @@ inline std::shared_ptr<Node> MatMul(DimSize_t out_channels, const std::string& n
 {
     // FIXME: properly handle default w initialization in every cases
     auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(out_channels), name);
-    addProducer(matmul, 1, {out_channels, 1}, "w");
+    addProducer(matmul, 1, std::array<DimSize_t, 2>({out_channels, 1}), "w");
     return matmul;
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index b249d06330a3927528f40f16e2cd4fff9af5fd3c..9771133c5ec06b872feeab8f5dfb89be9a1384c6 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -30,8 +30,7 @@ namespace Aidge
 enum class MaxPoolingAttr
 {
     StrideDims,
-    KernelDims,
-    PaddingDims
+    KernelDims
 };
 
 template<DimIdx_t DIM>
@@ -43,8 +42,7 @@ class MaxPooling_Op : public Operator,
                       public StaticAttributes<
                           MaxPoolingAttr,
                           std::array<DimSize_t, DIM>,
-                          std::array<DimSize_t, DIM>,
-                          std::array<DimSize_t, (DIM << 1)>>
+                          std::array<DimSize_t, DIM>>
 {
 private:
     // FIXME: change accessibility
@@ -59,20 +57,16 @@ public:
     using Attributes_ = StaticAttributes<
         MaxPoolingAttr,
         std::array<DimSize_t, DIM>,
-        std::array<DimSize_t, DIM>,
-        std::array<DimSize_t, (DIM << 1)>>;
+        std::array<DimSize_t, DIM>>;
     template<MaxPoolingAttr e> using attr = typename Attributes_::template attr<e>;
 
     constexpr MaxPooling_Op(
         const std::array<DimSize_t, DIM> &kernel_dims,
-        const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
-        const std::array<DimSize_t, (DIM << 1)> &padding_dims
-        = create_array<DimSize_t, (DIM << 1)>(0)) :
+        const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1)) :
         Operator(Type),
         Attributes_(
             attr<MaxPoolingAttr::StrideDims>(stride_dims),
-            attr<MaxPoolingAttr::KernelDims>(kernel_dims),
-            attr<MaxPoolingAttr::PaddingDims>(padding_dims)),
+            attr<MaxPoolingAttr::KernelDims>(kernel_dims)),
         mOutput(std::make_shared<Tensor>())
     {
         setDatatype(DataType::Float32);
@@ -129,10 +123,7 @@ public:
                       + static_cast<DimSize_t>(std::floor(
                           static_cast<float>(
                               mInput->dims()[dim + 2]
-                              - this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]
-                              + this->template getAttr<MaxPoolingAttr::PaddingDims>()[dim]
-                              + this->template getAttr<
-                                  MaxPoolingAttr::PaddingDims>()[dim + DIM])
+                              - this->template getAttr<MaxPoolingAttr::KernelDims>()[dim])
                           / static_cast<float>(this->template getAttr<
                                                MaxPoolingAttr::StrideDims>()[dim])));
             }
@@ -220,9 +211,7 @@ template<std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> MaxPooling(
     const std::array<DimSize_t, DIM> &kernel_dims,
     const std::string &name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
-    const std::array<DimSize_t, (DIM << 1)> &padding_dims
-    = create_array<DimSize_t, (DIM << 1)>(0))
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))
 {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(
@@ -230,7 +219,7 @@ inline std::shared_ptr<Node> MaxPooling(
         "Too many kernel dimensions required by MaxPooling, not supported");
     auto avgPool = std::make_shared<Node>(
         std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(
-            kernel_dims, stride_dims, padding_dims),
+            kernel_dims, stride_dims),
         name);
     return avgPool;
 }
@@ -239,14 +228,12 @@ template<DimSize_t DIM>
 inline std::shared_ptr<Node> MaxPooling(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string &name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1),
-    const std::array<DimSize_t, (DIM << 1)> &padding_dims
-    = create_array<DimSize_t, (DIM << 1)>(0))
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))
 {
     static_assert(
         DIM <= MaxDim,
         "Too many kernel dimensions required by MaxPooling, not supported");
-    return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+    return MaxPooling(to_array(kernel_dims), name, stride_dims);
 }
 } // namespace Aidge
 
@@ -254,7 +241,7 @@ namespace
 {
 template<>
 const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[]
-    = {"StrideDims", "KernelDims", "PaddingDims"};
+    = {"StrideDims", "KernelDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 0c77a752493d251303c036c4061823c4f8bc499d..bb34fd9c7756f103d4f31f17f815309c925306b7 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -13,21 +13,38 @@
 #define AIDGE_CORE_OPERATOR_METAOPERATOR_H_
 
 #include "aidge/operator/Operator.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/scheduler/Scheduler.hpp"
 
 namespace Aidge {
-class MetaOperator : public Operator {
+class MetaOperator_Op : public Operator,
+                public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)> {
 public:
-    MetaOperator()
-        : Operator("MetaOp")
-    {
-    }
+    std::vector<std::shared_ptr<Tensor>> mInputs;
+    std::vector<std::shared_ptr<Tensor>> mOutputs; // These are shared with the micro-graph's output tensors
+
+    // Micro-graph handling:
+    std::shared_ptr<GraphView> mGraph; // Meta operator micro-graph
+    std::shared_ptr<SequentialScheduler> mScheduler;
+    // Need to store an ordered list of input/output operators for the micro-graph,
+    // because input/output nodes in a GraphView are unordered.
+    // TODO: refactor GraphView to handle ordered input/output?
+    std::vector<std::pair<std::shared_ptr<Operator>, IOIndex_t>> mInputOps;
+    std::vector<std::pair<std::shared_ptr<Operator>, IOIndex_t>> mOutputOps;
+
+   public:
+    MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph,
+        std::vector<NodePtr> inputNodes = std::vector<NodePtr>(),
+        std::vector<NodePtr> outputNodes = std::vector<NodePtr>());
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    MetaOperator(const MetaOperator& op)
-        : Operator("MetaOp")
+    MetaOperator_Op(const MetaOperator_Op& op)
+        : Operator(op.type().c_str()),
+          mGraph(op.mGraph->clone())
     {
         // cpy-ctor
     }
@@ -37,11 +54,112 @@ public:
      * @see Operator::MatMul_Op
      */
     std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<MetaOperator>(*this);
+        return std::make_shared<MetaOperator_Op>(*this);
+    }
+
+    const std::shared_ptr<GraphView>& getMicroGraph() const {
+        return mGraph;
+    }
+
+    const std::shared_ptr<SequentialScheduler>& getMicroGraphScheduler() const {
+        return mScheduler;
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        const auto& inputOp = mInputOps[inputIdx];
+        inputOp.first->associateInput(inputOp.second, data);
+
+        // Associate inputs for custom implementation
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        // Forward dims of micro-graph
+        mGraph->forwardDims();
+
+        // Associate outputs to micro-graph outputs for custom implementation
+        for (size_t outputIdx = 0; outputIdx < mOutputOps.size(); ++outputIdx) {
+            const auto& outputOp = mOutputOps[outputIdx];
+            mOutputs[outputIdx] = outputOp.first->getOutput(outputOp.second);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutputs[0]->empty()); }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < mInputs.size() && "inputIdx out of range");
+        return *(mInputs[inputIdx].get());
+    }
+
+    inline Tensor& output(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
+        return *(mOutputs[outputIdx].get());
+    }
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < mInputs.size() && "inputIdx out of range");
+        return mInputs[inputIdx];
+    }
+
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
+        return mOutputs[outputIdx];
+    }
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < mInputs.size() && "inputIdx out of range");
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
+        return std::static_pointer_cast<Data>(mOutputs[outputIdx]);
+    }
+
+    void setBackend(const std::string &name) override {
+        if (Registrar<MetaOperator_Op>::exists({name, type()})) {
+            // A custom implementation exists for this meta operator
+            mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
+        }
+
+        // The micro-graph should always be set to the right backend, since it
+        // shares input/output tensors.
+    // The input/output tensors' backend is updated here.
+        mGraph->setBackend(name);
+    }
+
+    void setDatatype(const DataType &datatype) override {
+        // The micro-graph should always be set to the right data type, since it
+        // shares input/output tensors.
+    // The input/output tensors' data type is updated here.
+        mGraph->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return mGraph->inputs().size(); }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return mGraph->dataInputs().size(); }
+    inline IOIndex_t nbOutputs() const noexcept override final { return mGraph->outputs().size(); }
+
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override;
+    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override;
+
+    void updateConsummerProducer() override;
+    void forward() override;
+    void backward() override {
+        assert(false && "not implemented");
     }
 
-    ~MetaOperator() = default;
 };
+
+inline std::shared_ptr<Node> MetaOperator(const char *type,
+                                  const std::shared_ptr<GraphView>& graph,
+                                  const std::string& name = "")
+{
+    return std::make_shared<Node>(std::make_shared<MetaOperator_Op>(type, graph), name);
 }
+}  // namespace Aidge
 
 #endif /* MetaOperator_H_ */
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..df66cec7e1accfee1518378ce2e9697cdc7f91fb
--- /dev/null
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -0,0 +1,124 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
+#define AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
+
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/AvgPooling.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/Pad.hpp"
+
+namespace Aidge {
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
+                                  DimSize_t out_channels,
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name = "",
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+{
+    // Construct micro-graph
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(padding_dims, PadBorderType::Constant, 0.0), (!name.empty()) ? name + "_pad" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
+    // Need to specify the ordered list of input operators
+    const std::vector<NodePtr> orderedInputNodes = {pad, conv};
+
+    auto metaOp = std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}), orderedInputNodes), name);
+    addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
+    addProducer(metaOp, 2, std::array<DimSize_t, 1>({out_channels}), "b");
+    return metaOp;
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> PaddedConv(
+    DimSize_t in_channels,
+    DimSize_t out_channels,
+    DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name = "",
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
+    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+{
+    return PaddedConv<DIM>(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> PaddedConvDepthWise(DimSize_t in_channels,
+                                  DimSize_t out_channels,
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name = "",
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+{
+    // Construct micro-graph
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(padding_dims, PadBorderType::Constant, 0.0), (!name.empty()) ? name + "_pad" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
+    // Need to specify the ordered list of input operators
+    const std::vector<NodePtr> orderedInputNodes = {pad, conv};
+
+    auto metaOp = std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}), orderedInputNodes), name);
+    addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
+    addProducer(metaOp, 2, std::array<DimSize_t, 1>({out_channels}), "b");
+    return metaOp;
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> PaddedConvDepthWise(
+    DimSize_t in_channels,
+    DimSize_t out_channels,
+    DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name = "",
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
+    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+{
+    return PaddedConvDepthWise<DIM>(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> PaddedAvgPooling(DimSize_t in_channels,
+                                  DimSize_t out_channels,
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name = "",
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0})
+{
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
+        AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
+    });
+
+    return std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedAvgPooling", graph), name);
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> PaddedMaxPooling(DimSize_t in_channels,
+                                  DimSize_t out_channels,
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name = "",
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0})
+{
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
+        MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims)
+    });
+
+    return std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedMaxPooling", graph), name);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 5b0c199e75f0cedd4a0d36f6d2c87d89833e0dd5..e3544171de9b97a2795f1d936adfeff341bd32dc 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -81,7 +81,7 @@ public:
      * @param inputIdx Index of the input analysed.
      * @return NbElts_t
      */
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
+    virtual NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
     /**
      * @brief Amount of data from a specific input actually used in one computation pass.
@@ -89,7 +89,7 @@ public:
      * @param inputIdx Index of the input analysed.
      * @return NbElts_t
      */
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
+    virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
 
     /**
      * @brief Amount of data ready to be used on a specific output.
@@ -97,9 +97,9 @@ public:
      * @param outputIdx Index of the output analysed.
      * @return NbElts_t
      */
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
+    virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
 
-    void updateConsummerProducer();
+    virtual void updateConsummerProducer();
 
     virtual void forward();
 
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..deae0e6b8c5a91e5c10e7655549a4e46ac90eb0b
--- /dev/null
+++ b/include/aidge/operator/Pad.hpp
@@ -0,0 +1,233 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_PAD_H_
+#define AIDGE_CORE_OPERATOR_PAD_H_
+
+#include <array>
+#include <numeric>
+#include <vector>
+#include <cmath>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
+enum class PadBorderType { Constant, Replicate, Reflect, Wrap };
+
+template <DimIdx_t DIM>
+class Pad_Op : public Operator,
+                public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
+                public StaticAttributes<PadAttr,
+                                       std::array<std::array<DimSize_t, 2>, DIM>,
+                                       PadBorderType,
+                                       double> {
+private:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char *Type = "Pad";
+
+    Pad_Op() = delete;
+
+    using Attributes_ = StaticAttributes<PadAttr,
+                                             std::array<std::array<DimSize_t, 2>, DIM>,
+                                             PadBorderType,
+                                             double>;
+    template <PadAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
+    constexpr Pad_Op(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
+                     const PadBorderType &borderType = PadBorderType::Constant,
+                     double borderValue = 0.0)
+        : Operator(Type),
+          Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
+                           attr<PadAttr::BorderType>(borderType),
+                           attr<PadAttr::BorderValue>(borderValue)) {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Pad_Op(const Pad_Op& op)
+        : Operator(Type),
+          Attributes_(op),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Pad_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Pad_Op<DIM>>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 1 && "operators supports only 3 inputs");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInput->empty()) {
+            std::array<DimSize_t, DIM + 2> outputDims = {};
+
+            for (std::size_t dim = 0; dim < DIM; ++dim) {
+                outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[dim][0]
+                                    + mInput->dims()[dim+2]
+                                    + this->template getAttr<PadAttr::BeginEndBorders>()[dim][1];
+            }
+            outputDims[1] = mInput->dims()[1];
+            outputDims[0] = mInput->dims()[0];
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return *(mInput.get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "Pad Operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "Pad Operators has only 1 outputs");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string &name) override {
+        mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+
+    void setDatatype(const DataType &datatype) override {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> Pad(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
+                                           const std::string& name = "",
+                                           const PadBorderType &borderType = PadBorderType::Constant,
+                                           double borderValue = 0.0)
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
+    return pad;
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, DIM> &dimBeginEnd,
+                                           const std::string& name = "",
+                                           const PadBorderType &borderType = PadBorderType::Constant,
+                                           double borderValue = 0.0)
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    std::array<std::array<DimSize_t, 2>, DIM> beginEndTuples;
+    for (size_t i = 0; i < DIM; ++i) {
+        beginEndTuples[i] = {dimBeginEnd[i], dimBeginEnd[i]};
+    }
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
+    return pad;
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> ZeroPad(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
+                                           const std::string& name = "")
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadBorderType::Constant, 0.0), name);
+    return pad;
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> ZeroPad(const std::array<DimSize_t, DIM> &dimBeginEnd,
+                                           const std::string& name = "")
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    std::array<std::array<DimSize_t, 2>, DIM> beginEndTuples;
+    for (size_t i = 0; i < DIM; ++i) {
+        beginEndTuples[i] = {dimBeginEnd[i], dimBeginEnd[i]};
+    }
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadBorderType::Constant, 0.0), name);
+    return pad;
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> Pad(
+    std::array<DimSize_t, 2> const (&beginEndTuples)[DIM],
+    const std::string& name = "",
+    const PadBorderType &borderType = PadBorderType::Constant,
+    double borderValue = 0.0)
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    return Pad(to_array(beginEndTuples), name, borderType, borderValue);
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndBorders", "BorderType", "BorderValue"};
+
+template <>
+const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Replicate", "Reflect", "Wrap"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 9916ee2004bd1aa9f33acf96d95cae4703f692df..1896894ee8690cedaef696394da0829604e36211 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -89,11 +89,6 @@ private:
      *
      */
     std::vector<std::shared_ptr<Node>> mStaticSchedule;
-    /**
-     * @brief Number of computation node (i.e: nb nodes != Producer)
-     *
-     */
-    std::size_t mComputationNumber = 0; // TODO: Check if not inferable from mStaticSchedule
 };
 } // namespace Aidge
 
diff --git a/include/aidge/utils/Utils.hpp b/include/aidge/utils/ErrorHandling.hpp
similarity index 54%
rename from include/aidge/utils/Utils.hpp
rename to include/aidge/utils/ErrorHandling.hpp
index 71817dcfc9713ad36a74175affd21b03cb6ed181..8fbeff30abecfec0077786b21825b6a6f36677c6 100644
--- a/include/aidge/utils/Utils.hpp
+++ b/include/aidge/utils/ErrorHandling.hpp
@@ -10,17 +10,21 @@
  ********************************************************************************/
 
 
-#ifndef AIDGE_UTILS_H_
-#define AIDGE_UTILS_H_
+#ifndef AIDGE_ERRORHANDLING_H_
+#define AIDGE_ERRORHANDLING_H_
 
 #include <cstdio>
 #include <memory>
 
-#ifdef NO_EXCEPTIONS
+#define AIDGE_STRINGIZE_DETAIL(x) #x
+#define AIDGE_STRINGIZE(x) AIDGE_STRINGIZE_DETAIL(x)
+
+#ifdef NO_EXCEPTION
 #define AIDGE_THROW_OR_ABORT(ex, ...) \
 do { std::printf(__VA_ARGS__); std::abort(); } while (false)
 #else
 #include <stdexcept>
+#include <memory>
 #define AIDGE_THROW_OR_ABORT(ex, ...) \
 do { \
     int n = 128; \
@@ -35,4 +39,21 @@ do { \
 } while (false)
 #endif
 
-#endif //AIDGE_UTILS_H_
\ No newline at end of file
+/**
+ * Macro for API-level assertions.
+ * Used to check logic directly related to the user's inputs.
+ * If it fires, it indicates a user error.
+*/
+#define AIDGE_ASSERT(stm, ...) \
+if (!(stm)) { printf("Assertion failed: " AIDGE_STRINGIZE(stm) " in " __FILE__ ":%d", __LINE__); \
+    AIDGE_THROW_OR_ABORT(std::runtime_error, __VA_ARGS__); }
+
+/**
+ * Macro for internal assertions.
+ * Used to check internal logic not directly related to the API user's inputs.
+ * If it fires, it indicates a bug.
+*/
+#define AIDGE_INTERNAL_ASSERT(stm) \
+assert((stm) && "Internal assertion failed: " #stm " in " __FILE__ ":" AIDGE_STRINGIZE(__LINE__))
+
+#endif //AIDGE_ERRORHANDLING_H_
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index de543e95a16475c4443164af7be5c379d6554f8d..3b29c472b3a540c9ef3b8ed46520e3e718e8cbfb 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -58,6 +58,11 @@ struct Registrar {
         //assert(newInsert && "registrar already exists");
     }
 
+    static bool exists(const typename C::registrar_key& key) {
+        const auto it = C::registry().find(key);
+        return (it != C::registry().end());
+    }
+
     static auto create(const typename C::registrar_key& key){
         const auto it = C::registry().find(key);
         assert(it != C::registry().end() && "invalid registrar key");
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 9810f524ee54f4a7bd3fa4ec7a8ca8247488abaa..0ac14c79a006889dd44e6a23a2d507df29ded482 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -18,7 +18,7 @@
 #include <typeinfo>
 
 #include "aidge/utils/Attributes.hpp"
-#include "aidge/utils/Utils.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
 /**
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 372afebdd3e1626cd0af88e335b78ec7fd73a5f4..5820e94c5cbd24150a4e81b0db34328ac35e1bf5 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -30,16 +30,13 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
     m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, (DIM<<1)> &>(),
+                const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
-        py::arg("stride_dims"),
-        py::arg("padding_dims"));
+        py::arg("stride_dims"));
   
   m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
                                                                   const std::string& name,
-                                                                  const std::vector<DimSize_t> &stride_dims,
-                                                                  const std::vector<DimSize_t> &padding_dims) {
+                                                                  const std::vector<DimSize_t> &stride_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
         // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
         if (kernel_dims.size() != DIM) {
@@ -48,9 +45,6 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
         if (stride_dims.size() != DIM) {
             throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
-        if (padding_dims.size() != (DIM<<1)) {
-            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
-        }
         DimSize_t tmp_kernel_dims_array[DIM];
         for (size_t i = 0; i < DIM; ++i) {
             tmp_kernel_dims_array[i] = kernel_dims[i];
@@ -59,18 +53,12 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
         for (size_t i = 0; i < DIM; ++i) {
             tmp_stride_dims_array[i] = stride_dims[i];
         }
-        DimSize_t tmp_padding_dims_array[DIM<<1];
-        for (size_t i = 0; i < (DIM<<1); ++i) {
-            tmp_padding_dims_array[i] = padding_dims[i];
-        }
         const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
         const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
-        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
-        return AvgPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
+        return AvgPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array));
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
-       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
   
 }
 
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 0c09917d71e520227eed48705527adaf204857ee..91ede7b6a289f3def2a9c8261ff04d2ab9836cdd 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -32,13 +32,11 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
                 DimSize_t,
                 const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, (DIM<<1)> &,
                 const std::array<DimSize_t, DIM> &>(),
         py::arg("in_channels"),
         py::arg("out_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("padding_dims"),
         py::arg("dilation_dims"));
   
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
@@ -46,7 +44,6 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::string& name, 
                                                          const std::vector<DimSize_t> &stride_dims,
-                                                         const std::vector<DimSize_t> &padding_dims,
                                                          const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert a const array.
         // So we use a vector that we convert in this function to a const DimSize_t [DIM] array.
@@ -56,9 +53,6 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
         if (stride_dims.size() != DIM) {
             throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
-        if (padding_dims.size() != (DIM<<1)) {
-            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
-        }
         if (dilation_dims.size() != DIM) {
             throw std::runtime_error("dilation_dims size [" + std::to_string(dilation_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
@@ -70,25 +64,19 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
         for (size_t i = 0; i < DIM; ++i) {
             tmp_stride_dims_array[i] = stride_dims[i];
         }
-        DimSize_t tmp_padding_dims_array[DIM<<1];
-        for (size_t i = 0; i < (DIM<<1); ++i) {
-            tmp_padding_dims_array[i] = padding_dims[i];
-        }
         DimSize_t tmp_dilation_dims_array[DIM];
         for (size_t i = 0; i < DIM; ++i) {
             tmp_dilation_dims_array[i] = dilation_dims[i];
         }
         const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
         const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
-        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
         const DimSize_t (&dilation_dims_array)[DIM] = tmp_dilation_dims_array;
-        return Conv<DIM>(in_channels, out_channels, to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array), to_array(dilation_dims_array));
+        return Conv<DIM>(in_channels, out_channels, to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(dilation_dims_array));
     }, py::arg("in_channels"),
        py::arg("out_channels"),
        py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
   
 }
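
Padding is no longer an argument of the Conv binding; a padded convolution is instead built as a Pad+Conv meta-operator. A minimal C++ sketch, reusing the PaddedConv call signature from unit_tests/operator/Test_MetaOperator.cpp added further down in this patch (return type and includes are assumptions):

    #include <memory>

    #include "aidge/operator/MetaOperatorDefs.hpp"

    // Sketch only: equivalent of a Conv with one row/column of padding on each
    // border, expressed as the Pad+Conv meta-operator (signature taken from
    // Test_MetaOperator.cpp).
    std::shared_ptr<Aidge::Node> makePaddedConvSketch() {
        return Aidge::PaddedConv(1, 3, {3, 3}, "padded_conv", {1, 1}, {{{1, 1}, {1, 1}}});
    }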
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 3f48c50f7ffdb44450c0e2a155d85dcbf9f73fd9..446bcdcceb3ba805223fc22e6fc19a22dcf354ec 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -31,17 +31,14 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, (DIM<<1)> &,
                 const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("padding_dims"),
         py::arg("dilation_dims"));
   
   m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
-                                                                  const std::vector<DimSize_t> &padding_dims,
                                                                   const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert a const array.
         // So we use a vector that we convert in this function to a const DimSize_t [DIM] array.
@@ -51,9 +48,6 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         if (stride_dims.size() != DIM) {
             throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
-        if (padding_dims.size() != (DIM<<1)) {
-            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
-        }
         if (dilation_dims.size() != DIM) {
             throw std::runtime_error("dilation_dims size [" + std::to_string(dilation_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
@@ -65,23 +59,17 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         for (size_t i = 0; i < DIM; ++i) {
             tmp_stride_dims_array[i] = stride_dims[i];
         }
-        DimSize_t tmp_padding_dims_array[DIM<<1];
-        for (size_t i = 0; i < (DIM<<1); ++i) {
-            tmp_padding_dims_array[i] = padding_dims[i];
-        }
         DimSize_t tmp_dilation_dims_array[DIM];
         for (size_t i = 0; i < DIM; ++i) {
             tmp_dilation_dims_array[i] = dilation_dims[i];
         }
         const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
         const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
-        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
         const DimSize_t (&dilation_dims_array)[DIM] = tmp_dilation_dims_array;
-        return ConvDepthWise<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array), to_array(dilation_dims_array));
+        return ConvDepthWise<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(dilation_dims_array));
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
   
 }
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 2efd18c816c2d588e574872b3d3776a3409dc4ba..a930b496b49280629d71725cee79aea4d850358e 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -30,16 +30,13 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, (DIM<<1)> &>(),
+                const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
-        py::arg("stride_dims"),
-        py::arg("padding_dims"));
+        py::arg("stride_dims"));
   
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
                                                                   const std::string& name,
-                                                                  const std::vector<DimSize_t> &stride_dims,
-                                                                  const std::vector<DimSize_t> &padding_dims) {
+                                                                  const std::vector<DimSize_t> &stride_dims) {
         // Lambda function wrapper because PyBind fails to convert a const array.
         // So we use a vector that we convert in this function to a const DimSize_t [DIM] array.
         if (kernel_dims.size() != DIM) {
@@ -48,9 +45,6 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         if (stride_dims.size() != DIM) {
             throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
-        if (padding_dims.size() != (DIM<<1)) {
-            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
-        }
         DimSize_t tmp_kernel_dims_array[DIM];
         for (size_t i = 0; i < DIM; ++i) {
             tmp_kernel_dims_array[i] = kernel_dims[i];
@@ -59,18 +53,12 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         for (size_t i = 0; i < DIM; ++i) {
             tmp_stride_dims_array[i] = stride_dims[i];
         }
-        DimSize_t tmp_padding_dims_array[DIM<<1];
-        for (size_t i = 0; i < (DIM<<1); ++i) {
-            tmp_padding_dims_array[i] = padding_dims[i];
-        }
         const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
         const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
-        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
-        return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
+        return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array));
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
-       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
   
 }
 
diff --git a/setup.py b/setup.py
index 4611ac78aad0436f663b1348d012bb3c3bd0054a..b88329e54feab78e39bd79be0a129030098e216a 100644
--- a/setup.py
+++ b/setup.py
@@ -70,8 +70,8 @@ class CMakeBuild(build_ext):
 
         self.spawn(['cmake', str(cwd), param_py, '-DTEST=OFF', f'-DCMAKE_INSTALL_PREFIX:PATH={install_path}'])
         if not self.dry_run:
-            self.spawn(['cmake', '--build', '.', '-j', max_jobs])
-            self.spawn(['cmake', '--install', '.'])
+            self.spawn(['cmake', '--build', '.', '--config', 'Debug', '-j', max_jobs])
+            self.spawn(['cmake', '--install', '.', '--config', 'Debug'])
         os.chdir(str(cwd))
 
         aidge_package = build_lib / (get_project_name())
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 699df74ce4c8033d5804784cd703574f730fb836..fa413e7ff9fe1bb7e7f76722baad24c79b8a73a4 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -125,21 +125,17 @@ Aidge::IOIndex_t Aidge::GraphView::getNbFreeDataInputs() const {
 
 std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>
 Aidge::GraphView::dataInputs() const {
-  IOIndex_t nbDataIn = 0U;
-  for (const std::shared_ptr<Node>& inputNode : mInputNodes) {
-    nbDataIn += inputNode->nbDataInputs();
-  }
-  std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
-      std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbDataIn);
-  nbDataIn = 0U;
+  std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res;
+
   for (const std::shared_ptr<Node>& inputNode : mInputNodes) {
     std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
         inputNode->dataInputs();
-    std::move(inputNodeinputs.begin(), inputNodeinputs.end(),
-              res.begin() + nbDataIn);
-    nbDataIn += inputNode->nbDataInputs();
-    // res.insert(res.end(), (inputNode -> inputs()).begin(), (inputNode ->
-    // inputs()).end());
+
+    for (const auto& input : inputNodeinputs) {
+      if (mNodes.find(input.first) == mNodes.end()) {
+        res.push_back(input);
+      }
+    }
   }
   return res;
 }
@@ -147,21 +143,17 @@ Aidge::GraphView::dataInputs() const {
 
 std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>
 Aidge::GraphView::inputs() const {
-  std::size_t nbIn = 0U;
-  for (const std::shared_ptr<Node>& inputNode : mInputNodes) {
-    nbIn += inputNode->nbInputs();
-  }
-  std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
-      std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbIn);
-  nbIn = 0U;
+  std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res;
+
   for (const std::shared_ptr<Node>& inputNode : mInputNodes) {
     std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
         inputNode->inputs();
-    std::move(inputNodeinputs.begin(), inputNodeinputs.end(),
-              res.begin() + nbIn);
-    nbIn += inputNode->nbInputs();
-    // res.insert(res.end(), (inputNode -> inputs()).begin(), (inputNode ->
-    // inputs()).end());
+
+    for (const auto& input : inputNodeinputs) {
+      if (mNodes.find(input.first) == mNodes.end()) {
+        res.push_back(input);
+      }
+    }
   }
   return res;
 }
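
The rewritten inputs()/dataInputs() reduce to a membership test against the view's node set: a connection is kept only if its source node is not part of the view (a null, i.e. unconnected, source also counts as outside). A self-contained sketch of that filter with simplified types (not the actual Aidge declarations):

    #include <cstddef>
    #include <memory>
    #include <set>
    #include <utility>
    #include <vector>

    struct Node;  // stand-in for Aidge::Node, simplified for this sketch
    using NodePtr = std::shared_ptr<Node>;
    using Connection = std::pair<NodePtr, std::size_t>;

    // Keep only the connections whose source does not belong to the view.
    std::vector<Connection> outsideConnections(const std::vector<Connection>& candidates,
                                               const std::set<NodePtr>& viewNodes) {
        std::vector<Connection> res;
        for (const auto& c : candidates) {
            if (viewNodes.find(c.first) == viewNodes.end()) {
                res.push_back(c);  // source outside the view (or unconnected) -> reported
            }
        }
        return res;
    }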
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c1f58c68686d9359fa3b8ea4b5eb54244e988895
--- /dev/null
+++ b/src/operator/MetaOperator.cpp
@@ -0,0 +1,141 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph,
+    std::vector<NodePtr> inputNodes,
+    std::vector<NodePtr> outputNodes)
+    : Operator(type),
+        mGraph(graph)
+{
+    mInputs = std::vector<std::shared_ptr<Tensor>>(mGraph->inputs().size());
+    for (std::size_t i = 0; i < mInputs.size(); ++i) {
+        mInputs[i] = std::make_shared<Tensor>();
+    }
+    mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->outputs().size());
+    for (std::size_t i = 0; i < mOutputs.size(); ++i) {
+        mOutputs[i] = std::make_shared<Tensor>();
+    }
+
+    // Fill inputNodes and outputNodes when there is no ambiguity
+    if (inputNodes.empty()) {
+        AIDGE_ASSERT(mGraph->inputNodes().size() == 1, "need to specify internal nodes input mapping");
+        inputNodes.push_back(*mGraph->inputNodes().begin());
+    }
+
+    if (outputNodes.empty()) {
+        AIDGE_ASSERT(mGraph->outputNodes().size() == 1, "need to specify internal nodes output mapping");
+        outputNodes.push_back(*mGraph->outputNodes().begin());
+    }
+
+    AIDGE_ASSERT(mGraph->inputNodes().size() == inputNodes.size(), "wrong number of specified input nodes");
+    AIDGE_ASSERT(mGraph->outputNodes().size() == outputNodes.size(), "wrong number of specified output nodes");
+
+    // Identify inputs that are outside the micro-graph
+    for (const auto& inputNode : inputNodes) {
+        AIDGE_ASSERT(mGraph->inView(inputNode), "input node must be in the graph");
+        const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
+            inputNode->inputs();
+        
+        int inputIdx = 0;   // input idx relative to the current node
+        for (const auto& in : inputNodeinputs) {
+            if (in.first == nullptr || !mGraph->inView(in.first)) {
+                // The input is not connected inside the micro-graph
+                // (no connection to this input or connection outside the micro-graph)
+                // => it is therefore an input for the meta-operator
+                mInputOps.push_back(std::make_pair(inputNode->getOperator(), inputIdx));
+            }
+
+            ++inputIdx;
+        }
+    }
+
+    // The outputs of the output nodes are also the outputs of the meta-operator
+    for (const auto& outputNode : outputNodes) {
+        AIDGE_ASSERT(mGraph->inView(outputNode), "output node must be in the graph");
+        const std::vector<std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>> outputNodeoutputs =
+            outputNode->outputs();
+
+        for (size_t outputIdx = 0; outputIdx < outputNodeoutputs.size(); ++outputIdx) {
+            mOutputOps.push_back(std::make_pair(outputNode->getOperator(), outputIdx));
+        }
+    }
+
+    AIDGE_INTERNAL_ASSERT(mInputOps.size() == mGraph->inputs().size());
+    AIDGE_INTERNAL_ASSERT(mOutputOps.size() == mGraph->outputs().size());
+}
+
+Aidge::NbElts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
+    if (mImpl) {
+        return mImpl->getNbRequiredData(inputIdx);
+    }
+    else {
+        const auto& inputOp = mInputOps[inputIdx];
+        return inputOp.first->getNbRequiredData(inputOp.second);
+    }
+}
+
+Aidge::NbElts_t Aidge::MetaOperator_Op::getNbConsumedData(IOIndex_t inputIdx) const {
+    if (mImpl) {
+        return mImpl->getNbConsumedData(inputIdx);
+    }
+    else {
+        const auto& inputOp = mInputOps[inputIdx];
+        return inputOp.first->getNbConsumedData(inputOp.second);
+    }
+}
+
+Aidge::NbElts_t Aidge::MetaOperator_Op::getNbProducedData(IOIndex_t outputIdx) const {
+    if (mImpl) {
+        return mImpl->getNbProducedData(outputIdx);
+    }
+    else {
+        const auto& outputOp = mOutputOps[outputIdx];
+        return outputOp.first->getNbProducedData(outputOp.second);
+    }
+}
+
+void Aidge::MetaOperator_Op::updateConsummerProducer() {
+    if (mImpl) {
+        mImpl->updateConsummerProducer();
+    }
+    else {
+        if (!mScheduler) {
+            // Lazy initialization
+            mScheduler = std::make_shared<SequentialScheduler>(mGraph);
+        }
+        
+        // TODO: check that generateScheduling() can be called multiple times to iteratively update the schedule.
+        // It could be a good idea to unify updateConsummerProducer() and generateScheduling() into a single updateScheduling()
+        mScheduler->generateScheduling();
+    }
+}
+
+void Aidge::MetaOperator_Op::forward() {
+    if (mImpl) {
+        // A custom implementation exists for this meta operator
+        mImpl->forward();
+    }
+    else {
+        // No custom implementation, use the individual operators implementations
+        if (!mScheduler) {
+            // Lazy initialization
+            // TODO: should we assert that a scheduler already exists at this point?
+            // => should be created in updateConsummerProducer()
+            mScheduler = std::make_shared<SequentialScheduler>(mGraph);
+            mScheduler->generateScheduling();
+        }
+
+        mScheduler->forward(false);
+    }
+}
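
Taken together with the unit test below, the intended usage of this fallback path is: wrap the micro-graph in a MetaOperator_Op and let forward() drive it through a lazily created SequentialScheduler whenever no dedicated implementation (mImpl) is registered. A short sketch mirroring Test_MetaOperator.cpp; without a backend registered for the inner Pad/Conv operators this is illustrative only:

    #include <memory>

    #include "aidge/operator/MetaOperator.hpp"
    #include "aidge/operator/MetaOperatorDefs.hpp"

    // Sketch: no mImpl is set, so forward() falls back to scheduling the micro-graph.
    void paddedConvForwardSketch() {
        auto op = Aidge::PaddedConv(1, 3, {3, 3}, "padded_conv", {1, 1}, {{{1, 1}, {1, 1}}});
        auto metaOp = std::dynamic_pointer_cast<Aidge::MetaOperator_Op>(op->getOperator());

        auto microGraph = metaOp->getMicroGraph();  // internal Pad + Conv nodes
        // ... associate inputs and compute output dims as in the unit test, then:
        metaOp->forward();  // lazily builds a SequentialScheduler over the micro-graph
    }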
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index cda5baedfa513ae9140f0f53bcf5c7867d9b90b1..1f34091e54c0f83dae6b60589c20fb8fdf1d5064 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -40,13 +40,10 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
     // TODO: optimize memory usage
 
     // setup initial producers list
-    mComputationNumber = 0;
     std::set<std::shared_ptr<Node>> producers;
     for (const std::shared_ptr<Node>& nodePtr : mGraphView->getNodes()) {
         if (nodePtr->type() == "Producer") {
             producers.insert(nodePtr);
-        } else {
-            ++mComputationNumber;
         }
     }
     // add Data Input
@@ -112,6 +109,7 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
 
         // Push consumers in the list of nodes to run and update the consumer producer system
         for (const auto& runnable : runnableConsumers) {
+            if (verbose) printf("Runnable: %s\n", (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
             runnable->getOperator()->updateConsummerProducer();
             mStaticSchedule.push_back(runnable);
         }
@@ -177,14 +175,19 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
 
 // TODO: handle multiple inputs/outputs
 void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
+    // Forward dims (if allowed)
     if (forwardDims) {mGraphView->forwardDims(); }
 
-    // add each Producer Node.
-    std::set<std::shared_ptr<Node>> computationOver;
+    // Generate scheduling *only if empty*
+    // If scheduling was already generated (in one or several steps, i.e. one or
+    // several successive calls to generateScheduling()), do not generate it twice
+    if (mStaticSchedule.empty()) {
+        this->generateScheduling();
+    }
 
+    // Clear previous scheduling results
     mScheduling.clear();
 
-    this->generateScheduling();
     int cpt = 0;
     for (const auto& runnable : mStaticSchedule) {
         if (verbose)
@@ -202,7 +205,6 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
     }
     if (!verbose) drawProgressBar(1.0, 50, "                                   ");
     printf("\n");
-
 }
 
 void Aidge::SequentialScheduler::saveSchedulingDiagram(const std::string& fileName) const {
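
The net effect on the scheduler's contract: generateScheduling() can be called ahead of time (possibly incrementally), and forward() now only generates a schedule when none exists yet, reusing it across calls. A short C++ sketch of driving the scheduler under this behaviour (header path and the use of default arguments are assumptions):

    #include <memory>

    #include "aidge/scheduler/Scheduler.hpp"  // assumed header path

    // Sketch: schedule once, run several times; forward() no longer regenerates
    // the static schedule on every call.
    void scheduleOnceRunTwice(const std::shared_ptr<Aidge::GraphView>& graph) {
        Aidge::SequentialScheduler scheduler(graph);
        scheduler.generateScheduling();  // optional: forward() would do it lazily
        scheduler.forward(true);         // forwards dims, then runs the schedule
        scheduler.forward(false);        // reuses the same static schedule
    }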
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 4b929286ba494a452c7f9cb71ce944c7d576c03a..9f014364636c70031b522b09c893e1144af3f133 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -161,7 +161,7 @@ TEST_CASE("[core/graph] GraphView(addChild)") {
 TEST_CASE("[core/graph] GraphView(inputs)") {
     auto g1 = std::make_shared<GraphView>("TestGraph");
     std::shared_ptr<Node> conv = Conv(3, 32, {3, 3});
-    g1->add(conv);
+    g1->add(conv, false);
 
     REQUIRE(g1->inputs() == conv->inputs());
 }
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d15c1e6dc92c3e07c969c66a07c71a7e2fb2de9d
--- /dev/null
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -0,0 +1,65 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/MetaOperatorDefs.hpp"
+#include <cstddef>
+
+using namespace Aidge;
+
+TEST_CASE("[core/operators] MetaOperator", "[Operator]")
+{
+    SECTION("PaddedConv")
+    {
+        auto op = PaddedConv(1, 3, {3, 3}, "padded_conv", {1, 1}, {{{1, 1}, {1, 1}}});
+
+        auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(op->getOperator())
+                              ->getMicroGraph();
+
+        REQUIRE(microGraph->getNodes().size() == 2);
+        REQUIRE(
+            microGraph->inputNodes().size()
+            == 2); // 2 because Conv has inputs outside the meta-op (Producers for weight
+                   // and bias)
+        // Order not guaranteed by the GraphView
+        // REQUIRE((*microGraph->inputNodes().begin())->getOperator()->type() == "Pad");
+        REQUIRE(microGraph->outputNodes().size() == 1);
+        REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv");
+        REQUIRE(op->nbInputs() == 3);
+        REQUIRE(op->nbDataInputs() == 1);
+        REQUIRE(op->nbOutputs() == 1);
+
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>();
+        myInput->resize({2, 3, 5, 5});
+        op->getOperator()->associateInput(0, myInput);
+        op->getOperator()->computeOutputDims();
+
+        REQUIRE(op->getOperator()->outputDimsForwarded());
+        REQUIRE(
+            op->getOperator()->getOutput(0)->dims()
+            == std::vector<DimSize_t>({2, 3, 5, 5}));
+        REQUIRE(op->getOperator()->getInput(0) == myInput);
+        // Order not guaranteed by the GraphView
+        // REQUIRE((*microGraph->inputNodes().begin())->getOperator()->getInput(0) ==
+        // myInput);
+        REQUIRE(
+            op->getOperator()->getOutput(0)
+            == (*microGraph->outputNodes().begin())->getOperator()->getOutput(0));
+
+        // op->getOperator()->updateConsummerProducer();  // require implementation
+        // auto microGraphScheduler =
+        // std::dynamic_pointer_cast<MetaOperator_Op>(op->getOperator())->getMicroGraphScheduler();
+        // REQUIRE(microGraphScheduler->getStaticScheduling().size() == 2);
+    }
+}