diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 47ded2a462477958320bfad3ad84e6b8f6ef6082..c3f97f96e6b797afca7a28928f717691ae998185 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -35,7 +35,8 @@
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
-//#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/MetaOperatorDefs.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/ReLU.hpp"
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 1f1eeafa859b116606613392a13a65ad398669ad..89ba148497709f0af475bbf953ff285c88036102 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -124,7 +124,7 @@ public:
     }
 
     /**
-     * @brief List dataInput connections of the GraphView object's inputNodes.
+     * @brief List dataInput connections of the GraphView object's inputNodes, coming from outside the GraphView.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
     std::vector<std::pair<NodePtr, IOIndex_t>> dataInputs() const;
@@ -137,7 +137,7 @@ public:
     inline auto dataInputs(const std::string name) const { return mNodeRegistry.at(name)->dataInputs(); }
 
     /**
-     * @brief List input connections of the GraphView object's inputNodes.
+     * @brief List input connections of the GraphView object's inputNodes, coming from outside the GraphView.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
     std::vector<std::pair<NodePtr, IOIndex_t>> inputs() const;
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index b534f1f68c96ea0252e1b2506bd29ea4c07d2985..be15ceb66ce32b98bfafab4af4213eee163dfbf9 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -26,15 +26,14 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class AvgPoolingAttr { StrideDims, KernelDims, PaddingDims };
+enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public Operator,
                 public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
                 public StaticAttributes<AvgPoolingAttr,
                                        std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, (DIM<<1) >> {
+                                       std::array<DimSize_t, DIM>> {
 private:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -47,18 +46,15 @@ public:
 
     using Attributes_ = StaticAttributes<AvgPoolingAttr,
                                              std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, (DIM<<1)> >;
+                                             std::array<DimSize_t, DIM>>;
     template <AvgPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
 
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
-                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
+                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
           Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
-                           attr<AvgPoolingAttr::KernelDims>(kernel_dims),
-                           attr<AvgPoolingAttr::PaddingDims>(padding_dims)) {
+                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {
         setDatatype(DataType::Float32);
     }
 
@@ -99,9 +95,7 @@ public:
             for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                             std::floor(static_cast<float>(mInput->dims()[dim+2] -
-                                                                    this->template getAttr<AvgPoolingAttr::KernelDims>()[dim] +
-                                                                    this->template getAttr<AvgPoolingAttr::PaddingDims>()[dim] +
-                                                                    this->template getAttr<AvgPoolingAttr::PaddingDims>()[dim+DIM]) /
+                                                                    this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
                                             static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = mInput->dims()[1];
@@ -168,11 +162,10 @@ public:
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                            const std::string& name = "",
-                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                           const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
-    auto avgPool = std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
+    auto avgPool = std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
     return avgPool;
 }
 
@@ -180,17 +173,16 @@ template <DimSize_t DIM>
 inline std::shared_ptr<Node> AvgPooling(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
-    return AvgPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+    return AvgPooling(to_array(kernel_dims), name, stride_dims);
 }
 }  // namespace Aidge
 
 namespace {
 template <>
 const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
-                                                          "KernelDims", "PaddingDims"};
+                                                          "KernelDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
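
For reference, a minimal sketch of the trimmed-down AvgPooling factory after this change (dimensions and node name are illustrative): padding is no longer an attribute of the operator, so a pooling node is built from kernel and stride dimensions only, and padding is expressed separately through the new Pad operator or the PaddedAvgPooling meta-operator introduced below.

#include <array>
#include "aidge/operator/AvgPooling.hpp"

// 2x2 kernel, stride 2; there is no padding argument anymore (illustrative values).
std::array<Aidge::DimSize_t, 2> kernel{2, 2};
std::array<Aidge::DimSize_t, 2> stride{2, 2};
auto pool = Aidge::AvgPooling<2>(kernel, "pool1", stride);
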
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index ae8fecca7fec1e4c378d31fb350abf1abffb0106..c8e229cbb3815ae7bd24064e862dc407b327febd 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -26,13 +26,13 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, PaddingDims };
+enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
 
 template <DimIdx_t DIM>
 class Conv_Op : public Operator,
                 public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
                 public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
-                                       DimSize_t, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1) >> {
+                                       DimSize_t, std::array<DimSize_t, DIM>> {
 public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
@@ -45,7 +45,7 @@ public:
     Conv_Op() = delete;
 
     using Attributes_ = StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
-                                             DimSize_t, DimSize_t, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1) >>;
+                                             DimSize_t, DimSize_t, std::array<DimSize_t, DIM>>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
 
@@ -53,15 +53,13 @@ public:
                       DimSize_t out_channels,
                       const std::array<DimSize_t, DIM> &kernel_dims,
                       const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                       const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
           Attributes_(attr<ConvAttr::StrideDims>(stride_dims),
-                           attr<ConvAttr::DilationDims>(dilation_dims),
-                           attr<ConvAttr::InChannels>(in_channels),
-                           attr<ConvAttr::OutChannels>(out_channels),
-                           attr<ConvAttr::KernelDims>(kernel_dims),
-                           attr<ConvAttr::PaddingDims>(padding_dims)) {
+                      attr<ConvAttr::DilationDims>(dilation_dims),
+                      attr<ConvAttr::InChannels>(in_channels),
+                      attr<ConvAttr::OutChannels>(out_channels),
+                      attr<ConvAttr::KernelDims>(kernel_dims)) {
         setDatatype(DataType::Float32);
     }
 
@@ -117,9 +115,7 @@ public:
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent +
-                                                 this->template getAttr<ConvAttr::PaddingDims>()[dim] +
-                                                 this->template getAttr<ConvAttr::PaddingDims>()[dim+DIM]) /
+                        floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
                               static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
             }
 
@@ -189,11 +185,10 @@ inline std::shared_ptr<Node> Conv(DimSize_t in_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                                   const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, padding_dims, dilation_dims), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), name);
     // addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
     addProducer(conv, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
     addProducer(conv, 2, std::array<DimSize_t, 1>({out_channels}), "b");
@@ -207,10 +202,9 @@ inline std::shared_ptr<Node> Conv(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    return Conv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+    return Conv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, dilation_dims);
 }
 }  // namespace Aidge
 
@@ -221,8 +215,7 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
     "DilationDims",
     "InChannels",
     "OutChannels",
-    "KernelDims",
-    "PaddingDims"
+    "KernelDims"
 };
 }
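
The same applies to Conv: a plain Conv node is now implicitly unpadded, and explicit padding is composed with the Pad operator (see Pad.hpp and the PaddedConv helper in MetaOperatorDefs.hpp below). A minimal sketch with illustrative values:

#include <array>
#include "aidge/operator/Conv.hpp"

// 3 input channels, 16 output channels, 3x3 kernel; stride and dilation default to 1.
std::array<Aidge::DimSize_t, 2> kernel{3, 3};
auto conv = Aidge::Conv<2>(3, 16, kernel, "conv1");
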
 
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 2b80278994bc462b2b2c98b7aae68aa60f1e1e9b..55a48a978f4bd515f31cff4feae79c3ab262b0e0 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -26,7 +26,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, PaddingDims };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public Operator,
@@ -35,8 +35,7 @@ class ConvDepthWise_Op : public Operator,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        DimSize_t,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, (DIM<<1) >> {
+                                       std::array<DimSize_t, DIM>> {
    public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
@@ -52,21 +51,18 @@ class ConvDepthWise_Op : public Operator,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              DimSize_t,
-                                             std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, (DIM<<1) >>;
+                                             std::array<DimSize_t, DIM>>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
 
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                               const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
           Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
-                           attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-                           attr<ConvDepthWiseAttr::Channels>(0),
-                           attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
-                           attr<ConvDepthWiseAttr::PaddingDims>(padding_dims)) {
+                      attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
+                      attr<ConvDepthWiseAttr::Channels>(0),
+                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {
         setDatatype(DataType::Float32);
     }
 
@@ -109,9 +105,7 @@ class ConvDepthWise_Op : public Operator,
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent +
-                                                 this->template getAttr<ConvDepthWiseAttr::PaddingDims>()[dim] +
-                                                 this->template getAttr<ConvDepthWiseAttr::PaddingDims>()[dim+DIM]) /
+                        floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
                               static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
             }
             this->template getAttr<ConvDepthWiseAttr::Channels>() = mInputs[0]->dims()[1];
@@ -188,11 +182,10 @@ template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
                                            const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                           const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                                            const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims, dilation_dims), name);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), name);
     addProducer(convDW, 1, std::array<DimSize_t,0>({}), "w");
     addProducer(convDW, 2, std::array<DimSize_t,0>({}), "b");
     return convDW;
@@ -203,17 +196,16 @@ inline std::shared_ptr<Node> ConvDepthWise(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    return ConvDepthWise(to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+    return ConvDepthWise(to_array(kernel_dims), name, stride_dims, dilation_dims);
 }
 }  // namespace Aidge
 
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels",
-                                                          "KernelDims", "PaddingDims"};
+                                                          "KernelDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index c23378eeee92d2b8958a23446b622cc98a79cf69..bf802238c2dba8d13a0bb230750f3b882b6c09f5 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -26,15 +26,14 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class MaxPoolingAttr { StrideDims, KernelDims, PaddingDims };
+enum class MaxPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public Operator,
                 public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
                 public StaticAttributes<MaxPoolingAttr,
                                        std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, (DIM<<1) >> {
+                                       std::array<DimSize_t, DIM>> {
 private:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -47,18 +46,15 @@ public:
 
     using Attributes_ = StaticAttributes<MaxPoolingAttr,
                                              std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, (DIM<<1)> >;
+                                             std::array<DimSize_t, DIM>>;
     template <MaxPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
 
     constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
-                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
+                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
           Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
-                           attr<MaxPoolingAttr::KernelDims>(kernel_dims),
-                           attr<MaxPoolingAttr::PaddingDims>(padding_dims)),
+                      attr<MaxPoolingAttr::KernelDims>(kernel_dims)),
           mOutput(std::make_shared<Tensor>()) {
         setDatatype(DataType::Float32);
     }
@@ -100,9 +96,7 @@ public:
             for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                             std::floor(static_cast<float>(mInput->dims()[dim+2] -
-                                                                    this->template getAttr<MaxPoolingAttr::KernelDims>()[dim] +
-                                                                    this->template getAttr<MaxPoolingAttr::PaddingDims>()[dim] +
-                                                                    this->template getAttr<MaxPoolingAttr::PaddingDims>()[dim+DIM]) /
+                                                                    this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
                                             static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = mInput->dims()[1];
@@ -169,11 +163,10 @@ public:
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                            const std::string& name = "",
-                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                           const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
-    auto avgPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
+    auto avgPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
     return avgPool;
 }
 
@@ -181,16 +174,15 @@ template <DimSize_t DIM>
 inline std::shared_ptr<Node> MaxPooling(
     DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
-    return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+    return MaxPooling(to_array(kernel_dims), name, stride_dims);
 }
 }  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
+const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"StrideDims", "KernelDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 0c77a752493d251303c036c4061823c4f8bc499d..ae62a118122fa4e577f0ab898799ca40a567742d 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -13,21 +13,43 @@
 #define AIDGE_CORE_OPERATOR_METAOPERATOR_H_
 
 #include "aidge/operator/Operator.hpp"
+#include "aidge/operator/AvgPooling.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/scheduler/Scheduler.hpp"
 
 namespace Aidge {
-class MetaOperator : public Operator {
+class MetaOperator_Op : public Operator,
+                public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)> {
 public:
-    MetaOperator()
-        : Operator("MetaOp")
-    {
-    }
+    std::vector<std::shared_ptr<Tensor>> mInputs;
+    std::vector<std::shared_ptr<Tensor>> mOutputs; // These are shared with the micro-graph's output tensors
+
+    // Micro-graph handling:
+    std::shared_ptr<GraphView> mGraph; // Meta operator micro-graph
+    std::shared_ptr<SequentialScheduler> mScheduler;
+    // Need to store an ordered list of input/output operators for the micro-graph,
+    // because input/output nodes in a GraphView are unordered.
+    // TODO: refactor GraphView to handle ordered input/output?
+    std::vector<std::pair<std::shared_ptr<Operator>, IOIndex_t>> mInputOps;
+    std::vector<std::pair<std::shared_ptr<Operator>, IOIndex_t>> mOutputOps;
+
+   public:
+    MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph,
+        std::vector<NodePtr> inputNodes = std::vector<NodePtr>(),
+        std::vector<NodePtr> outputNodes = std::vector<NodePtr>());
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    MetaOperator(const MetaOperator& op)
-        : Operator("MetaOp")
+    MetaOperator_Op(const MetaOperator_Op& op)
+        : Operator(op.type().c_str()),
+          mGraph(op.mGraph->clone())
     {
         // cpy-ctor
     }
@@ -37,11 +59,112 @@ public:
      * @see Operator::MatMul_Op
      */
     std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<MetaOperator>(*this);
+        return std::make_shared<MetaOperator_Op>(*this);
+    }
+
+    const std::shared_ptr<GraphView>& getMicroGraph() const {
+        return mGraph;
+    }
+
+    const std::shared_ptr<SequentialScheduler>& getMicroGraphScheduler() const {
+        return mScheduler;
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        const auto& inputOp = mInputOps[inputIdx];
+        inputOp.first->associateInput(inputOp.second, data);
+
+        // Associate inputs for custom implementation
+        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        // Forward dims of micro-graph
+        mGraph->forwardDims();
+
+        // Associate outputs to micro-graph outputs for custom implementation
+        for (size_t outputIdx = 0; outputIdx < mOutputOps.size(); ++outputIdx) {
+            const auto& outputOp = mOutputOps[outputIdx];
+            mOutputs[outputIdx] = outputOp.first->getOutput(outputOp.second);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutputs[0]->empty()); }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < mInputs.size() && "inputIdx out of range");
+        return *(mInputs[inputIdx].get());
+    }
+
+    inline Tensor& output(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
+        return *(mOutputs[outputIdx].get());
+    }
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < mInputs.size() && "inputIdx out of range");
+        return mInputs[inputIdx];
+    }
+
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
+        return mOutputs[outputIdx];
+    }
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx < mInputs.size() && "inputIdx out of range");
+        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
+    }
+
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx < mOutputs.size() && "outputIdx out of range");
+        return std::static_pointer_cast<Data>(mOutputs[outputIdx]);
+    }
+
+    void setBackend(const std::string &name) override {
+        if (Registrar<MetaOperator_Op>::exists({name, type()})) {
+            // A custom implementation exists for this meta operator
+            mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
+        }
+
+        // The micro-graph should always be set to the right backend, since it
+        // shares its input/output tensors.
+        // The input/output tensors' backend is updated here.
+        mGraph->setBackend(name);
+    }
+
+    void setDatatype(const DataType &datatype) override {
+        // The micro-graph should always be set to the right data type, since it
+        // shares its input/output tensors.
+        // The input/output tensors' data type is updated here.
+        mGraph->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return mGraph->inputs().size(); }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return mGraph->dataInputs().size(); }
+    inline IOIndex_t nbOutputs() const noexcept override final { return mGraph->outputs().size(); }
+
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override;
+    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override;
+
+    void updateConsummerProducer() override;
+    void forward() override;
+    void backward() override {
+        assert(false && "not implemented");
     }
 
-    ~MetaOperator() = default;
 };
+
+inline std::shared_ptr<Node> MetaOperator(const char *type,
+                                  const std::shared_ptr<GraphView>& graph,
+                                  const std::string& name = "")
+{
+    return std::make_shared<Node>(std::make_shared<MetaOperator_Op>(type, graph), name);
 }
+}  // namespace Aidge
 
 #endif /* MetaOperator_H_ */
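
To illustrate how the new MetaOperator_Op is meant to be used, here is a sketch that wraps a Pad + Conv micro-graph into a single node. Node names and dimensions are illustrative, and Node::getOperator() is assumed to return the node's Operator as elsewhere in aidge_core:

#include <array>
#include <memory>
#include "aidge/operator/MetaOperator.hpp"

std::array<std::array<Aidge::DimSize_t, 2>, 2> borders{{{1, 1}, {1, 1}}};
std::array<Aidge::DimSize_t, 2> kernel{3, 3};
auto pad  = Aidge::Pad<2>(borders, "blk_pad");
auto conv = Aidge::Conv<2>(3, 16, kernel, "blk_conv");
// Wrap the two nodes into a single meta-operator node.
auto meta = Aidge::MetaOperator("MyPaddedConv", Aidge::Sequential({pad, conv}), "block0");
// The wrapped graph and its scheduler stay accessible through the operator.
auto metaOp = std::static_pointer_cast<Aidge::MetaOperator_Op>(meta->getOperator());
auto microGraph = metaOp->getMicroGraph();
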
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..346905dc9eb7cfcd7e5fab80788a6c773d001476
--- /dev/null
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -0,0 +1,85 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
+#define AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
+
+#include "aidge/operator/MetaOperator.hpp"
+
+namespace Aidge {
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
+                                  DimSize_t out_channels,
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name = "",
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+{
+    // Construct micro-graph
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(padding_dims, PadBorderType::Constant, 0.0), (!name.empty()) ? name + "_pad" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
+    // Need to specify the ordered list of input operators
+    const std::vector<NodePtr> orderedInputNodes = {pad, conv};
+
+    auto metaOp = std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}), orderedInputNodes), name);
+    addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
+    addProducer(metaOp, 2, std::array<DimSize_t, 1>({out_channels}), "b");
+    return metaOp;
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> PaddedConv(
+    DimSize_t in_channels,
+    DimSize_t out_channels,
+    DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name = "",
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
+    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+{
+    return PaddedConv<DIM>(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> PaddedAvgPooling(DimSize_t in_channels,
+                                  DimSize_t out_channels,
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name = "",
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0})
+{
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
+        AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
+    });
+
+    return std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedAvgPooling", graph), name);
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> PaddedMaxPooling(DimSize_t in_channels,
+                                  DimSize_t out_channels,
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name = "",
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0})
+{
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
+        MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims)
+    });
+
+    return std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedMaxPooling", graph), name);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */
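
A usage sketch of PaddedConv, which replaces the former Conv call that took a padding argument (values are illustrative; each padding entry is a {begin, end} pair per spatial dimension):

#include <array>
#include "aidge/operator/MetaOperatorDefs.hpp"

std::array<Aidge::DimSize_t, 2> kernel{3, 3};
std::array<Aidge::DimSize_t, 2> stride{1, 1};
std::array<std::array<Aidge::DimSize_t, 2>, 2> padding{{{1, 1}, {1, 1}}};  // {begin, end} for H and W
auto conv1 = Aidge::PaddedConv<2>(3, 16, kernel, "conv1", stride, padding);
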
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 5b0c199e75f0cedd4a0d36f6d2c87d89833e0dd5..e3544171de9b97a2795f1d936adfeff341bd32dc 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -81,7 +81,7 @@ public:
      * @param inputIdx Index of the input analysed.
      * @return NbElts_t
      */
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
+    virtual NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
     /**
      * @brief Amount of data from a specific input actually used in one computation pass.
@@ -89,7 +89,7 @@ public:
      * @param inputIdx Index of the input analysed.
      * @return NbElts_t
      */
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
+    virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
 
     /**
      * @brief Amount of data ready to be used on a specific output.
@@ -97,9 +97,9 @@ public:
      * @param outputIdx Index of the output analysed.
      * @return NbElts_t
      */
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
+    virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
 
-    void updateConsummerProducer();
+    virtual void updateConsummerProducer();
 
     virtual void forward();
 
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..deae0e6b8c5a91e5c10e7655549a4e46ac90eb0b
--- /dev/null
+++ b/include/aidge/operator/Pad.hpp
@@ -0,0 +1,233 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_PAD_H_
+#define AIDGE_CORE_OPERATOR_PAD_H_
+
+#include <array>
+#include <numeric>
+#include <vector>
+#include <cmath>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
+enum class PadBorderType { Constant, Replicate, Reflect, Wrap };
+
+template <DimIdx_t DIM>
+class Pad_Op : public Operator,
+                public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
+                public StaticAttributes<PadAttr,
+                                       std::array<std::array<DimSize_t, 2>, DIM>,
+                                       PadBorderType,
+                                       double> {
+private:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char *Type = "Pad";
+
+    Pad_Op() = delete;
+
+    using Attributes_ = StaticAttributes<PadAttr,
+                                             std::array<std::array<DimSize_t, 2>, DIM>,
+                                             PadBorderType,
+                                             double>;
+    template <PadAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
+    constexpr Pad_Op(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
+                     const PadBorderType &borderType = PadBorderType::Constant,
+                     double borderValue = 0.0)
+        : Operator(Type),
+          Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
+                           attr<PadAttr::BorderType>(borderType),
+                           attr<PadAttr::BorderValue>(borderValue)) {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Pad_Op(const Pad_Op& op)
+        : Operator(Type),
+          Attributes_(op),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Pad_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Pad_Op<DIM>>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 1 && "Pad operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInput->empty()) {
+            std::array<DimSize_t, DIM + 2> outputDims = {};
+
+            for (std::size_t dim = 0; dim < DIM; ++dim) {
+                outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[dim][0]
+                                    + mInput->dims()[dim+2]
+                                    + this->template getAttr<PadAttr::BeginEndBorders>()[dim][1];
+            }
+            outputDims[1] = mInput->dims()[1];
+            outputDims[0] = mInput->dims()[0];
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return *(mInput.get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "Pad Operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "Pad Operators has only 1 outputs");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string &name) override {
+        mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+
+    void setDatatype(const DataType &datatype) override {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> Pad(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
+                                           const std::string& name = "",
+                                           const PadBorderType &borderType = PadBorderType::Constant,
+                                           double borderValue = 0.0)
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
+    return pad;
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, DIM> &dimBeginEnd,
+                                           const std::string& name = "",
+                                           const PadBorderType &borderType = PadBorderType::Constant,
+                                           double borderValue = 0.0)
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    std::array<std::array<DimSize_t, 2>, DIM> beginEndTuples;
+    for (size_t i = 0; i < DIM; ++i) {
+        beginEndTuples[i] = {dimBeginEnd[i], dimBeginEnd[i]};
+    }
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
+    return pad;
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> ZeroPad(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
+                                           const std::string& name = "")
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadBorderType::Constant, 0.0), name);
+    return pad;
+}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> ZeroPad(const std::array<DimSize_t, DIM> &dimBeginEnd,
+                                           const std::string& name = "")
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    std::array<std::array<DimSize_t, 2>, DIM> beginEndTuples;
+    for (size_t i = 0; i < DIM; ++i) {
+        beginEndTuples[i] = {dimBeginEnd[i], dimBeginEnd[i]};
+    }
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadBorderType::Constant, 0.0), name);
+    return pad;
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> Pad(
+    std::array<DimSize_t, 2> const (&beginEndTuples)[DIM],
+    const std::string& name = "",
+    const PadBorderType &borderType = PadBorderType::Constant,
+    double borderValue = 0.0)
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
+    return Pad(to_array(beginEndTuples), name, borderType, borderValue);
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndBorders", "BorderType", "BorderValue"};
+
+template <>
+const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Replicate", "Reflect", "Wrap"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
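
Pad grows each spatial dimension by its begin/end borders: computeOutputDims() gives out = begin + in + end per spatial dimension, while batch and channel dimensions are left untouched. A short sketch of the two flavours (asymmetric borders and the symmetric ZeroPad shortcut), with illustrative values:

#include <array>
#include "aidge/operator/Pad.hpp"

// Asymmetric borders: H padded by 0 before / 1 after, W by 2 before / 3 after.
std::array<std::array<Aidge::DimSize_t, 2>, 2> borders{{{0, 1}, {2, 3}}};
auto pad1 = Aidge::Pad<2>(borders, "pad1", Aidge::PadBorderType::Reflect);

// Symmetric constant (zero) padding: 1 element before and after on each spatial dimension.
std::array<Aidge::DimSize_t, 2> sym{1, 1};
auto pad2 = Aidge::ZeroPad<2>(sym, "pad2");
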
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 9916ee2004bd1aa9f33acf96d95cae4703f692df..1896894ee8690cedaef696394da0829604e36211 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -89,11 +89,6 @@ private:
      *
      */
     std::vector<std::shared_ptr<Node>> mStaticSchedule;
-    /**
-     * @brief Number of computation node (i.e: nb nodes != Producer)
-     *
-     */
-    std::size_t mComputationNumber = 0; // TODO: Check if not inferable from mStaticSchedule
 };
 } // namespace Aidge
 
diff --git a/include/aidge/utils/Utils.hpp b/include/aidge/utils/ErrorHandling.hpp
similarity index 54%
rename from include/aidge/utils/Utils.hpp
rename to include/aidge/utils/ErrorHandling.hpp
index 71817dcfc9713ad36a74175affd21b03cb6ed181..8fbeff30abecfec0077786b21825b6a6f36677c6 100644
--- a/include/aidge/utils/Utils.hpp
+++ b/include/aidge/utils/ErrorHandling.hpp
@@ -10,17 +10,21 @@
  ********************************************************************************/
 
 
-#ifndef AIDGE_UTILS_H_
-#define AIDGE_UTILS_H_
+#ifndef AIDGE_ERRORHANDLING_H_
+#define AIDGE_ERRORHANDLING_H_
 
 #include <cstdio>
 #include <memory>
 
-#ifdef NO_EXCEPTIONS
+#define AIDGE_STRINGIZE_DETAIL(x) #x
+#define AIDGE_STRINGIZE(x) AIDGE_STRINGIZE_DETAIL(x)
+
+#ifdef NO_EXCEPTION
 #define AIDGE_THROW_OR_ABORT(ex, ...) \
 do { std::printf(__VA_ARGS__); std::abort(); } while (false)
 #else
 #include <stdexcept>
+#include <memory>
 #define AIDGE_THROW_OR_ABORT(ex, ...) \
 do { \
     int n = 128; \
@@ -35,4 +39,21 @@ do { \
 } while (false)
 #endif
 
-#endif //AIDGE_UTILS_H_
\ No newline at end of file
+#include <cassert>
+
+/**
+ * Macro for API assertions.
+ * Used to check logic directly related to the user's inputs.
+ * If it fires, it indicates a user error.
+*/
+#define AIDGE_ASSERT(stm, ...) \
+do { if (!(stm)) { std::printf("Assertion failed: " AIDGE_STRINGIZE(stm) " in " __FILE__ ":%d\n", __LINE__); \
+    AIDGE_THROW_OR_ABORT(std::runtime_error, __VA_ARGS__); } } while (false)
+
+/**
+ * Macro for internal assertions.
+ * Used to check internal logic not directly related to the API user's inputs.
+ * If it fires, it indicates a bug.
+*/
+#define AIDGE_INTERNAL_ASSERT(stm) \
+assert((stm) && "Internal assertion failed: " #stm " in " __FILE__ ":" AIDGE_STRINGIZE(__LINE__))
+
+#endif //AIDGE_ERRORHANDLING_H_
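
The two assertion macros target different failure modes: AIDGE_ASSERT guards user-facing preconditions and throws (or aborts when NO_EXCEPTION is defined), while AIDGE_INTERNAL_ASSERT checks internal invariants through a plain assert and is compiled out with NDEBUG. A small, purely illustrative sketch (the helper function is hypothetical):

#include "aidge/utils/ErrorHandling.hpp"

// Hypothetical helper, only to show where each macro belongs.
void setKernelSize(int size) {
    AIDGE_ASSERT(size > 0, "kernel size must be strictly positive, got %d", size);  // user error
    AIDGE_INTERNAL_ASSERT(size <= 1024);  // internal invariant; a failure here is a bug
}
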
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index de543e95a16475c4443164af7be5c379d6554f8d..3b29c472b3a540c9ef3b8ed46520e3e718e8cbfb 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -58,6 +58,11 @@ struct Registrar {
         //assert(newInsert && "registrar already exists");
     }
 
+    static bool exists(const typename C::registrar_key& key) {
+        const auto it = C::registry().find(key);
+        return (it != C::registry().end());
+    }
+
     static auto create(const typename C::registrar_key& key){
         const auto it = C::registry().find(key);
         assert(it != C::registry().end() && "invalid registrar key");
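
Registrar::exists() lets a caller probe the registry before create(), which asserts on a missing key; MetaOperator_Op::setBackend() above relies on it to fall back to executing the micro-graph when no fused kernel is registered. A minimal sketch (backend and type strings are illustrative):

#include "aidge/operator/MetaOperator.hpp"
#include "aidge/utils/Registrar.hpp"

// True if a fused "cpu" implementation was registered for the "PaddedConv" meta-operator.
bool hasFusedPaddedConv() {
    return Aidge::Registrar<Aidge::MetaOperator_Op>::exists({"cpu", "PaddedConv"});
}
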
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index a549948ba7b0625fab3e4bce04362bef4098a612..b67f69ae7afc2c22f3b424812ec994b10974b668 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -18,7 +18,7 @@
 #include <typeinfo>
 
 #include "aidge/utils/Attributes.hpp"
-#include "aidge/utils/Utils.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
 /**
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 372afebdd3e1626cd0af88e335b78ec7fd73a5f4..5820e94c5cbd24150a4e81b0db34328ac35e1bf5 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -30,16 +30,13 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
     m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, (DIM<<1)> &>(),
+                const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
-        py::arg("stride_dims"),
-        py::arg("padding_dims"));
+        py::arg("stride_dims"));
   
   m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
                                                                   const std::string& name,
-                                                                  const std::vector<DimSize_t> &stride_dims,
-                                                                  const std::vector<DimSize_t> &padding_dims) {
+                                                                  const std::vector<DimSize_t> &stride_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
         // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
         if (kernel_dims.size() != DIM) {
@@ -48,9 +45,6 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
         if (stride_dims.size() != DIM) {
             throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
-        if (padding_dims.size() != (DIM<<1)) {
-            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
-        }
         DimSize_t tmp_kernel_dims_array[DIM];
         for (size_t i = 0; i < DIM; ++i) {
             tmp_kernel_dims_array[i] = kernel_dims[i];
@@ -59,18 +53,12 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
         for (size_t i = 0; i < DIM; ++i) {
             tmp_stride_dims_array[i] = stride_dims[i];
         }
-        DimSize_t tmp_padding_dims_array[DIM<<1];
-        for (size_t i = 0; i < (DIM<<1); ++i) {
-            tmp_padding_dims_array[i] = padding_dims[i];
-        }
         const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
         const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
-        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
-        return AvgPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
+        return AvgPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array));
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
-       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
   
 }
 
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 0c09917d71e520227eed48705527adaf204857ee..91ede7b6a289f3def2a9c8261ff04d2ab9836cdd 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -32,13 +32,11 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
                 DimSize_t,
                 const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, (DIM<<1)> &,
                 const std::array<DimSize_t, DIM> &>(),
         py::arg("in_channels"),
         py::arg("out_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("padding_dims"),
         py::arg("dilation_dims"));
   
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
@@ -46,7 +44,6 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::string& name, 
                                                          const std::vector<DimSize_t> &stride_dims,
-                                                         const std::vector<DimSize_t> &padding_dims,
                                                          const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
         // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
@@ -56,9 +53,6 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
         if (stride_dims.size() != DIM) {
             throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
-        if (padding_dims.size() != (DIM<<1)) {
-            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
-        }
         if (dilation_dims.size() != DIM) {
             throw std::runtime_error("dilation_dims size [" + std::to_string(dilation_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
@@ -70,25 +64,19 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
         for (size_t i = 0; i < DIM; ++i) {
             tmp_stride_dims_array[i] = stride_dims[i];
         }
-        DimSize_t tmp_padding_dims_array[DIM<<1];
-        for (size_t i = 0; i < (DIM<<1); ++i) {
-            tmp_padding_dims_array[i] = padding_dims[i];
-        }
         DimSize_t tmp_dilation_dims_array[DIM];
         for (size_t i = 0; i < DIM; ++i) {
             tmp_dilation_dims_array[i] = dilation_dims[i];
         }
         const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
         const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
-        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
         const DimSize_t (&dilation_dims_array)[DIM] = tmp_dilation_dims_array;
-        return Conv<DIM>(in_channels, out_channels, to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array), to_array(dilation_dims_array));
+        return Conv<DIM>(in_channels, out_channels, to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(dilation_dims_array));
     }, py::arg("in_channels"),
        py::arg("out_channels"),
        py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
   
 }
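
The same pattern applies to the Conv binding: padding is no longer carried by the operator itself and, when needed, is expressed through the Pad-based meta-operator introduced by this patch. A hedged sketch of both forms, reusing the calls exercised in the unit tests below (channel counts and dimensions are illustrative):

    #include "aidge/aidge.hpp"   // assumed umbrella header

    void buildConvs() {
        // Plain convolution: kernel (and optional stride/dilation) only, no padding_dims.
        auto conv = Aidge::Conv(3, 32, {3, 3});

        // Padded convolution: Pad + Conv wrapped into a single meta-operator
        // (signature taken from Test_MetaOperator.cpp below).
        auto paddedConv = Aidge::PaddedConv(1, 3, {3, 3}, "padded_conv", {1, 1}, {{{1, 1}, {1, 1}}});
    }
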
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 3f48c50f7ffdb44450c0e2a155d85dcbf9f73fd9..446bcdcceb3ba805223fc22e6fc19a22dcf354ec 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -31,17 +31,14 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, (DIM<<1)> &,
                 const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("padding_dims"),
         py::arg("dilation_dims"));
   
   m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
-                                                                  const std::vector<DimSize_t> &padding_dims,
                                                                   const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
         // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
@@ -51,9 +48,6 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         if (stride_dims.size() != DIM) {
             throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
-        if (padding_dims.size() != (DIM<<1)) {
-            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
-        }
         if (dilation_dims.size() != DIM) {
             throw std::runtime_error("dilation_dims size [" + std::to_string(dilation_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
@@ -65,23 +59,17 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         for (size_t i = 0; i < DIM; ++i) {
             tmp_stride_dims_array[i] = stride_dims[i];
         }
-        DimSize_t tmp_padding_dims_array[DIM<<1];
-        for (size_t i = 0; i < (DIM<<1); ++i) {
-            tmp_padding_dims_array[i] = padding_dims[i];
-        }
         DimSize_t tmp_dilation_dims_array[DIM];
         for (size_t i = 0; i < DIM; ++i) {
             tmp_dilation_dims_array[i] = dilation_dims[i];
         }
         const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
         const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
-        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
         const DimSize_t (&dilation_dims_array)[DIM] = tmp_dilation_dims_array;
-        return ConvDepthWise<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array), to_array(dilation_dims_array));
+        return ConvDepthWise<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(dilation_dims_array));
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
   
 }
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 2efd18c816c2d588e574872b3d3776a3409dc4ba..a930b496b49280629d71725cee79aea4d850358e 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -30,16 +30,13 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, (DIM<<1)> &>(),
+                const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
-        py::arg("stride_dims"),
-        py::arg("padding_dims"));
+        py::arg("stride_dims"));
   
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
                                                                   const std::string& name,
-                                                                  const std::vector<DimSize_t> &stride_dims,
-                                                                  const std::vector<DimSize_t> &padding_dims) {
+                                                                  const std::vector<DimSize_t> &stride_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
         // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
         if (kernel_dims.size() != DIM) {
@@ -48,9 +45,6 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         if (stride_dims.size() != DIM) {
             throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
-        if (padding_dims.size() != (DIM<<1)) {
-            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
-        }
         DimSize_t tmp_kernel_dims_array[DIM];
         for (size_t i = 0; i < DIM; ++i) {
             tmp_kernel_dims_array[i] = kernel_dims[i];
@@ -59,18 +53,12 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         for (size_t i = 0; i < DIM; ++i) {
             tmp_stride_dims_array[i] = stride_dims[i];
         }
-        DimSize_t tmp_padding_dims_array[DIM<<1];
-        for (size_t i = 0; i < (DIM<<1); ++i) {
-            tmp_padding_dims_array[i] = padding_dims[i];
-        }
         const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
         const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
-        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
-        return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
+        return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array));
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
-       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
   
 }
 
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 03b2a9adb439eb00d0ba59a13fead4f25d617b36..8f8f51c89bbcc380963f355f781e8fda940dcffc 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -125,21 +125,17 @@ Aidge::IOIndex_t Aidge::GraphView::getNbFreeDataInputs() const {
 
 std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>
 Aidge::GraphView::dataInputs() const {
-  IOIndex_t nbDataIn = 0U;
-  for (const std::shared_ptr<Node>& inputNode : mInputNodes) {
-    nbDataIn += inputNode->nbDataInputs();
-  }
-  std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
-      std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbDataIn);
-  nbDataIn = 0U;
+  std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res;
+
   for (const std::shared_ptr<Node>& inputNode : mInputNodes) {
     std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
         inputNode->dataInputs();
-    std::move(inputNodeinputs.begin(), inputNodeinputs.end(),
-              res.begin() + nbDataIn);
-    nbDataIn += inputNode->nbDataInputs();
-    // res.insert(res.end(), (inputNode -> inputs()).begin(), (inputNode ->
-    // inputs()).end());
+
+    for (const auto& input : inputNodeinputs) {
+      if (mNodes.find(input.first) == mNodes.end()) {
+        res.push_back(input);
+      }
+    }
   }
   return res;
 }
@@ -147,21 +143,17 @@ Aidge::GraphView::dataInputs() const {
 
 std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>
 Aidge::GraphView::inputs() const {
-  std::size_t nbIn = 0U;
-  for (const std::shared_ptr<Node>& inputNode : mInputNodes) {
-    nbIn += inputNode->nbInputs();
-  }
-  std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
-      std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbIn);
-  nbIn = 0U;
+  std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res;
+
   for (const std::shared_ptr<Node>& inputNode : mInputNodes) {
     std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
         inputNode->inputs();
-    std::move(inputNodeinputs.begin(), inputNodeinputs.end(),
-              res.begin() + nbIn);
-    nbIn += inputNode->nbInputs();
-    // res.insert(res.end(), (inputNode -> inputs()).begin(), (inputNode ->
-    // inputs()).end());
+
+    for (const auto& input : inputNodeinputs) {
+      if (mNodes.find(input.first) == mNodes.end()) {
+        res.push_back(input);
+      }
+    }
   }
   return res;
 }
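
In practice, connections between two nodes that both belong to the view are no longer reported. A hedged sketch of the new behaviour, assuming (as the unit-test change below suggests) that add()'s second flag controls whether the parameter Producers are pulled into the view:

    #include "aidge/aidge.hpp"   // assumed umbrella header

    void inspectInputs() {
        auto g = std::make_shared<Aidge::GraphView>("TestGraph");
        auto conv = Aidge::Conv(3, 32, {3, 3});

        // With the default flag, the weight/bias Producers are added to the view as well,
        // so their connections to conv become internal and no longer appear in g->inputs().
        g->add(conv);
        auto outsideInputs = g->inputs();   // only conv's data input remains

        // To keep g->inputs() == conv->inputs(), the Producers must stay outside the view,
        // which is what g->add(conv, false) does in Test_GraphView.cpp below.
    }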
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d33376e4fd81a6778907de1ece7125d2812ab82a
--- /dev/null
+++ b/src/operator/MetaOperator.cpp
@@ -0,0 +1,140 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperator.hpp"
+
+Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph,
+    std::vector<NodePtr> inputNodes,
+    std::vector<NodePtr> outputNodes)
+    : Operator(type),
+        mGraph(graph)
+{
+    mInputs = std::vector<std::shared_ptr<Tensor>>(mGraph->inputs().size());
+    for (std::size_t i = 0; i < mInputs.size(); ++i) {
+        mInputs[i] = std::make_shared<Tensor>();
+    }
+    mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->outputs().size());
+    for (std::size_t i = 0; i < mOutputs.size(); ++i) {
+        mOutputs[i] = std::make_shared<Tensor>();
+    }
+
+    // Fill inputNodes and outputNodes when there is no ambiguity
+    if (inputNodes.empty()) {
+        AIDGE_ASSERT(mGraph->inputNodes().size() == 1, "need to specify internal nodes input mapping");
+        inputNodes.push_back(*mGraph->inputNodes().begin());
+    }
+
+    if (outputNodes.empty()) {
+        AIDGE_ASSERT(mGraph->outputNodes().size() == 1, "need to specify internal nodes output mapping");
+        outputNodes.push_back(*mGraph->outputNodes().begin());
+    }
+
+    AIDGE_ASSERT(mGraph->inputNodes().size() == inputNodes.size(), "wrong number of specified input nodes");
+    AIDGE_ASSERT(mGraph->outputNodes().size() == outputNodes.size(), "wrong number of specified output nodes");
+
+    // Identify inputs that are outside the micro-graph
+    for (const auto& inputNode : inputNodes) {
+        AIDGE_ASSERT(mGraph->inView(inputNode), "input node must be in the graph");
+        const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
+            inputNode->inputs();
+        
+        int inputIdx = 0;   // input idx relative to the current node
+        for (const auto& in : inputNodeinputs) {
+            if (in.first == nullptr || !mGraph->inView(in.first)) {
+                // The input is not connected inside the micro-graph
+                // (no connection to this input or connection outside the micro-graph)
+                // => it is therefore an input for the meta-operator
+                mInputOps.push_back(std::make_pair(inputNode->getOperator(), inputIdx));
+            }
+
+            ++inputIdx;
+        }
+    }
+
+    // The outputs of the output nodes are also the outputs of the meta-operator
+    for (const auto& outputNode : outputNodes) {
+        AIDGE_ASSERT(mGraph->inView(outputNode), "output node must be in the graph");
+        const std::vector<std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>> outputNodeoutputs =
+            outputNode->outputs();
+
+        for (size_t outputIdx = 0; outputIdx < outputNodeoutputs.size(); ++outputIdx) {
+            mOutputOps.push_back(std::make_pair(outputNode->getOperator(), outputIdx));
+        }
+    }
+
+    AIDGE_INTERNAL_ASSERT(mInputOps.size() == mGraph->inputs().size());
+    AIDGE_INTERNAL_ASSERT(mOutputOps.size() == mGraph->outputs().size());
+}
+
+Aidge::NbElts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
+    if (mImpl) {
+        return mImpl->getNbRequiredData(inputIdx);
+    }
+    else {
+        const auto& inputOp = mInputOps[inputIdx];
+        return inputOp.first->getNbRequiredData(inputOp.second);
+    }
+}
+
+Aidge::NbElts_t Aidge::MetaOperator_Op::getNbConsumedData(IOIndex_t inputIdx) const {
+    if (mImpl) {
+        return mImpl->getNbConsumedData(inputIdx);
+    }
+    else {
+        const auto& inputOp = mInputOps[inputIdx];
+        return inputOp.first->getNbConsumedData(inputOp.second);
+    }
+}
+
+Aidge::NbElts_t Aidge::MetaOperator_Op::getNbProducedData(IOIndex_t outputIdx) const {
+    if (mImpl) {
+        return mImpl->getNbProducedData(outputIdx);
+    }
+    else {
+        const auto& outputOp = mOutputOps[outputIdx];
+        return outputOp.first->getNbProducedData(outputOp.second);
+    }
+}
+
+void Aidge::MetaOperator_Op::updateConsummerProducer() {
+    if (mImpl) {
+        mImpl->updateConsummerProducer();
+    }
+    else {
+        if (!mScheduler) {
+            // Lazy initialization
+            mScheduler = std::make_shared<SequentialScheduler>(mGraph);
+        }
+        
+        // TODO: check that generateScheduling() can be called multiple times to iteratively update the schedule.
+        // It could be a good idea to unify updateConsummerProducer() and generateScheduling() into an "updateScheduling()" method.
+        mScheduler->generateScheduling();
+    }
+}
+
+void Aidge::MetaOperator_Op::forward() {
+    if (mImpl) {
+        // A custom implementation exists for this meta operator
+        mImpl->forward();
+    }
+    else {
+        // No custom implementation, use the individual operators' implementations
+        if (!mScheduler) {
+            // Lazy initialization
+            // TODO: should we assert that a scheduler already exists at this point?
+            // => should be created in updateConsummerProducer()
+            mScheduler = std::make_shared<SequentialScheduler>(mGraph);
+            mScheduler->generateScheduling();
+        }
+
+        mScheduler->forward(false);
+    }
+}
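
A hedged usage sketch of the new class, limited to the accessors exercised in Test_MetaOperator.cpp below (the PaddedConv arguments are copied from that test; everything else is illustrative):

    #include <memory>
    #include "aidge/operator/MetaOperator.hpp"
    #include "aidge/operator/MetaOperatorDefs.hpp"

    void runMetaOperator() {
        auto node = Aidge::PaddedConv(1, 3, {3, 3}, "padded_conv", {1, 1}, {{{1, 1}, {1, 1}}});
        auto metaOp = std::dynamic_pointer_cast<Aidge::MetaOperator_Op>(node->getOperator());

        // The wrapped Pad -> Conv micro-graph can be inspected directly.
        auto microGraph = metaOp->getMicroGraph();

        // Without a dedicated backend implementation (mImpl == nullptr), forward() lazily
        // creates a SequentialScheduler on the micro-graph and runs the inner operators.
        // metaOp->forward();   // would additionally require inputs and backends to be set up
    }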
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index cda5baedfa513ae9140f0f53bcf5c7867d9b90b1..1f34091e54c0f83dae6b60589c20fb8fdf1d5064 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -40,13 +40,10 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
     // TODO: optimize memory usage
 
     // setup initial producers list
-    mComputationNumber = 0;
     std::set<std::shared_ptr<Node>> producers;
     for (const std::shared_ptr<Node>& nodePtr : mGraphView->getNodes()) {
         if (nodePtr->type() == "Producer") {
             producers.insert(nodePtr);
-        } else {
-            ++mComputationNumber;
         }
     }
     // add Data Input
@@ -112,6 +109,7 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
 
         // Push consumers in the list of nodes to run and update the consumer producer system
         for (const auto& runnable : runnableConsumers) {
+            if (verbose) printf("Runnable: %s\n", (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
             runnable->getOperator()->updateConsummerProducer();
             mStaticSchedule.push_back(runnable);
         }
@@ -177,14 +175,19 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
 
 // TODO: handle multiple inputs/outputs
 void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
+    // Forward dims (if requested)
     if (forwardDims) {mGraphView->forwardDims(); }
 
-    // add each Producer Node.
-    std::set<std::shared_ptr<Node>> computationOver;
+    // Generate the scheduling only if it is empty.
+    // If the scheduling was already generated (in one or several steps, i.e. one or
+    // several successive calls to generateScheduling()), do not generate it twice.
+    if (mStaticSchedule.empty()) {
+        this->generateScheduling();
+    }
 
+    // Clear previous scheduling results
     mScheduling.clear();
 
-    this->generateScheduling();
     int cpt = 0;
     for (const auto& runnable : mStaticSchedule) {
         if (verbose)
@@ -202,7 +205,6 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
     }
     if (!verbose) drawProgressBar(1.0, 50, "                                   ");
     printf("\n");
-
 }
 
 void Aidge::SequentialScheduler::saveSchedulingDiagram(const std::string& fileName) const {
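
The net effect is that the static schedule can be prepared ahead of time (for instance by a meta-operator's updateConsummerProducer()) and reused across forward() calls. A hedged sketch, assuming the header location below and a GraphView g built as in the unit tests:

    #include <memory>
    #include "aidge/scheduler/Scheduler.hpp"   // assumed header path for SequentialScheduler

    void runScheduler(std::shared_ptr<Aidge::GraphView> g) {
        Aidge::SequentialScheduler scheduler(g);

        scheduler.generateScheduling();   // builds the static schedule once
        scheduler.forward(false);         // reuses the existing static schedule
        scheduler.forward(false);         // later calls reuse it too; only the runtime scheduling log is cleared
    }
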
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 4b929286ba494a452c7f9cb71ce944c7d576c03a..9f014364636c70031b522b09c893e1144af3f133 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -161,7 +161,7 @@ TEST_CASE("[core/graph] GraphView(addChild)") {
 TEST_CASE("[core/graph] GraphView(inputs)") {
     auto g1 = std::make_shared<GraphView>("TestGraph");
     std::shared_ptr<Node> conv = Conv(3, 32, {3, 3});
-    g1->add(conv);
+    g1->add(conv, false);
 
     REQUIRE(g1->inputs() == conv->inputs());
 }
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e9718fc694d29713797565d3ae8c8107cc7612de
--- /dev/null
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -0,0 +1,53 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/MetaOperatorDefs.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include <cstddef>
+
+using namespace Aidge;
+
+TEST_CASE("[core/operators] MetaOperator", "[Operator]") {
+    SECTION("PaddedConv") {
+        auto op = PaddedConv(1, 3, {3, 3}, "padded_conv", {1, 1}, {{{1, 1}, {1, 1}}});
+
+        auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(op->getOperator())->getMicroGraph();
+
+        REQUIRE(microGraph->getNodes().size() == 2);
+        REQUIRE(microGraph->inputNodes().size() == 2);  // 2 because Conv has inputs outside the meta-op (Producers for weight and bias)
+        // Order not guaranteed by the GraphView
+        //REQUIRE((*microGraph->inputNodes().begin())->getOperator()->type() == "Pad");
+        REQUIRE(microGraph->outputNodes().size() == 1);
+        REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv");
+        REQUIRE(op->nbInputs() == 3);
+        REQUIRE(op->nbDataInputs() == 1);
+        REQUIRE(op->nbOutputs() == 1);
+
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>();
+        myInput->resize({2,3,5,5});
+        op->getOperator()->associateInput(0,myInput);
+        op->getOperator()->computeOutputDims();
+
+        REQUIRE(op->getOperator()->outputDimsForwarded());
+        REQUIRE(op->getOperator()->getOutput(0)->dims() == std::vector<size_t>({2,3,5,5}));
+        REQUIRE(op->getOperator()->getInput(0) == myInput);
+        // Order not guaranteed by the GraphView
+        //REQUIRE((*microGraph->inputNodes().begin())->getOperator()->getInput(0) == myInput);
+        REQUIRE(op->getOperator()->getOutput(0) == (*microGraph->outputNodes().begin())->getOperator()->getOutput(0));
+        
+        //op->getOperator()->updateConsummerProducer();  // requires an implementation
+        //auto microGraphScheduler = std::dynamic_pointer_cast<MetaOperator_Op>(op->getOperator())->getMicroGraphScheduler();
+        //REQUIRE(microGraphScheduler->getStaticScheduling().size() == 2);
+    }
+}