diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp
index 1f45d700f6fc9f1d69682cb2de601979049c0af6..4d334e2a492962ab40efaf9bf1fe64682f225db2 100644
--- a/include/aidge/backend/cpu.hpp
+++ b/include/aidge/backend/cpu.hpp
@@ -22,6 +22,7 @@
 #include "aidge/backend/cpu/operator/FCImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
+#include "aidge/backend/cpu/operator/PadImpl.hpp"
 #include "aidge/backend/cpu/operator/ProducerImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
index 60b4923bdc18674da52be9bd07d9947fb9790f0d..ea46a540ad04b6227d6ec01c965e2eb99806d5e1 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
@@ -43,11 +43,11 @@ void AvgPoolingImpl2D_cpu_forward_kernel(const AvgPooling_Op<2>::Attrs &attrs,
 
     // output H size
     const std::size_t oxSize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] + std::get<2>(attrs)[0] + std::get<2>(attrs)[2] - std::get<1>(attrs)[0] + std::get<0>(attrs)[0]) /
+            static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] - std::get<1>(attrs)[0] + std::get<0>(attrs)[0]) /
                                 static_cast<float>(std::get<0>(attrs)[0])));
     // output W size
     const std::size_t oySize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] + std::get<2>(attrs)[1] + std::get<2>(attrs)[3] - std::get<1>(attrs)[1] + std::get<0>(attrs)[1])/
+            static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] - std::get<1>(attrs)[1] + std::get<0>(attrs)[1]) /
                                 static_cast<float>(std::get<0>(attrs)[1])));
 
     // TODO: kernel computation
@@ -61,11 +61,11 @@ void AvgPoolingImpl2D_cpu_forward_kernel(const AvgPooling_Op<2>::Attrs &attrs,
             const std::size_t oIndex = (ch + batch*dims[1]) * oxSize * oySize;
             const std::size_t iIndex = (ch + batch*dims[1]) * dims[2] * dims[3];
             for (std::size_t ox = 0; ox < oxSize; ++ox) {
-                const signedsize difx = static_cast<signedsize>(std::get<2>(attrs)[0] - ox * std::get<0>(attrs)[0]);
+                const signedsize difx = -static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
                 const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0)));
                 const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<1>(attrs)[0] ? std::get<1>(attrs)[0] : dims[2] + difx);
                 for (std::size_t oy = 0; oy < oySize; ++oy) {
-                    const signedsize dify = static_cast<signedsize>(std::get<2>(attrs)[1] - oy * std::get<0>(attrs)[1]);
+                    const signedsize dify = -static_cast<signedsize>(oy * std::get<0>(attrs)[1]);
                     const std::size_t syMin = static_cast<std::size_t>(std::max(dify, signedsize(0)));
                     const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<1>(attrs)[1] ? std::get<1>(attrs)[1] : dims[3] + dify);
                     const std::size_t oIndexFull = oIndex + ox*oySize + oy;
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
index d8bcff3e8c03b3c31a00cdf60832cbc671737dc2..5aa29ac55740d46bba873bb9d85a04cd004cc3bd 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
@@ -46,11 +46,11 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Attrs &at
 
     // output H size
     const std::size_t oxSize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] + std::get<4>(attrs)[0] + std::get<4>(attrs)[2] - std::get<3>(attrs)[0] + std::get<0>(attrs)[0]) /
+            static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] - std::get<3>(attrs)[0] + std::get<0>(attrs)[0]) /
                                 static_cast<float>(std::get<0>(attrs)[0])));
     // output W size
     const std::size_t oySize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] + std::get<4>(attrs)[1] + std::get<4>(attrs)[3] - std::get<3>(attrs)[1] + std::get<0>(attrs)[1]) /
+            static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] - std::get<3>(attrs)[1] + std::get<0>(attrs)[1]) /
                                 static_cast<float>(std::get<0>(attrs)[1])));
 
     // TODO: kernel computation
@@ -67,16 +67,16 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Attrs &at
             const std::size_t iIndex = (ch + batch*dims[1]) * dims[2] * dims[3];
             const std::size_t wIndex = ch * std::get<3>(attrs)[0] * std::get<3>(attrs)[1];
             for (std::size_t ox = 0; ox < oxSize; ++ox) {
-                const signedsize difx = static_cast<signedsize>(std::get<4>(attrs)[0] - ox * std::get<0>(attrs)[0]);
+                const signedsize difx = -static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
                 const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0)));
                 const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<3>(attrs)[0] ? std::get<3>(attrs)[0] : dims[2] + difx);
                 for (std::size_t oy = 0; oy < oySize; ++oy) {
-                    const signedsize dify = static_cast<signedsize>(std::get<4>(attrs)[1] - oy * std::get<0>(attrs)[1]);
+                    const signedsize dify = -static_cast<signedsize>(oy * std::get<0>(attrs)[1]);
                     const std::size_t syMin = static_cast<std::size_t>(std::max(dify, signedsize(0)));
                     const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<3>(attrs)[1] ? std::get<3>(attrs)[1] : dims[3] + dify);
                     const std::size_t oIndexFull = oIndex + ox*oySize + oy;
-                    const signedsize ix = static_cast<signedsize>(ox * std::get<0>(attrs)[0]) - std::get<4>(attrs)[0];
-                    const signedsize iy = static_cast<signedsize>(oy * std::get<0>(attrs)[1]) - std::get<4>(attrs)[1];
+                    const signedsize ix = static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
+                    const signedsize iy = static_cast<signedsize>(oy * std::get<0>(attrs)[1]);
 
                     if (sxMin == 0 && syMin == 0 && sxMax == 3 && syMax == 3) {
                         output[oIndexFull] +=  (weights[wIndex + 0*std::get<3>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+0)] +
diff --git a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
index 9d4d6dfdfcc114e47e478089c4d5a42c2bee0f28..03e2c35170432181c7a9b3934d61f0bd18471876 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
@@ -88,11 +88,11 @@ void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Attrs &attrs, const std::ar
 
     // output H size
     const std::size_t oxSize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] + std::get<5>(attrs)[0] + std::get<5>(attrs)[2] - std::get<4>(attrs)[0] + std::get<0>(attrs)[0]) /
+            static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] - std::get<4>(attrs)[0] + std::get<0>(attrs)[0]) /
                                 static_cast<float>(std::get<0>(attrs)[0])));
     // output W size
     const std::size_t oySize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] + std::get<5>(attrs)[1] + std::get<5>(attrs)[3] - std::get<4>(attrs)[1] + std::get<0>(attrs)[1]) /
+            static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] - std::get<4>(attrs)[1] + std::get<0>(attrs)[1]) /
                                 static_cast<float>(std::get<0>(attrs)[1])));
 
     // TODO: kernel computation
@@ -110,16 +110,16 @@ void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Attrs &attrs, const std::ar
                 const std::size_t iIndex = (inCh + batch*dims[1]) * dims[2] * dims[3];
                 const std::size_t wIndex = (inCh + outCh*dims[1]) * std::get<4>(attrs)[0] * std::get<4>(attrs)[1];
                 for (std::size_t ox = 0; ox < oxSize; ++ox) {
-                    const signedsize difx = static_cast<signedsize>(std::get<5>(attrs)[0] - ox * std::get<0>(attrs)[0]);
+                    const signedsize difx = -static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
                     const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0)));
                     const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<4>(attrs)[0] ? std::get<4>(attrs)[0] : dims[2] + difx);
                     for (std::size_t oy = 0; oy < oySize; ++oy) {
-                        const signedsize dify = static_cast<signedsize>(std::get<5>(attrs)[1] - oy * std::get<0>(attrs)[1]);
+                        const signedsize dify = -static_cast<signedsize>(oy * std::get<0>(attrs)[1]);
                         const std::size_t syMin = static_cast<std::size_t>(std::max(dify, signedsize(0)));
                         const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<4>(attrs)[1] ? std::get<4>(attrs)[1] : dims[3] + dify);
                         const std::size_t oIndexFull = oIndex + ox*oySize + oy;
-                        const signedsize ix = static_cast<signedsize>(ox * std::get<0>(attrs)[0]) - std::get<5>(attrs)[0];
-                        const signedsize iy = static_cast<signedsize>(oy * std::get<0>(attrs)[1]) - std::get<5>(attrs)[1];
+                        const signedsize ix = static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
+                        const signedsize iy = static_cast<signedsize>(oy * std::get<0>(attrs)[1]);
 
                         if (sxMin == 0 && syMin == 0 && sxMax == 3 && syMax == 3) {
                             output[oIndexFull] += (weights[wIndex + 0*std::get<4>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+0)] +
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
index eae73f8d8fb7546bb234e7ad7dce38b59378b4ee..caa99e8678a72c7fd3c77fe8b7579ea739ac64c7 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
@@ -42,15 +42,14 @@ void MaxPoolingImpl2D_cpu_forward_kernel(const MaxPooling_Op<2>::Attrs &attrs,
 
     std::array<DimSize_t, 2> strideDims  = std::get<0>(attrs);
     std::array<DimSize_t, 2> kernelDims  = std::get<1>(attrs);
-    std::array<DimSize_t, 4> paddingDims = std::get<2>(attrs);
 
     // output H size
     const std::size_t oxSize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] + paddingDims[0] + paddingDims[2] - kernelDims[0] + strideDims[0]) /
+            static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] - kernelDims[0] + strideDims[0]) /
                                 static_cast<float>(strideDims[0])));
     // output W size
     const std::size_t oySize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] + paddingDims[1] + paddingDims[3] - kernelDims[1] + strideDims[1]) /
+            static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] - kernelDims[1] + strideDims[1]) /
                                 static_cast<float>(strideDims[1])));
 
     // TODO: kernel computation
@@ -64,11 +63,11 @@ void MaxPoolingImpl2D_cpu_forward_kernel(const MaxPooling_Op<2>::Attrs &attrs,
             const std::size_t oIndex = (ch + batch*dims[1]) * oxSize * oySize;
             const std::size_t iIndex = (ch + batch*dims[1]) * dims[2] * dims[3];
             for (std::size_t ox = 0; ox < oxSize; ++ox) {
-                const signedsize difx = static_cast<signedsize>(paddingDims[0] - ox * strideDims[0]);
+                const signedsize difx = -static_cast<signedsize>(ox * strideDims[0]);
                 const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0)));
                 const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > kernelDims[0] ? kernelDims[0] : dims[2] + difx);
                 for (std::size_t oy = 0; oy < oySize; ++oy) {
-                    const signedsize dify = static_cast<signedsize>(paddingDims[1] - oy * strideDims[1]);
+                    const signedsize dify = -static_cast<signedsize>(oy * strideDims[1]);
                     const std::size_t syMin = static_cast<std::size_t>(std::max(dify, signedsize(0)));
                     const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > kernelDims[1] ? kernelDims[1] : dims[3] + dify);
                     const std::size_t oIndexFull = oIndex + ox*oySize + oy;
diff --git a/include/aidge/backend/cpu/operator/PadImpl.hpp b/include/aidge/backend/cpu/operator/PadImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..1a3202a7c71a6669e191fefa7e7b29849fc017f8
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/PadImpl.hpp
@@ -0,0 +1,73 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_PADIMPL_H_
+#define AIDGE_CPU_OPERATOR_PADIMPL_H_
+
+#include <array>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+// class Pad_Op;
+
+// compute kernel registry for forward and backward
+class PadImpl2DForward_cpu
+    : public Registrable<PadImpl2DForward_cpu,
+                         std::tuple<DataType, DataType>,
+                         void(const Pad_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *,
+                              void *)> {};
+class PadImpl2DBackward_cpu
+    : public Registrable<PadImpl2DBackward_cpu,
+                         std::tuple<DataType, DataType>,
+                         void(const Pad_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *,
+                              void *)> {};
+
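+// CPU implementation of the 2D Pad operator: provides the scheduler data queries and dispatches the forward kernel.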
+class PadImpl2D_cpu : public OperatorImpl {
+   private:
+    const Pad_Op<2> &mOp;
+    std::array<NbElts_t, 1> mNbConsumedData = {0};
+    std::array<NbElts_t, 1> mNbProducedData = {0};
+
+   public:
+    PadImpl2D_cpu(const Pad_Op<2> &op) : mOp(op) {}
+
+    static std::unique_ptr<PadImpl2D_cpu> create(const Pad_Op<2> &op) {
+        return std::make_unique<PadImpl2D_cpu>(op);
+    }
+
+   public:
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
+
+    void forward() override;
+
+    void backward() override;
+};
+
+namespace {
+// add cpu backend to Pad_Op<2> implementation registry
+static Registrar<Pad_Op<2>> registrarPadImpl2D_cpu("cpu", Aidge::PadImpl2D_cpu::create);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_PADIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..1babc3ecf2ff2fa8db2fd5ffbd34d89b8d7d88b6
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
@@ -0,0 +1,116 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_PADIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_PADIMPL_FORWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/PadImpl.hpp"
+#include "aidge/utils/Types.h"
+#include <cmath>
+#include <array>
+#include <algorithm>
+
+namespace Aidge {
+/**
+ * @brief Forward kernel for 2D Padding on CPU backend.
+ * @tparam I Input data type.
+ * @tparam O Output data type.
+ * @param attrs Tuple of attributes from the Operator.
+ * @param dims Array of input dimensions.
+ * @param input_ const input Tensor.
+ * @param output_ Output Tensor.
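+ * @note attrs is expected to hold: <0> the begin/end borders of each spatial dimension, <1> the PadBorderType, <2> the padding value.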
+ */
+template <class I, class O>
+void PadImpl2D_cpu_forward_kernel(const Pad_Op<2>::Attrs &attrs, const std::array<DimSize_t, 4> &dims,
+                                       const void *input_, void *output_)
+{
+    const I *input = static_cast<const I *>(input_);
+    O *output = static_cast<O *>(output_);
+
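+    // Output spatial dimensions: input dimensions extended by the begin/end borders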
+    const std::size_t oySize = dims[2] + std::get<0>(attrs)[0][0] + std::get<0>(attrs)[0][1];
+    const std::size_t oxSize = dims[3] + std::get<0>(attrs)[1][0] + std::get<0>(attrs)[1][1];
+
+    for (std::size_t batch = 0; batch < dims[0]; ++batch) {
+        for (std::size_t ch = 0; ch < dims[1]; ++ch) {
+            const std::size_t iIndex = (ch + batch*dims[1]) * dims[2] * dims[3];
+            const std::size_t oIndex = (ch + batch*dims[1]) * oxSize * oySize;
+
+            for (std::size_t oy = 0; oy < oySize; ++oy) {
+                for (std::size_t ox = 0; ox < oxSize; ++ox) {
+                    const std::size_t oIndexFull = oIndex + ox*oySize + oy;
+
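+                    // Default to the padding value; the border handling below overwrites it whenever a source pixel exists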
+                    O outputValue = static_cast<O>(std::get<2>(attrs));
+
+                    if (std::get<1>(attrs) == PadBorderType::Constant) {
+                        int ix = static_cast<int>(ox) - static_cast<int>(std::get<0>(attrs)[1][1]);
+                        int iy = static_cast<int>(oy) - static_cast<int>(std::get<0>(attrs)[0][1]);
+
+                        if (ix >= 0  && ix < static_cast<int>(dims[3]) && iy >= 0  && iy < static_cast<int>(dims[2])) {
+                            outputValue = input[iIndex + static_cast<std::size_t>(ix)*dims[2] + static_cast<std::size_t>(iy)];
+                        }
+                    }
+                    else if (std::get<1>(attrs) == PadBorderType::Replicate) {
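+                        // Clamp out-of-range coordinates to the nearest edge of the input (edge replication)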
+                        int ix = std::max(0, std::min(static_cast<int>(dims[3]) - 1, static_cast<int>(ox) - static_cast<int>(std::get<0>(attrs)[1][1])));
+                        int iy = std::max(0, std::min(static_cast<int>(dims[2]) - 1, static_cast<int>(oy) - static_cast<int>(std::get<0>(attrs)[0][1])));
+
+                        outputValue = input[iIndex + static_cast<std::size_t>(ix)*dims[2] + static_cast<std::size_t>(iy)];
+                    }
+                    else if (std::get<1>(attrs) == PadBorderType::Reflect) {
+                        int ix = static_cast<int>(ox) - static_cast<int>(std::get<0>(attrs)[1][1]);
+                        int iy = static_cast<int>(oy) - static_cast<int>(std::get<0>(attrs)[0][1]);
+
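+                        // Fold coordinates that fall outside the input back towards the inside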
+                        if (ix < 0)
+                            ix = 0 - ix;
+                        if (iy < 0)
+                            iy = 0 - iy;
+                        if (ix >= static_cast<int>(dims[3]))
+                            ix = static_cast<int>(dims[3]) - ix;
+                        if (iy >= static_cast<int>(dims[2]))
+                            iy = static_cast<int>(dims[2]) - iy;
+
+                        outputValue = input[iIndex + static_cast<std::size_t>(ix)*dims[2] + static_cast<std::size_t>(iy)];
+                    }
+                    else if (std::get<1>(attrs) == PadBorderType::Wrap) {
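+                        // Re-enter from the opposite side: coordinates are taken modulo the input size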
+                        int ix = (static_cast<int>(dims[3]) + static_cast<int>(ox) - static_cast<int>(std::get<0>(attrs)[1][1])) % static_cast<int>(dims[3]);
+                        int iy = (static_cast<int>(dims[2]) + static_cast<int>(oy) - static_cast<int>(std::get<0>(attrs)[0][1])) % static_cast<int>(dims[2]);
+
+                        outputValue = input[iIndex + static_cast<std::size_t>(ix)*dims[2] + static_cast<std::size_t>(iy)];
+                    }
+
+                    output[oIndexFull] = outputValue;
+                }
+            }
+        }
+    }
+}
+
+namespace {
+static Registrar<PadImpl2DForward_cpu> registrarPadImpl2DForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32},
+        Aidge::PadImpl2D_cpu_forward_kernel<float, float>);
+static Registrar<PadImpl2DForward_cpu> registrarPadImpl2DForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32},
+        Aidge::PadImpl2D_cpu_forward_kernel<int, int>);
+static Registrar<PadImpl2DForward_cpu> registrarPadImpl2DForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64},
+        Aidge::PadImpl2D_cpu_forward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_PADIMPL_FORWARD_KERNEL_H_ */
diff --git a/src/operator/PadImpl.cpp b/src/operator/PadImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a7d2fb4e3fd7d10a59bc019c992cfad263954c17
--- /dev/null
+++ b/src/operator/PadImpl.cpp
@@ -0,0 +1,90 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstdio>     // std::printf
+#include <functional> // std::multiplies
+#include <numeric>    // std::accumulate
+#include <vector>
+
+#include "aidge/utils/Types.h"
+#include "aidge/operator/Conv.hpp"
+
+#include "aidge/backend/cpu/operator/PadImpl.hpp"
+#include "aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+    assert(inputIdx == 0 && "operator has only one input");
+    (void) inputIdx;
+
+    // Requires the whole tensors
+    const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims();
+
+    return std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
+}
+
+Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbRequiredProtected(IOIndex_t inputIdx) const {
+    assert(inputIdx == 0 && "operator has only one input");
+    (void) inputIdx;
+
+    // Padding cannot be in-place!
+    // We must ensure that we do not overwrite data that has not been consumed yet.
+    const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims();
+    const size_t inputSize = std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
+    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
+    const size_t outputSize = std::accumulate(outputDims.begin(), outputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
+
+    return (outputSize - inputSize);
+}
+
+Aidge::NbElts_t Aidge::PadImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
+    // Requires the whole tensors, regardless of available data on inputs
+    assert(outputIdx == 0 && "operator has only one output");
+    (void) outputIdx;
+
+    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
+    return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
+}
+
+Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
+    assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
+    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
+}
+
+Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
+    assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
+    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
+}
+
+void Aidge::PadImpl2D_cpu::updateConsummerProducer(){
+    // Update producer-consumer data
+    // Each input is consumed by the minimum amount required for a forward pass
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));
+
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
+
+void Aidge::PadImpl2D_cpu::forward() {
+    // FIXME: uncomment the following code once memory handling works
+    assert(mOp.getInput(0) && "missing input #0");
+
+    // Find the correct kernel type
+    auto kernelFunc =
+            Registrar<PadImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
+
+    // Call kernel
+    kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
+               mOp.getInput(0)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
+}
+
+void Aidge::PadImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/unit_tests/operator/Test_ConvImpl.cpp b/unit_tests/operator/Test_ConvImpl.cpp
index 23ff1aaebcfb79a4d4b1abc4f1a77f1c6de63b21..891f0e94b02d07d41751728e83fa9b42e4b89be8 100644
--- a/unit_tests/operator/Test_ConvImpl.cpp
+++ b/unit_tests/operator/Test_ConvImpl.cpp
@@ -156,165 +156,8 @@ TEST_CASE("[cpu/operator] Conv(forward)") {
         // myConv->getOperator()->getOutput(0)->print();
         REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
     }
-    SECTION("test Padding") {
-        std::shared_ptr<Node> myConv = Conv(3,4,{3,3}, "myconv", {1,1}, {1,1,1,1});
-        myConv->getOperator()->setDatatype(DataType::Int32);
-        myConv->getOperator()->setBackend("cpu");
-        std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
-            {
-                {
-                    {{  0,   1,   2},
-                    {  3,   4,   5},
-                    {  6,   7,   8}},
-                    {{  9,  10,  11},
-                    { 12,  13,  14},
-                    { 15,  16,  17}},
-                    {{ 18,  19,  20},
-                    { 21,  22,  23},
-                    { 24,  25,  26}}
-                },
-                {
-                    {{ 27,  28,  29},
-                    { 30,  31,  32},
-                    { 33,  34,  35}},
-                    {{ 36,  37,  38},
-                    { 39,  40,  41},
-                    { 42,  43,  44}},
-                    {{ 45,  46,  47},
-                    { 48,  49,  50},
-                    { 51,  52,  53}}
-                },
-                {
-                    {{ 54,  55,  56},
-                    { 57,  58,  59},
-                    { 60,  61,  62}},
-                    {{ 63,  64,  65},
-                    { 66,  67,  68},
-                    { 69,  70,  71}},
-                    {{ 72,  73,  74},
-                    { 75,  76,  77},
-                    { 78,  79,  80}}
-                },
-                {
-                    {{ 81,  82,  83},
-                    { 84,  85,  86},
-                    { 87,  88,  89}},
-                    {{ 90,  91,  92},
-                    { 93,  94,  95},
-                    { 96,  97,  98}},
-                    {{ 99, 100, 101},
-                    {102, 103, 104},
-                    {105, 106, 107}}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
-            {
-                {
-                    {{  0,   1,   2,   3,   4},
-                    {  5,   6,   7,   8,   9},
-                    { 10,  11,  12,  13,  14},
-                    { 15,  16,  17,  18,  19},
-                    { 20,  21,  22,  23,  24}},
-
-                    {{ 25,  26,  27,  28,  29},
-                    { 30,  31,  32,  33,  34},
-                    { 35,  36,  37,  38,  39},
-                    { 40,  41,  42,  43,  44},
-                    { 45,  46,  47,  48,  49}},
-
-                    {{ 50,  51,  52,  53,  54},
-                    { 55,  56,  57,  58,  59},
-                    { 60,  61,  62,  63,  64},
-                    { 65,  66,  67,  68,  69},
-                    { 70,  71,  72,  73,  74}}
-                },
-                {
-                    {{ 75,  76,  77,  78,  79},
-                    { 80,  81,  82,  83,  84},
-                    { 85,  86,  87,  88,  89},
-                    { 90,  91,  92,  93,  94},
-                    { 95,  96,  97,  98,  99}},
-
-                    {{100, 101, 102, 103, 104},
-                    {105, 106, 107, 108, 109},
-                    {110, 111, 112, 113, 114},
-                    {115, 116, 117, 118, 119},
-                    {120, 121, 122, 123, 124}},
-
-                    {{125, 126, 127, 128, 129},
-                    {130, 131, 132, 133, 134},
-                    {135, 136, 137, 138, 139},
-                    {140, 141, 142, 143, 144},
-                    {145, 146, 147, 148, 149}}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,5,5> { 
-            {
-                {
-                    {{  6895,  10225,  10486,  10747,   7063},
-                     { 10303,  15226,  15577,  15928,  10429},
-                     { 11518,  16981,  17332,  17683,  11554},
-                     { 12733,  18736,  19087,  19438,  12679},
-                     {  8047,  11791,  11998,  12205,   7927}},
-
-                    {{ 15960,  24069,  24816,  25563,  17100},
-                     { 25119,  37818,  38898,  39978,  26703},
-                     { 28764,  43218,  44298,  45378,  30258},
-                     { 32409,  48618,  49698,  50778,  33813},
-                     { 21972,  32925,  33618,  34311,  22824}},
-
-                    {{ 25041,  37929,  39162,  40395,  27153},
-                     { 39951,  60426,  62235,  64044,  42993},
-                     { 46026,  69471,  71280,  73089,  48978},
-                     { 52101,  78516,  80325,  82134,  54963},
-                     { 35913,  54075,  55254,  56433,  37737}},
-
-                    {{ 34104,  51771,  53490,  55209,  37188},
-                     { 54765,  83016,  85554,  88092,  59265},
-                     { 63270,  95706,  98244, 100782,  67680},
-                     { 71775, 108396, 110934, 113472,  76095},
-                     { 49836,  75207,  76872,  78537,  52632}}
-                },
-                {
-                    {{ 20395,  29800,  30061,  30322,  19663},
-                     { 28528,  41551,  41902,  42253,  27304},
-                     { 29743,  43306,  43657,  44008,  28429},
-                     { 30958,  45061,  45412,  45763,  29554},
-                     { 18847,  27316,  27523,  27730,  17827}},
-
-                    {{ 53760,  80094,  80841,  81588,  54000},
-                     { 79794, 118818, 119898, 120978,  80028},
-                     { 83439, 124218, 125298, 126378,  83583},
-                     { 87084, 129618, 130698, 131778,  87138},
-                     { 57072,  84900,  85593,  86286,  57024}},
-
-                    {{ 87141, 130404, 131637, 132870,  88353},
-                     {131076, 196101, 197910, 199719, 132768},
-                     {137151, 205146, 206955, 208764, 138753},
-                     {143226, 214191, 216000, 217809, 144738},
-                     { 95313, 142500, 143679, 144858,  96237}},
-
-                    {{120504, 180696, 182415, 184134, 122688},
-                     {182340, 273366, 275904, 278442, 185490},
-                     {190845, 286056, 288594, 291132, 193905},
-                     {199350, 298746, 301284, 303822, 202320},
-                     {133536, 200082, 201747, 203412, 135432}}
-                }
-            }
-        });
-        myConv->getOperator()->associateInput(0,myInput);
-        myConv->getOperator()->associateInput(1,myWeights);
-        myConv->getOperator()->associateInput(2,myBias);
-        myConv->getOperator()->computeOutputDims();
-        myConv->forward();
-
-        REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
-    }
     SECTION("Point-wise") {
-        std::shared_ptr<Node> myConv = Conv(3,4,{1,1}, "myconv", {1,1}, {0,0,0,0});
+        std::shared_ptr<Node> myConv = Conv(3,4,{1,1}, "myconv", {1,1});
         myConv->getOperator()->setDatatype(DataType::Float32);
         myConv->getOperator()->setBackend("cpu");
         myConv->getOperator()->input(0) = Array4D<float,2,3,3,3> {
diff --git a/unit_tests/operator/Test_PadImpl.cpp b/unit_tests/operator/Test_PadImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f40b62faa8207d770b895258c786580f82ae7f21
--- /dev/null
+++ b/unit_tests/operator/Test_PadImpl.cpp
@@ -0,0 +1,569 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstdlib>
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Pad.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Pad(forward)") {
+    SECTION("Symmetric Pad") {
+        const int pv = 0; // pad value
+
+        std::shared_ptr<Node> myPad = Pad({{1, 1}, {1, 1}}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
+        myPad->getOperator()->setDatatype(DataType::Int32);
+        myPad->getOperator()->setBackend("cpu");
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
+            {
+                {
+                    {{  0,   1,   2,   3,   4},
+                    {  5,   6,   7,   8,   9},
+                    { 10,  11,  12,  13,  14},
+                    { 15,  16,  17,  18,  19},
+                    { 20,  21,  22,  23,  24}},
+
+                    {{ 25,  26,  27,  28,  29},
+                    { 30,  31,  32,  33,  34},
+                    { 35,  36,  37,  38,  39},
+                    { 40,  41,  42,  43,  44},
+                    { 45,  46,  47,  48,  49}},
+
+                    {{ 50,  51,  52,  53,  54},
+                    { 55,  56,  57,  58,  59},
+                    { 60,  61,  62,  63,  64},
+                    { 65,  66,  67,  68,  69},
+                    { 70,  71,  72,  73,  74}}
+                },
+                {
+                    {{ 75,  76,  77,  78,  79},
+                    { 80,  81,  82,  83,  84},
+                    { 85,  86,  87,  88,  89},
+                    { 90,  91,  92,  93,  94},
+                    { 95,  96,  97,  98,  99}},
+
+                    {{100, 101, 102, 103, 104},
+                    {105, 106, 107, 108, 109},
+                    {110, 111, 112, 113, 114},
+                    {115, 116, 117, 118, 119},
+                    {120, 121, 122, 123, 124}},
+
+                    {{125, 126, 127, 128, 129},
+                    {130, 131, 132, 133, 134},
+                    {135, 136, 137, 138, 139},
+                    {140, 141, 142, 143, 144},
+                    {145, 146, 147, 148, 149}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
+            {
+                {
+                    {{ pv,  pv,   pv,   pv,   pv,   pv,  pv},
+                    { pv,   0,   1,   2,   3,   4,  pv},
+                    { pv,   5,   6,   7,   8,   9,  pv},
+                    { pv,  10,  11,  12,  13,  14,  pv},
+                    { pv,  15,  16,  17,  18,  19,  pv},
+                    { pv,  20,  21,  22,  23,  24,  pv},
+                    { pv,  pv,   pv,   pv,   pv,   pv,  pv}},
+
+                    {{ pv,  pv,   pv,   pv,   pv,   pv,  pv},
+                    { pv,  25,  26,  27,  28,  29,  pv},
+                    { pv,  30,  31,  32,  33,  34,  pv},
+                    { pv,  35,  36,  37,  38,  39,  pv},
+                    { pv,  40,  41,  42,  43,  44,  pv},
+                    { pv,  45,  46,  47,  48,  49,  pv},
+                    { pv,  pv,   pv,   pv,   pv,   pv,  pv}},
+
+                    {{ pv,  pv,   pv,   pv,   pv,   pv,  pv},
+                    { pv,  50,  51,  52,  53,  54,  pv},
+                    { pv,  55,  56,  57,  58,  59,  pv},
+                    { pv,  60,  61,  62,  63,  64,  pv},
+                    { pv,  65,  66,  67,  68,  69,  pv},
+                    { pv,  70,  71,  72,  73,  74,  pv},
+                    { pv,  pv,   pv,   pv,   pv,   pv,  pv}}
+                },
+                {
+                    {{ pv,  pv,   pv,   pv,   pv,   pv,  pv},
+                    { pv,  75,  76,  77,  78,  79,  pv},
+                    { pv,  80,  81,  82,  83,  84,  pv},
+                    { pv,  85,  86,  87,  88,  89,  pv},
+                    { pv,  90,  91,  92,  93,  94,  pv},
+                    { pv,  95,  96,  97,  98,  99,  pv},
+                    { pv,  pv,   pv,   pv,   pv,   pv,  pv}},
+
+                    {{ pv,  pv,   pv,   pv,   pv,   pv,  pv},
+                    { pv, 100, 101, 102, 103, 104,  pv},
+                    { pv, 105, 106, 107, 108, 109,  pv},
+                    { pv, 110, 111, 112, 113, 114,  pv},
+                    { pv, 115, 116, 117, 118, 119,  pv},
+                    { pv, 120, 121, 122, 123, 124,  pv},
+                    { pv,  pv,   pv,   pv,   pv,   pv,  pv}},
+
+                    {{ pv,  pv,   pv,   pv,   pv,   pv,  pv},
+                    { pv, 125, 126, 127, 128, 129,  pv},
+                    { pv, 130, 131, 132, 133, 134,  pv},
+                    { pv, 135, 136, 137, 138, 139,  pv},
+                    { pv, 140, 141, 142, 143, 144,  pv},
+                    { pv, 145, 146, 147, 148, 149,  pv},
+                    { pv,  pv,   pv,   pv,   pv,   pv,  pv}}
+                }
+            }
+        });
+
+        myPad->getOperator()->associateInput(0,myInput);
+        myPad->getOperator()->computeOutputDims();
+        myPad->forward();
+        // myPad->getOperator()->getOutput(0)->print();
+        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+    }
+
+    SECTION("Asymmetric Pad") {
+        const int pv = 0; // pad value
+
+        std::shared_ptr<Node> myPad = Pad({{1, 0}, {0, 1}}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
+        myPad->getOperator()->setDatatype(DataType::Int32);
+        myPad->getOperator()->setBackend("cpu");
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
+            {
+                {
+                    {{  0,   1,   2,   3,   4},
+                    {  5,   6,   7,   8,   9},
+                    { 10,  11,  12,  13,  14},
+                    { 15,  16,  17,  18,  19},
+                    { 20,  21,  22,  23,  24}},
+
+                    {{ 25,  26,  27,  28,  29},
+                    { 30,  31,  32,  33,  34},
+                    { 35,  36,  37,  38,  39},
+                    { 40,  41,  42,  43,  44},
+                    { 45,  46,  47,  48,  49}},
+
+                    {{ 50,  51,  52,  53,  54},
+                    { 55,  56,  57,  58,  59},
+                    { 60,  61,  62,  63,  64},
+                    { 65,  66,  67,  68,  69},
+                    { 70,  71,  72,  73,  74}}
+                },
+                {
+                    {{ 75,  76,  77,  78,  79},
+                    { 80,  81,  82,  83,  84},
+                    { 85,  86,  87,  88,  89},
+                    { 90,  91,  92,  93,  94},
+                    { 95,  96,  97,  98,  99}},
+
+                    {{100, 101, 102, 103, 104},
+                    {105, 106, 107, 108, 109},
+                    {110, 111, 112, 113, 114},
+                    {115, 116, 117, 118, 119},
+                    {120, 121, 122, 123, 124}},
+
+                    {{125, 126, 127, 128, 129},
+                    {130, 131, 132, 133, 134},
+                    {135, 136, 137, 138, 139},
+                    {140, 141, 142, 143, 144},
+                    {145, 146, 147, 148, 149}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,6,6> { //NCHW
+            {
+                {
+                    {{ pv,   pv,   pv,   pv,   pv,  pv},
+                    { 0,   1,   2,   3,   4,  pv},
+                    { 5,   6,   7,   8,   9,  pv},
+                    { 10,  11,  12,  13,  14,  pv},
+                    { 15,  16,  17,  18,  19,  pv},
+                    { 20,  21,  22,  23,  24,  pv}},
+
+                    {{ pv,   pv,   pv,   pv,   pv,  pv},
+                    { 25,  26,  27,  28,  29,  pv},
+                    { 30,  31,  32,  33,  34,  pv},
+                    { 35,  36,  37,  38,  39,  pv},
+                    { 40,  41,  42,  43,  44,  pv},
+                    { 45,  46,  47,  48,  49,  pv}},
+
+                    {{ pv,   pv,   pv,   pv,   pv,  pv},
+                    { 50,  51,  52,  53,  54,  pv},
+                    { 55,  56,  57,  58,  59,  pv},
+                    { 60,  61,  62,  63,  64,  pv},
+                    { 65,  66,  67,  68,  69,  pv},
+                    { 70,  71,  72,  73,  74,  pv}}
+                },
+                {
+                    {{ pv,   pv,   pv,   pv,   pv,  pv},
+                    { 75,  76,  77,  78,  79,  pv},
+                    { 80,  81,  82,  83,  84,  pv},
+                    { 85,  86,  87,  88,  89,  pv},
+                    { 90,  91,  92,  93,  94,  pv},
+                    { 95,  96,  97,  98,  99,  pv}},
+
+                    {{ pv,   pv,   pv,   pv,   pv,  pv},
+                    { 100, 101, 102, 103, 104,  pv},
+                    { 105, 106, 107, 108, 109,  pv},
+                    { 110, 111, 112, 113, 114,  pv},
+                    { 115, 116, 117, 118, 119,  pv},
+                    { 120, 121, 122, 123, 124,  pv}},
+
+                    {{ pv,   pv,   pv,   pv,   pv,  pv},
+                    { 125, 126, 127, 128, 129,  pv},
+                    { 130, 131, 132, 133, 134,  pv},
+                    { 135, 136, 137, 138, 139,  pv},
+                    { 140, 141, 142, 143, 144,  pv},
+                    { 145, 146, 147, 148, 149,  pv}}
+                }
+            }
+        });
+
+        myPad->getOperator()->associateInput(0,myInput);
+        myPad->getOperator()->computeOutputDims();
+        myPad->forward();
+        // myPad->getOperator()->getOutput(0)->print();
+        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+    }
+
+    SECTION("Pad Replicate") {
+        std::shared_ptr<Node> myPad = Pad({{1, 1}, {1, 1}}, "mypad", PadBorderType::Replicate);
+        myPad->getOperator()->setDatatype(DataType::Int32);
+        myPad->getOperator()->setBackend("cpu");
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
+            {
+                {
+                    {{  0,   1,   2,   3,   4},
+                    {  5,   6,   7,   8,   9},
+                    { 10,  11,  12,  13,  14},
+                    { 15,  16,  17,  18,  19},
+                    { 20,  21,  22,  23,  24}},
+
+                    {{ 25,  26,  27,  28,  29},
+                    { 30,  31,  32,  33,  34},
+                    { 35,  36,  37,  38,  39},
+                    { 40,  41,  42,  43,  44},
+                    { 45,  46,  47,  48,  49}},
+
+                    {{ 50,  51,  52,  53,  54},
+                    { 55,  56,  57,  58,  59},
+                    { 60,  61,  62,  63,  64},
+                    { 65,  66,  67,  68,  69},
+                    { 70,  71,  72,  73,  74}}
+                },
+                {
+                    {{ 75,  76,  77,  78,  79},
+                    { 80,  81,  82,  83,  84},
+                    { 85,  86,  87,  88,  89},
+                    { 90,  91,  92,  93,  94},
+                    { 95,  96,  97,  98,  99}},
+
+                    {{100, 101, 102, 103, 104},
+                    {105, 106, 107, 108, 109},
+                    {110, 111, 112, 113, 114},
+                    {115, 116, 117, 118, 119},
+                    {120, 121, 122, 123, 124}},
+
+                    {{125, 126, 127, 128, 129},
+                    {130, 131, 132, 133, 134},
+                    {135, 136, 137, 138, 139},
+                    {140, 141, 142, 143, 144},
+                    {145, 146, 147, 148, 149}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
+            {
+                {
+                    {{ 0,  0,   1,   2,   3,   4,  4},
+                    { 0,   0,   1,   2,   3,   4,  4},
+                    { 5,   5,   6,   7,   8,   9,  9},
+                    { 10,  10,  11,  12,  13,  14,  14},
+                    { 15,  15,  16,  17,  18,  19,  19},
+                    { 20,  20,  21,  22,  23,  24,  24},
+                    { 20,  20,  21,  22,  23,  24,  24}},
+
+                    {{ 25,  25,  26,  27,  28,  29,  29},
+                    { 25,  25,  26,  27,  28,  29,  29},
+                    { 30,  30,  31,  32,  33,  34,  34},
+                    { 35,  35,  36,  37,  38,  39,  39},
+                    { 40,  40,  41,  42,  43,  44,  44},
+                    { 45,  45,  46,  47,  48,  49,  49},
+                    { 45,  45,  46,  47,  48,  49, 49}},
+
+                    {{ 50,  50,  51,  52,  53,  54,  54},
+                    { 50,  50,  51,  52,  53,  54,  54},
+                    { 55,  55,  56,  57,  58,  59,  59},
+                    { 60,  60,  61,  62,  63,  64,  64},
+                    { 65,  65,  66,  67,  68,  69,  69},
+                    { 70,  70,  71,  72,  73,  74,  74},
+                    { 70,  70,  71,  72,  73,  74,  74}}
+                },
+                {
+                    {{ 75,  75,  76,  77,  78,  79,  79},
+                    { 75,  75,  76,  77,  78,  79,  79},
+                    { 80,  80,  81,  82,  83,  84,  84},
+                    { 85,  85,  86,  87,  88,  89,  89},
+                    { 90,  90,  91,  92,  93,  94,  94},
+                    { 95,  95,  96,  97,  98,  99,  99},
+                    { 95,  95,  96,  97,  98,  99,  99}},
+
+                    {{100,  100, 101, 102, 103, 104,  104},
+                    {100,  100, 101, 102, 103, 104,  104},
+                    {105,  105, 106, 107, 108, 109, 109},
+                    {110,  110, 111, 112, 113, 114,  114},
+                    {115,  115, 116, 117, 118, 119,  119},
+                    {120,  120, 121, 122, 123, 124,  124},
+                    {120,  120, 121, 122, 123, 124,  124}},
+
+                    {{125,  125, 126, 127, 128, 129,  129},
+                    {125,  125, 126, 127, 128, 129,  129},
+                    {130,  130, 131, 132, 133, 134,  134},
+                    {135,  135, 136, 137, 138, 139,  139},
+                    {140,  140, 141, 142, 143, 144,  144},
+                    {145,  145, 146, 147, 148, 149,  149},
+                    {145,  145, 146, 147, 148, 149,  149}}
+                }
+            }
+        });
+
+        myPad->getOperator()->associateInput(0,myInput);
+        myPad->getOperator()->computeOutputDims();
+        myPad->forward();
+        // myPad->getOperator()->getOutput(0)->print();
+        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+    }
+
+    SECTION("Pad Reflect") {
+        std::shared_ptr<Node> myPad = Pad({{1, 1}, {1, 1}}, "mypad", PadBorderType::Reflect);
+        myPad->getOperator()->setDatatype(DataType::Int32);
+        myPad->getOperator()->setBackend("cpu");
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
+            {
+                {
+                    {{  0,   1,   2,   3,   4},
+                    {  5,   6,   7,   8,   9},
+                    { 10,  11,  12,  13,  14},
+                    { 15,  16,  17,  18,  19},
+                    { 20,  21,  22,  23,  24}},
+
+                    {{ 25,  26,  27,  28,  29},
+                    { 30,  31,  32,  33,  34},
+                    { 35,  36,  37,  38,  39},
+                    { 40,  41,  42,  43,  44},
+                    { 45,  46,  47,  48,  49}},
+
+                    {{ 50,  51,  52,  53,  54},
+                    { 55,  56,  57,  58,  59},
+                    { 60,  61,  62,  63,  64},
+                    { 65,  66,  67,  68,  69},
+                    { 70,  71,  72,  73,  74}}
+                },
+                {
+                    {{ 75,  76,  77,  78,  79},
+                    { 80,  81,  82,  83,  84},
+                    { 85,  86,  87,  88,  89},
+                    { 90,  91,  92,  93,  94},
+                    { 95,  96,  97,  98,  99}},
+
+                    {{100, 101, 102, 103, 104},
+                    {105, 106, 107, 108, 109},
+                    {110, 111, 112, 113, 114},
+                    {115, 116, 117, 118, 119},
+                    {120, 121, 122, 123, 124}},
+
+                    {{125, 126, 127, 128, 129},
+                    {130, 131, 132, 133, 134},
+                    {135, 136, 137, 138, 139},
+                    {140, 141, 142, 143, 144},
+                    {145, 146, 147, 148, 149}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
+            {
+                {
+                    {
+                    { 6, 5, 6, 7, 8, 9, 5},
+                    { 1, 0, 1, 2, 3, 4, 0},
+                    { 6, 5, 6, 7, 8, 9, 5},
+                    { 11, 10, 11, 12, 13, 14, 10},
+                    { 16, 15, 16, 17, 18, 19, 15},
+                    { 21, 20, 21, 22, 23, 24, 20},
+                    { 1, 0, 1, 2, 3, 4, 0}
+                    },
+                    {
+                    { 31, 30, 31, 32, 33, 34, 30},
+                    { 26, 25, 26, 27, 28, 29, 25},
+                    { 31, 30, 31, 32, 33, 34, 30},
+                    { 36, 35, 36, 37, 38, 39, 35},
+                    { 41, 40, 41, 42, 43, 44, 40},
+                    { 46, 45, 46, 47, 48, 49, 45},
+                    { 26, 25, 26, 27, 28, 29, 25}
+                    },
+                    {
+                    { 56, 55, 56, 57, 58, 59, 55},
+                    { 51, 50, 51, 52, 53, 54, 50},
+                    { 56, 55, 56, 57, 58, 59, 55},
+                    { 61, 60, 61, 62, 63, 64, 60},
+                    { 66, 65, 66, 67, 68, 69, 65},
+                    { 71, 70, 71, 72, 73, 74, 70},
+                    { 51, 50, 51, 52, 53, 54, 50}
+                    }
+                },
+                {
+                    {
+                    { 81, 80, 81, 82, 83, 84, 80},
+                    { 76, 75, 76, 77, 78, 79, 75},
+                    { 81, 80, 81, 82, 83, 84, 80},
+                    { 86, 85, 86, 87, 88, 89, 85},
+                    { 91, 90, 91, 92, 93, 94, 90},
+                    { 96, 95, 96, 97, 98, 99, 95},
+                    { 76, 75, 76, 77, 78, 79, 75}
+                    },
+                    {
+                    { 106, 105, 106, 107, 108, 109, 105},
+                    { 101, 100, 101, 102, 103, 104, 100},
+                    { 106, 105, 106, 107, 108, 109, 105},
+                    { 111, 110, 111, 112, 113, 114, 110},
+                    { 116, 115, 116, 117, 118, 119, 115},
+                    { 121, 120, 121, 122, 123, 124, 120},
+                    { 101, 100, 101, 102, 103, 104, 100}
+                    },
+                    {
+                    { 131, 130, 131, 132, 133, 134, 130},
+                    { 126, 125, 126, 127, 128, 129, 125},
+                    { 131, 130, 131, 132, 133, 134, 130},
+                    { 136, 135, 136, 137, 138, 139, 135},
+                    { 141, 140, 141, 142, 143, 144, 140},
+                    { 146, 145, 146, 147, 148, 149, 145},
+                    { 126, 125, 126, 127, 128, 129, 125}
+                    }
+                }
+            }
+        });
+
+        myPad->getOperator()->associateInput(0,myInput);
+        myPad->getOperator()->computeOutputDims();
+        myPad->forward();
+        // myPad->getOperator()->getOutput(0)->print();
+        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+    }
+
+    SECTION("Pad Wrap") {
+        std::shared_ptr<Node> myPad = Pad({{1, 1}, {1, 1}}, "mypad", PadBorderType::Wrap);
+        myPad->getOperator()->setDatatype(DataType::Int32);
+        myPad->getOperator()->setBackend("cpu");
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
+            {
+                {
+                    {{  0,   1,   2,   3,   4},
+                    {  5,   6,   7,   8,   9},
+                    { 10,  11,  12,  13,  14},
+                    { 15,  16,  17,  18,  19},
+                    { 20,  21,  22,  23,  24}},
+
+                    {{ 25,  26,  27,  28,  29},
+                    { 30,  31,  32,  33,  34},
+                    { 35,  36,  37,  38,  39},
+                    { 40,  41,  42,  43,  44},
+                    { 45,  46,  47,  48,  49}},
+
+                    {{ 50,  51,  52,  53,  54},
+                    { 55,  56,  57,  58,  59},
+                    { 60,  61,  62,  63,  64},
+                    { 65,  66,  67,  68,  69},
+                    { 70,  71,  72,  73,  74}}
+                },
+                {
+                    {{ 75,  76,  77,  78,  79},
+                    { 80,  81,  82,  83,  84},
+                    { 85,  86,  87,  88,  89},
+                    { 90,  91,  92,  93,  94},
+                    { 95,  96,  97,  98,  99}},
+
+                    {{100, 101, 102, 103, 104},
+                    {105, 106, 107, 108, 109},
+                    {110, 111, 112, 113, 114},
+                    {115, 116, 117, 118, 119},
+                    {120, 121, 122, 123, 124}},
+
+                    {{125, 126, 127, 128, 129},
+                    {130, 131, 132, 133, 134},
+                    {135, 136, 137, 138, 139},
+                    {140, 141, 142, 143, 144},
+                    {145, 146, 147, 148, 149}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
+            {
+                {
+                    {{ 24,  20,  21,  22,  23,  24,  20},
+                    {  4,   0,   1,   2,   3,   4,   0},
+                    {  9,   5,   6,   7,   8,   9,   5},
+                    { 14,  10,  11,  12,  13,  14,  10},
+                    { 19,  15,  16,  17,  18,  19,  15},
+                    { 24,  20,  21,  22,  23,  24,  20},
+                    {  4,   0,   1,   2,   3,   4,   0}},
+
+                    {{ 49,  45,  46,  47,  48,  49,  45},
+                    { 29,  25,  26,  27,  28,  29,  25},
+                    { 34,  30,  31,  32,  33,  34,  30},
+                    { 39,  35,  36,  37,  38,  39,  35},
+                    { 44,  40,  41,  42,  43,  44,  40},
+                    { 49,  45,  46,  47,  48,  49,  45},
+                    { 29,  25,  26,  27,  28,  29,  25}},
+
+                    {{ 74,  70,  71,  72,  73,  74,  70},
+                    { 54,  50,  51,  52,  53,  54,  50},
+                    { 59,  55,  56,  57,  58,  59,  55},
+                    { 64,  60,  61,  62,  63,  64,  60},
+                    { 69,  65,  66,  67,  68,  69,  65},
+                    { 74,  70,  71,  72,  73,  74,  70},
+                    { 54,  50,  51,  52,  53,  54,  50}}
+                },
+                {
+                    {{ 99,  95,  96,  97,  98,  99,  95},
+                    { 79,  75,  76,  77,  78,  79,  75},
+                    { 84,  80,  81,  82,  83,  84,  80},
+                    { 89,  85,  86,  87,  88,  89,  85},
+                    { 94,  90,  91,  92,  93,  94,  90},
+                    { 99,  95,  96,  97,  98,  99,  95},
+                    { 79,  75,  76,  77,  78,  79,  75}},
+
+                    {{124, 120, 121, 122, 123, 124, 120},
+                    {104, 100, 101, 102, 103, 104, 100},
+                    {109, 105, 106, 107, 108, 109, 105},
+                    {114, 110, 111, 112, 113, 114, 110},
+                    {119, 115, 116, 117, 118, 119, 115},
+                    {124, 120, 121, 122, 123, 124, 120},
+                    {104, 100, 101, 102, 103, 104, 100}},
+
+                    {{149, 145, 146, 147, 148, 149, 145},
+                    {129, 125, 126, 127, 128, 129, 125},
+                    {134, 130, 131, 132, 133, 134, 130},
+                    {139, 135, 136, 137, 138, 139, 135},
+                    {144, 140, 141, 142, 143, 144, 140},
+                    {149, 145, 146, 147, 148, 149, 145},
+                    {129, 125, 126, 127, 128, 129, 125}}
+                }
+            }
+        });
+
+        myPad->getOperator()->associateInput(0,myInput);
+        myPad->getOperator()->computeOutputDims();
+        myPad->forward();
+        // myPad->getOperator()->getOutput(0)->print();
+        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_PaddedConv.cpp b/unit_tests/operator/Test_PaddedConv.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e41be85ab00faae1af7239c43b74a34f558a663c
--- /dev/null
+++ b/unit_tests/operator/Test_PaddedConv.cpp
@@ -0,0 +1,331 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstdlib>
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] PaddedConv(forward)") {
+    SECTION("Classic Conv") {
+        std::shared_ptr<Node> myConv = PaddedConv(3,4,{3,3}, "myconv");
+        myConv->getOperator()->setDatatype(DataType::Int32);
+        myConv->getOperator()->setBackend("cpu");
+        std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
+            {
+                {
+                    {{  0,   1,   2},
+                    {  3,   4,   5},
+                    {  6,   7,   8}},
+                    {{  9,  10,  11},
+                    { 12,  13,  14},
+                    { 15,  16,  17}},
+                    {{ 18,  19,  20},
+                    { 21,  22,  23},
+                    { 24,  25,  26}}
+                },
+                {
+                    {{ 27,  28,  29},
+                    { 30,  31,  32},
+                    { 33,  34,  35}},
+                    {{ 36,  37,  38},
+                    { 39,  40,  41},
+                    { 42,  43,  44}},
+                    {{ 45,  46,  47},
+                    { 48,  49,  50},
+                    { 51,  52,  53}}
+                },
+                {
+                    {{ 54,  55,  56},
+                    { 57,  58,  59},
+                    { 60,  61,  62}},
+                    {{ 63,  64,  65},
+                    { 66,  67,  68},
+                    { 69,  70,  71}},
+                    {{ 72,  73,  74},
+                    { 75,  76,  77},
+                    { 78,  79,  80}}
+                },
+                {
+                    {{ 81,  82,  83},
+                    { 84,  85,  86},
+                    { 87,  88,  89}},
+                    {{ 90,  91,  92},
+                    { 93,  94,  95},
+                    { 96,  97,  98}},
+                    {{ 99, 100, 101},
+                    {102, 103, 104},
+                    {105, 106, 107}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
+            {
+                {
+                    {{  0,   1,   2,   3,   4},
+                    {  5,   6,   7,   8,   9},
+                    { 10,  11,  12,  13,  14},
+                    { 15,  16,  17,  18,  19},
+                    { 20,  21,  22,  23,  24}},
+
+                    {{ 25,  26,  27,  28,  29},
+                    { 30,  31,  32,  33,  34},
+                    { 35,  36,  37,  38,  39},
+                    { 40,  41,  42,  43,  44},
+                    { 45,  46,  47,  48,  49}},
+
+                    {{ 50,  51,  52,  53,  54},
+                    { 55,  56,  57,  58,  59},
+                    { 60,  61,  62,  63,  64},
+                    { 65,  66,  67,  68,  69},
+                    { 70,  71,  72,  73,  74}}
+                },
+                {
+                    {{ 75,  76,  77,  78,  79},
+                    { 80,  81,  82,  83,  84},
+                    { 85,  86,  87,  88,  89},
+                    { 90,  91,  92,  93,  94},
+                    { 95,  96,  97,  98,  99}},
+
+                    {{100, 101, 102, 103, 104},
+                    {105, 106, 107, 108, 109},
+                    {110, 111, 112, 113, 114},
+                    {115, 116, 117, 118, 119},
+                    {120, 121, 122, 123, 124}},
+
+                    {{125, 126, 127, 128, 129},
+                    {130, 131, 132, 133, 134},
+                    {135, 136, 137, 138, 139},
+                    {140, 141, 142, 143, 144},
+                    {145, 146, 147, 148, 149}}
+                }
+            }
+        });
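+        // Spot check: output[0][0][0][0] sums the top-left 3x3 window of each
+        // input channel against kernel 0 (weights 0..26), giving 15219; adding
+        // bias 7 gives 15226.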
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> {
+            {
+                {
+                    {{ 15226,  15577,  15928},
+                    { 16981,  17332,  17683},
+                    { 18736,  19087,  19438}},
+                    {{ 37818,  38898,  39978},
+                    { 43218,  44298,  45378},
+                    { 48618,  49698,  50778}},
+                    {{ 60426,  62235,  64044},
+                    { 69471,  71280,  73089},
+                    { 78516,  80325,  82134}},
+                    {{ 83016,  85554,  88092},
+                    { 95706,  98244, 100782},
+                    {108396, 110934, 113472}}
+                },
+                {
+                    {{ 41551,  41902,  42253},
+                    { 43306,  43657,  44008},
+                    { 45061,  45412,  45763}},
+                    {{118818, 119898, 120978},
+                    {124218, 125298, 126378},
+                    {129618, 130698, 131778}},
+                    {{196101, 197910, 199719},
+                    {205146, 206955, 208764},
+                    {214191, 216000, 217809}},
+                    {{273366, 275904, 278442},
+                    {286056, 288594, 291132},
+                    {298746, 301284, 303822}}
+                }
+            }
+        });
+
+        myConv->getOperator()->associateInput(0,myInput);
+        myConv->getOperator()->associateInput(1,myWeights);
+        myConv->getOperator()->associateInput(2,myBias);
+        myConv->getOperator()->computeOutputDims();
+        myConv->forward();
+
+        REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
+    }
+    SECTION("test Padding") {
+        std::shared_ptr<Node> myConv = PaddedConv(3,4,{3,3}, "myconv", {1,1}, {1,1,1,1});
+        myConv->getOperator()->setDatatype(DataType::Int32);
+        myConv->getOperator()->setBackend("cpu");
+        std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
+            {
+                {
+                    {{  0,   1,   2},
+                    {  3,   4,   5},
+                    {  6,   7,   8}},
+                    {{  9,  10,  11},
+                    { 12,  13,  14},
+                    { 15,  16,  17}},
+                    {{ 18,  19,  20},
+                    { 21,  22,  23},
+                    { 24,  25,  26}}
+                },
+                {
+                    {{ 27,  28,  29},
+                    { 30,  31,  32},
+                    { 33,  34,  35}},
+                    {{ 36,  37,  38},
+                    { 39,  40,  41},
+                    { 42,  43,  44}},
+                    {{ 45,  46,  47},
+                    { 48,  49,  50},
+                    { 51,  52,  53}}
+                },
+                {
+                    {{ 54,  55,  56},
+                    { 57,  58,  59},
+                    { 60,  61,  62}},
+                    {{ 63,  64,  65},
+                    { 66,  67,  68},
+                    { 69,  70,  71}},
+                    {{ 72,  73,  74},
+                    { 75,  76,  77},
+                    { 78,  79,  80}}
+                },
+                {
+                    {{ 81,  82,  83},
+                    { 84,  85,  86},
+                    { 87,  88,  89}},
+                    {{ 90,  91,  92},
+                    { 93,  94,  95},
+                    { 96,  97,  98}},
+                    {{ 99, 100, 101},
+                    {102, 103, 104},
+                    {105, 106, 107}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
+            {
+                {
+                    {{  0,   1,   2,   3,   4},
+                    {  5,   6,   7,   8,   9},
+                    { 10,  11,  12,  13,  14},
+                    { 15,  16,  17,  18,  19},
+                    { 20,  21,  22,  23,  24}},
+
+                    {{ 25,  26,  27,  28,  29},
+                    { 30,  31,  32,  33,  34},
+                    { 35,  36,  37,  38,  39},
+                    { 40,  41,  42,  43,  44},
+                    { 45,  46,  47,  48,  49}},
+
+                    {{ 50,  51,  52,  53,  54},
+                    { 55,  56,  57,  58,  59},
+                    { 60,  61,  62,  63,  64},
+                    { 65,  66,  67,  68,  69},
+                    { 70,  71,  72,  73,  74}}
+                },
+                {
+                    {{ 75,  76,  77,  78,  79},
+                    { 80,  81,  82,  83,  84},
+                    { 85,  86,  87,  88,  89},
+                    { 90,  91,  92,  93,  94},
+                    { 95,  96,  97,  98,  99}},
+
+                    {{100, 101, 102, 103, 104},
+                    {105, 106, 107, 108, 109},
+                    {110, 111, 112, 113, 114},
+                    {115, 116, 117, 118, 119},
+                    {120, 121, 122, 123, 124}},
+
+                    {{125, 126, 127, 128, 129},
+                    {130, 131, 132, 133, 134},
+                    {135, 136, 137, 138, 139},
+                    {140, 141, 142, 143, 144},
+                    {145, 146, 147, 148, 149}}
+                }
+            }
+        });
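+        // Spot check: at output[0][0][0][0] the zero border removes the top
+        // row and left column of each 3x3 window, leaving 88 + 1696 + 5104 =
+        // 6888; adding bias 7 gives 6895.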
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,5,5> {
+            {
+                {
+                    {{  6895,  10225,  10486,  10747,   7063},
+                     { 10303,  15226,  15577,  15928,  10429},
+                     { 11518,  16981,  17332,  17683,  11554},
+                     { 12733,  18736,  19087,  19438,  12679},
+                     {  8047,  11791,  11998,  12205,   7927}},
+
+                    {{ 15960,  24069,  24816,  25563,  17100},
+                     { 25119,  37818,  38898,  39978,  26703},
+                     { 28764,  43218,  44298,  45378,  30258},
+                     { 32409,  48618,  49698,  50778,  33813},
+                     { 21972,  32925,  33618,  34311,  22824}},
+
+                    {{ 25041,  37929,  39162,  40395,  27153},
+                     { 39951,  60426,  62235,  64044,  42993},
+                     { 46026,  69471,  71280,  73089,  48978},
+                     { 52101,  78516,  80325,  82134,  54963},
+                     { 35913,  54075,  55254,  56433,  37737}},
+
+                    {{ 34104,  51771,  53490,  55209,  37188},
+                     { 54765,  83016,  85554,  88092,  59265},
+                     { 63270,  95706,  98244, 100782,  67680},
+                     { 71775, 108396, 110934, 113472,  76095},
+                     { 49836,  75207,  76872,  78537,  52632}}
+                },
+                {
+                    {{ 20395,  29800,  30061,  30322,  19663},
+                     { 28528,  41551,  41902,  42253,  27304},
+                     { 29743,  43306,  43657,  44008,  28429},
+                     { 30958,  45061,  45412,  45763,  29554},
+                     { 18847,  27316,  27523,  27730,  17827}},
+
+                    {{ 53760,  80094,  80841,  81588,  54000},
+                     { 79794, 118818, 119898, 120978,  80028},
+                     { 83439, 124218, 125298, 126378,  83583},
+                     { 87084, 129618, 130698, 131778,  87138},
+                     { 57072,  84900,  85593,  86286,  57024}},
+
+                    {{ 87141, 130404, 131637, 132870,  88353},
+                     {131076, 196101, 197910, 199719, 132768},
+                     {137151, 205146, 206955, 208764, 138753},
+                     {143226, 214191, 216000, 217809, 144738},
+                     { 95313, 142500, 143679, 144858,  96237}},
+
+                    {{120504, 180696, 182415, 184134, 122688},
+                     {182340, 273366, 275904, 278442, 185490},
+                     {190845, 286056, 288594, 291132, 193905},
+                     {199350, 298746, 301284, 303822, 202320},
+                     {133536, 200082, 201747, 203412, 135432}}
+                }
+            }
+        });
+
+        myConv->getOperator()->associateInput(0,myInput);
+        myConv->getOperator()->associateInput(1,myWeights);
+        myConv->getOperator()->associateInput(2,myBias);
+        myConv->getOperator()->computeOutputDims();
+        myConv->forward();
+
+        REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
+    }
+}