diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp
index 5dfd923957738138561808895280ef1d7059b6d4..4bb2aaaf558aa2f9152d24e8db8ae072a34ef7d1 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp
@@ -20,21 +20,22 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+
 // Operator implementation entry point for the backend
 using Conv1D_Op = Conv_Op<1>;
 using ConvImpl1D_cpu = OperatorImpl_cpu<Conv1D_Op,
-                                        void(const DimSize_t &,
-                                             const DimSize_t &,
-                                             const DimSize_t &,
+                                        void(const std::array<DimSize_t, 1> &,
+                                             const std::array<DimSize_t, 1> &,
+                                             const std::array<DimSize_t, 1> &,
                                              const std::array<DimSize_t, 3> &,
                                              DimSize_t,
                                              const void *,
                                              const void *,
                                              const void *,
                                              void *),
-                                        void(const DimSize_t &,
-                                             const DimSize_t &,
-                                             const DimSize_t &,
+                                        void(const std::array<DimSize_t, 1> &,
+                                             const std::array<DimSize_t, 1> &,
+                                             const std::array<DimSize_t, 1> &,
                                              const std::array<DimSize_t, 3> &,
                                              const std::array<DimSize_t, 3> &,
                                              const void *,
diff --git a/include/aidge/backend/cpu/operator/ConvImpl_kernels.hpp b/include/aidge/backend/cpu/operator/ConvImpl_kernels.hpp
index 36b6d4e23c21cd8349ed9ad8263b7336615c99c8..274f5f4f8d373ade68698d4325b089cf7986f259 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl_kernels.hpp
@@ -37,9 +37,9 @@ using std::array;
  * @param output_ Output Tensor.
  */
 template <class I, class W, class B, class O>
-void ConvImpl1D_cpu_forward_kernel(const DimSize_t &strideDim,
-                                   const DimSize_t &dilationDim,
-                                   const DimSize_t &kernelDim,
+void ConvImpl1D_cpu_forward_kernel(const array<DimSize_t, 1> &strideDim,
+                                   const array<DimSize_t, 1> &dilationDim,
+                                   const array<DimSize_t, 1> &kernelDim,
                                    const std::array<DimSize_t, 3> &inputDims,
                                    DimSize_t outChannels,
                                    const void *input_,
@@ -54,10 +54,10 @@ void ConvImpl1D_cpu_forward_kernel(const DimSize_t &strideDim,
 
     // output H size
     const std::size_t oxSize = static_cast<std::size_t>(std::floor(
-        static_cast<float>(inputDims[2] - dilationDim * (kernelDim - 1) - 1 +
-                           strideDim) /
-        static_cast<float>(strideDim)));
-    const DimSize_t dilated_kernel_x = dilationDim * (kernelDim - 1) + 1;
+        static_cast<float>(inputDims[2] - dilationDim[0] * (kernelDim[0] - 1) -
+                           1 + strideDim[0]) /
+        static_cast<float>(strideDim[0])));
+    const DimSize_t dilated_kernel_x = dilationDim[0] * (kernelDim[0] - 1) + 1;
 
     using signedsize = std::make_signed<std::size_t>::type;
     for (std::size_t batch = 0; batch < inputDims[0]; ++batch) {
@@ -70,28 +70,28 @@ void ConvImpl1D_cpu_forward_kernel(const DimSize_t &strideDim,
                 const std::size_t iIndex =
                     (inCh + batch * inputDims[1]) * inputDims[2];
                 const std::size_t wIndex =
-                    (inCh + outCh * inputDims[1]) * kernelDim;
+                    (inCh + outCh * inputDims[1]) * kernelDim[0];
                 for (std::size_t ox = 0; ox < oxSize; ++ox) {
                     // const signedsize difx = static_cast<signedsize>(- ox *
-                    // strideDims); const std::size_t sxMin =
+                    // strideDim[0]); const std::size_t sxMin =
                     // static_cast<std::size_t>(std::max(difx, signedsize(0)));
                     // const std::size_t sxMax =
                     // (static_cast<signedsize>(inputDims[2]) + difx) < 0 ? 0 :
-                    // ((inputDims[2] + difx) > kernelDims[0] ? kernelDims :
-                    // inputDims[2] + difx);
+                    // ((inputDims[2] + difx) > kernelDim[0] ? kernelDim[0]
+                    // : inputDims[2] + difx);
                     const std::size_t sxMin = 0;
                     const std::size_t sxMax = dilated_kernel_x;
                     const std::size_t oIndexFull = oIndex + ox;
                     const signedsize ix =
-                        static_cast<signedsize>(ox * strideDim);
+                        static_cast<signedsize>(ox * strideDim[0]);
 
-                    for (std::size_t sx = sxMin; sx * dilationDim < sxMax;
+                    for (std::size_t sx = sxMin; sx * dilationDim[0] < sxMax;
                          ++sx) {
                         output[oIndexFull] +=
                             weights[wIndex + sx] *
                             input[iIndex + static_cast<std::size_t>(
                                                ix + static_cast<signedsize>(
-                                                        sx * dilationDim))];
+                                                        sx * dilationDim[0]))];
                     }
                 }
             }
@@ -135,9 +135,9 @@ void ConvImpl1D_cpu_forward_kernel(const DimSize_t &strideDim,
  * @param[inout] iGrad gradients of the input to update
  */
 template <class I, class W, class O>
-void conv1DBackwardInput(const DimSize_t &stride,
-                         const DimSize_t &dilation,
-                         const DimSize_t &kDim,
+void conv1DBackwardInput(const array<DimSize_t, 1> &stride,
+                         const array<DimSize_t, 1> &dilation,
+                         const array<DimSize_t, 1> &kDim,
                          const array<DimSize_t, 2> &kStrides,
                          const W *weights,
                          const array<DimSize_t, 3> &oDims,
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index 8ce62dcb0ebe22823928e19e98f596be93c28d9d..d23a9968ffb424b4639e0fcd2629a3a1cc2e11c3 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -41,9 +41,9 @@ void Aidge::ConvImpl1D_cpu::forward() {
 
     // Call kernel
     impl.forward(
-        op_.strideDims()[0],
-        op_.dilationDims()[0],
-        op_.kernelDims()[0],
+        op_.strideDims(),
+        op_.dilationDims(),
+        op_.kernelDims(),
         op_.getInput(0)->template dims<3>(), // input dimensions
         dynamic_cast<const Conv_Op<1> &>(mOp).outChannels(),    // outChannels
         input0.getImpl()->rawPtr(),                             // input
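For reference, a minimal standalone sketch of the output-length computation that ConvImpl1D_cpu_forward_kernel performs in the hunk above (valid 1D convolution, no padding), written against the new array-indexed parameters; the example values and the main() harness are illustrative only and not part of the patch or of the Aidge API:

#include <array>
#include <cmath>
#include <cstddef>
#include <iostream>

int main() {
    // Hypothetical attribute values, mirroring the kernel's array parameters.
    const std::array<std::size_t, 1> strideDim{2};
    const std::array<std::size_t, 1> dilationDim{1};
    const std::array<std::size_t, 1> kernelDim{3};
    const std::size_t inputLength = 10; // corresponds to inputDims[2] in the kernel

    // Same formula as oxSize in the forward kernel:
    // floor((L - dilation*(k-1) - 1 + stride) / stride)
    const std::size_t oxSize = static_cast<std::size_t>(std::floor(
        static_cast<float>(inputLength - dilationDim[0] * (kernelDim[0] - 1) -
                           1 + strideDim[0]) /
        static_cast<float>(strideDim[0])));

    // With L=10, k=3, d=1, s=2: floor((10 - 2 - 1 + 2) / 2) = 4 output positions.
    std::cout << "output length: " << oxSize << "\n";
    return 0;
}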