diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
index a61a7299ed6bd5c5a3e41c09e9d5b5f1f7ae3326..aa3b10970b1cd9beeead4353ff0c2b3d65fd9a83 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
@@ -25,6 +25,29 @@
 
 namespace Aidge {
 // class ConvDepthWise_Op;
+// compute kernel registry for forward and backward
+class ConvDepthWiseImpl1DForward_cpu
+    : public Registrable<ConvDepthWiseImpl1DForward_cpu,
+                         std::tuple<DataType, DataType, DataType, DataType>,
+                         void(const ConvDepthWise_Op<1>::Attrs &, const std::array<DimSize_t, 3> &, const void *,
+                              const void *, const void *, void *)> {};
+
+class ConvDepthWiseImpl1D_cpu : public OperatorImpl {
+public:
+    ConvDepthWiseImpl1D_cpu(const ConvDepthWise_Op<1> &op) : OperatorImpl(op, "cpu") {}
+
+    static std::unique_ptr<ConvDepthWiseImpl1D_cpu> create(const ConvDepthWise_Op<1> &op) {
+        return std::make_unique<ConvDepthWiseImpl1D_cpu>(op);
+    }
+
+    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
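+// Note: defining the Registrar in an anonymous namespace inside this header
+// means every translation unit that includes the header performs the
+// registration during static initialization.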
+namespace {
+// add cpu backend to ConvDepthWise_Op<1> implementation registry
+static Registrar<ConvDepthWise_Op<1>> registrarConvDepthWiseImpl1D_cpu("cpu", Aidge::ConvDepthWiseImpl1D_cpu::create);
+}  // namespace
 
 // compute kernel registry for forward and backward
 class ConvDepthWiseImpl2DForward_cpu
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
index 720e331cafec570af73c355dda73ed85d435fa6c..db44ffe4313e6a6e03ecd279dc0262fece00b567 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
@@ -23,6 +23,76 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+/**
+ * @brief Forward kernel for 1D depthwise convolution (ConvDepthWise) on CPU backend.
+ * @tparam I Input data type.
+ * @tparam W Weight data type.
+ * @tparam B Bias data type.
+ * @tparam O Output data type.
+ * @param attrs tuple of Attributes from the Operator.
+ * @param inputDims Array of input dimensions.
+ * @param input_ const input Tensor.
+ * @param weights_ const weight Tensor.
+ * @param biases_ const Bias Tensor.
+ * @param output_ Output Tensor.
+ */
+template <class I, class W, class B, class O>
+void ConvDepthWiseImpl1D_cpu_forward_kernel(const ConvDepthWise_Op<1>::Attrs &attrs, const std::array<DimSize_t, 3> &inputDims,
+                                       const void *input_, const void *weights_, const void *biases_, void *output_) {
+    const I *input = static_cast<const I *>(input_);
+    const W *weights = static_cast<const W *>(weights_);
+    const B *biases = static_cast<const B *>(biases_);
+    O *output = static_cast<O *>(output_);
+
+
+    // output X size
+    const std::size_t oxSize =
+            static_cast<std::size_t>(std::floor(static_cast<float>(inputDims[2] - std::get<2>(attrs)[0] + std::get<0>(attrs)[0]) /
+                                static_cast<float>(std::get<0>(attrs)[0])));
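+    // e.g. Xin = 10, kernel = 3, stride = 2 (with get<0> = stride and
+    // get<2> = kernel size): oxSize = floor((10 - 3 + 2) / 2) = 4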
+
+    // output (batch, ch, Xout)
+    // input  (batch, ch, Xin)
+    // weight (ch, kernelX)
+    // does not take the Dilation attribute into account
+    using signedsize = std::make_signed<std::size_t>::type;
+    for (std::size_t batch = 0; batch < inputDims[0]; ++batch) {
+        for (std::size_t ch = 0; ch < inputDims[1]; ++ch) {
+            const std::size_t oIndex = (ch + batch*inputDims[1]) * oxSize;
+            B biasVal = (biases != nullptr) ? biases[ch] : B(0);
+            std::fill(output + oIndex, output+(oIndex+oxSize), biasVal);
+            const std::size_t iIndex = (ch + batch*inputDims[1]) * inputDims[2];
+            const std::size_t wIndex = ch * std::get<2>(attrs)[0];
+            for (std::size_t ox = 0; ox < oxSize; ++ox) {
+                const signedsize difx = static_cast<signedsize>(- ox * std::get<0>(attrs)[0]);
+                const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0)));
+                const std::size_t sxMax = (static_cast<signedsize>(inputDims[2]) + difx) < 0 ? 0 : ((inputDims[2] + difx) > std::get<2>(attrs)[0] ? std::get<2>(attrs)[0] : inputDims[2] + difx);
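+                // sxMin/sxMax clamp the kernel window to the valid input range;
+                // with the floor-based oxSize above, the window always fits and
+                // the clamps are purely defensive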
+                const std::size_t oIndexFull = oIndex + ox;
+                const signedsize ix = static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
+
+                for (std::size_t sx = sxMin; sx < sxMax; ++sx) {
+                    output[oIndexFull] += weights[wIndex + sx] *
+                                            input[iIndex + static_cast<std::size_t>(ix+static_cast<signedsize>(sx))];
+                }
+            }
+        }
+    }
+}
+
+namespace {
+static Registrar<ConvDepthWiseImpl1DForward_cpu> registrarConvDepthWiseImpl1DForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
+        Aidge::ConvDepthWiseImpl1D_cpu_forward_kernel<float, float, float, float>);
+static Registrar<ConvDepthWiseImpl1DForward_cpu> registrarConvDepthWiseImpl1DForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
+        Aidge::ConvDepthWiseImpl1D_cpu_forward_kernel<int, int, int, int>);
+static Registrar<ConvDepthWiseImpl1DForward_cpu> registrarConvDepthWiseImpl1DForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
+        Aidge::ConvDepthWiseImpl1D_cpu_forward_kernel<double, double, double, double>);
+}  // namespace
+
+
 /**
  * @brief Forward kernel for 2D ConvDepthWiseolution on CPU backend.
  * @tparam I Input data type.
@@ -64,7 +134,7 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Attrs &at
     for (std::size_t batch = 0; batch < inputDims[0]; ++batch) {
         for (std::size_t ch = 0; ch < inputDims[1]; ++ch) {
             const std::size_t oIndex = (ch + batch*inputDims[1]) * oxSize * oySize;
-            B biasVal = ((!std::get<3>(attrs)) && biases != nullptr) ? biases[ch] : B(0);
+            B biasVal = (biases != nullptr) ? biases[ch] : B(0);
             std::fill(output + oIndex, output+(oIndex+oxSize*oySize), biasVal);
             const std::size_t iIndex = (ch + batch*inputDims[1]) * inputDims[2] * inputDims[3];
             const std::size_t wIndex = ch * std::get<2>(attrs)[0] * std::get<2>(attrs)[1];
diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp
index 12af5860316ba0bc9f6c3eafc551037f531da6d7..1d85b31fbdbb6ac9a61ddba08d3f2c3df8ca91e3 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp
@@ -27,6 +27,32 @@ namespace Aidge {
 // class Conv_Op;
 
 // compute kernel registry for forward and backward
+// Conv 1D
+class ConvImpl1DForward_cpu
+    : public Registrable<ConvImpl1DForward_cpu,
+                         std::tuple<DataType, DataType, DataType, DataType>,
+                         void(const Conv_Op<1>::Attrs &, const std::array<DimSize_t, 3> &, DimSize_t, const void *,
+                              const void *, const void *, void *)> {};
+
+class ConvImpl1D_cpu : public OperatorImpl {
+   public:
+    ConvImpl1D_cpu(const Conv_Op<1>& op) : OperatorImpl(op, "cpu") {}
+
+    static std::unique_ptr<ConvImpl1D_cpu> create(const Conv_Op<1> &op) {
+        return std::make_unique<ConvImpl1D_cpu>(op);
+    }
+
+   public:
+    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+// add cpu backend to Conv_Op<1> implementation registry
+static Registrar<Conv_Op<1>> registrarConvImpl1D_cpu("cpu", Aidge::ConvImpl1D_cpu::create);
+}  // namespace
+
+// Conv 2D
 class ConvImpl2DForward_cpu
     : public Registrable<ConvImpl2DForward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
diff --git a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
index 0f171d79ae425bb83b5f3b8f0b67d69434a85355..718fc879fcc1124260901cdd06b059da6e8c7395 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
@@ -23,6 +23,81 @@
 #include <algorithm>
 
 namespace Aidge {
+/**
+ * @brief Forward kernel for 1D Convolution on CPU backend.
+ * @tparam I Input data type.
+ * @tparam W Weight data type.
+ * @tparam B Bias data type.
+ * @tparam O Output data type.
+ * @param attrs tuple of Attributes from the Operator.
+ * @param inputDims Array of input dimensions.
+ * @param outChannels Number of output channels.
+ * @param input_ const input Tensor.
+ * @param weights_ const weight Tensor.
+ * @param biases_ const Bias Tensor.
+ * @param output_ Output Tensor.
+ */
+template <class I, class W, class B, class O>
+void ConvImpl1D_cpu_forward_kernel(const Conv_Op<1>::Attrs &attrs, const std::array<DimSize_t, 3> &inputDims, DimSize_t outChannels,
+                                       const void *input_, const void *weights_, const void *biases_, void *output_) {
+    const I *input = static_cast<const I *>(input_);
+    const W *weights = static_cast<const W *>(weights_);
+    const B *biases = static_cast<const B *>(biases_);
+    O *output = static_cast<O *>(output_);
+
+    // output X size
+    const std::size_t oxSize =
+            static_cast<std::size_t>(std::floor(static_cast<float>(inputDims[2] - std::get<2>(attrs)[0] + std::get<0>(attrs)[0]) /
+                                static_cast<float>(std::get<0>(attrs)[0])));
+
+    // output (batch, outCh, Xout)
+    // input  (batch, inCh, Xin)
+    // weight (outCh, inCh, kernelX)
+    // does not take the Dilation attribute into account
+    using signedsize = std::make_signed<std::size_t>::type;
+    for (std::size_t batch = 0; batch < inputDims[0]; ++batch) {
+        for (std::size_t outCh = 0; outCh < outChannels; ++outCh) {
+            const std::size_t oIndex = (outCh + batch*outChannels) * oxSize;
+            // If bias = nullptr, set B(0)
+            B biasVal = (biases != nullptr) ? biases[outCh] : B(0);
+            std::fill(output + oIndex, output+(oIndex+oxSize), biasVal);
+            for (std::size_t inCh = 0; inCh < inputDims[1]; ++inCh) {
+                const std::size_t iIndex = (inCh + batch*inputDims[1]) * inputDims[2];
+                const std::size_t wIndex = (inCh + outCh*inputDims[1]) * std::get<2>(attrs)[0];
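+                // weights are laid out (outCh, inCh, kX), flattened row-major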
+                for (std::size_t ox = 0; ox < oxSize; ++ox) {
+                    const signedsize difx = static_cast<signedsize>(- ox * std::get<0>(attrs)[0]);
+                    const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0)));
+                    const std::size_t sxMax = (static_cast<signedsize>(inputDims[2]) + difx) < 0 ? 0 : ((inputDims[2] + difx) > std::get<2>(attrs)[0] ? std::get<2>(attrs)[0] : inputDims[2] + difx);
+                    const std::size_t oIndexFull = oIndex + ox;
+                    const signedsize ix = static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
+
+                    for (std::size_t sx = sxMin; sx < sxMax; ++sx) {
+                        output[oIndexFull] += weights[wIndex + sx] *
+                                                input[iIndex + static_cast<std::size_t>(ix+static_cast<signedsize>(sx))];
+                    }
+                }
+            }
+        }
+    }
+}
+
+namespace {
+static Registrar<ConvImpl1DForward_cpu> registrarConvImpl1DForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
+        Aidge::ConvImpl1D_cpu_forward_kernel<float, float, float, float>);
+static Registrar<ConvImpl1DForward_cpu> registrarConvImpl1DForward_cpu_Float16(
+        {DataType::Float16, DataType::Float16, DataType::Float16, DataType::Float16},
+        Aidge::ConvImpl1D_cpu_forward_kernel<half_float::half, half_float::half, half_float::half, half_float::half>);
+static Registrar<ConvImpl1DForward_cpu> registrarConvImpl1DForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
+        Aidge::ConvImpl1D_cpu_forward_kernel<int, int, int, int>);
+static Registrar<ConvImpl1DForward_cpu> registrarConvImpl1DForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
+        Aidge::ConvImpl1D_cpu_forward_kernel<double, double, double, double>);
+}  // namespace
+
+
 /**
  * @brief Forward kernel for 2D Convolution on CPU backend.
  * @tparam I Input data type.
@@ -106,8 +181,8 @@ void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Attrs &attrs, const std::ar
     for (std::size_t batch = 0; batch < inputDims[0]; ++batch) {
         for (std::size_t outCh = 0; outCh < outChannels; ++outCh) {
             const std::size_t oIndex = (outCh + batch*outChannels) * oxSize * oySize;
-            // If  NoBias or bias = nullptr, set B(0)
-            B biasVal = ((!std::get<3>(attrs)) && biases != nullptr) ? biases[outCh] : B(0);
+            // If bias = nullptr, set B(0)
+            B biasVal = (biases != nullptr) ? biases[outCh] : B(0);
             std::fill(output + oIndex, output+(oIndex+oxSize*oySize), biasVal);
             for (std::size_t inCh = 0; inCh < inputDims[1]; ++inCh) {
                 const std::size_t iIndex = (inCh + batch*inputDims[1]) * inputDims[2] * inputDims[3];
diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp
index db5f76834411925f3356a42bfb4bfda7da600e8e..f9f97ffd097276704a97edf2de85ffd3a4b19697 100644
--- a/include/aidge/backend/cpu/operator/FCImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl.hpp
@@ -30,8 +30,8 @@ class FCImplForward_cpu : public Registrable<FCImplForward_cpu,
                                                         DataType,
                                                         DataType,
                                                         DataType>,
-                                             void(const FC_Op::Attrs&,
-                                                  const DimSize_t,
+                                             void(
+                                                const DimSize_t,
                                                   const DimSize_t,
                                                   const DimSize_t,
                                                   const void *,
@@ -43,8 +43,8 @@ class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu,
                                                          DataType,
                                                          DataType,
                                                          DataType>,
-                                              void(const FC_Op::Attrs&,
-                                              const DimSize_t,
+                                              void(
+                                                const DimSize_t,
                                               const DimSize_t,
                                               const DimSize_t,
                                               const void *,
diff --git a/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp
index 9dd91eb883902db907f5e5004dd0cf4db59bd6a2..c93a44d922dce2dc18df94bf903134ddadf5256f 100644
--- a/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp
@@ -19,8 +19,7 @@
 
 namespace Aidge {
 template <class I, class O, class W, class B>
-void FCImpl_cpu_backward_kernel(const FC_Op::Attrs& attrs,
-                                const DimSize_t batchSize,
+void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
                                 const DimSize_t inputFeatureSize,
                                 const DimSize_t outputFeatureSize,
                                 const void* input_,
@@ -40,7 +39,7 @@ void FCImpl_cpu_backward_kernel(const FC_Op::Attrs& attrs,
 
 
     // bias grad
-    if (std::get<0>(attrs)) { // no bias
-        std::fill(biasesGrad, biasesGrad + outputFeatureSize, B(0));
-    } else {
+    // biasesGrad is nullptr when the optional bias input is absent: skip the
+    // bias gradient entirely (std::fill on a null pointer would be undefined)
+    if (biasesGrad != nullptr) {
         for (std::size_t o = 0; o < outputFeatureSize; ++o) { // nb outputs
diff --git a/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp
index 2a1a86bac51106e23593a8dc5aa13d72511582b6..a82e8850e1575b52a786df2935efbd2cd93cf8f1 100644
--- a/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp
@@ -83,7 +83,7 @@ namespace Aidge {
 // }
 
 template <class I, class W, class B, class O>
-void FCImpl_cpu_forward_kernel(const FC_Op::Attrs& attrs, const DimSize_t batchSize, const DimSize_t inputFeatureSize,
+void FCImpl_cpu_forward_kernel(const DimSize_t batchSize, const DimSize_t inputFeatureSize,
                                     const DimSize_t outputFeatureSize,
                                    const void* input_, const void* weights_, const void* biases_, void* output_) {
     // FIXME: missing FC attributes as arguments
@@ -92,7 +92,7 @@ void FCImpl_cpu_forward_kernel(const FC_Op::Attrs& attrs, const DimSize_t batchS
     const B* biases = static_cast<const B*>(biases_);
     O* output = static_cast<O*>(output_);
 
-    if (std::get<0>(attrs)) {
+    if (biases == nullptr) {
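+        // no bias input: the output accumulator must still start from zero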
         std::fill(output, output+(batchSize*outputFeatureSize), B(0));
     }
     else {
diff --git a/include/aidge/backend/cpu/operator/PadImpl.hpp b/include/aidge/backend/cpu/operator/PadImpl.hpp
index b3c91a43419e9a5e9e1299f4a2118a51b6b64fc7..72d60fc16f6d730a5cbd4941da03bfbcf72ff85b 100644
--- a/include/aidge/backend/cpu/operator/PadImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl.hpp
@@ -25,6 +25,30 @@
 
 namespace Aidge {
 // class Pad_Op;
+// compute kernel registry for forward and backward
+class PadImpl1DForward_cpu
+    : public Registrable<PadImpl1DForward_cpu,
+                         std::tuple<DataType, DataType>,
+                         void(const Pad_Op<1>::Attrs &, const std::array<DimSize_t, 3> &, const void *,
+                              void *)> {};
+
+class PadImpl1D_cpu : public OperatorImpl {
+public:
+    PadImpl1D_cpu(const Pad_Op<1> &op) : OperatorImpl(op, "cpu") {}
+
+    static std::unique_ptr<PadImpl1D_cpu> create(const Pad_Op<1> &op) {
+        return std::make_unique<PadImpl1D_cpu>(op);
+    }
+
+    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+// add cpu backend to Pad_Op<1> implementation registry
+static Registrar<Pad_Op<1>> registrarPadImpl1D_cpu("cpu", Aidge::PadImpl1D_cpu::create);
+}  // namespace
+
 
 // compute kernel registry for forward and backward
 class PadImpl2DForward_cpu
diff --git a/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
index f6f00bc4df661921708e605f44056a77bb8125f4..c9f6b708d1aaeed71d0836fa3b6feb08c1093559 100644
--- a/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
@@ -22,6 +22,81 @@
 #include <algorithm>
 
 namespace Aidge {
+/**
+ * @brief Forward kernel for 1D Padding on CPU backend.
+ * @tparam I Input data type.
+ * @tparam O Output data type.
+ * @param attrs tuple of Attributes from the Operator.
+ * @param dims Array of input dimensions.
+ * @param input_ const input Tensor.
+ * @param output_ Output Tensor.
+ */
+template <class I, class O>
+void PadImpl1D_cpu_forward_kernel(const Pad_Op<1>::Attrs &attrs, const std::array<DimSize_t, 3> &dims,
+                                       const void *input_, void *output_)
+{
+    const I *input = static_cast<const I *>(input_);
+    O *output = static_cast<O *>(output_);
+
+    const std::size_t oxSize = dims[2] + std::get<0>(attrs)[0] + std::get<0>(attrs)[1];
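+    // std::get<0>(attrs) holds the begin/end border sizes; e.g. Xin = 5 with
+    // borders {1, 2} gives oxSize = 5 + 1 + 2 = 8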
+
+    for (std::size_t batch = 0; batch < dims[0]; ++batch) {
+        for (std::size_t ch = 0; ch < dims[1]; ++ch) {
+            const std::size_t iIndex = (ch + batch*dims[1]) * dims[2];
+            const std::size_t oIndex = (ch + batch*dims[1]) * oxSize;
+
+            for (unsigned int ox = 0; ox < oxSize; ++ox) {
+                const std::size_t oIndexFull = oIndex + ox;
+
+                O outputValue = std::get<2>(attrs);
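+                // outputValue starts as the constant border value (get<2>) and is
+                // overwritten below whenever ix maps onto a valid input index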
+
+                if (std::get<1>(attrs) == PadBorderType::Constant) {
+                    int ix = static_cast<int>(ox) - static_cast<int>(std::get<0>(attrs)[1]);
+
+                    if (ix >= 0  && ix < static_cast<int>(dims[2])) {
+                        outputValue = input[iIndex + static_cast<std::size_t>(ix)];
+                    }
+                }
+                else if (std::get<1>(attrs) == PadBorderType::Edge) {
+                    int ix = std::max(0, std::min(static_cast<int>(dims[2]) - 1, static_cast<int>(ox) - static_cast<int>(std::get<0>(attrs)[1])));
+
+                    outputValue = input[iIndex + static_cast<std::size_t>(ix)];
+                }
+                else if (std::get<1>(attrs) == PadBorderType::Reflect) {
+                    int ix = static_cast<int>(ox) - static_cast<int>(std::get<0>(attrs)[1]);
+
+                    if (ix < 0)
+                        ix = 0 - ix;
+                    if (ix >= static_cast<int>(dims[2]))
+                        ix = 2 * (static_cast<int>(dims[2]) - 1) - ix; // mirror around the last valid index
+
+                    outputValue = input[iIndex + static_cast<std::size_t>(ix)];
+                }
+                else if (std::get<1>(attrs) == PadBorderType::Wrap) {
+                    int ix = (static_cast<int>(dims[2]) + static_cast<int>(ox) - static_cast<int>(std::get<0>(attrs)[1])) % static_cast<int>(dims[2]);
+
+                    outputValue = input[iIndex + static_cast<std::size_t>(ix)];
+                }
+
+                output[oIndexFull] = outputValue;
+            }
+        }
+    }
+}
+
+namespace {
+static Registrar<PadImpl1DForward_cpu> registrarPadImpl1DForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32},
+        Aidge::PadImpl1D_cpu_forward_kernel<float, float>);
+static Registrar<PadImpl1DForward_cpu> registrarPadImpl1DForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32},
+        Aidge::PadImpl1D_cpu_forward_kernel<int, int>);
+static Registrar<PadImpl1DForward_cpu> registrarPadImpl1DForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64},
+        Aidge::PadImpl1D_cpu_forward_kernel<double, double>);
+}  // namespace
+
+
 /**
  * @brief Forward kernel for 2D Padding on CPU backend.
  * @tparam I Input data type.
diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp
index 5c8d2fe307c70bd7ee3f64e14735417f7ffb0c67..11e50aaa9b25ad6a14aeaedc6ae5d936edb88893 100644
--- a/src/operator/ConvDepthWiseImpl.cpp
+++ b/src/operator/ConvDepthWiseImpl.cpp
@@ -22,29 +22,104 @@
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp"
 
+Aidge::Elts_t Aidge::ConvDepthWiseImpl1D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return Elts_t::DataElts(0);
+}
+
+void Aidge::ConvDepthWiseImpl1D_cpu::forward() {
+    const auto& opTensor = static_cast<const OperatorTensor&>(mOp);
+
+    assert(mOp.getRawInput(0) && "missing input #0");
+    assert(mOp.getRawInput(1) && "missing input #1");
+
+    assert((std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->nbDims() == 3) && "support for 3-dimensions tensors only");
+
+    // Find the correct kernel type
+    const auto outputDataType = opTensor.getOutput(0)->dataType();
+    const Registrar<ConvDepthWiseImpl1DForward_cpu>::registrar_key registrarKey = {
+        opTensor.getInput(0)->dataType(),
+        opTensor.getInput(1)->dataType(),
+        ((opTensor.getInput(2)) ? opTensor.getInput(2)->dataType() : opTensor.getInput(1)->dataType()),
+        outputDataType};
+
+    Registrar<ConvDepthWiseImpl1DForward_cpu>::registrar_type kernelFunc;
+    if (Registrar<ConvDepthWiseImpl1DForward_cpu>::exists(registrarKey)) {
+        // One exists with the right inputs/output types
+        kernelFunc = Registrar<ConvDepthWiseImpl1DForward_cpu>::create(registrarKey);
+    }
+    else {
+        // Otherwise, fallback to the kernel with all types matching output type
+        kernelFunc = Registrar<ConvDepthWiseImpl1DForward_cpu>::create({
+            outputDataType, outputDataType, outputDataType, outputDataType});
+    }
+
+    // Convert input data (no overhead if not needed!)
+    // TODO: right now, if needed, memory will be allocated/deallocated at each
+    // call to forward(). We might put the following shared_ptr as members of
+    // this class to avoid that.
+    std::shared_ptr<Tensor> input0Fallback, input1Fallback, input2Fallback;
+    const auto& input0 = opTensor.getInput(0)->refCastFrom(input0Fallback, *opTensor.getOutput(0));
+    const auto& input1 = opTensor.getInput(1)->refCastFrom(input1Fallback, *opTensor.getOutput(0));
+    const auto& input2 = (opTensor.getInput(2)) ? opTensor.getInput(2)->refCastFrom(input2Fallback, *opTensor.getOutput(0)) : Tensor();
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const ConvDepthWise_Op<1>&>(mOp).getStaticAttributes(), // ConvDepthWise attributes
+               opTensor.getInput(0)->template dims<3>(), // input dimensions
+               input0.getImpl()->rawPtr(), // input
+               input1.getImpl()->rawPtr(), // weight
+               (opTensor.getInput(2)) ? input2.getImpl()->rawPtr() : nullptr, // bias
+               getCPUPtr(mOp.getRawOutput(0)) // output
+            );
+}
+
 Aidge::Elts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
     return Elts_t::DataElts(0);
 }
 
 void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
+    const auto& opTensor = static_cast<const OperatorTensor&>(mOp);
+
     assert(mOp.getRawInput(0) && "missing input #0");
     assert(mOp.getRawInput(1) && "missing input #1");
-    assert(mOp.getRawInput(2) && "missing input #2");
 
-    assert((std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->nbDims() == 4) && "support for 4-dimensions tensors only");
+    assert((std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->nbDims() == 4) && "support for 4-dimensions tensors only");
 
     // Find the correct kernel type
-    auto kernelFunc =
-            Registrar<ConvDepthWiseImpl2DForward_cpu>::create({std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-                                                               std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
-                                                               std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->dataType(),
-                                                               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+    const auto outputDataType = opTensor.getOutput(0)->dataType();
+    const Registrar<ConvDepthWiseImpl2DForward_cpu>::registrar_key registrarKey = {
+        opTensor.getInput(0)->dataType(),
+        opTensor.getInput(1)->dataType(),
+        ((opTensor.getInput(2)) ? opTensor.getInput(2)->dataType() : opTensor.getInput(1)->dataType()),
+        outputDataType};
+
+    Registrar<ConvDepthWiseImpl2DForward_cpu>::registrar_type kernelFunc;
+    if (Registrar<ConvDepthWiseImpl2DForward_cpu>::exists(registrarKey)) {
+        // One exists with the right inputs/output types
+        kernelFunc = Registrar<ConvDepthWiseImpl2DForward_cpu>::create(registrarKey);
+    }
+    else {
+        // Otherwise, fallback to the kernel with all types matching output type
+        kernelFunc = Registrar<ConvDepthWiseImpl2DForward_cpu>::create({
+            outputDataType, outputDataType, outputDataType, outputDataType});
+    }
+
+    // Convert input data (no overhead if not needed!)
+    // TODO: right now, if needed, memory will be allocated/deallocated at each
+    // call to forward(). We might put the following shared_ptr as members of
+    // this class to avoid that.
+    std::shared_ptr<Tensor> input0Fallback, input1Fallback, input2Fallback;
+    const auto& input0 = opTensor.getInput(0)->refCastFrom(input0Fallback, *opTensor.getOutput(0));
+    const auto& input1 = opTensor.getInput(1)->refCastFrom(input1Fallback, *opTensor.getOutput(0));
+    const auto& input2 = (opTensor.getInput(2)) ? opTensor.getInput(2)->refCastFrom(input2Fallback, *opTensor.getOutput(0)) : Tensor();
 
     // Call kernel
-    kernelFunc(dynamic_cast<const ConvDepthWise_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
-               getCPUPtr(mOp.getRawInput(0)),
-               getCPUPtr(mOp.getRawInput(1)),
-               getCPUPtr(mOp.getRawInput(2)),
-               getCPUPtr(mOp.getRawOutput(0)));
+    kernelFunc(dynamic_cast<const ConvDepthWise_Op<2>&>(mOp).getStaticAttributes(), // ConvDepthWise attributes
+               opTensor.getInput(0)->template dims<4>(), // input dimensions
+               input0.getImpl()->rawPtr(), // input
+               input1.getImpl()->rawPtr(), // weight
+               (opTensor.getInput(2)) ? input2.getImpl()->rawPtr() : nullptr, // bias
+               getCPUPtr(mOp.getRawOutput(0)) // output
+            );
 }
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index 27e2882c8ee7ddcc60d3d8521802debdcf4b9eb4..e522aac17ef6c1ffaa0778c49ac053db562f4def 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -22,6 +22,57 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/utils/Types.h"
 
+Aidge::Elts_t Aidge::ConvImpl1D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return Elts_t::DataElts(0);
+}
+
+void Aidge::ConvImpl1D_cpu::forward() {
+    const auto& opTensor = static_cast<const OperatorTensor&>(mOp);
+
+    assert(mOp.getRawInput(0) && "missing input #0");
+    assert(mOp.getRawInput(1) && "missing input #1");
+
+    // Find the correct kernel type
+    const auto outputDataType = opTensor.getOutput(0)->dataType();
+    const Registrar<ConvImpl1DForward_cpu>::registrar_key registrarKey = {
+        opTensor.getInput(0)->dataType(),
+        opTensor.getInput(1)->dataType(),
+        ((opTensor.getInput(2)) ? opTensor.getInput(2)->dataType() : opTensor.getInput(1)->dataType()),
+        outputDataType};
+
+    Registrar<ConvImpl1DForward_cpu>::registrar_type kernelFunc;
+    if (Registrar<ConvImpl1DForward_cpu>::exists(registrarKey)) {
+        // One exists with the right inputs/output types
+        kernelFunc = Registrar<ConvImpl1DForward_cpu>::create(registrarKey);
+    }
+    else {
+        // Otherwise, fallback to the kernel with all types matching output type
+        kernelFunc = Registrar<ConvImpl1DForward_cpu>::create({
+            outputDataType, outputDataType, outputDataType, outputDataType});
+    }
+
+    // Convert input data (no overhead if not needed!)
+    // TODO: right now, if needed, memory will be allocated/deallocated at each
+    // call to forward(). We might put the following shared_ptr as members of
+    // this class to avoid that.
+    std::shared_ptr<Tensor> input0Fallback, input1Fallback, input2Fallback;
+    const auto& input0 = opTensor.getInput(0)->refCastFrom(input0Fallback, *opTensor.getOutput(0));
+    const auto& input1 = opTensor.getInput(1)->refCastFrom(input1Fallback, *opTensor.getOutput(0));
+    const auto& input2 = (opTensor.getInput(2)) ? opTensor.getInput(2)->refCastFrom(input2Fallback, *opTensor.getOutput(0)) : Tensor();
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Conv_Op<1>&>(mOp).getStaticAttributes(), // Conv attributes
+               opTensor.getInput(0)->template dims<3>(), // input dimensions
+               dynamic_cast<const Conv_Op<1>&>(mOp).outChannels(), // outChannels
+               input0.getImpl()->rawPtr(), // input
+               input1.getImpl()->rawPtr(), // weight
+               (opTensor.getInput(2)) ? input2.getImpl()->rawPtr() : nullptr, // bias
+               getCPUPtr(mOp.getRawOutput(0)) // output
+            );
+}
+
 Aidge::Elts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
     return Elts_t::DataElts(0);
@@ -33,14 +84,13 @@ void Aidge::ConvImpl2D_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getRawInput(0) && "missing input #0");
     assert(mOp.getRawInput(1) && "missing input #1");
-    assert(mOp.getRawInput(2) && "missing input #2");
 
     // Find the correct kernel type
     const auto outputDataType = opTensor.getOutput(0)->dataType();
     const Registrar<ConvImpl2DForward_cpu>::registrar_key registrarKey = {
         opTensor.getInput(0)->dataType(),
         opTensor.getInput(1)->dataType(),
-        opTensor.getInput(2)->dataType(),
+        ((opTensor.getInput(2)) ? opTensor.getInput(2)->dataType() : opTensor.getInput(1)->dataType()),
         outputDataType};
 
     Registrar<ConvImpl2DForward_cpu>::registrar_type kernelFunc;
@@ -61,7 +111,7 @@ void Aidge::ConvImpl2D_cpu::forward() {
     std::shared_ptr<Tensor> input0Fallback, input1Fallback, input2Fallback;
     const auto& input0 = opTensor.getInput(0)->refCastFrom(input0Fallback, *opTensor.getOutput(0));
     const auto& input1 = opTensor.getInput(1)->refCastFrom(input1Fallback, *opTensor.getOutput(0));
-    const auto& input2 = opTensor.getInput(2)->refCastFrom(input2Fallback, *opTensor.getOutput(0));
+    const auto& input2 = (opTensor.getInput(2)) ? opTensor.getInput(2)->refCastFrom(input2Fallback, *opTensor.getOutput(0)) : Tensor();
 
     // Call kernel
     kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), // Conv attributes
@@ -69,7 +119,7 @@ void Aidge::ConvImpl2D_cpu::forward() {
                dynamic_cast<const Conv_Op<2>&>(mOp).outChannels(), // outChannels
                input0.getImpl()->rawPtr(), // input
                input1.getImpl()->rawPtr(), // weight
-               input2.getImpl()->rawPtr(), // bias
+               (opTensor.getInput(2)) ? input2.getImpl()->rawPtr() : nullptr, // bias
                getCPUPtr(mOp.getRawOutput(0)) // output
             );
 }
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 9ade584146cd14b22dfb7a0e31147f136dd5fc8a..f7eebb7b21512fb3b388b6927409fba9a1d92b34 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -29,14 +29,13 @@ void Aidge::FCImpl_cpu::forward()
     const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0");
     AIDGE_ASSERT(op_.getInput(1), "missing input #1");
-    AIDGE_ASSERT(op_.getInput(2), "missing input #2");
 
     // Find the correct kernel type
     const auto outputDataType = op_.getOutput(0)->dataType();
     const Registrar<FCImplForward_cpu>::registrar_key registrarKey = {
-        outputDataType,
-        outputDataType,
-        outputDataType,
+        op_.getInput(0)->dataType(),
+        op_.getInput(1)->dataType(),
+        ((op_.getInput(2)) ? op_.getInput(2)->dataType() : op_.getInput(1)->dataType()),
         outputDataType};
 
     Registrar<FCImplForward_cpu>::registrar_type kernelFunc;
@@ -57,15 +56,16 @@ void Aidge::FCImpl_cpu::forward()
     std::shared_ptr<Tensor> input0Fallback, input1Fallback, input2Fallback;
     const auto& input0 = op_.getInput(0)->refCastFrom(input0Fallback, *(op_.getOutput(0)));
     const auto& input1 = op_.getInput(1)->refCastFrom(input1Fallback, *(op_.getOutput(0)));
-    const auto& input2 = op_.getInput(2)->refCastFrom(input2Fallback, *(op_.getOutput(0)));
+    const auto& input2 = (op_.getInput(2)) ? op_.getInput(2)->refCastFrom(input2Fallback, *(op_.getOutput(0))) : Tensor();
 
     // Call kernel
     const auto batchSize = (input0.dims().size() > 1) ? input0.dims()[0] : 1;
-    kernelFunc(dynamic_cast<const FC_Op&>(mOp).getStaticAttributes(),
-        batchSize,
+    kernelFunc(batchSize,
         input1.dims()[1], // nb input features
         input1.dims()[0], // nb output features
-        input0.getImpl()->rawPtr(), input1.getImpl()->rawPtr(), input2.getImpl()->rawPtr(),
+        input0.getImpl()->rawPtr(),
+        input1.getImpl()->rawPtr(),
+        (op_.getInput(2)) ? input2.getImpl()->rawPtr() : nullptr,
         getCPUPtr(mOp.getRawOutput(0)));
 }
 
@@ -76,14 +76,13 @@ void Aidge::FCImpl_cpu::backward()
     AIDGE_ASSERT(fc_grad, "missing ouput #0 gradient");
     AIDGE_ASSERT(op_.getInput(0)->grad(), "missing input #0 gradient");
     AIDGE_ASSERT(op_.getInput(1)->grad(), "missing input #1 gradient");
-    AIDGE_ASSERT(op_.getInput(2)->grad(), "missing input #2 gradient");
 
     // Find the correct kernel type
     const Registrar<FCImplBackward_cpu>::registrar_key registrarKey = {
         fc_grad->dataType(),
-        op_.getInput(0)->grad()->dataType(),
         op_.getInput(1)->grad()->dataType(),
-        op_.getInput(2)->grad()->dataType()};
+        (op_.getInput(2)) ? op_.getInput(2)->grad()->dataType() : op_.getInput(1)->grad()->dataType(),
+        op_.getInput(0)->grad()->dataType()};
 
     Registrar<FCImplBackward_cpu>::registrar_type kernelFunc;
     if (Registrar<FCImplBackward_cpu>::exists(registrarKey)) {
@@ -103,12 +102,11 @@ void Aidge::FCImpl_cpu::backward()
     std::shared_ptr<Tensor> input0gradFallback, input1gradFallback, input2gradFallback;
     const auto& input0grad = op_.getInput(0)->grad()->refCastFrom(input0gradFallback, *(op_.getOutput(0)));
     const auto& input1grad = op_.getInput(1)->grad()->refCastFrom(input1gradFallback, *(op_.getOutput(0)));
-    const auto& input2grad = op_.getInput(2)->grad()->refCastFrom(input2gradFallback, *(op_.getOutput(0)));
+    const auto& input2grad = (op_.getInput(2)) ? op_.getInput(2)->grad()->refCastFrom(input2gradFallback, *(op_.getOutput(0))) : Tensor();
 
     // Call kernel
     const auto batchSize = (input0grad.dims().size() > 1) ? input0grad.dims()[0] : 1;
-    kernelFunc(dynamic_cast<const FC_Op&>(mOp).getStaticAttributes(),
-        batchSize,
+    kernelFunc(batchSize,
         input1grad.dims()[1], // nb input features
         input1grad.dims()[0], // nb output features
         getCPUPtr(fc_grad),
@@ -116,5 +114,5 @@ void Aidge::FCImpl_cpu::backward()
         getCPUPtr(mOp.getRawInput(1)),
         input0grad.getImpl()->rawPtr(),
         input1grad.getImpl()->rawPtr(),
-        input2grad.getImpl()->rawPtr());
+        (op_.getInput(2)) ? input2grad.getImpl()->rawPtr() : nullptr);
 }
diff --git a/src/operator/PadImpl.cpp b/src/operator/PadImpl.cpp
index cd420a6241723c5d3fa5836838f84ce6bfe965d1..f7ac36428536b88da73736cc7f3898bb16578b10 100644
--- a/src/operator/PadImpl.cpp
+++ b/src/operator/PadImpl.cpp
@@ -22,6 +22,31 @@
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
 #include "aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp"
 
+Aidge::Elts_t Aidge::PadImpl1D_cpu::getNbRequiredProtected(IOIndex_t inputIdx) const {
+    assert(inputIdx == 0 && "operator has only one input");
+    (void) inputIdx;
+
+    // Padding cannot be in-place!
+    // We must ensure that we do not overwrite data that has not been consumed yet.
+    const auto inputSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size();
+    const auto outputSize = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size();
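+    // e.g. an input of 5 elements padded to 8 yields 8 - 5 = 3 elements that
+    // must stay protected so the output does not overwrite unconsumed input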
+    return Elts_t::DataElts(outputSize - inputSize);
+}
+
+void Aidge::PadImpl1D_cpu::forward() {
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+
+    // Find the correct kernel type
+    auto kernelFunc =
+            Registrar<PadImpl1DForward_cpu>::create({std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Pad_Op<1>&>(mOp).getStaticAttributes(),
+                        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<3>(),
+                        getCPUPtr(mOp.getRawInput(0)),
+                        getCPUPtr(mOp.getRawOutput(0)));
+}
+
 Aidge::Elts_t Aidge::PadImpl2D_cpu::getNbRequiredProtected(IOIndex_t inputIdx) const {
     assert(inputIdx == 0 && "operator has only one input");
     (void) inputIdx;
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index 78058eca33a1ab7a23f4f6a12703fc85ed17bb89..2893788ee0922b238147a0562cf79d7535d5f60b 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -200,7 +200,13 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         microGraph->save("lstm", false, true);
 
         REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
-        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->inputCategory(0) == InputCategory::Data);
+        for (size_t i = 1; i < 9; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::Param);
+        }
+        for (size_t i = 9; i < 17; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::OptionalParam);
+        }
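+        // inputs 17 and 18 (presumably the initial hidden and cell states,
+        // hence OptionalData) are not checked here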
         REQUIRE(myLSTM->nbOutputs() == 2);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
@@ -259,7 +265,13 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         microGraph->save("lstm", false, false);
 
         REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
-        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->inputCategory(0) == InputCategory::Data);
+        for (size_t i = 1; i < 9; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::Param);
+        }
+        for (size_t i = 9; i < 17; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::OptionalParam);
+        }
         REQUIRE(myLSTM->nbOutputs() == 2);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
@@ -316,7 +328,13 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         auto op = std::static_pointer_cast<OperatorTensor>(myLSTM->getOperator());
 
         REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
-        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->inputCategory(0) == InputCategory::Data);
+        for (size_t i = 1; i < 9; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::Param);
+        }
+        for (size_t i = 9; i < 17; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::OptionalParam);
+        }
         REQUIRE(myLSTM->nbOutputs() == 2);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
@@ -378,7 +396,13 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         myGraph->add(pop);
 
         REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
-        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->inputCategory(0) == InputCategory::Data);
+        for (size_t i = 1; i < 9; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::Param);
+        }
+        for (size_t i = 9; i < 17; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::OptionalParam);
+        }
         REQUIRE(myLSTM->nbOutputs() == 2);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
@@ -441,7 +465,13 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         myGraph->add(pop);
 
         REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
-        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->inputCategory(0) == InputCategory::Data);
+        for (size_t i = 1; i < 9; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::Param);
+        }
+        for (size_t i = 9; i < 17; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::OptionalParam);
+        }
         REQUIRE(myLSTM->nbOutputs() == 2);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
index 01ccd37c319ee64deb15240b30cc369b37c9e47d..16112628053a35ef71d5819a53aacc85425da88d 100644
--- a/unit_tests/scheduler/Test_Scheduler.cpp
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -416,7 +416,7 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(backward)", "[scheduler][backward
     auto myProd = Producer(inputTensor, "prod");
     myProd -> addChild(gv);
     gv -> compile("cpu", DataType::Float32);
-    compile_gradient(gv);
+
     SequentialScheduler scheduler(gv);
     scheduler.forward();
     auto outNode = gv->getOrderedOutputs()[0].first;
@@ -432,7 +432,6 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(backward)", "[scheduler][backward
                                                                  {6.0f, 6.0f, 6.0f, 6.0f, 6.0f},
                                                                  {6.0f, 6.0f, 6.0f, 7.0f, 7.0f},
                                                                  {7.0f, 7.0f, 7.0f, 7.0f, 7.0f}}}}});
-    predictedOutput->initGrad();
     predictedOutput->setGrad(targetOutput);
     REQUIRE_NOTHROW(scheduler.backward());
 }