diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index df02f67b0f6573e23efc3cad8dda27b09c4cce79..14ab86c56268f4e5861445d32a83dd7fa2213969 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -1,7 +1,6 @@
 #ifndef AIDGE_CPU_DATA_TENSORIMPL_H_
 #define AIDGE_CPU_DATA_TENSORIMPL_H_
 
-#include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
index f18618ddbb6d4598cb4b3369f03622d681a9cd27..9ae36017127d73b3136fba6cce151e8ad4acf910 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
@@ -27,7 +27,7 @@ namespace Aidge
  * @brief Forward kernel for 2D AvgPooling on CPU backend.
  * @tparam I Input data type.
  * @tparam O Output data type.
- * @param params tuple of Attributes from the Operator
+ * @param params tuple of Attributes from the OperatorTensor
  * @param dims Array of input dimensions.
  * @param input_ const input Tensor.
  * @param output_ Output Tensor.
@@ -67,7 +67,7 @@ void AvgPoolingImpl2D_cpu_forward_kernel(
             for (std::size_t ox = 0; ox < oxSize; ++ox)
             {
                 const signedsize difx
-                    = static_cast<signedsize>(-ox * std::get<0>(attrs)[0]);
+                    = -static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
                 const std::size_t sxMin
                     = static_cast<std::size_t>(std::max(difx, signedsize(0)));
                 const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ?
@@ -78,7 +78,7 @@ void AvgPoolingImpl2D_cpu_forward_kernel(
                 for (std::size_t oy = 0; oy < oySize; ++oy)
                 {
                     const signedsize dify
-                        = static_cast<signedsize>(-oy * std::get<0>(attrs)[1]);
+                        = -static_cast<signedsize>(oy * std::get<0>(attrs)[1]);
                     const std::size_t syMin
                         = static_cast<std::size_t>(std::max(dify, signedsize(0)));
                     const std::size_t syMax
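
Review note: the `difx`/`dify` changes above are more than reformatting. In the old form the unary minus is applied to a `std::size_t`, so the negation happens in unsigned arithmetic and wraps modulo 2^N before the cast; the new form computes the in-range product first and negates in the signed domain. On two's-complement targets both usually yield the same value, but the new form avoids unary minus on an unsigned type (a common compiler warning, e.g. MSVC C4146) and does not rely on implementation-defined unsigned-to-signed narrowing pre-C++20. A minimal standalone sketch (not part of the patch):

```cpp
#include <cstddef>
#include <iostream>
#include <type_traits>

int main() {
    using signedsize = std::make_signed<std::size_t>::type;
    const std::size_t ox = 2, stride = 3;

    // Old form: unary minus on std::size_t wraps to 2^N - ox before the
    // multiplication; the result only becomes -6 again through the
    // unsigned-to-signed conversion (implementation-defined pre-C++20).
    const signedsize oldForm = static_cast<signedsize>(-ox * stride);

    // New form: the product stays in range, then is negated as a signed value.
    const signedsize newForm = -static_cast<signedsize>(ox * stride);

    std::cout << oldForm << ' ' << newForm << '\n'; // typically "-6 -6"
}
```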
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
index 486829e782ae2173332a7efa6646bb7bba322252..ca08ea773c3b61bb45912134f544f6dc5c0da1f3 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
@@ -16,18 +16,19 @@
 
 #include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
 #include "aidge/utils/Types.h"
+#include <algorithm>
 #include <array>
 #include <cmath>
-#include <algorithm>
 
-namespace Aidge {
+namespace Aidge
+{
 /**
  * @brief Forward kernel for 2D BatchNorm on CPU backend.
  * @tparam I Input data type.
  * @tparam W Weight data type.
  * @tparam B Bias data type.
  * @tparam O Output data type.
- * @param params tuple of Attributes from the Operator
+ * @param params tuple of Attributes from the OperatorTensor
  * @param dims Array of input dimensions.
  * @param input_ const input Tensor.
  * @param scale_ const scale Tensor.
@@ -36,9 +37,18 @@ namespace Aidge {
  * @param batchVar_ const variance Tensor.
  * @param output_ Output Tensor.
  */
-template <class I, class P, class O>
-void BatchNormImpl2D_cpu_forward_kernel(const BatchNorm_Op<2>::Attrs &attrs, const std::array<DimSize_t, 4> &dims,
-                                       const void *input_, const void *scale_, const void *shift_, void *batchMean_, void *batchVar_, void *output_, const bool freeze) {
+template<class I, class P, class O>
+void BatchNormImpl2D_cpu_forward_kernel(
+    const BatchNorm_Op<2>::Attrs &attrs,
+    const std::array<DimSize_t, 4> &dims,
+    const void *input_,
+    const void *scale_,
+    const void *shift_,
+    void *batchMean_,
+    void *batchVar_,
+    void *output_,
+    const bool freeze)
+{
     // FIXME: missing convolution attributes as arguments
     const I *input = static_cast<const I *>(input_);
     const P *scale = static_cast<const P *>(scale_);
@@ -49,61 +59,77 @@ void BatchNormImpl2D_cpu_forward_kernel(const BatchNorm_Op<2>::Attrs &attrs, con
 
     const DimSize_t nbBatch = dims[0];
     const DimSize_t nbChannels = dims[1];
-    const DimSize_t featureMapSize = dims[2]*dims[3];
-
-
-    if ((freeze == true) || (std::get<1>(attrs) == 0.0f)) {
-        for (std::size_t batch = 0; batch < nbBatch; ++batch) {
-            for (std::size_t ch = 0; ch < nbChannels; ++ch) {
-                const std::size_t ioIndex = (ch + batch*nbChannels) * featureMapSize;
+    const DimSize_t featureMapSize = dims[2] * dims[3];
+
+    if ((freeze == true) || (std::get<1>(attrs) == 0.0f))
+    {
+        for (std::size_t batch = 0; batch < nbBatch; ++batch)
+        {
+            for (std::size_t ch = 0; ch < nbChannels; ++ch)
+            {
+                const std::size_t ioIndex = (ch + batch * nbChannels) * featureMapSize;
                 std::fill(output + ioIndex, output + ioIndex + featureMapSize, shift[ch]);
-                const P var = std::sqrt(batchVar[ch] + static_cast<P>(std::get<0>(attrs)));
+                const P var
+                    = std::sqrt(batchVar[ch] + static_cast<P>(std::get<0>(attrs)));
 
-                for (std::size_t feature = 0; feature<featureMapSize; ++feature) {
-                    output[ioIndex + feature] += scale[ch] * (input[ioIndex + feature]-batchMean[ch]) / var;
+                for (std::size_t feature = 0; feature < featureMapSize; ++feature)
+                {
+                    output[ioIndex + feature]
+                        += scale[ch] * (input[ioIndex + feature] - batchMean[ch]) / var;
                 }
             }
         }
-    } else {
+    }
+    else
+    {
         const std::size_t nbDataPerChannel = nbBatch * featureMapSize;
-        for (std::size_t ch = 0; ch < nbChannels; ++ch) {
+        for (std::size_t ch = 0; ch < nbChannels; ++ch)
+        {
             I sum = I(0);
             I sumSquare = I(0);
-            for (std::size_t batch = 0; batch < nbBatch; ++batch) {
-                const std::size_t ioIndex = (ch + batch*nbChannels) * featureMapSize;
+            for (std::size_t batch = 0; batch < nbBatch; ++batch)
+            {
+                const std::size_t ioIndex = (ch + batch * nbChannels) * featureMapSize;
                 std::fill(output + ioIndex, output + ioIndex + featureMapSize, shift[ch]);
 
-                for (std::size_t feature = 0; feature<featureMapSize; ++feature) {
+                for (std::size_t feature = 0; feature < featureMapSize; ++feature)
+                {
                     sum += input[ioIndex + feature];
                     sumSquare += input[ioIndex + feature] * input[ioIndex + feature];
                 }
             }
             const I inputMean = sum / static_cast<I>(nbDataPerChannel);
-            const I inputVar = sumSquare / static_cast<I>(nbDataPerChannel)  - inputMean*inputMean;
+            const I inputVar
+                = sumSquare / static_cast<I>(nbDataPerChannel) - inputMean * inputMean;
 
-            batchMean[ch] = batchMean[ch]*(1-std::get<1>(attrs)) + inputMean*std::get<1>(attrs);
-            batchVar[ch] = batchVar[ch]*(1-std::get<1>(attrs)) + inputVar*(static_cast<I>(nbDataPerChannel)/static_cast<I>(nbDataPerChannel-1))*std::get<1>(attrs);
+            batchMean[ch] = batchMean[ch] * (1 - std::get<1>(attrs))
+                            + inputMean * std::get<1>(attrs);
+            batchVar[ch] = batchVar[ch] * (1 - std::get<1>(attrs))
+                           + inputVar
+                                 * (static_cast<I>(nbDataPerChannel)
+                                    / static_cast<I>(nbDataPerChannel - 1))
+                                 * std::get<1>(attrs);
 
             const P var = std::sqrt(inputVar + static_cast<P>(std::get<0>(attrs)));
-            for (std::size_t batch = 0; batch < nbBatch; ++batch) {
-                const std::size_t ioIndex = (ch + batch*nbChannels) * featureMapSize;
-                for (std::size_t feature = 0; feature<featureMapSize; ++feature) {
-                    output[ioIndex + feature] += scale[ch] * (input[ioIndex + feature]-inputMean) / var;
+            for (std::size_t batch = 0; batch < nbBatch; ++batch)
+            {
+                const std::size_t ioIndex = (ch + batch * nbChannels) * featureMapSize;
+                for (std::size_t feature = 0; feature < featureMapSize; ++feature)
+                {
+                    output[ioIndex + feature]
+                        += scale[ch] * (input[ioIndex + feature] - inputMean) / var;
                 }
             }
         }
     }
 }
 
-
-
-
-
-namespace {
+namespace
+{
 static Registrar<BatchNormImpl2DForward_cpu> registrarBatchNormImpl2DForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::BatchNormImpl2D_cpu_forward_kernel<float, float, float>);
-}  // namespace
-}  // namespace Aidge
+    {DataType::Float32, DataType::Float32, DataType::Float32},
+    Aidge::BatchNormImpl2D_cpu_forward_kernel<float, float, float>);
+} // namespace
+} // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_BATCHNORMIMPL_FORWARD_KERNEL_H_ */
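
Review note: when `freeze` is true or the momentum `std::get<1>(attrs)` is zero, the kernel normalizes with the stored statistics; otherwise the training branch blends the batch statistics into the running ones with momentum, rescaling the biased (1/N) batch variance by N/(N-1) (Bessel's correction) first. A standalone sketch of that per-channel update (names are illustrative, not from the patch):

```cpp
#include <cstddef>
#include <iostream>

// Per-channel running-statistics update as performed in the training branch;
// `momentum` plays the role of std::get<1>(attrs), `n` of nbDataPerChannel.
void updateRunningStats(float& runningMean, float& runningVar,
                        float batchMean, float batchVar, // batchVar: biased (1/n) estimate
                        std::size_t n, float momentum) {
    runningMean = runningMean * (1.0f - momentum) + batchMean * momentum;
    // Bessel's correction n/(n-1) makes the batch variance unbiased before blending.
    runningVar = runningVar * (1.0f - momentum)
                 + batchVar * (static_cast<float>(n) / static_cast<float>(n - 1))
                       * momentum;
}

int main() {
    float mean = 0.0f, var = 1.0f;
    updateRunningStats(mean, var, 0.5f, 0.25f, 32, 0.1f);
    std::cout << mean << ' ' << var << '\n'; // 0.05 0.925806...
}
```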
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
index 5aa29ac55740d46bba873bb9d85a04cd004cc3bd..a859bceff74efca2aa93b1e55375c0f03b3bfe18 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
@@ -16,42 +16,48 @@
 
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
 #include "aidge/utils/Types.h"
-#include <cmath>
-#include <array>
 #include <algorithm>
+#include <array>
+#include <cmath>
 
-namespace Aidge {
+namespace Aidge
+{
 /**
  * @brief Forward kernel for 2D ConvDepthWise on CPU backend.
  * @tparam I Input data type.
  * @tparam W Weight data type.
  * @tparam B Bias data type.
  * @tparam O Output data type.
- * @param params tuple of Attributes from the Operator
+ * @param params tuple of Attributes from the OperatorTensor
  * @param dims Array of input dimensions.
  * @param input_ const input Tensor.
  * @param weights_ const weight Tensor.
  * @param biases_ const Bias Tensor.
  * @param output_ Output Tensor.
  */
-template <class I, class W, class B, class O>
-void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Attrs &attrs, const std::array<DimSize_t, 4> &dims,
-                                       const void *input_, const void *weights_, const void *biases_, void *output_) {
+template<class I, class W, class B, class O>
+void ConvDepthWiseImpl2D_cpu_forward_kernel(
+    const ConvDepthWise_Op<2>::Attrs &attrs,
+    const std::array<DimSize_t, 4> &dims,
+    const void *input_,
+    const void *weights_,
+    const void *biases_,
+    void *output_)
+{
     // FIXME: missing convolution attributes as arguments
     const I *input = static_cast<const I *>(input_);
     const W *weights = static_cast<const W *>(weights_);
     const B *biases = static_cast<const B *>(biases_);
     O *output = static_cast<O *>(output_);
 
-
     // output H size
-    const std::size_t oxSize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] - std::get<3>(attrs)[0] + std::get<0>(attrs)[0]) /
-                                static_cast<float>(std::get<0>(attrs)[0])));
+    const std::size_t oxSize = static_cast<std::size_t>(std::floor(
+        static_cast<float>(dims[2] - std::get<3>(attrs)[0] + std::get<0>(attrs)[0])
+        / static_cast<float>(std::get<0>(attrs)[0])));
     // output W size
-    const std::size_t oySize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] - std::get<3>(attrs)[1] + std::get<0>(attrs)[1]) /
-                                static_cast<float>(std::get<0>(attrs)[1])));
+    const std::size_t oySize = static_cast<std::size_t>(std::floor(
+        static_cast<float>(dims[3] - std::get<3>(attrs)[1] + std::get<0>(attrs)[1])
+        / static_cast<float>(std::get<0>(attrs)[1])));
 
     // TODO: kernel computation
     // output (batch, outCh, Xout, Yout)
@@ -59,40 +65,109 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Attrs &at
     // weight (outCh, ch, kernelX, kernelY)
     // does not take Dilation attribute into account
     using signedsize = std::make_signed<std::size_t>::type;
-    for (std::size_t batch = 0; batch < dims[0]; ++batch) {
-        for (std::size_t ch = 0; ch < std::get<2>(attrs); ++ch) {
-            const std::size_t oIndex = (ch + batch*std::get<2>(attrs)) * oxSize * oySize;
+    for (std::size_t batch = 0; batch < dims[0]; ++batch)
+    {
+        for (std::size_t ch = 0; ch < std::get<2>(attrs); ++ch)
+        {
+            const std::size_t oIndex
+                = (ch + batch * std::get<2>(attrs)) * oxSize * oySize;
             B biasVal = (biases != nullptr) ? biases[ch] : B(0);
-            std::fill(output + oIndex, output+(oIndex+oxSize*oySize), biasVal);
-            const std::size_t iIndex = (ch + batch*dims[1]) * dims[2] * dims[3];
+            std::fill(output + oIndex, output + (oIndex + oxSize * oySize), biasVal);
+            const std::size_t iIndex = (ch + batch * dims[1]) * dims[2] * dims[3];
             const std::size_t wIndex = ch * std::get<3>(attrs)[0] * std::get<3>(attrs)[1];
-            for (std::size_t ox = 0; ox < oxSize; ++ox) {
-                const signedsize difx = static_cast<signedsize>(- ox * std::get<0>(attrs)[0]);
-                const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0)));
-                const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<3>(attrs)[0] ? std::get<3>(attrs)[0] : dims[2] + difx);
-                for (std::size_t oy = 0; oy < oySize; ++oy) {
-                    const signedsize dify = static_cast<signedsize>(- oy * std::get<0>(attrs)[1]);
-                    const std::size_t syMin = static_cast<std::size_t>(std::max(dify, signedsize(0)));
-                    const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<3>(attrs)[1] ? std::get<3>(attrs)[1] : dims[3] + dify);
-                    const std::size_t oIndexFull = oIndex + ox*oySize + oy;
-                    const signedsize ix = static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
-                    const signedsize iy = static_cast<signedsize>(oy * std::get<0>(attrs)[1]);
+            for (std::size_t ox = 0; ox < oxSize; ++ox)
+            {
+                const signedsize difx
+                    = -static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
+                const std::size_t sxMin
+                    = static_cast<std::size_t>(std::max(difx, signedsize(0)));
+                const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ?
+                                              0 :
+                                              ((dims[2] + difx) > std::get<3>(attrs)[0] ?
+                                                   std::get<3>(attrs)[0] :
+                                                   dims[2] + difx);
+                for (std::size_t oy = 0; oy < oySize; ++oy)
+                {
+                    const signedsize dify
+                        = -static_cast<signedsize>(oy * std::get<0>(attrs)[1]);
+                    const std::size_t syMin
+                        = static_cast<std::size_t>(std::max(dify, signedsize(0)));
+                    const std::size_t syMax
+                        = (static_cast<signedsize>(dims[3]) + dify) < 0 ?
+                              0 :
+                              ((dims[3] + dify) > std::get<3>(attrs)[1] ?
+                                   std::get<3>(attrs)[1] :
+                                   dims[3] + dify);
+                    const std::size_t oIndexFull = oIndex + ox * oySize + oy;
+                    const signedsize ix
+                        = static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
+                    const signedsize iy
+                        = static_cast<signedsize>(oy * std::get<0>(attrs)[1]);
 
-                    if (sxMin == 0 && syMin == 0 && sxMax == 3 && syMax == 3) {
-                        output[oIndexFull] +=  (weights[wIndex + 0*std::get<3>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+0)] +
-                                                weights[wIndex + 0*std::get<3>(attrs)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+1)] +
-                                                weights[wIndex + 0*std::get<3>(attrs)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+2)] +
-                                                weights[wIndex + 1*std::get<3>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+0)] +
-                                                weights[wIndex + 1*std::get<3>(attrs)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+1)] +
-                                                weights[wIndex + 1*std::get<3>(attrs)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+2)] +
-                                                weights[wIndex + 2*std::get<3>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+0)] +
-                                                weights[wIndex + 2*std::get<3>(attrs)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+1)] +
-                                                weights[wIndex + 2*std::get<3>(attrs)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+2)]);
-                    } else {
-                        for (std::size_t sx = sxMin; sx < sxMax; ++sx) {
-                            for (std::size_t sy = syMin; sy < syMax; ++sy) {
-                                output[oIndexFull] += weights[wIndex + sx*std::get<3>(attrs)[1] + sy] *
-                                                        input[iIndex + static_cast<std::size_t>(ix+static_cast<signedsize>(sx))*dims[3] + static_cast<std::size_t>(iy+static_cast<signedsize>(sy))];
+                    if (sxMin == 0 && syMin == 0 && sxMax == 3 && syMax == 3)
+                    {
+                        output[oIndexFull]
+                            += (weights[wIndex + 0 * std::get<3>(attrs)[1] + 0]
+                                    * input
+                                        [iIndex
+                                         + static_cast<std::size_t>(ix + 0) * dims[3]
+                                         + static_cast<std::size_t>(iy + 0)]
+                                + weights[wIndex + 0 * std::get<3>(attrs)[1] + 1]
+                                      * input
+                                          [iIndex
+                                           + static_cast<std::size_t>(ix + 0) * dims[3]
+                                           + static_cast<std::size_t>(iy + 1)]
+                                + weights[wIndex + 0 * std::get<3>(attrs)[1] + 2]
+                                      * input
+                                          [iIndex
+                                           + static_cast<std::size_t>(ix + 0) * dims[3]
+                                           + static_cast<std::size_t>(iy + 2)]
+                                + weights[wIndex + 1 * std::get<3>(attrs)[1] + 0]
+                                      * input
+                                          [iIndex
+                                           + static_cast<std::size_t>(ix + 1) * dims[3]
+                                           + static_cast<std::size_t>(iy + 0)]
+                                + weights[wIndex + 1 * std::get<3>(attrs)[1] + 1]
+                                      * input
+                                          [iIndex
+                                           + static_cast<std::size_t>(ix + 1) * dims[3]
+                                           + static_cast<std::size_t>(iy + 1)]
+                                + weights[wIndex + 1 * std::get<3>(attrs)[1] + 2]
+                                      * input
+                                          [iIndex
+                                           + static_cast<std::size_t>(ix + 1) * dims[3]
+                                           + static_cast<std::size_t>(iy + 2)]
+                                + weights[wIndex + 2 * std::get<3>(attrs)[1] + 0]
+                                      * input
+                                          [iIndex
+                                           + static_cast<std::size_t>(ix + 2) * dims[3]
+                                           + static_cast<std::size_t>(iy + 0)]
+                                + weights[wIndex + 2 * std::get<3>(attrs)[1] + 1]
+                                      * input
+                                          [iIndex
+                                           + static_cast<std::size_t>(ix + 2) * dims[3]
+                                           + static_cast<std::size_t>(iy + 1)]
+                                + weights[wIndex + 2 * std::get<3>(attrs)[1] + 2]
+                                      * input
+                                          [iIndex
+                                           + static_cast<std::size_t>(ix + 2) * dims[3]
+                                           + static_cast<std::size_t>(iy + 2)]);
+                    }
+                    else
+                    {
+                        for (std::size_t sx = sxMin; sx < sxMax; ++sx)
+                        {
+                            for (std::size_t sy = syMin; sy < syMax; ++sy)
+                            {
+                                output[oIndexFull]
+                                    += weights[wIndex + sx * std::get<3>(attrs)[1] + sy]
+                                       * input
+                                           [iIndex
+                                            + static_cast<std::size_t>(
+                                                  ix + static_cast<signedsize>(sx))
+                                                  * dims[3]
+                                            + static_cast<std::size_t>(
+                                                iy + static_cast<signedsize>(sy))];
                             }
                         }
                     }
@@ -102,17 +177,21 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Attrs &at
     }
 }
 
-namespace {
-static Registrar<ConvDepthWiseImpl2DForward_cpu> registrarConvDepthWiseImpl2DForward_cpu_Float32(
+namespace
+{
+static Registrar<ConvDepthWiseImpl2DForward_cpu>
+    registrarConvDepthWiseImpl2DForward_cpu_Float32(
         {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
         Aidge::ConvDepthWiseImpl2D_cpu_forward_kernel<float, float, float, float>);
-static Registrar<ConvDepthWiseImpl2DForward_cpu> registrarConvDepthWiseImpl2DForward_cpu_Int32(
+static Registrar<ConvDepthWiseImpl2DForward_cpu>
+    registrarConvDepthWiseImpl2DForward_cpu_Int32(
         {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
         Aidge::ConvDepthWiseImpl2D_cpu_forward_kernel<int, int, int, int>);
-static Registrar<ConvDepthWiseImpl2DForward_cpu> registrarConvDepthWiseImpl2DForward_cpu_Float64(
+static Registrar<ConvDepthWiseImpl2DForward_cpu>
+    registrarConvDepthWiseImpl2DForward_cpu_Float64(
         {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
         Aidge::ConvDepthWiseImpl2D_cpu_forward_kernel<double, double, double, double>);
-}  // namespace
-}  // namespace Aidge
+} // namespace
+} // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_FORWARD_KERNEL_H_ */
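
Review note: the `oxSize`/`oySize` expressions in this kernel (and in the Conv and MaxPooling kernels below) implement floor((in - k + s) / s) for an unpadded window, which is the familiar floor((in - k) / s) + 1. A quick standalone check:

```cpp
#include <cmath>
#include <cstddef>
#include <iostream>

// Output-size rule used by the kernels, one spatial dimension,
// no padding or dilation.
std::size_t outSize(std::size_t in, std::size_t k, std::size_t s) {
    return static_cast<std::size_t>(
        std::floor(static_cast<float>(in - k + s) / static_cast<float>(s)));
}

int main() {
    std::cout << outSize(32, 3, 1) << '\n'; // 30
    std::cout << outSize(32, 3, 2) << '\n'; // 15 == floor((32 - 3) / 2) + 1
    std::cout << outSize(7, 2, 2) << '\n';  // 3
}
```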
diff --git a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
index 03e2c35170432181c7a9b3934d61f0bd18471876..d1de876c0e7ab609779c8b16f9344bbcd1098e68 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
@@ -16,84 +16,92 @@
 
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
 #include "aidge/utils/Types.h"
-#include <cmath>
-#include <array>
 #include <algorithm>
+#include <array>
+#include <cmath>
 
-namespace Aidge {
+namespace Aidge
+{
 /**
  * @brief Forward kernel for 2D Convolution on CPU backend.
  * @tparam I Input data type.
  * @tparam W Weight data type.
  * @tparam B Bias data type.
  * @tparam O Output data type.
- * @param params tuple of Attributes from the Operator
+ * @param params tuple of Attributes from the OperatorTensor
  * @param dims Array of input dimensions.
  * @param input_ const input Tensor.
  * @param weights_ const weight Tensor.
  * @param biases_ const Bias Tensor.
  * @param output_ Output Tensor.
  */
-template <class I, class W, class B, class O>
-void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Attrs &attrs, const std::array<DimSize_t, 4> &dims,
-                                       const void *input_, const void *weights_, const void *biases_, void *output_) {
+template<class I, class W, class B, class O>
+void ConvImpl2D_cpu_forward_kernel(
+    const Conv_Op<2>::Attrs &attrs,
+    const std::array<DimSize_t, 4> &dims,
+    const void *input_,
+    const void *weights_,
+    const void *biases_,
+    void *output_)
+{
     // FIXME: missing convolution attributes as arguments
     const I *input = static_cast<const I *>(input_);
     const W *weights = static_cast<const W *>(weights_);
     const B *biases = static_cast<const B *>(biases_);
     O *output = static_cast<O *>(output_);
-/*
-    // output H size
-    const std::size_t oxSize =
-            static_cast<std::size_t>(static_cast<float>(dims[0] - std::get<4>(attrs)[0] + std::get<0>(attrs)[0]) /
-                                static_cast<float>(std::get<0>(attrs)[0]));
-    // output W size
-    const std::size_t oySize =
-            static_cast<std::size_t>(static_cast<float>(dims[1] - std::get<4>(attrs)[1] + std::get<0>(attrs)[1]) /
-                                static_cast<float>(std::get<0>(attrs)[1]));
+    /*
+        // output H size
+        const std::size_t oxSize =
+                static_cast<std::size_t>(static_cast<float>(dims[0] -
+       std::get<4>(attrs)[0] + std::get<0>(attrs)[0]) /
+                                    static_cast<float>(std::get<0>(attrs)[0]));
+        // output W size
+        const std::size_t oySize =
+                static_cast<std::size_t>(static_cast<float>(dims[1] -
+       std::get<4>(attrs)[1] + std::get<0>(attrs)[1]) /
+                                    static_cast<float>(std::get<0>(attrs)[1]));
 
-    // TODO: kernel computation
-    // output (Xout, Yout, outCh, batch)
-    // input  (Xin, Yin, inCh, batch)
-    // weight (kernelX, kernelY, inCh, outCh)
-    // does not take Dilation attribute into account
-    for (std::size_t ox = 0; ox < oxSize; ++ox) {
-        for (std::size_t oy = 0; oy < oySize; ++oy) {
-            const std::size_t ix = ox * std::get<0>(attrs)[0];
-            const std::size_t iy = oy * std::get<0>(attrs)[1];
+        // TODO: kernel computation
+        // output (Xout, Yout, outCh, batch)
+        // input  (Xin, Yin, inCh, batch)
+        // weight (kernelX, kernelY, inCh, outCh)
+        // does not take Dilation attribute into account
+        for (std::size_t ox = 0; ox < oxSize; ++ox) {
+            for (std::size_t oy = 0; oy < oySize; ++oy) {
+                const std::size_t ix = ox * std::get<0>(attrs)[0];
+                const std::size_t iy = oy * std::get<0>(attrs)[1];
 
-            for (std::size_t outCh = 0; outCh < std::get<3>(attrs); ++outCh) {
-                const std::size_t oIndex = dims[3] * (outCh + std::get<3>(attrs) * (oy + oySize * ox));
-                B biasVal = (biases != nullptr) ? biases[outCh] : B(0);
-                for (std::size_t batch = 0; batch < dims[3]; ++batch) {
-                    output[oIndex + batch] = biasVal;
-                }
-                for (std::size_t inCh = 0; inCh < dims[2]; ++inCh) {
-                    for (std::size_t sx = 0; sx < std::get<4>(attrs)[0]; ++sx) {
-                        for (std::size_t sy = 0; sy < std::get<4>(attrs)[1]; ++sy) {
-                            const std::size_t wIndex =
-                                    outCh + std::get<3>(attrs) * (inCh + dims[2] * (sy + std::get<4>(attrs)[1] * sx));
-                            std::size_t iIndex = dims[3] * (inCh + dims[2] * ((iy + sy) + dims[1] * (ix + sx)));
-                            for (std::size_t batch = 0; batch < dims[3]; ++batch) {
-                                output[oIndex + batch] += weights[wIndex] * input[iIndex + batch];
+                for (std::size_t outCh = 0; outCh < std::get<3>(attrs); ++outCh) {
+                    const std::size_t oIndex = dims[3] * (outCh + std::get<3>(attrs) * (oy
+       + oySize * ox)); B biasVal = (biases != nullptr) ? biases[outCh] : B(0); for
+       (std::size_t batch = 0; batch < dims[3]; ++batch) { output[oIndex + batch] =
+       biasVal;
+                    }
+                    for (std::size_t inCh = 0; inCh < dims[2]; ++inCh) {
+                        for (std::size_t sx = 0; sx < std::get<4>(attrs)[0]; ++sx) {
+                            for (std::size_t sy = 0; sy < std::get<4>(attrs)[1]; ++sy) {
+                                const std::size_t wIndex =
+                                        outCh + std::get<3>(attrs) * (inCh + dims[2] * (sy
+       + std::get<4>(attrs)[1] * sx)); std::size_t iIndex = dims[3] * (inCh + dims[2] *
+       ((iy + sy) + dims[1] * (ix + sx))); for (std::size_t batch = 0; batch < dims[3];
+       ++batch) { output[oIndex + batch] += weights[wIndex] * input[iIndex + batch];
+                                }
                             }
                         }
                     }
                 }
             }
         }
-    }
-*/
-
+    */
 
     // output H size
-    const std::size_t oxSize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] - std::get<4>(attrs)[0] + std::get<0>(attrs)[0]) /
-                                static_cast<float>(std::get<0>(attrs)[0])));
+    const std::size_t oxSize = static_cast<std::size_t>(std::floor(
+        static_cast<float>(dims[2] - std::get<4>(attrs)[0] + std::get<0>(attrs)[0])
+        / static_cast<float>(std::get<0>(attrs)[0])));
     // output W size
-    const std::size_t oySize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] - std::get<4>(attrs)[1] + std::get<0>(attrs)[1]) /
-                                static_cast<float>(std::get<0>(attrs)[1])));
+    const std::size_t oySize = static_cast<std::size_t>(std::floor(
+        static_cast<float>(dims[3] - std::get<4>(attrs)[1] + std::get<0>(attrs)[1])
+        / static_cast<float>(std::get<0>(attrs)[1])));
 
     // TODO: kernel computation
     // output (batch, outCh, Xout, Yout)
@@ -101,41 +109,123 @@ void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Attrs &attrs, const std::ar
     // weight (outCh, inCh, kernelX, kernelY)
     // does not take Dilation attribute into account
     using signedsize = std::make_signed<std::size_t>::type;
-    for (std::size_t batch = 0; batch < dims[0]; ++batch) {
-        for (std::size_t outCh = 0; outCh < std::get<3>(attrs); ++outCh) {
-            const std::size_t oIndex = (outCh + batch*std::get<3>(attrs)) * oxSize * oySize;
+    for (std::size_t batch = 0; batch < dims[0]; ++batch)
+    {
+        for (std::size_t outCh = 0; outCh < std::get<3>(attrs); ++outCh)
+        {
+            const std::size_t oIndex
+                = (outCh + batch * std::get<3>(attrs)) * oxSize * oySize;
             B biasVal = (biases != nullptr) ? biases[outCh] : B(0);
-            std::fill(output + oIndex, output+(oIndex+oxSize*oySize), biasVal);
-            for (std::size_t inCh = 0; inCh < dims[1]; ++inCh) {
-                const std::size_t iIndex = (inCh + batch*dims[1]) * dims[2] * dims[3];
-                const std::size_t wIndex = (inCh + outCh*dims[1]) * std::get<4>(attrs)[0] * std::get<4>(attrs)[1];
-                for (std::size_t ox = 0; ox < oxSize; ++ox) {
-                    const signedsize difx = static_cast<signedsize>(- ox * std::get<0>(attrs)[0]);
-                    const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0)));
-                    const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<4>(attrs)[0] ? std::get<4>(attrs)[0] : dims[2] + difx);
-                    for (std::size_t oy = 0; oy < oySize; ++oy) {
-                        const signedsize dify = static_cast<signedsize>(- oy * std::get<0>(attrs)[1]);
-                        const std::size_t syMin = static_cast<std::size_t>(std::max(dify, signedsize(0)));
-                        const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<4>(attrs)[1] ? std::get<4>(attrs)[1] : dims[3] + dify);
-                        const std::size_t oIndexFull = oIndex + ox*oySize + oy;
-                        const signedsize ix = static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
-                        const signedsize iy = static_cast<signedsize>(oy * std::get<0>(attrs)[1]);
+            std::fill(output + oIndex, output + (oIndex + oxSize * oySize), biasVal);
+            for (std::size_t inCh = 0; inCh < dims[1]; ++inCh)
+            {
+                const std::size_t iIndex = (inCh + batch * dims[1]) * dims[2] * dims[3];
+                const std::size_t wIndex = (inCh + outCh * dims[1])
+                                           * std::get<4>(attrs)[0]
+                                           * std::get<4>(attrs)[1];
+                for (std::size_t ox = 0; ox < oxSize; ++ox)
+                {
+                    const signedsize difx
+                        = -static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
+                    const std::size_t sxMin
+                        = static_cast<std::size_t>(std::max(difx, signedsize(0)));
+                    const std::size_t sxMax
+                        = (static_cast<signedsize>(dims[2]) + difx) < 0 ?
+                              0 :
+                              ((dims[2] + difx) > std::get<4>(attrs)[0] ?
+                                   std::get<4>(attrs)[0] :
+                                   dims[2] + difx);
+                    for (std::size_t oy = 0; oy < oySize; ++oy)
+                    {
+                        const signedsize dify
+                            = -static_cast<signedsize>(oy * std::get<0>(attrs)[1]);
+                        const std::size_t syMin
+                            = static_cast<std::size_t>(std::max(dify, signedsize(0)));
+                        const std::size_t syMax
+                            = (static_cast<signedsize>(dims[3]) + dify) < 0 ?
+                                  0 :
+                                  ((dims[3] + dify) > std::get<4>(attrs)[1] ?
+                                       std::get<4>(attrs)[1] :
+                                       dims[3] + dify);
+                        const std::size_t oIndexFull = oIndex + ox * oySize + oy;
+                        const signedsize ix
+                            = static_cast<signedsize>(ox * std::get<0>(attrs)[0]);
+                        const signedsize iy
+                            = static_cast<signedsize>(oy * std::get<0>(attrs)[1]);
 
-                        if (sxMin == 0 && syMin == 0 && sxMax == 3 && syMax == 3) {
-                            output[oIndexFull] += (weights[wIndex + 0*std::get<4>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+0)] +
-                                                   weights[wIndex + 0*std::get<4>(attrs)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+1)] +
-                                                   weights[wIndex + 0*std::get<4>(attrs)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+2)] +
-                                                   weights[wIndex + 1*std::get<4>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+0)] +
-                                                   weights[wIndex + 1*std::get<4>(attrs)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+1)] +
-                                                   weights[wIndex + 1*std::get<4>(attrs)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+2)] +
-                                                   weights[wIndex + 2*std::get<4>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+0)] +
-                                                   weights[wIndex + 2*std::get<4>(attrs)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+1)] +
-                                                   weights[wIndex + 2*std::get<4>(attrs)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+2)]);
-                        } else {
-                            for (std::size_t sx = sxMin; sx < sxMax; ++sx) {
-                                for (std::size_t sy = syMin; sy < syMax; ++sy) {
-                                    output[oIndexFull] += weights[wIndex + sx*std::get<4>(attrs)[1] + sy] *
-                                                            input[iIndex + static_cast<std::size_t>(ix+static_cast<signedsize>(sx))*dims[3] + static_cast<std::size_t>(iy+static_cast<signedsize>(sy))];
+                        if (sxMin == 0 && syMin == 0 && sxMax == 3 && syMax == 3)
+                        {
+                            output[oIndexFull]
+                                += (weights[wIndex + 0 * std::get<4>(attrs)[1] + 0]
+                                        * input
+                                            [iIndex
+                                             + static_cast<std::size_t>(ix + 0) * dims[3]
+                                             + static_cast<std::size_t>(iy + 0)]
+                                    + weights[wIndex + 0 * std::get<4>(attrs)[1] + 1]
+                                          * input
+                                              [iIndex
+                                               + static_cast<std::size_t>(ix + 0)
+                                                     * dims[3]
+                                               + static_cast<std::size_t>(iy + 1)]
+                                    + weights[wIndex + 0 * std::get<4>(attrs)[1] + 2]
+                                          * input
+                                              [iIndex
+                                               + static_cast<std::size_t>(ix + 0)
+                                                     * dims[3]
+                                               + static_cast<std::size_t>(iy + 2)]
+                                    + weights[wIndex + 1 * std::get<4>(attrs)[1] + 0]
+                                          * input
+                                              [iIndex
+                                               + static_cast<std::size_t>(ix + 1)
+                                                     * dims[3]
+                                               + static_cast<std::size_t>(iy + 0)]
+                                    + weights[wIndex + 1 * std::get<4>(attrs)[1] + 1]
+                                          * input
+                                              [iIndex
+                                               + static_cast<std::size_t>(ix + 1)
+                                                     * dims[3]
+                                               + static_cast<std::size_t>(iy + 1)]
+                                    + weights[wIndex + 1 * std::get<4>(attrs)[1] + 2]
+                                          * input
+                                              [iIndex
+                                               + static_cast<std::size_t>(ix + 1)
+                                                     * dims[3]
+                                               + static_cast<std::size_t>(iy + 2)]
+                                    + weights[wIndex + 2 * std::get<4>(attrs)[1] + 0]
+                                          * input
+                                              [iIndex
+                                               + static_cast<std::size_t>(ix + 2)
+                                                     * dims[3]
+                                               + static_cast<std::size_t>(iy + 0)]
+                                    + weights[wIndex + 2 * std::get<4>(attrs)[1] + 1]
+                                          * input
+                                              [iIndex
+                                               + static_cast<std::size_t>(ix + 2)
+                                                     * dims[3]
+                                               + static_cast<std::size_t>(iy + 1)]
+                                    + weights[wIndex + 2 * std::get<4>(attrs)[1] + 2]
+                                          * input
+                                              [iIndex
+                                               + static_cast<std::size_t>(ix + 2)
+                                                     * dims[3]
+                                               + static_cast<std::size_t>(iy + 2)]);
+                        }
+                        else
+                        {
+                            for (std::size_t sx = sxMin; sx < sxMax; ++sx)
+                            {
+                                for (std::size_t sy = syMin; sy < syMax; ++sy)
+                                {
+                                    output[oIndexFull]
+                                        += weights
+                                               [wIndex + sx * std::get<4>(attrs)[1] + sy]
+                                           * input
+                                               [iIndex
+                                                + static_cast<std::size_t>(
+                                                      ix + static_cast<signedsize>(sx))
+                                                      * dims[3]
+                                                + static_cast<std::size_t>(
+                                                    iy + static_cast<signedsize>(sy))];
                                 }
                             }
                         }
@@ -146,17 +236,18 @@ void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Attrs &attrs, const std::ar
     }
 }
 
-namespace {
+namespace
+{
 static Registrar<ConvImpl2DForward_cpu> registrarConvImpl2DForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::ConvImpl2D_cpu_forward_kernel<float, float, float, float>);
+    {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
+    Aidge::ConvImpl2D_cpu_forward_kernel<float, float, float, float>);
 static Registrar<ConvImpl2DForward_cpu> registrarConvImpl2DForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
-        Aidge::ConvImpl2D_cpu_forward_kernel<int, int, int, int>);
+    {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
+    Aidge::ConvImpl2D_cpu_forward_kernel<int, int, int, int>);
 static Registrar<ConvImpl2DForward_cpu> registrarConvImpl2DForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
-        Aidge::ConvImpl2D_cpu_forward_kernel<double, double, double, double>);
-}  // namespace
-}  // namespace Aidge
+    {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
+    Aidge::ConvImpl2D_cpu_forward_kernel<double, double, double, double>);
+} // namespace
+} // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_CONVIMPL_FORWARD_KERNEL_H_ */
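
Review note: both convolution kernels address NCHW tensors through flattened offsets: `oIndex`/`iIndex` select a (batch, channel) plane and `oIndexFull` adds the row-major spatial offset within it; the `sxMax == 3 && syMax == 3` branch is a hand-unrolled fast path for a fully in-bounds 3x3 window. The layout arithmetic, as a standalone sketch:

```cpp
#include <cstddef>

// Row-major NCHW offset matching the kernels' index arithmetic:
// plane = (ch + batch * nbChannels) * h * w, element = plane + x * w + y.
std::size_t nchwOffset(std::size_t batch, std::size_t ch,
                       std::size_t x, std::size_t y,
                       std::size_t nbChannels, std::size_t h, std::size_t w) {
    return (ch + batch * nbChannels) * h * w + x * w + y;
}
```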
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
index caa99e8678a72c7fd3c77fe8b7579ea739ac64c7..68d75267e3a1a84dcb6f4ed02d9636ab5648c3e3 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
@@ -15,42 +15,45 @@
 #include "aidge/utils/Registrar.hpp"
 
 #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp"
-#include "aidge/utils/Types.h"
 #include "aidge/data/Data.hpp"
+#include "aidge/utils/Types.h"
 #include <array>
-#include <tuple>
 #include <cmath>
+#include <tuple>
 
-namespace Aidge {
+namespace Aidge
+{
 /**
  * @brief Forward kernel for 2D MaxPooling on CPU backend.
  * @tparam I Input data type.
  * @tparam O Output data type.
- * @param attrs tuple of Attributes from the Operator
+ * @param attrs tuple of Attributes from the OperatorTensor
  * @param dims Array of input dimensions.
  * @param input_ const input Tensor.
  * @param output_ Output Tensor.
  */
-template <class I, class O>
-void MaxPoolingImpl2D_cpu_forward_kernel(const MaxPooling_Op<2>::Attrs &attrs,
-                                             const std::array<DimSize_t, 4> &dims,
-                                             const void *input_,
-                                             void *output_) {
+template<class I, class O>
+void MaxPoolingImpl2D_cpu_forward_kernel(
+    const MaxPooling_Op<2>::Attrs &attrs,
+    const std::array<DimSize_t, 4> &dims,
+    const void *input_,
+    void *output_)
+{
     // FIXME: missing convolution parameters as arguments
     const I *input = static_cast<const I *>(input_);
     O *output = static_cast<O *>(output_);
 
-    std::array<DimSize_t, 2> strideDims  = std::get<0>(attrs);
-    std::array<DimSize_t, 2> kernelDims  = std::get<1>(attrs);
+    std::array<DimSize_t, 2> strideDims = std::get<0>(attrs);
+    std::array<DimSize_t, 2> kernelDims = std::get<1>(attrs);
 
     // output H size
-    const std::size_t oxSize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] - kernelDims[0] + strideDims[0]) /
-                                static_cast<float>(strideDims[0])));
+    const std::size_t oxSize = static_cast<std::size_t>(std::floor(
+        static_cast<float>(dims[2] - kernelDims[0] + strideDims[0])
+        / static_cast<float>(strideDims[0])));
     // output W size
-    const std::size_t oySize =
-            static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] - kernelDims[1] + strideDims[1]) /
-                                static_cast<float>(strideDims[1])));
+    const std::size_t oySize = static_cast<std::size_t>(std::floor(
+        static_cast<float>(dims[3] - kernelDims[1] + strideDims[1])
+        / static_cast<float>(strideDims[1])));
 
     // TODO: kernel computation
     // output (batch, outCh, Xout, Yout)
@@ -58,33 +61,50 @@ void MaxPoolingImpl2D_cpu_forward_kernel(const MaxPooling_Op<2>::Attrs &attrs,
     // weight (outCh, ch, kernelX, kernelY)
     // does not take Dilation parameter into account
     using signedsize = std::make_signed<std::size_t>::type;
-    for (std::size_t batch = 0; batch < dims[0]; ++batch) {
-        for (std::size_t ch = 0; ch < dims[1]; ++ch) {
-            const std::size_t oIndex = (ch + batch*dims[1]) * oxSize * oySize;
-            const std::size_t iIndex = (ch + batch*dims[1]) * dims[2] * dims[3];
-            for (std::size_t ox = 0; ox < oxSize; ++ox) {
-                const signedsize difx = static_cast<signedsize>(- ox * strideDims[0]);
-                const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0)));
-                const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > kernelDims[0] ? kernelDims[0] : dims[2] + difx);
-                for (std::size_t oy = 0; oy < oySize; ++oy) {
-                    const signedsize dify = static_cast<signedsize>(- oy * strideDims[1]);
-                    const std::size_t syMin = static_cast<std::size_t>(std::max(dify, signedsize(0)));
-                    const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > kernelDims[1] ? kernelDims[1] : dims[3] + dify);
-                    const std::size_t oIndexFull = oIndex + ox*oySize + oy;
+    for (std::size_t batch = 0; batch < dims[0]; ++batch)
+    {
+        for (std::size_t ch = 0; ch < dims[1]; ++ch)
+        {
+            const std::size_t oIndex = (ch + batch * dims[1]) * oxSize * oySize;
+            const std::size_t iIndex = (ch + batch * dims[1]) * dims[2] * dims[3];
+            for (std::size_t ox = 0; ox < oxSize; ++ox)
+            {
+                const signedsize difx = -static_cast<signedsize>(ox * strideDims[0]);
+                const std::size_t sxMin
+                    = static_cast<std::size_t>(std::max(difx, signedsize(0)));
+                const std::size_t sxMax
+                    = (static_cast<signedsize>(dims[2]) + difx) < 0 ?
+                          0 :
+                          ((dims[2] + difx) > kernelDims[0] ? kernelDims[0] :
+                                                              dims[2] + difx);
+                for (std::size_t oy = 0; oy < oySize; ++oy)
+                {
+                    const signedsize dify = -static_cast<signedsize>(oy * strideDims[1]);
+                    const std::size_t syMin
+                        = static_cast<std::size_t>(std::max(dify, signedsize(0)));
+                    const std::size_t syMax
+                        = (static_cast<signedsize>(dims[3]) + dify) < 0 ?
+                              0 :
+                              ((dims[3] + dify) > kernelDims[1] ? kernelDims[1] :
+                                                                  dims[3] + dify);
+                    const std::size_t oIndexFull = oIndex + ox * oySize + oy;
                     const std::size_t ix = ox * strideDims[0];
                     const std::size_t iy = oy * strideDims[1];
 
                     I poolValue(0.0);
                     bool valid = false;
 
-                    for (unsigned int channel = 0; channel < dims[1];
-                            ++channel){
-                        for (unsigned int sy = syMin; sy < syMax; ++sy) {
+                    for (unsigned int channel = 0; channel < dims[1]; ++channel)
+                    {
+                        for (unsigned int sy = syMin; sy < syMax; ++sy)
+                        {
                             for (unsigned int sx = sxMin; sx < sxMax; ++sx)
                             {
-                                const I value = input[iIndex + (ix+sx)*dims[3] + (iy+sy)];
+                                const I value
+                                    = input[iIndex + (ix + sx) * dims[3] + (iy + sy)];
 
-                                if (!valid || value > poolValue) {
+                                if (!valid || value > poolValue)
+                                {
                                     poolValue = value;
                                     valid = true;
                                 }
@@ -98,7 +118,7 @@ void MaxPoolingImpl2D_cpu_forward_kernel(const MaxPooling_Op<2>::Attrs &attrs,
     }
 }
 
-//N2D2 version
+// N2D2 version
 /*
 template <class T>
 void N2D2::PoolCell_Frame_Kernels::forwardMax(const T* alpha,
@@ -199,17 +219,20 @@ void N2D2::PoolCell_Frame_Kernels::forwardMax(const T* alpha,
 
 */
 
-namespace {
-static Registrar<MaxPoolingImpl2DForward_cpu> registrarMaxPoolingImpl2DForward_cpu_Float32(
+namespace
+{
+static Registrar<MaxPoolingImpl2DForward_cpu>
+    registrarMaxPoolingImpl2DForward_cpu_Float32(
         std::tuple<DataType, DataType>({DataType::Float32, DataType::Float32}),
         Aidge::MaxPoolingImpl2D_cpu_forward_kernel<float, float>);
 static Registrar<MaxPoolingImpl2DForward_cpu> registrarMaxPoolingImpl2DForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32},
-        Aidge::MaxPoolingImpl2D_cpu_forward_kernel<int, int>);
-static Registrar<MaxPoolingImpl2DForward_cpu> registrarMaxPoolingImpl2DForward_cpu_Float64(
+    {DataType::Int32, DataType::Int32},
+    Aidge::MaxPoolingImpl2D_cpu_forward_kernel<int, int>);
+static Registrar<MaxPoolingImpl2DForward_cpu>
+    registrarMaxPoolingImpl2DForward_cpu_Float64(
         {DataType::Float64, DataType::Float64},
         Aidge::MaxPoolingImpl2D_cpu_forward_kernel<double, double>);
-}  // namespace
-}  // namespace Aidge
+} // namespace
+} // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_MaxPOOLINGIMPL_FORWARD_KERNEL_H_ */
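
Review note: the max reduction seeds with a `valid` flag instead of a type-dependent lowest value, so the same template also works for integer instantiations. Reduced to its essence (standalone sketch, not from the patch):

```cpp
#include <cstddef>

// Max over a window without a -infinity sentinel: the `valid` flag marks
// whether any element has been seen yet, keeping the kernel type-agnostic.
template <class I>
I windowMax(const I* data, std::size_t count) {
    I best(0);
    bool valid = false;
    for (std::size_t i = 0; i < count; ++i) {
        if (!valid || data[i] > best) {
            best = data[i];
            valid = true;
        }
    }
    return best; // I(0) if the window is empty
}
```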
diff --git a/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
index 8b793257d2f3f126793316d463fe2542512da939..88c287c1525d96c4566e4e70763beea6b1c0226c 100644
--- a/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
@@ -16,23 +16,27 @@
 
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
 #include "aidge/utils/Types.h"
-#include <cmath>
-#include <array>
 #include <algorithm>
+#include <array>
+#include <cmath>
 
-namespace Aidge {
+namespace Aidge
+{
 /**
  * @brief Forward kernel for 2D Padding on CPU backend.
  * @tparam I Input data type.
  * @tparam O Output data type.
- * @param attrs tuple of Parameters from the Operator
+ * @param attrs tuple of Parameters from the OperatorTensor
  * @param dims Array of input dimensions.
  * @param input_ const input Tensor.
  * @param output_ Output Tensor.
  */
-template <class I, class O>
-void PadImpl2D_cpu_forward_kernel(const Pad_Op<2>::Attrs &attrs, const std::array<DimSize_t, 4> &dims,
-                                       const void *input_, void *output_)
+template<class I, class O>
+void PadImpl2D_cpu_forward_kernel(
+    const Pad_Op<2>::Attrs &attrs,
+    const std::array<DimSize_t, 4> &dims,
+    const void *input_,
+    void *output_)
 {
     const I *input = static_cast<const I *>(input_);
     O *output = static_cast<O *>(output_);
@@ -40,34 +44,61 @@ void PadImpl2D_cpu_forward_kernel(const Pad_Op<2>::Attrs &attrs, const std::arra
     const std::size_t oySize = dims[2] + std::get<0>(attrs)[0] + std::get<0>(attrs)[1];
     const std::size_t oxSize = dims[3] + std::get<0>(attrs)[2] + std::get<0>(attrs)[3];
 
-    for (std::size_t batch = 0; batch < dims[0]; ++batch) {
-        for (std::size_t ch = 0; ch < dims[1]; ++ch) {
-            const std::size_t iIndex = (ch + batch*dims[1]) * dims[2] * dims[3];
-            const std::size_t oIndex = (ch + batch*dims[1]) * oxSize * oySize;
+    for (std::size_t batch = 0; batch < dims[0]; ++batch)
+    {
+        for (std::size_t ch = 0; ch < dims[1]; ++ch)
+        {
+            const std::size_t iIndex = (ch + batch * dims[1]) * dims[2] * dims[3];
+            const std::size_t oIndex = (ch + batch * dims[1]) * oxSize * oySize;
 
-            for (unsigned int oy = 0; oy < oySize; ++oy) {
-                for (unsigned int ox = 0; ox < oxSize; ++ox) {
-                    const std::size_t oIndexFull = oIndex + ox*oySize + oy;
+            for (unsigned int oy = 0; oy < oySize; ++oy)
+            {
+                for (unsigned int ox = 0; ox < oxSize; ++ox)
+                {
+                    const std::size_t oIndexFull = oIndex + ox * oySize + oy;
 
                     O outputValue = std::get<2>(attrs);
 
-                    if (std::get<1>(attrs) == PadBorderType::Constant) {
-                        int ix = static_cast<int>(ox) - static_cast<int>(std::get<0>(attrs)[3]);
-                        int iy = static_cast<int>(oy) - static_cast<int>(std::get<0>(attrs)[1]);
-
-                        if (ix >= 0  && ix < static_cast<int>(dims[3]) && iy >= 0  && iy < static_cast<int>(dims[2])) {
-                            outputValue = input[iIndex + static_cast<std::size_t>(ix)*dims[2] + static_cast<std::size_t>(iy)];
+                    if (std::get<1>(attrs) == PadBorderType::Constant)
+                    {
+                        int ix = static_cast<int>(ox)
+                                 - static_cast<int>(std::get<0>(attrs)[3]);
+                        int iy = static_cast<int>(oy)
+                                 - static_cast<int>(std::get<0>(attrs)[1]);
+
+                        if (ix >= 0 && ix < static_cast<int>(dims[3]) && iy >= 0
+                            && iy < static_cast<int>(dims[2]))
+                        {
+                            outputValue = input
+                                [iIndex + static_cast<std::size_t>(ix) * dims[2]
+                                 + static_cast<std::size_t>(iy)];
                         }
                     }
-                    else if (std::get<1>(attrs) == PadBorderType::Edge) {
-                        int ix = std::max(0, std::min(static_cast<int>(dims[3]) - 1, static_cast<int>(ox) - static_cast<int>(std::get<0>(attrs)[3])));
-                        int iy = std::max(0, std::min(static_cast<int>(dims[2]) - 1, static_cast<int>(oy) - static_cast<int>(std::get<0>(attrs)[1])));
-
-                        outputValue = input[iIndex + static_cast<std::size_t>(ix)*dims[2] + static_cast<std::size_t>(iy)];
+                    else if (std::get<1>(attrs) == PadBorderType::Edge)
+                    {
+                        int ix = std::max(
+                            0,
+                            std::min(
+                                static_cast<int>(dims[3]) - 1,
+                                static_cast<int>(ox)
+                                    - static_cast<int>(std::get<0>(attrs)[3])));
+                        int iy = std::max(
+                            0,
+                            std::min(
+                                static_cast<int>(dims[2]) - 1,
+                                static_cast<int>(oy)
+                                    - static_cast<int>(std::get<0>(attrs)[1])));
+
+                        outputValue = input
+                            [iIndex + static_cast<std::size_t>(ix) * dims[2]
+                             + static_cast<std::size_t>(iy)];
                     }
-                    else if (std::get<1>(attrs) == PadBorderType::Reflect) {
-                        int ix = static_cast<int>(ox) - static_cast<int>(std::get<0>(attrs)[3]);
-                        int iy = static_cast<int>(oy) - static_cast<int>(std::get<0>(attrs)[1]);
+                    else if (std::get<1>(attrs) == PadBorderType::Reflect)
+                    {
+                        int ix = static_cast<int>(ox)
+                                 - static_cast<int>(std::get<0>(attrs)[3]);
+                        int iy = static_cast<int>(oy)
+                                 - static_cast<int>(std::get<0>(attrs)[1]);
 
                         if (ix < 0)
                             ix = 0 - ix;
@@ -78,13 +109,22 @@ void PadImpl2D_cpu_forward_kernel(const Pad_Op<2>::Attrs &attrs, const std::arra
                         if (iy >= static_cast<int>(dims[2]))
                             iy = static_cast<int>(dims[2]) - iy;
 
-                        outputValue = input[iIndex + static_cast<std::size_t>(ix)*dims[2] + static_cast<std::size_t>(iy)];
+                        outputValue = input
+                            [iIndex + static_cast<std::size_t>(ix) * dims[2]
+                             + static_cast<std::size_t>(iy)];
                     }
-                    else if (std::get<1>(attrs) == PadBorderType::Wrap) {
-                        int ix = (static_cast<int>(dims[3]) + static_cast<int>(ox) - static_cast<int>(std::get<0>(attrs)[3])) % static_cast<int>(dims[3]);
-                        int iy = (static_cast<int>(dims[2]) + static_cast<int>(oy) - static_cast<int>(std::get<0>(attrs)[1])) % static_cast<int>(dims[2]);
-
-                        outputValue = input[iIndex + static_cast<std::size_t>(ix)*dims[2] + static_cast<std::size_t>(iy)];
+                    else if (std::get<1>(attrs) == PadBorderType::Wrap)
+                    {
+                        int ix = (static_cast<int>(dims[3]) + static_cast<int>(ox)
+                                  - static_cast<int>(std::get<0>(attrs)[3]))
+                                 % static_cast<int>(dims[3]);
+                        int iy = (static_cast<int>(dims[2]) + static_cast<int>(oy)
+                                  - static_cast<int>(std::get<0>(attrs)[1]))
+                                 % static_cast<int>(dims[2]);
+
+                        outputValue = input
+                            [iIndex + static_cast<std::size_t>(ix) * dims[2]
+                             + static_cast<std::size_t>(iy)];
                     }
 
                     output[oIndexFull] = outputValue;
@@ -94,17 +134,17 @@ void PadImpl2D_cpu_forward_kernel(const Pad_Op<2>::Attrs &attrs, const std::arra
     }
 }
 
-namespace {
+namespace
+{
 static Registrar<PadImpl2DForward_cpu> registrarPadImpl2DForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32},
-        Aidge::PadImpl2D_cpu_forward_kernel<float, float>);
+    {DataType::Float32, DataType::Float32},
+    Aidge::PadImpl2D_cpu_forward_kernel<float, float>);
 static Registrar<PadImpl2DForward_cpu> registrarPadImpl2DForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32},
-        Aidge::PadImpl2D_cpu_forward_kernel<int, int>);
+    {DataType::Int32, DataType::Int32}, Aidge::PadImpl2D_cpu_forward_kernel<int, int>);
 static Registrar<PadImpl2DForward_cpu> registrarPadImpl2DForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64},
-        Aidge::PadImpl2D_cpu_forward_kernel<double, double>);
-}  // namespace
-}  // namespace Aidge
+    {DataType::Float64, DataType::Float64},
+    Aidge::PadImpl2D_cpu_forward_kernel<double, double>);
+} // namespace
+} // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_PADIMPL_FORWARD_KERNEL_H_ */
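The four border modes in PadImpl2D_cpu_forward_kernel differ only in how an out-of-range output coordinate is remapped into the input. A self-contained sketch of that per-axis arithmetic (illustrative only; the local Border enum stands in for PadBorderType):

    #include <algorithm>

    enum class Border { Constant, Edge, Reflect, Wrap }; // stand-in for PadBorderType

    // Map output coordinate `o` back to an input coordinate along one axis,
    // given the leading padding `pad` and the input extent `size`. Returns -1
    // in Constant mode when `o` falls in the padded region, where the kernel
    // keeps the constant fill value instead of reading the input.
    int remapCoord(int o, int pad, int size, Border mode)
    {
        int i = o - pad;
        switch (mode)
        {
        case Border::Constant:
            return (i >= 0 && i < size) ? i : -1;
        case Border::Edge: // clamp to the nearest valid coordinate
            return std::max(0, std::min(size - 1, i));
        case Border::Reflect: // single fold-back, verbatim from the kernel
            if (i < 0)
                i = -i;
            if (i >= size)
                i = size - i;
            return i;
        case Border::Wrap: // periodic repetition of the input
            return (size + i) % size;
        }
        return -1;
    }

As in the kernel itself, Reflect folds back only once and Wrap adds a single extent before taking the modulo, so both mappings are only meaningful while the padding does not exceed the input extent.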
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
index 01f2e1489f8edc573f3dc78b450002144e7d57f4..fde4febf3d6c101e00d9fd3146e28ebbe88a47ac 100644
--- a/unit_tests/data/Test_TensorImpl.cpp
+++ b/unit_tests/data/Test_TensorImpl.cpp
@@ -101,7 +101,7 @@ TEST_CASE("Tensor creation")
     {
         Tensor Rainbow;
         Rainbow.resize({2, 4, 5});
-        Rainbow.setDatatype(DataType::UInt16);
+        Rainbow.setDataType(DataType::UInt16);
         Rainbow.setBackend("cpu");
         REQUIRE(MakeRainbow<std::uint16_t>(Rainbow));
         bool res = true;
@@ -118,7 +118,7 @@ TEST_CASE("Tensor copy")
     {
         Tensor Rainbow;
         Rainbow.resize({2, 4, 5});
-        Rainbow.setDatatype(DataType::UInt16);
+        Rainbow.setDataType(DataType::UInt16);
         Rainbow.setBackend("cpu");
         MakeRainbow<std::uint16_t>(Rainbow);
         Tensor clone(Rainbow);
@@ -189,7 +189,7 @@ TEST_CASE("Tensor access")
         {
             Tensor Rainbow;
             Rainbow.resize({2, 4, 5});
-            Rainbow.setDatatype(DataType::UInt16);
+            Rainbow.setDataType(DataType::UInt16);
             Rainbow.setBackend("cpu");
             MakeRainbow<std::uint16_t>(Rainbow);
             Tensor extract(Rainbow, {0, 1, 1}, {2, 2, 3}, false);
@@ -221,7 +221,7 @@ TEST_CASE("Tensor extract")
     {
         Tensor Rainbow;
         Rainbow.resize({2, 4, 5});
-        Rainbow.setDatatype(DataType::UInt16);
+        Rainbow.setDataType(DataType::UInt16);
         Rainbow.setBackend("cpu");
         MakeRainbow<std::uint16_t>(Rainbow);
         Tensor view(Rainbow, {0, 1, 1}, {2, 2, 3});
@@ -242,7 +242,7 @@ TEST_CASE("Tensor extract")
     {
         Tensor Rainbow;
         Rainbow.resize({2, 4, 5});
-        Rainbow.setDatatype(DataType::UInt16);
+        Rainbow.setDataType(DataType::UInt16);
         Rainbow.setBackend("cpu");
         MakeRainbow<std::uint16_t>(Rainbow);
         Tensor extract(Rainbow, {0, 1, 1}, {2, 2, 3}, false);
diff --git a/unit_tests/operator/Test_AddImpl.cpp b/unit_tests/operator/Test_AddImpl.cpp
index 18d98d169ddcb74310c5153d7c2c95103c395bb7..8f859df7490f665fafa9dcac603605b8e116898f 100644
--- a/unit_tests/operator/Test_AddImpl.cpp
+++ b/unit_tests/operator/Test_AddImpl.cpp
@@ -18,93 +18,89 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Add(forward)") {
-    std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
-        {                                       //
-            {                                   //
-                {{20, 47},{21, 48},{22, 49}},   //
-                {{23, 50},{24, 51},{25, 52}},   //
-                {{26, 53},{27, 54},{28, 55}}    //
-            },                                  //
-            {                                   //
-                {{29, 56},{30, 57},{31, 58}},   //
-                {{32, 59},{33, 60},{34, 61}},   //
-                {{35, 62},{36, 63},{37, 64}}    //
-            },                                  //
-            {                                   //
-                {{38, 65},{39, 66},{40, 67}},   //
-                {{41, 68},{42, 69},{43, 70}},   //
-                {{44, 71},{45, 72},{46, 73}}    //
-            }                                   //
-        }                                       //
-    });                                         //
+TEST_CASE("[cpu/operator] Add(forward)")
+{
+    std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int, 3, 3, 3, 2>{
+        {
+            //
+            {
+                //
+                {{20, 47}, {21, 48}, {22, 49}}, //
+                {{23, 50}, {24, 51}, {25, 52}}, //
+                {{26, 53}, {27, 54}, {28, 55}} //
+            }, //
+            {
+                //
+                {{29, 56}, {30, 57}, {31, 58}}, //
+                {{32, 59}, {33, 60}, {34, 61}}, //
+                {{35, 62}, {36, 63}, {37, 64}} //
+            }, //
+            {
+                //
+                {{38, 65}, {39, 66}, {40, 67}}, //
+                {{41, 68}, {42, 69}, {43, 70}}, //
+                {{44, 71}, {45, 72}, {46, 73}} //
+            } //
+        } //
+    }); //
 
-    SECTION("One input") {
+    SECTION("One input")
+    {
         std::shared_ptr<Node> myAdd = Add<1>();
         myAdd->getOperator()->setBackend("cpu");
-        myAdd->getOperator()->setDatatype(DataType::Int32);
+        myAdd->getOperator()->setDataType(DataType::Int32);
         myAdd->getOperator()->associateInput(0, input1);
         myAdd->getOperator()->computeOutputDims();
         myAdd->forward();
 
-        REQUIRE(*std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0)) == *input1);
+        REQUIRE(
+            *std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0))
+            == *input1);
     }
 
-    SECTION("Two inputs") {
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
-            {
-                {
-                    {{40,  94},{42,  96},{44,  98}},
-                    {{46, 100},{48, 102},{50, 104}},
-                    {{52, 106},{54, 108},{56, 110}}
-                },
-                {
-                    {{58, 112},{60, 114},{62, 116}},
-                    {{64, 118},{66, 120},{68, 122}},
-                    {{70, 124},{72, 126},{74, 128}}
-                },
-                {
-                    {{76, 130},{78, 132},{80, 134}},
-                    {{82, 136},{84, 138},{86, 140}},
-                    {{88, 142},{90, 144},{92, 146}}
-                }
-            }
-        });
+    SECTION("Two inputs")
+    {
+        std::shared_ptr<Tensor> expectedOutput
+            = std::make_shared<Tensor>(Array4D<int, 3, 3, 3, 2>{
+                {{{{40, 94}, {42, 96}, {44, 98}},
+                  {{46, 100}, {48, 102}, {50, 104}},
+                  {{52, 106}, {54, 108}, {56, 110}}},
+                 {{{58, 112}, {60, 114}, {62, 116}},
+                  {{64, 118}, {66, 120}, {68, 122}},
+                  {{70, 124}, {72, 126}, {74, 128}}},
+                 {{{76, 130}, {78, 132}, {80, 134}},
+                  {{82, 136}, {84, 138}, {86, 140}},
+                  {{88, 142}, {90, 144}, {92, 146}}}}});
 
         std::shared_ptr<Node> myAdd = Add<2>();
-        myAdd->getOperator()->setDatatype(DataType::Int32);
+        myAdd->getOperator()->setDataType(DataType::Int32);
         myAdd->getOperator()->setBackend("cpu");
         myAdd->getOperator()->associateInput(0, input1);
         myAdd->getOperator()->associateInput(1, input1);
         myAdd->getOperator()->computeOutputDims();
         myAdd->forward();
 
-        REQUIRE(*std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0)) == *expectedOutput);
+        REQUIRE(
+            *std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0))
+            == *expectedOutput);
     }
 
-    SECTION("Three inputs") {
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
-            {
-                {
-                    {{ 60, 141},{ 63, 144},{ 66, 147}},
-                    {{ 69, 150},{ 72, 153},{ 75, 156}},
-                    {{ 78, 159},{ 81, 162},{ 84, 165}}
-                },
-                {
-                    {{ 87, 168},{ 90, 171},{ 93, 174}},
-                    {{ 96, 177},{ 99, 180},{102, 183}},
-                    {{105, 186},{108, 189},{111, 192}}
-                },
-                {
-                    {{114, 195},{117, 198},{120, 201}},
-                    {{123, 204},{126, 207},{129, 210}},
-                    {{132, 213},{135, 216},{138, 219}}
-                }
-            }
-        });
+    SECTION("Three inputs")
+    {
+        std::shared_ptr<Tensor> expectedOutput
+            = std::make_shared<Tensor>(Array4D<int, 3, 3, 3, 2>{
+                {{{{60, 141}, {63, 144}, {66, 147}},
+                  {{69, 150}, {72, 153}, {75, 156}},
+                  {{78, 159}, {81, 162}, {84, 165}}},
+                 {{{87, 168}, {90, 171}, {93, 174}},
+                  {{96, 177}, {99, 180}, {102, 183}},
+                  {{105, 186}, {108, 189}, {111, 192}}},
+                 {{{114, 195}, {117, 198}, {120, 201}},
+                  {{123, 204}, {126, 207}, {129, 210}},
+                  {{132, 213}, {135, 216}, {138, 219}}}}});
 
         std::shared_ptr<Node> myAdd = Add<3>();
-        myAdd->getOperator()->setDatatype(DataType::Int32);
+        myAdd->getOperator()->setDataType(DataType::Int32);
         myAdd->getOperator()->setBackend("cpu");
         myAdd->getOperator()->associateInput(0, input1);
         myAdd->getOperator()->associateInput(1, input1);
@@ -112,6 +108,8 @@ TEST_CASE("[cpu/operator] Add(forward)") {
         myAdd->getOperator()->computeOutputDims();
         myAdd->forward();
 
-        REQUIRE(*std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0)) == *expectedOutput);
+        REQUIRE(
+            *std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0))
+            == *expectedOutput);
     }
 }
\ No newline at end of file
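Every test in these files follows the same life cycle: build the node, pick the data type and backend, bind inputs, propagate shapes, run, and compare against a reference tensor. Condensed into a skeleton, with MyOp, input, and expected as placeholders for any of the operator factories and tensors used here (e.g. Add<2>() or Div()):

    std::shared_ptr<Node> node = MyOp();               // hypothetical factory placeholder
    node->getOperator()->setDataType(DataType::Int32); // must match the tensors
    node->getOperator()->setBackend("cpu");            // select this backend's kernels
    node->getOperator()->associateInput(0, input);     // repeat per input slot
    node->getOperator()->computeOutputDims();          // shapes before forward()
    node->forward();                                   // runs the registered kernel

    REQUIRE(
        *std::static_pointer_cast<Tensor>(node->getOperator()->getOutput(0))
        == *expected);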
diff --git a/unit_tests/operator/Test_AvgPoolingImpl.cpp b/unit_tests/operator/Test_AvgPoolingImpl.cpp
index 9c659d8a7fda243f698a082b0c14e2bb67603059..17a3f37d4e6f16ae6bc0bea27394bc8c614186a7 100644
--- a/unit_tests/operator/Test_AvgPoolingImpl.cpp
+++ b/unit_tests/operator/Test_AvgPoolingImpl.cpp
@@ -49,7 +49,7 @@ TEST_CASE("[cpu/operator] AvgPooling(forward)")
     SECTION("Stride")
     {
         std::shared_ptr<Node> myAvgPool = AvgPooling({2, 2}, "mycdw", {2, 2});
-        myAvgPool->getOperator()->setDatatype(DataType::Float32);
+        myAvgPool->getOperator()->setDataType(DataType::Float32);
         myAvgPool->getOperator()->setBackend("cpu");
 
         std::shared_ptr<Tensor> myOutput
@@ -71,7 +71,7 @@ TEST_CASE("[cpu/operator] AvgPooling(forward)")
                                           {0.5987, 0.1560, 0.1560},
                                           {0.0581, 0.8662, 0.6011}}}}});
         std::shared_ptr<Node> myAvgPool = AvgPooling({3, 3}, "mycdw", {3, 3});
-        myAvgPool->getOperator()->setDatatype(DataType::Float32);
+        myAvgPool->getOperator()->setDataType(DataType::Float32);
         myAvgPool->getOperator()->setBackend("cpu");
 
         Tensor myOutput = Array4D<float, 1, 1, 1, 1>{
diff --git a/unit_tests/operator/Test_BatchNormImpl.cpp b/unit_tests/operator/Test_BatchNormImpl.cpp
index 5590ffa57656b10cbb1c11d61b69fa715e7814ef..2a67e6a864c8f06ddcefc1b6fc9e4c5c0fd946e6 100644
--- a/unit_tests/operator/Test_BatchNormImpl.cpp
+++ b/unit_tests/operator/Test_BatchNormImpl.cpp
@@ -22,7 +22,7 @@ using namespace Aidge;
 TEST_CASE("[cpu/operator] BatchNorm(forward)")
 {
     std::shared_ptr<Node> myBatchNorm = BatchNorm<2>(0.00001F, 0.1F, "mybatchnorm");
-    myBatchNorm->getOperator()->setDatatype(DataType::Float32);
+    myBatchNorm->getOperator()->setDataType(DataType::Float32);
     myBatchNorm->getOperator()->setBackend("cpu");
     std::shared_ptr<Tensor> myWeights
         = std::make_shared<Tensor>(Array1D<float, 3>{{0.9044, 0.3028, 0.0218}});
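For reference, the forward pass exercised here applies the standard inference-mode batch-normalization transform per channel, y = scale * (x - batchMean) / sqrt(batchVar + epsilon) + shift, with epsilon = 0.00001F from the constructor call above (the 0.1F momentum only matters when running statistics are updated).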
diff --git a/unit_tests/operator/Test_ConvDepthWiseImpl.cpp b/unit_tests/operator/Test_ConvDepthWiseImpl.cpp
index be070d9725636e6d72a9df9c07b6d99cff14d3a7..406a070c8056474041a4207e17f18dbbc5bf86f8 100644
--- a/unit_tests/operator/Test_ConvDepthWiseImpl.cpp
+++ b/unit_tests/operator/Test_ConvDepthWiseImpl.cpp
@@ -22,7 +22,7 @@ using namespace Aidge;
 TEST_CASE("[cpu/operator] ConvDepthWise(forward)")
 {
     std::shared_ptr<Node> myCDW = ConvDepthWise({3, 3}, "mycdw");
-    myCDW->getOperator()->setDatatype(DataType::Int32);
+    myCDW->getOperator()->setDataType(DataType::Int32);
     myCDW->getOperator()->setBackend("cpu");
     std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array3D<int, 4, 3, 3>{
         {{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}
diff --git a/unit_tests/operator/Test_ConvImpl.cpp b/unit_tests/operator/Test_ConvImpl.cpp
index d78b4b8c0c295e88caf6034e6f0ad0c641652667..6bb91a4b83aa880acb2019da6684b3b4cce5d967 100644
--- a/unit_tests/operator/Test_ConvImpl.cpp
+++ b/unit_tests/operator/Test_ConvImpl.cpp
@@ -25,7 +25,7 @@ TEST_CASE("[cpu/operator] Conv(forward)")
     SECTION("Classic Conv")
     {
         std::shared_ptr<Node> myConv = Conv(3, 4, {3, 3}, "myconv");
-        myConv->getOperator()->setDatatype(DataType::Int32);
+        myConv->getOperator()->setDataType(DataType::Int32);
         myConv->getOperator()->setBackend("cpu");
         std::shared_ptr<Tensor> myWeights
             = std::make_shared<Tensor>(Array4D<int, 4, 3, 3, 3>{
@@ -108,7 +108,7 @@ TEST_CASE("[cpu/operator] Conv(forward)")
     SECTION("Point-wise")
     {
         std::shared_ptr<Node> myConv = Conv(3, 4, {1, 1}, "myconv", {1, 1});
-        myConv->getOperator()->setDatatype(DataType::Float32);
+        myConv->getOperator()->setDataType(DataType::Float32);
         myConv->getOperator()->setBackend("cpu");
         myConv->getOperator()->input(0) = Array4D<float, 2, 3, 3, 3>{
             {{{{-1.38467371F, -0.87123615F, -0.22336592F},
diff --git a/unit_tests/operator/Test_DivImpl.cpp b/unit_tests/operator/Test_DivImpl.cpp
index a808c9b5930b9f50b6c1446bd80f079a75177641..bce420e7aa99afab8f2f40afd81896302eb47244 100644
--- a/unit_tests/operator/Test_DivImpl.cpp
+++ b/unit_tests/operator/Test_DivImpl.cpp
@@ -32,7 +32,7 @@ TEST_CASE("[cpu/operator] Div(forward)")
             Array2D<float, 2, 2>{{{0.15214217, 0.88150001}, {0.38989770, 0.40142286}}});
 
         std::shared_ptr<Node> myDiv = Div();
-        myDiv->getOperator()->setDatatype(DataType::Float32);
+        myDiv->getOperator()->setDataType(DataType::Float32);
         myDiv->getOperator()->setBackend("cpu");
         myDiv->getOperator()->associateInput(0, input_1);
         myDiv->getOperator()->associateInput(1, input_2);
@@ -58,7 +58,7 @@ TEST_CASE("[cpu/operator] Div(forward)")
             Array2D<float, 2, 2>{{{1.35017204, 0.62544787}, {1.96456301, 4.75375366}}});
 
         std::shared_ptr<Node> myDiv = Div();
-        myDiv->getOperator()->setDatatype(DataType::Float32);
+        myDiv->getOperator()->setDataType(DataType::Float32);
         myDiv->getOperator()->setBackend("cpu");
         myDiv->getOperator()->associateInput(0, input_1);
         myDiv->getOperator()->associateInput(1, input_2);
@@ -94,7 +94,7 @@ TEST_CASE("[cpu/operator] Div(forward)")
                   {0.52920723, 0.05856223, 0.46336490}}}});
 
         std::shared_ptr<Node> myDiv = Div();
-        myDiv->getOperator()->setDatatype(DataType::Float32);
+        myDiv->getOperator()->setDataType(DataType::Float32);
         myDiv->getOperator()->setBackend("cpu");
         myDiv->getOperator()->associateInput(0, input_1);
         myDiv->getOperator()->associateInput(1, input_2);
@@ -164,7 +164,7 @@ TEST_CASE("[cpu/operator] Div(forward)")
                    {0.08847972, 0.30863172, 0.27982998}}}}});
 
         std::shared_ptr<Node> myDiv = Div();
-        myDiv->getOperator()->setDatatype(DataType::Float32);
+        myDiv->getOperator()->setDataType(DataType::Float32);
         myDiv->getOperator()->setBackend("cpu");
         myDiv->getOperator()->associateInput(0, input_1);
         myDiv->getOperator()->associateInput(1, input_2);
diff --git a/unit_tests/operator/Test_FCImpl.cpp b/unit_tests/operator/Test_FCImpl.cpp
index e3494e20205f1a295eb537100b59fb7bbc26116a..c0a04cadb8e19663cb8c70a5e3a6d59dd2388237 100644
--- a/unit_tests/operator/Test_FCImpl.cpp
+++ b/unit_tests/operator/Test_FCImpl.cpp
@@ -19,91 +19,102 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/oeprator] FC(forward)") {
+TEST_CASE("[cpu/oeprator] FC(forward)")
+{
     std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array2D<int, 5, 75>{
-            {{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
-             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
-             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
-             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
-             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15}}});
-    std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
+        {{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+          5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+          9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+          13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
+         {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+          5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+          9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+          13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
+         {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+          5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+          9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+          13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
+         {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+          5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+          9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+          13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
+         {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+          5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+          9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+          13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15}}});
+    std::shared_ptr<Tensor> myBias
+        = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
     std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<int, 2, 5>{
-            {{23601, 23602, 23603, 23604, 23605}, {68601, 68602, 68603, 68604, 68605}}});
+        {{23601, 23602, 23603, 23604, 23605}, {68601, 68602, 68603, 68604, 68605}}});
 
     std::shared_ptr<Node> myFC = FC(5, false, "myfc");
-    myFC->getOperator()->setDatatype(DataType::Int32);
+    myFC->getOperator()->setDataType(DataType::Int32);
     myFC->getOperator()->setBackend("cpu");
     myFC->getOperator()->associateInput(1, myWeights);
     myFC->getOperator()->associateInput(2, myBias);
 
-    SECTION("2D input") {
+    SECTION("2D input")
+    {
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<int, 2, 75>{
-                {{0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18,
-                  19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
-                  38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
-                  57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74},
-                 {75,  76,  77,  78,  79,  80,  81,  82,  83,  84,  85,  86,  87,  88,  89,
-                  90,  91,  92,  93,  94,  95,  96,  97,  98,  99,  100, 101, 102, 103, 104,
-                  105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
-                  120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
-                  135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}});
+            {{0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18,
+              19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+              38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+              57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74},
+             {75,  76,  77,  78,  79,  80,  81,  82,  83,  84,  85,  86,  87,
+              88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  98,  99,  100,
+              101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
+              114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
+              127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+              140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}});
         myFC->getOperator()->associateInput(0, myInput);
         myFC->getOperator()->computeOutputDims();
         myFC->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myFC->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(
+            *std::static_pointer_cast<Tensor>(myFC->getOperator()->getOutput(0))
+            == *myOutput);
     }
-    SECTION("4D input") {
-        std::shared_ptr<Tensor> myInput =
-                std::make_shared<Tensor>(Array4D<int, 2, 3, 5, 5>{{{{{0, 1, 2, 3, 4},
-                                                                     {5, 6, 7, 8, 9},
-                                                                     {10, 11, 12, 13, 14},
-                                                                     {15, 16, 17, 18, 19},
-                                                                     {20, 21, 22, 23, 24}},
-                                                                    {{25, 26, 27, 28, 29},
-                                                                     {30, 31, 32, 33, 34},
-                                                                     {35, 36, 37, 38, 39},
-                                                                     {40, 41, 42, 43, 44},
-                                                                     {45, 46, 47, 48, 49}},
-                                                                    {{50, 51, 52, 53, 54},
-                                                                     {55, 56, 57, 58, 59},
-                                                                     {60, 61, 62, 63, 64},
-                                                                     {65, 66, 67, 68, 69},
-                                                                     {70, 71, 72, 73, 74}}},
-                                                                   {{{75, 76, 77, 78, 79},
-                                                                     {80, 81, 82, 83, 84},
-                                                                     {85, 86, 87, 88, 89},
-                                                                     {90, 91, 92, 93, 94},
-                                                                     {95, 96, 97, 98, 99}},
-                                                                    {{100, 101, 102, 103, 104},
-                                                                     {105, 106, 107, 108, 109},
-                                                                     {110, 111, 112, 113, 114},
-                                                                     {115, 116, 117, 118, 119},
-                                                                     {120, 121, 122, 123, 124}},
-                                                                    {{125, 126, 127, 128, 129},
-                                                                     {130, 131, 132, 133, 134},
-                                                                     {135, 136, 137, 138, 139},
-                                                                     {140, 141, 142, 143, 144},
-                                                                     {145, 146, 147, 148, 149}}}}});
+    SECTION("4D input")
+    {
+        std::shared_ptr<Tensor> myInput
+            = std::make_shared<Tensor>(Array4D<int, 2, 3, 5, 5>{
+                {{{{0, 1, 2, 3, 4},
+                   {5, 6, 7, 8, 9},
+                   {10, 11, 12, 13, 14},
+                   {15, 16, 17, 18, 19},
+                   {20, 21, 22, 23, 24}},
+                  {{25, 26, 27, 28, 29},
+                   {30, 31, 32, 33, 34},
+                   {35, 36, 37, 38, 39},
+                   {40, 41, 42, 43, 44},
+                   {45, 46, 47, 48, 49}},
+                  {{50, 51, 52, 53, 54},
+                   {55, 56, 57, 58, 59},
+                   {60, 61, 62, 63, 64},
+                   {65, 66, 67, 68, 69},
+                   {70, 71, 72, 73, 74}}},
+                 {{{75, 76, 77, 78, 79},
+                   {80, 81, 82, 83, 84},
+                   {85, 86, 87, 88, 89},
+                   {90, 91, 92, 93, 94},
+                   {95, 96, 97, 98, 99}},
+                  {{100, 101, 102, 103, 104},
+                   {105, 106, 107, 108, 109},
+                   {110, 111, 112, 113, 114},
+                   {115, 116, 117, 118, 119},
+                   {120, 121, 122, 123, 124}},
+                  {{125, 126, 127, 128, 129},
+                   {130, 131, 132, 133, 134},
+                   {135, 136, 137, 138, 139},
+                   {140, 141, 142, 143, 144},
+                   {145, 146, 147, 148, 149}}}}});
         myFC->getOperator()->associateInput(0, myInput);
         myFC->getOperator()->computeOutputDims();
         myFC->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myFC->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(
+            *std::static_pointer_cast<Tensor>(myFC->getOperator()->getOutput(0))
+            == *myOutput);
     }
 
-    // std::cout << static_cast<Tensor>((*myFC->getOperator())["weight"])[0][0][0][0] << std::endl;
+    // std::cout << static_cast<Tensor>((*myFC->getOperator())["weight"])[0][0][0][0] <<
+    // std::endl;
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_LeakyReLUImpl.cpp b/unit_tests/operator/Test_LeakyReLUImpl.cpp
index d5bd91ff75404a7b928c8919c64e06315b78206f..a1b68d88289dea80d19570cc3740d19bbf6a54fc 100644
--- a/unit_tests/operator/Test_LeakyReLUImpl.cpp
+++ b/unit_tests/operator/Test_LeakyReLUImpl.cpp
@@ -18,153 +18,99 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
-    SECTION("1D Tensor") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
-            {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,10> {
-            {0, 1, 2, 0, 4, 0, 0, 7, 8, 9}
-        });
+TEST_CASE("[cpu/operator] LeakyReLU(forward)")
+{
+    SECTION("1D Tensor")
+    {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(
+            Array1D<int, 10>{{0, 1, 2, -3, 4, -5, -6, 7, 8, 9}});
+        std::shared_ptr<Tensor> expectedOutput
+            = std::make_shared<Tensor>(Array1D<int, 10>{{0, 1, 2, 0, 4, 0, 0, 7, 8, 9}});
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU();
-        myLeakyReLU->getOperator()->setDatatype(DataType::Int32);
+        myLeakyReLU->getOperator()->setDataType(DataType::Int32);
         myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
+        myLeakyReLU->getOperator()->associateInput(0, input0);
         myLeakyReLU->getOperator()->computeOutputDims();
         myLeakyReLU->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myLeakyReLU->getOperator()->getOutput(0)) == *expectedOutput);
+        REQUIRE(
+            *std::static_pointer_cast<Tensor>(myLeakyReLU->getOperator()->getOutput(0))
+            == *expectedOutput);
     }
 
-    SECTION("2D Tensor") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array2D<int,2,10> {
-            {
-                { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<int,2,10> {
-            {
-                { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
-                { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
-            }
-        });
+    SECTION("2D Tensor")
+    {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array2D<int, 2, 10>{
+            {{0, 1, 2, -3, 4, -5, -6, 7, 8, 9}, {-5, 4, 2, -3, 4, -5, -6, 7, -1, 10}}});
+        std::shared_ptr<Tensor> expectedOutput
+            = std::make_shared<Tensor>(Array2D<int, 2, 10>{
+                {{0, 1, 2, 0, 4, 0, 0, 7, 8, 9}, {0, 4, 2, 0, 4, 0, 0, 7, 0, 10}}});
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU();
-        myLeakyReLU->getOperator()->setDatatype(DataType::Int32);
+        myLeakyReLU->getOperator()->setDataType(DataType::Int32);
         myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
+        myLeakyReLU->getOperator()->associateInput(0, input0);
         myLeakyReLU->getOperator()->computeOutputDims();
         myLeakyReLU->forward();
         REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput);
     }
 
-    SECTION("3D Tensor") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<int,2,2,10> {
-            {
-                {
-                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                },
-                {
-                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,2,2,10> {
-            {
-                {
-                    { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
-                    { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
-                },
-                {
-                    { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
-                    { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
-                }
-            }
-        });
+    SECTION("3D Tensor")
+    {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<int, 2, 2, 10>{
+            {{{0, 1, 2, -3, 4, -5, -6, 7, 8, 9}, {-5, 4, 2, -3, 4, -5, -6, 7, -1, 10}},
+             {{0, 1, 2, -3, 4, -5, -6, 7, 8, 9}, {-5, 4, 2, -3, 4, -5, -6, 7, -1, 10}}}});
+        std::shared_ptr<Tensor> expectedOutput
+            = std::make_shared<Tensor>(Array3D<int, 2, 2, 10>{
+                {{{0, 1, 2, 0, 4, 0, 0, 7, 8, 9}, {0, 4, 2, 0, 4, 0, 0, 7, 0, 10}},
+                 {{0, 1, 2, 0, 4, 0, 0, 7, 8, 9}, {0, 4, 2, 0, 4, 0, 0, 7, 0, 10}}}});
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU();
-        myLeakyReLU->getOperator()->setDatatype(DataType::Int32);
+        myLeakyReLU->getOperator()->setDataType(DataType::Int32);
         myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
+        myLeakyReLU->getOperator()->associateInput(0, input0);
         myLeakyReLU->getOperator()->computeOutputDims();
         myLeakyReLU->forward();
         REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput);
     }
 
-    SECTION("4D Tensor") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
-            {
-                {
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    },
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    }
-                },
-                {
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    },
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    }
-                }
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
-            {
-                {
-                    {
-                        { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
-                        { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
-                    },
-                    {
-                        { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
-                        { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
-                    }
-                },
-                {
-                    {
-                        { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
-                        { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
-                    },
-                    {
-                        { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
-                        { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
-                    }
-                }
-            }
-        });
+    SECTION("4D Tensor")
+    {
+        std::shared_ptr<Tensor> input0 = std::make_shared<
+            Tensor>(Array4D<int, 2, 2, 2, 10>{
+            {{{{0, 1, 2, -3, 4, -5, -6, 7, 8, 9}, {-5, 4, 2, -3, 4, -5, -6, 7, -1, 10}},
+              {{0, 1, 2, -3, 4, -5, -6, 7, 8, 9}, {-5, 4, 2, -3, 4, -5, -6, 7, -1, 10}}},
+             {{{0, 1, 2, -3, 4, -5, -6, 7, 8, 9}, {-5, 4, 2, -3, 4, -5, -6, 7, -1, 10}},
+              {{0, 1, 2, -3, 4, -5, -6, 7, 8, 9},
+               {-5, 4, 2, -3, 4, -5, -6, 7, -1, 10}}}}});
+        std::shared_ptr<Tensor> expectedOutput
+            = std::make_shared<Tensor>(Array4D<int, 2, 2, 2, 10>{
+                {{{{0, 1, 2, 0, 4, 0, 0, 7, 8, 9}, {0, 4, 2, 0, 4, 0, 0, 7, 0, 10}},
+                  {{0, 1, 2, 0, 4, 0, 0, 7, 8, 9}, {0, 4, 2, 0, 4, 0, 0, 7, 0, 10}}},
+                 {{{0, 1, 2, 0, 4, 0, 0, 7, 8, 9}, {0, 4, 2, 0, 4, 0, 0, 7, 0, 10}},
+                  {{0, 1, 2, 0, 4, 0, 0, 7, 8, 9}, {0, 4, 2, 0, 4, 0, 0, 7, 0, 10}}}}});
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU();
-        myLeakyReLU->getOperator()->setDatatype(DataType::Int32);
+        myLeakyReLU->getOperator()->setDataType(DataType::Int32);
         myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
+        myLeakyReLU->getOperator()->associateInput(0, input0);
         myLeakyReLU->getOperator()->computeOutputDims();
         myLeakyReLU->forward();
         REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput);
     }
 
-    SECTION("Test construction attribute: negative_slop") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<float,10> {
-            {0.0f, 1.0f, 2.0f,-3.0f, 4.0f,-5.0f,-6.0f, 7.0f, 8.0f, 9.0f}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<float,10> {
-            {0.0f, 1.0f, 2.0f,-1.5f, 4.0f,-2.5f,-3.0f, 7.0f, 8.0f, 9.0f}
-        });
+    SECTION("Test construction attribute: negative_slop")
+    {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<float, 10>{
+            {0.0f, 1.0f, 2.0f, -3.0f, 4.0f, -5.0f, -6.0f, 7.0f, 8.0f, 9.0f}});
+        std::shared_ptr<Tensor> expectedOutput
+            = std::make_shared<Tensor>(Array1D<float, 10>{
+                {0.0f, 1.0f, 2.0f, -1.5f, 4.0f, -2.5f, -3.0f, 7.0f, 8.0f, 9.0f}});
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU(0.5f);
-        myLeakyReLU->getOperator()->setDatatype(DataType::Float32);
+        myLeakyReLU->getOperator()->setDataType(DataType::Float32);
         myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
+        myLeakyReLU->getOperator()->associateInput(0, input0);
         myLeakyReLU->getOperator()->computeOutputDims();
         myLeakyReLU->forward();
         REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput);
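This last section pins down the constructor attribute: with a slope of 0.5f, LeakyReLU computes y = x for x >= 0 and y = 0.5f * x otherwise, which is exactly how -3.0f, -5.0f, and -6.0f become -1.5f, -2.5f, and -3.0f in the expected output, while the default-constructed LeakyReLU() in the earlier sections zeroes the negatives entirely.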
diff --git a/unit_tests/operator/Test_MatMulImpl.cpp b/unit_tests/operator/Test_MatMulImpl.cpp
index 0da01b3287043e07e5b967df8882960cfb814f8f..1e970d2cc1b1c6b0a4c1e03c203af7b25283dfdd 100644
--- a/unit_tests/operator/Test_MatMulImpl.cpp
+++ b/unit_tests/operator/Test_MatMulImpl.cpp
@@ -19,90 +19,100 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul]") {
+TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul]")
+{
     // Test MatMul forward with batch size = 2 and feature size = 75
     std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array2D<int, 5, 75>{
-            {{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
-             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
-             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
-             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
-             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15}}});
+        {{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+          5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+          9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+          13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
+         {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+          5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+          9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+          13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
+         {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+          5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+          9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+          13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
+         {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+          5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+          9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+          13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
+         {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+          5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+          9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+          13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15}}});
     std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<int, 2, 5>{
-            {{23600, 23600, 23600, 23600, 23600}, {68600, 68600, 68600, 68600, 68600}}});
+        {{23600, 23600, 23600, 23600, 23600}, {68600, 68600, 68600, 68600, 68600}}});
 
     std::shared_ptr<Node> myMatMul = MatMul(5, "mymatmul");
-    myMatMul->getOperator()->setDatatype(DataType::Int32);
+    myMatMul->getOperator()->setDataType(DataType::Int32);
     myMatMul->getOperator()->setBackend("cpu");
     myMatMul->getOperator()->associateInput(1, myWeights);
 
-    SECTION("2D input") {
+    SECTION("2D input")
+    {
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<int, 2, 75>{
-                {{0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18,
-                  19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
-                  38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
-                  57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74},
-                 {75,  76,  77,  78,  79,  80,  81,  82,  83,  84,  85,  86,  87,  88,  89,
-                  90,  91,  92,  93,  94,  95,  96,  97,  98,  99,  100, 101, 102, 103, 104,
-                  105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
-                  120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
-                  135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}});
+            {{0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18,
+              19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+              38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+              57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74},
+             {75,  76,  77,  78,  79,  80,  81,  82,  83,  84,  85,  86,  87,
+              88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  98,  99,  100,
+              101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
+              114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
+              127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+              140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}});
         myMatMul->getOperator()->associateInput(0, myInput);
         myMatMul->getOperator()->computeOutputDims();
         myMatMul->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myMatMul->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(
+            *std::static_pointer_cast<Tensor>(myMatMul->getOperator()->getOutput(0))
+            == *myOutput);
     }
-    SECTION("4D input") {
-        std::shared_ptr<Tensor> myInput =
-                std::make_shared<Tensor>(Array4D<int, 2, 3, 5, 5>{{{{{0, 1, 2, 3, 4},
-                                                                     {5, 6, 7, 8, 9},
-                                                                     {10, 11, 12, 13, 14},
-                                                                     {15, 16, 17, 18, 19},
-                                                                     {20, 21, 22, 23, 24}},
-                                                                    {{25, 26, 27, 28, 29},
-                                                                     {30, 31, 32, 33, 34},
-                                                                     {35, 36, 37, 38, 39},
-                                                                     {40, 41, 42, 43, 44},
-                                                                     {45, 46, 47, 48, 49}},
-                                                                    {{50, 51, 52, 53, 54},
-                                                                     {55, 56, 57, 58, 59},
-                                                                     {60, 61, 62, 63, 64},
-                                                                     {65, 66, 67, 68, 69},
-                                                                     {70, 71, 72, 73, 74}}},
-                                                                   {{{75, 76, 77, 78, 79},
-                                                                     {80, 81, 82, 83, 84},
-                                                                     {85, 86, 87, 88, 89},
-                                                                     {90, 91, 92, 93, 94},
-                                                                     {95, 96, 97, 98, 99}},
-                                                                    {{100, 101, 102, 103, 104},
-                                                                     {105, 106, 107, 108, 109},
-                                                                     {110, 111, 112, 113, 114},
-                                                                     {115, 116, 117, 118, 119},
-                                                                     {120, 121, 122, 123, 124}},
-                                                                    {{125, 126, 127, 128, 129},
-                                                                     {130, 131, 132, 133, 134},
-                                                                     {135, 136, 137, 138, 139},
-                                                                     {140, 141, 142, 143, 144},
-                                                                     {145, 146, 147, 148, 149}}}}});
+    SECTION("4D input")
+    {
+        std::shared_ptr<Tensor> myInput
+            = std::make_shared<Tensor>(Array4D<int, 2, 3, 5, 5>{
+                {{{{0, 1, 2, 3, 4},
+                   {5, 6, 7, 8, 9},
+                   {10, 11, 12, 13, 14},
+                   {15, 16, 17, 18, 19},
+                   {20, 21, 22, 23, 24}},
+                  {{25, 26, 27, 28, 29},
+                   {30, 31, 32, 33, 34},
+                   {35, 36, 37, 38, 39},
+                   {40, 41, 42, 43, 44},
+                   {45, 46, 47, 48, 49}},
+                  {{50, 51, 52, 53, 54},
+                   {55, 56, 57, 58, 59},
+                   {60, 61, 62, 63, 64},
+                   {65, 66, 67, 68, 69},
+                   {70, 71, 72, 73, 74}}},
+                 {{{75, 76, 77, 78, 79},
+                   {80, 81, 82, 83, 84},
+                   {85, 86, 87, 88, 89},
+                   {90, 91, 92, 93, 94},
+                   {95, 96, 97, 98, 99}},
+                  {{100, 101, 102, 103, 104},
+                   {105, 106, 107, 108, 109},
+                   {110, 111, 112, 113, 114},
+                   {115, 116, 117, 118, 119},
+                   {120, 121, 122, 123, 124}},
+                  {{125, 126, 127, 128, 129},
+                   {130, 131, 132, 133, 134},
+                   {135, 136, 137, 138, 139},
+                   {140, 141, 142, 143, 144},
+                   {145, 146, 147, 148, 149}}}}});
         myMatMul->getOperator()->associateInput(0, myInput);
         myMatMul->getOperator()->computeOutputDims();
         myMatMul->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myMatMul->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(
+            *std::static_pointer_cast<Tensor>(myMatMul->getOperator()->getOutput(0))
+            == *myOutput);
     }
 
-    // std::cout << static_cast<Tensor>((*myMatMul->getOperator())["weight"])[0][0][0][0] << std::endl;
+    // std::cout << static_cast<Tensor>((*myMatMul->getOperator())["weight"])[0][0][0][0]
+    // << std::endl;
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_MaxPoolingImpl.cpp b/unit_tests/operator/Test_MaxPoolingImpl.cpp
index 83fa7eaa670399c8d6c085a14db08fa35df9de8c..4803c0ba399d86ed12b1bd950639b2f818651e48 100644
--- a/unit_tests/operator/Test_MaxPoolingImpl.cpp
+++ b/unit_tests/operator/Test_MaxPoolingImpl.cpp
@@ -10,8 +10,8 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
-#include <memory>
 #include <cstdlib>
+#include <memory>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/MaxPooling.hpp"
@@ -20,60 +20,45 @@
 
 using namespace Aidge;
 
+TEST_CASE("[cpu/operator] MaxPooling(forward)")
+{
+    std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+        Array4D<float, 2, 2, 5, 5>{// NCHW
+                                   {{{{-0.3848, 0.2166, -0.4373, 0.6142, 0.5277},
+                                      {0.7995, 0.3638, -1.4589, -1.0843, 1.0918},
+                                      {0.7147, 0.0936, -1.2902, 1.2037, 0.4874},
+                                      {-0.5981, 2.1184, -0.9175, 1.3859, 0.3305},
+                                      {-1.7700, 0.0563, -0.3914, 0.0538, -0.3955}},
 
-TEST_CASE("[cpu/operator] MaxPooling(forward)") {
-    std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<float,2,2,5,5> { //NCHW
-        {
-            {
-                {{-0.3848,  0.2166, -0.4373,  0.6142,  0.5277},
-                 {0.7995,  0.3638, -1.4589, -1.0843,  1.0918},
-            	 {0.7147,  0.0936, -1.2902,  1.2037,  0.4874},
-                 {-0.5981,  2.1184, -0.9175,  1.3859,  0.3305},
-                 {-1.7700,  0.0563, -0.3914,  0.0538, -0.3955}},
-
-                {{-3.1409, -0.4554,  0.0524,  2.2291,  0.4859},
-                 {-0.7465, -0.6567, -2.3703, -0.6386, -1.4152},
-                 { 2.2329, -0.5850,  0.0700,  1.2838, -1.7363},
-                 { 0.2139,  0.0624, -1.0689, -0.8221, -0.8038},
-                 { 0.1886, -0.7840, -0.2313,  0.2651, -1.6244}}
-            },
-            {
-                {{ 0.4371,  1.6417,  0.9129,  0.6325,  0.5438},
-                 {-2.3552, -0.8850, -0.0232, -0.5462, -1.2011},
-                 {1.7653, -1.6668, -1.0814,  0.6182,  1.2071},
-                 {0.9541, -0.5133,  0.8664, -0.8892,  1.4585},
-                 {1.0220, -0.5107,  0.1829, -0.2301, -0.4268}},
+                                     {{-3.1409, -0.4554, 0.0524, 2.2291, 0.4859},
+                                      {-0.7465, -0.6567, -2.3703, -0.6386, -1.4152},
+                                      {2.2329, -0.5850, 0.0700, 1.2838, -1.7363},
+                                      {0.2139, 0.0624, -1.0689, -0.8221, -0.8038},
+                                      {0.1886, -0.7840, -0.2313, 0.2651, -1.6244}}},
+                                    {{{0.4371, 1.6417, 0.9129, 0.6325, 0.5438},
+                                      {-2.3552, -0.8850, -0.0232, -0.5462, -1.2011},
+                                      {1.7653, -1.6668, -1.0814, 0.6182, 1.2071},
+                                      {0.9541, -0.5133, 0.8664, -0.8892, 1.4585},
+                                      {1.0220, -0.5107, 0.1829, -0.2301, -0.4268}},
 
-                {{ 1.0429,  0.6279, -0.2875,  0.7187, -0.1500},
-                 {1.6041,  2.9635,  1.4172, -0.7517,  0.5441},
-                 {-0.2276,  0.0857,  0.6776, -0.1389, -0.0614},
-                 {-0.1547, -0.3435,  0.0650, -0.5095, -1.8073},
-                 {1.7217,  0.3999, -0.5953,  1.0604, -0.4126}}
-            }
-        }
-    });
-    SECTION("Stride") {
-        std::shared_ptr<Node> myMaxPool = MaxPooling({2,2}, "mycdw", {2,2});
-        myMaxPool->getOperator()->setDatatype(DataType::Float32);
+                                     {{1.0429, 0.6279, -0.2875, 0.7187, -0.1500},
+                                      {1.6041, 2.9635, 1.4172, -0.7517, 0.5441},
+                                      {-0.2276, 0.0857, 0.6776, -0.1389, -0.0614},
+                                      {-0.1547, -0.3435, 0.0650, -0.5095, -1.8073},
+                                      {1.7217, 0.3999, -0.5953, 1.0604, -0.4126}}}}});
+    SECTION("Stride")
+    {
+        std::shared_ptr<Node> myMaxPool = MaxPooling({2, 2}, "mycdw", {2, 2});
+        myMaxPool->getOperator()->setDataType(DataType::Float32);
         myMaxPool->getOperator()->setBackend("cpu");
 
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,2,2,2> { 
-            {
-                {
-                    {{  0.7995,  0.6142},
-                     { 2.1184,  1.3859}},
-                    {{ -0.4554,  2.2291},
-                     {  2.2329,  1.2838}}
-                },
-                {
-                    {{1.6417,  0.9129},
-                     {1.7653,  0.8664}},
-                    {{2.9635,  1.4172},
-                     {0.0857,  0.6776}}
-                }
-            }
-        });
-        myMaxPool->getOperator()->associateInput(0,myInput);
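+        // Expected result of 2x2 max-pooling with stride 2: the max over each
+        // non-overlapping 2x2 window (the last input row/column is dropped).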
+        std::shared_ptr<Tensor> myOutput
+            = std::make_shared<Tensor>(Array4D<float, 2, 2, 2, 2>{
+                {{{{0.7995, 0.6142}, {2.1184, 1.3859}},
+                  {{-0.4554, 2.2291}, {2.2329, 1.2838}}},
+                 {{{1.6417, 0.9129}, {1.7653, 0.8664}},
+                  {{2.9635, 1.4172}, {0.0857, 0.6776}}}}});
+        myMaxPool->getOperator()->associateInput(0, myInput);
         myMaxPool->getOperator()->computeOutputDims();
         myMaxPool->forward();
         myMaxPool->getOperator()->getOutput(0)->print();
diff --git a/unit_tests/operator/Test_MulImpl.cpp b/unit_tests/operator/Test_MulImpl.cpp
index 5f36e71619314d583f2a46590e147ced60a84733..d044c44c26e3fabd69979bfa91b557be03bfecce 100644
--- a/unit_tests/operator/Test_MulImpl.cpp
+++ b/unit_tests/operator/Test_MulImpl.cpp
@@ -32,7 +32,7 @@ TEST_CASE("[cpu/operator] Mul(forward)")
             Array2D<float, 2, 2>{{{1.16932082, 1.02192521}, {0.01281792, 2.72617555}}});
 
         std::shared_ptr<Node> myMul = Mul();
-        myMul->getOperator()->setDatatype(DataType::Float32);
+        myMul->getOperator()->setDataType(DataType::Float32);
         myMul->getOperator()->setBackend("cpu");
         myMul->getOperator()->associateInput(0, input_1);
         myMul->getOperator()->associateInput(1, input_2);
@@ -58,7 +58,7 @@ TEST_CASE("[cpu/operator] Mul(forward)")
             Array2D<float, 2, 2>{{{0.00920683, 0.08204205}, {0.00404580, 0.12279158}}});
 
         std::shared_ptr<Node> myMul = Mul();
-        myMul->getOperator()->setDatatype(DataType::Float32);
+        myMul->getOperator()->setDataType(DataType::Float32);
         myMul->getOperator()->setBackend("cpu");
         myMul->getOperator()->associateInput(0, input_1);
         myMul->getOperator()->associateInput(1, input_2);
@@ -94,7 +94,7 @@ TEST_CASE("[cpu/operator] Mul(forward)")
                   {0.12057999, 0.00177853, 0.61175603}}}});
 
         std::shared_ptr<Node> myMul = Mul();
-        myMul->getOperator()->setDatatype(DataType::Float32);
+        myMul->getOperator()->setDataType(DataType::Float32);
         myMul->getOperator()->setBackend("cpu");
         myMul->getOperator()->associateInput(0, input_1);
         myMul->getOperator()->associateInput(1, input_2);
diff --git a/unit_tests/operator/Test_PadImpl.cpp b/unit_tests/operator/Test_PadImpl.cpp
index b603e165392f1a861dc1b40d50b70a53c9256870..41c40494e4ec27c4764d7c91f4a229ecb3ebaf25 100644
--- a/unit_tests/operator/Test_PadImpl.cpp
+++ b/unit_tests/operator/Test_PadImpl.cpp
@@ -20,547 +20,484 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Pad(forward)") {
-    SECTION("Symmetric Pad") {
+TEST_CASE("[cpu/operator] Pad(forward)")
+{
+    SECTION("Symmetric Pad")
+    {
         const int pv = 0; // pad value
 
-        std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
-        myPad->getOperator()->setDatatype(DataType::Int32);
+        std::shared_ptr<Node> myPad = Pad<2>(
+            {1, 1, 1, 1}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
+        myPad->getOperator()->setDataType(DataType::Int32);
         myPad->getOperator()->setBackend("cpu");
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
-            {
-                {
-                    {{  0,   1,   2,   3,   4},
-                    {  5,   6,   7,   8,   9},
-                    { 10,  11,  12,  13,  14},
-                    { 15,  16,  17,  18,  19},
-                    { 20,  21,  22,  23,  24}},
-
-                    {{ 25,  26,  27,  28,  29},
-                    { 30,  31,  32,  33,  34},
-                    { 35,  36,  37,  38,  39},
-                    { 40,  41,  42,  43,  44},
-                    { 45,  46,  47,  48,  49}},
-
-                    {{ 50,  51,  52,  53,  54},
-                    { 55,  56,  57,  58,  59},
-                    { 60,  61,  62,  63,  64},
-                    { 65,  66,  67,  68,  69},
-                    { 70,  71,  72,  73,  74}}
-                },
-                {
-                    {{ 75,  76,  77,  78,  79},
-                    { 80,  81,  82,  83,  84},
-                    { 85,  86,  87,  88,  89},
-                    { 90,  91,  92,  93,  94},
-                    { 95,  96,  97,  98,  99}},
-
-                    {{100, 101, 102, 103, 104},
-                    {105, 106, 107, 108, 109},
-                    {110, 111, 112, 113, 114},
-                    {115, 116, 117, 118, 119},
-                    {120, 121, 122, 123, 124}},
-
-                    {{125, 126, 127, 128, 129},
-                    {130, 131, 132, 133, 134},
-                    {135, 136, 137, 138, 139},
-                    {140, 141, 142, 143, 144},
-                    {145, 146, 147, 148, 149}}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
-            {
-                {
-                    {{ pv,  pv,   pv,   pv,   pv,   pv,  pv},
-                    { pv,   0,   1,   2,   3,   4,  pv},
-                    { pv,   5,   6,   7,   8,   9,  pv},
-                    { pv,  10,  11,  12,  13,  14,  pv},
-                    { pv,  15,  16,  17,  18,  19,  pv},
-                    { pv,  20,  21,  22,  23,  24,  pv},
-                    { pv,  pv,   pv,   pv,   pv,   pv,  pv}},
-
-                    {{ pv,  pv,   pv,   pv,   pv,   pv,  pv},
-                    { pv,  25,  26,  27,  28,  29,  pv},
-                    { pv,  30,  31,  32,  33,  34,  pv},
-                    { pv,  35,  36,  37,  38,  39,  pv},
-                    { pv,  40,  41,  42,  43,  44,  pv},
-                    { pv,  45,  46,  47,  48,  49,  pv},
-                    { pv,  pv,   pv,   pv,   pv,   pv,  pv}},
-
-                    {{ pv,  pv,   pv,   pv,   pv,   pv,  pv},
-                    { pv,  50,  51,  52,  53,  54,  pv},
-                    { pv,  55,  56,  57,  58,  59,  pv},
-                    { pv,  60,  61,  62,  63,  64,  pv},
-                    { pv,  65,  66,  67,  68,  69,  pv},
-                    { pv,  70,  71,  72,  73,  74,  pv},
-                    { pv,  pv,   pv,   pv,   pv,   pv,  pv}}
-                },
-                {
-                    {{ pv,  pv,   pv,   pv,   pv,   pv,  pv},
-                    { pv,  75,  76,  77,  78,  79,  pv},
-                    { pv,  80,  81,  82,  83,  84,  pv},
-                    { pv,  85,  86,  87,  88,  89,  pv},
-                    { pv,  90,  91,  92,  93,  94,  pv},
-                    { pv,  95,  96,  97,  98,  99,  pv},
-                    { pv,  pv,   pv,   pv,   pv,   pv,  pv}},
-
-                    {{ pv,  pv,   pv,   pv,   pv,   pv,  pv},
-                    {pv,  100, 101, 102, 103, 104,  pv},
-                    {pv,  105, 106, 107, 108, 109,  pv},
-                    {pv,  110, 111, 112, 113, 114,  pv},
-                    {pv,  115, 116, 117, 118, 119,  pv},
-                    {pv,  120, 121, 122, 123, 124,  pv},
-                    { pv,  pv,   pv,   pv,   pv,   pv,  pv}},
-
-                    {{ pv,  pv,   pv,   pv,   pv,   pv,  pv},
-                    {pv,  125, 126, 127, 128, 129,  pv},
-                    {pv,  130, 131, 132, 133, 134,  pv},
-                    {pv,  135, 136, 137, 138, 139,  pv},
-                    {pv,  140, 141, 142, 143, 144,  pv},
-                    {pv,  145, 146, 147, 148, 149,  pv},
-                    { pv,  pv,   pv,   pv,   pv,   pv,  pv}}
-                }
-            }
-        });
-
-        myPad->getOperator()->associateInput(0,myInput);
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+            Array4D<int, 2, 3, 5, 5>{// NCHW
+                                     {{{{0, 1, 2, 3, 4},
+                                        {5, 6, 7, 8, 9},
+                                        {10, 11, 12, 13, 14},
+                                        {15, 16, 17, 18, 19},
+                                        {20, 21, 22, 23, 24}},
+
+                                       {{25, 26, 27, 28, 29},
+                                        {30, 31, 32, 33, 34},
+                                        {35, 36, 37, 38, 39},
+                                        {40, 41, 42, 43, 44},
+                                        {45, 46, 47, 48, 49}},
+
+                                       {{50, 51, 52, 53, 54},
+                                        {55, 56, 57, 58, 59},
+                                        {60, 61, 62, 63, 64},
+                                        {65, 66, 67, 68, 69},
+                                        {70, 71, 72, 73, 74}}},
+                                      {{{75, 76, 77, 78, 79},
+                                        {80, 81, 82, 83, 84},
+                                        {85, 86, 87, 88, 89},
+                                        {90, 91, 92, 93, 94},
+                                        {95, 96, 97, 98, 99}},
+
+                                       {{100, 101, 102, 103, 104},
+                                        {105, 106, 107, 108, 109},
+                                        {110, 111, 112, 113, 114},
+                                        {115, 116, 117, 118, 119},
+                                        {120, 121, 122, 123, 124}},
+
+                                       {{125, 126, 127, 128, 129},
+                                        {130, 131, 132, 133, 134},
+                                        {135, 136, 137, 138, 139},
+                                        {140, 141, 142, 143, 144},
+                                        {145, 146, 147, 148, 149}}}}});
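+        // Expected output: each 5x5 feature map padded with one row/column of
+        // the constant pad value pv on every side, giving 7x7.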
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(
+            Array4D<int, 2, 3, 7, 7>{// NCHW
+                                     {{{{pv, pv, pv, pv, pv, pv, pv},
+                                        {pv, 0, 1, 2, 3, 4, pv},
+                                        {pv, 5, 6, 7, 8, 9, pv},
+                                        {pv, 10, 11, 12, 13, 14, pv},
+                                        {pv, 15, 16, 17, 18, 19, pv},
+                                        {pv, 20, 21, 22, 23, 24, pv},
+                                        {pv, pv, pv, pv, pv, pv, pv}},
+
+                                       {{pv, pv, pv, pv, pv, pv, pv},
+                                        {pv, 25, 26, 27, 28, 29, pv},
+                                        {pv, 30, 31, 32, 33, 34, pv},
+                                        {pv, 35, 36, 37, 38, 39, pv},
+                                        {pv, 40, 41, 42, 43, 44, pv},
+                                        {pv, 45, 46, 47, 48, 49, pv},
+                                        {pv, pv, pv, pv, pv, pv, pv}},
+
+                                       {{pv, pv, pv, pv, pv, pv, pv},
+                                        {pv, 50, 51, 52, 53, 54, pv},
+                                        {pv, 55, 56, 57, 58, 59, pv},
+                                        {pv, 60, 61, 62, 63, 64, pv},
+                                        {pv, 65, 66, 67, 68, 69, pv},
+                                        {pv, 70, 71, 72, 73, 74, pv},
+                                        {pv, pv, pv, pv, pv, pv, pv}}},
+                                      {{{pv, pv, pv, pv, pv, pv, pv},
+                                        {pv, 75, 76, 77, 78, 79, pv},
+                                        {pv, 80, 81, 82, 83, 84, pv},
+                                        {pv, 85, 86, 87, 88, 89, pv},
+                                        {pv, 90, 91, 92, 93, 94, pv},
+                                        {pv, 95, 96, 97, 98, 99, pv},
+                                        {pv, pv, pv, pv, pv, pv, pv}},
+
+                                       {{pv, pv, pv, pv, pv, pv, pv},
+                                        {pv, 100, 101, 102, 103, 104, pv},
+                                        {pv, 105, 106, 107, 108, 109, pv},
+                                        {pv, 110, 111, 112, 113, 114, pv},
+                                        {pv, 115, 116, 117, 118, 119, pv},
+                                        {pv, 120, 121, 122, 123, 124, pv},
+                                        {pv, pv, pv, pv, pv, pv, pv}},
+
+                                       {{pv, pv, pv, pv, pv, pv, pv},
+                                        {pv, 125, 126, 127, 128, 129, pv},
+                                        {pv, 130, 131, 132, 133, 134, pv},
+                                        {pv, 135, 136, 137, 138, 139, pv},
+                                        {pv, 140, 141, 142, 143, 144, pv},
+                                        {pv, 145, 146, 147, 148, 149, pv},
+                                        {pv, pv, pv, pv, pv, pv, pv}}}}});
+
+        myPad->getOperator()->associateInput(0, myInput);
         myPad->getOperator()->computeOutputDims();
         myPad->forward();
         // myPad->getOperator()->getOutput(0)->print();
         REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
     }
 
-    SECTION("Asymmetric Pad") {
+    SECTION("Asymmetric Pad")
+    {
         const int pv = 0; // pad value
 
-        std::shared_ptr<Node> myPad = Pad<2>({1, 0, 0, 1}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
-        myPad->getOperator()->setDatatype(DataType::Int32);
+        std::shared_ptr<Node> myPad = Pad<2>(
+            {1, 0, 0, 1}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
+        myPad->getOperator()->setDataType(DataType::Int32);
         myPad->getOperator()->setBackend("cpu");
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
-            {
-                {
-                    {{  0,   1,   2,   3,   4},
-                    {  5,   6,   7,   8,   9},
-                    { 10,  11,  12,  13,  14},
-                    { 15,  16,  17,  18,  19},
-                    { 20,  21,  22,  23,  24}},
-
-                    {{ 25,  26,  27,  28,  29},
-                    { 30,  31,  32,  33,  34},
-                    { 35,  36,  37,  38,  39},
-                    { 40,  41,  42,  43,  44},
-                    { 45,  46,  47,  48,  49}},
-
-                    {{ 50,  51,  52,  53,  54},
-                    { 55,  56,  57,  58,  59},
-                    { 60,  61,  62,  63,  64},
-                    { 65,  66,  67,  68,  69},
-                    { 70,  71,  72,  73,  74}}
-                },
-                {
-                    {{ 75,  76,  77,  78,  79},
-                    { 80,  81,  82,  83,  84},
-                    { 85,  86,  87,  88,  89},
-                    { 90,  91,  92,  93,  94},
-                    { 95,  96,  97,  98,  99}},
-
-                    {{100, 101, 102, 103, 104},
-                    {105, 106, 107, 108, 109},
-                    {110, 111, 112, 113, 114},
-                    {115, 116, 117, 118, 119},
-                    {120, 121, 122, 123, 124}},
-
-                    {{125, 126, 127, 128, 129},
-                    {130, 131, 132, 133, 134},
-                    {135, 136, 137, 138, 139},
-                    {140, 141, 142, 143, 144},
-                    {145, 146, 147, 148, 149}}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,6,6> { //NCHW
-            {
-                {
-                    {{ pv,   pv,   pv,   pv,   pv,  pv},
-                    { 0,   1,   2,   3,   4,  pv},
-                    { 5,   6,   7,   8,   9,  pv},
-                    { 10,  11,  12,  13,  14,  pv},
-                    { 15,  16,  17,  18,  19,  pv},
-                    { 20,  21,  22,  23,  24,  pv}},
-
-                    {{ pv,   pv,   pv,   pv,   pv,  pv},
-                    { 25,  26,  27,  28,  29,  pv},
-                    { 30,  31,  32,  33,  34,  pv},
-                    { 35,  36,  37,  38,  39,  pv},
-                    { 40,  41,  42,  43,  44,  pv},
-                    { 45,  46,  47,  48,  49,  pv}},
-
-                    {{ pv,   pv,   pv,   pv,   pv,  pv},
-                    { 50,  51,  52,  53,  54,  pv},
-                    { 55,  56,  57,  58,  59,  pv},
-                    { 60,  61,  62,  63,  64,  pv},
-                    { 65,  66,  67,  68,  69,  pv},
-                    { 70,  71,  72,  73,  74,  pv}}
-                },
-                {
-                    {{ pv,   pv,   pv,   pv,   pv,  pv},
-                    { 75,  76,  77,  78,  79,  pv},
-                    { 80,  81,  82,  83,  84,  pv},
-                    { 85,  86,  87,  88,  89,  pv},
-                    { 90,  91,  92,  93,  94,  pv},
-                    { 95,  96,  97,  98,  99,  pv}},
-
-                    {{ pv,   pv,   pv,   pv,   pv,  pv},
-                    { 100, 101, 102, 103, 104,  pv},
-                    { 105, 106, 107, 108, 109,  pv},
-                    { 110, 111, 112, 113, 114,  pv},
-                    { 115, 116, 117, 118, 119,  pv},
-                    { 120, 121, 122, 123, 124,  pv}},
-
-                    {{ pv,   pv,   pv,   pv,   pv,  pv},
-                    { 125, 126, 127, 128, 129,  pv},
-                    { 130, 131, 132, 133, 134,  pv},
-                    { 135, 136, 137, 138, 139,  pv},
-                    { 140, 141, 142, 143, 144,  pv},
-                    { 145, 146, 147, 148, 149,  pv}}
-                }
-            }
-        });
-
-        myPad->getOperator()->associateInput(0,myInput);
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+            Array4D<int, 2, 3, 5, 5>{// NCHW
+                                     {{{{0, 1, 2, 3, 4},
+                                        {5, 6, 7, 8, 9},
+                                        {10, 11, 12, 13, 14},
+                                        {15, 16, 17, 18, 19},
+                                        {20, 21, 22, 23, 24}},
+
+                                       {{25, 26, 27, 28, 29},
+                                        {30, 31, 32, 33, 34},
+                                        {35, 36, 37, 38, 39},
+                                        {40, 41, 42, 43, 44},
+                                        {45, 46, 47, 48, 49}},
+
+                                       {{50, 51, 52, 53, 54},
+                                        {55, 56, 57, 58, 59},
+                                        {60, 61, 62, 63, 64},
+                                        {65, 66, 67, 68, 69},
+                                        {70, 71, 72, 73, 74}}},
+                                      {{{75, 76, 77, 78, 79},
+                                        {80, 81, 82, 83, 84},
+                                        {85, 86, 87, 88, 89},
+                                        {90, 91, 92, 93, 94},
+                                        {95, 96, 97, 98, 99}},
+
+                                       {{100, 101, 102, 103, 104},
+                                        {105, 106, 107, 108, 109},
+                                        {110, 111, 112, 113, 114},
+                                        {115, 116, 117, 118, 119},
+                                        {120, 121, 122, 123, 124}},
+
+                                       {{125, 126, 127, 128, 129},
+                                        {130, 131, 132, 133, 134},
+                                        {135, 136, 137, 138, 139},
+                                        {140, 141, 142, 143, 144},
+                                        {145, 146, 147, 148, 149}}}}});
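+        // Expected output: one row of pv added on top and one column of pv on
+        // the right only, giving 6x6.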
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(
+            Array4D<int, 2, 3, 6, 6>{// NCHW
+                                     {{{{pv, pv, pv, pv, pv, pv},
+                                        {0, 1, 2, 3, 4, pv},
+                                        {5, 6, 7, 8, 9, pv},
+                                        {10, 11, 12, 13, 14, pv},
+                                        {15, 16, 17, 18, 19, pv},
+                                        {20, 21, 22, 23, 24, pv}},
+
+                                       {{pv, pv, pv, pv, pv, pv},
+                                        {25, 26, 27, 28, 29, pv},
+                                        {30, 31, 32, 33, 34, pv},
+                                        {35, 36, 37, 38, 39, pv},
+                                        {40, 41, 42, 43, 44, pv},
+                                        {45, 46, 47, 48, 49, pv}},
+
+                                       {{pv, pv, pv, pv, pv, pv},
+                                        {50, 51, 52, 53, 54, pv},
+                                        {55, 56, 57, 58, 59, pv},
+                                        {60, 61, 62, 63, 64, pv},
+                                        {65, 66, 67, 68, 69, pv},
+                                        {70, 71, 72, 73, 74, pv}}},
+                                      {{{pv, pv, pv, pv, pv, pv},
+                                        {75, 76, 77, 78, 79, pv},
+                                        {80, 81, 82, 83, 84, pv},
+                                        {85, 86, 87, 88, 89, pv},
+                                        {90, 91, 92, 93, 94, pv},
+                                        {95, 96, 97, 98, 99, pv}},
+
+                                       {{pv, pv, pv, pv, pv, pv},
+                                        {100, 101, 102, 103, 104, pv},
+                                        {105, 106, 107, 108, 109, pv},
+                                        {110, 111, 112, 113, 114, pv},
+                                        {115, 116, 117, 118, 119, pv},
+                                        {120, 121, 122, 123, 124, pv}},
+
+                                       {{pv, pv, pv, pv, pv, pv},
+                                        {125, 126, 127, 128, 129, pv},
+                                        {130, 131, 132, 133, 134, pv},
+                                        {135, 136, 137, 138, 139, pv},
+                                        {140, 141, 142, 143, 144, pv},
+                                        {145, 146, 147, 148, 149, pv}}}}});
+
+        myPad->getOperator()->associateInput(0, myInput);
         myPad->getOperator()->computeOutputDims();
         myPad->forward();
         // myPad->getOperator()->getOutput(0)->print();
         REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
     }
 
-    SECTION("Pad Edge") {
+    SECTION("Pad Edge")
+    {
         std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Edge);
-        myPad->getOperator()->setDatatype(DataType::Int32);
+        myPad->getOperator()->setDataType(DataType::Int32);
         myPad->getOperator()->setBackend("cpu");
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
-            {
-                {
-                    {{  0,   1,   2,   3,   4},
-                    {  5,   6,   7,   8,   9},
-                    { 10,  11,  12,  13,  14},
-                    { 15,  16,  17,  18,  19},
-                    { 20,  21,  22,  23,  24}},
-
-                    {{ 25,  26,  27,  28,  29},
-                    { 30,  31,  32,  33,  34},
-                    { 35,  36,  37,  38,  39},
-                    { 40,  41,  42,  43,  44},
-                    { 45,  46,  47,  48,  49}},
-
-                    {{ 50,  51,  52,  53,  54},
-                    { 55,  56,  57,  58,  59},
-                    { 60,  61,  62,  63,  64},
-                    { 65,  66,  67,  68,  69},
-                    { 70,  71,  72,  73,  74}}
-                },
-                {
-                    {{ 75,  76,  77,  78,  79},
-                    { 80,  81,  82,  83,  84},
-                    { 85,  86,  87,  88,  89},
-                    { 90,  91,  92,  93,  94},
-                    { 95,  96,  97,  98,  99}},
-
-                    {{100, 101, 102, 103, 104},
-                    {105, 106, 107, 108, 109},
-                    {110, 111, 112, 113, 114},
-                    {115, 116, 117, 118, 119},
-                    {120, 121, 122, 123, 124}},
-
-                    {{125, 126, 127, 128, 129},
-                    {130, 131, 132, 133, 134},
-                    {135, 136, 137, 138, 139},
-                    {140, 141, 142, 143, 144},
-                    {145, 146, 147, 148, 149}}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
-            {
-                {
-                    {{ 0,  0,   1,   2,   3,   4,  4},
-                    { 0,   0,   1,   2,   3,   4,  4},
-                    { 5,   5,   6,   7,   8,   9,  9},
-                    { 10,  10,  11,  12,  13,  14,  14},
-                    { 15,  15,  16,  17,  18,  19,  19},
-                    { 20,  20,  21,  22,  23,  24,  24},
-                    { 20,  20,  21,  22,  23,  24,  24}},
-
-                    {{ 25,  25,  26,  27,  28,  29,  29},
-                    { 25,  25,  26,  27,  28,  29,  29},
-                    { 30,  30,  31,  32,  33,  34,  34},
-                    { 35,  35,  36,  37,  38,  39,  39},
-                    { 40,  40,  41,  42,  43,  44,  44},
-                    { 45,  45,  46,  47,  48,  49,  49},
-                    { 45,  45,  46,  47,  48,  49, 49}},
-
-                    {{ 50,  50,  51,  52,  53,  54,  54},
-                    { 50,  50,  51,  52,  53,  54,  54},
-                    { 55,  55,  56,  57,  58,  59,  59},
-                    { 60,  60,  61,  62,  63,  64,  64},
-                    { 65,  65,  66,  67,  68,  69,  69},
-                    { 70,  70,  71,  72,  73,  74,  74},
-                    { 70,  70,  71,  72,  73,  74,  74}}
-                },
-                {
-                    {{ 75,  75,  76,  77,  78,  79,  79},
-                    { 75,  75,  76,  77,  78,  79,  79},
-                    { 80,  80,  81,  82,  83,  84,  84},
-                    { 85,  85,  86,  87,  88,  89,  89},
-                    { 90,  90,  91,  92,  93,  94,  94},
-                    { 95,  95,  96,  97,  98,  99,  99},
-                    { 95,  95,  96,  97,  98,  99,  99}},
-
-                    {{100,  100, 101, 102, 103, 104,  104},
-                    {100,  100, 101, 102, 103, 104,  104},
-                    {105,  105, 106, 107, 108, 109, 109},
-                    {110,  110, 111, 112, 113, 114,  114},
-                    {115,  115, 116, 117, 118, 119,  119},
-                    {120,  120, 121, 122, 123, 124,  124},
-                    {120,  120, 121, 122, 123, 124,  124}},
-
-                    {{125,  125, 126, 127, 128, 129,  129},
-                    {125,  125, 126, 127, 128, 129,  129},
-                    {130,  130, 131, 132, 133, 134,  134},
-                    {135,  135, 136, 137, 138, 139,  139},
-                    {140,  140, 141, 142, 143, 144,  144},
-                    {145,  145, 146, 147, 148, 149,  149},
-                    {145,  145, 146, 147, 148, 149,  149}}
-                }
-            }
-        });
-
-        myPad->getOperator()->associateInput(0,myInput);
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+            Array4D<int, 2, 3, 5, 5>{// NCHW
+                                     {{{{0, 1, 2, 3, 4},
+                                        {5, 6, 7, 8, 9},
+                                        {10, 11, 12, 13, 14},
+                                        {15, 16, 17, 18, 19},
+                                        {20, 21, 22, 23, 24}},
+
+                                       {{25, 26, 27, 28, 29},
+                                        {30, 31, 32, 33, 34},
+                                        {35, 36, 37, 38, 39},
+                                        {40, 41, 42, 43, 44},
+                                        {45, 46, 47, 48, 49}},
+
+                                       {{50, 51, 52, 53, 54},
+                                        {55, 56, 57, 58, 59},
+                                        {60, 61, 62, 63, 64},
+                                        {65, 66, 67, 68, 69},
+                                        {70, 71, 72, 73, 74}}},
+                                      {{{75, 76, 77, 78, 79},
+                                        {80, 81, 82, 83, 84},
+                                        {85, 86, 87, 88, 89},
+                                        {90, 91, 92, 93, 94},
+                                        {95, 96, 97, 98, 99}},
+
+                                       {{100, 101, 102, 103, 104},
+                                        {105, 106, 107, 108, 109},
+                                        {110, 111, 112, 113, 114},
+                                        {115, 116, 117, 118, 119},
+                                        {120, 121, 122, 123, 124}},
+
+                                       {{125, 126, 127, 128, 129},
+                                        {130, 131, 132, 133, 134},
+                                        {135, 136, 137, 138, 139},
+                                        {140, 141, 142, 143, 144},
+                                        {145, 146, 147, 148, 149}}}}});
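+        // Edge padding replicates the nearest input border value into the
+        // added rows and columns.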
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(
+            Array4D<int, 2, 3, 7, 7>{// NCHW
+                                     {{{{0, 0, 1, 2, 3, 4, 4},
+                                        {0, 0, 1, 2, 3, 4, 4},
+                                        {5, 5, 6, 7, 8, 9, 9},
+                                        {10, 10, 11, 12, 13, 14, 14},
+                                        {15, 15, 16, 17, 18, 19, 19},
+                                        {20, 20, 21, 22, 23, 24, 24},
+                                        {20, 20, 21, 22, 23, 24, 24}},
+
+                                       {{25, 25, 26, 27, 28, 29, 29},
+                                        {25, 25, 26, 27, 28, 29, 29},
+                                        {30, 30, 31, 32, 33, 34, 34},
+                                        {35, 35, 36, 37, 38, 39, 39},
+                                        {40, 40, 41, 42, 43, 44, 44},
+                                        {45, 45, 46, 47, 48, 49, 49},
+                                        {45, 45, 46, 47, 48, 49, 49}},
+
+                                       {{50, 50, 51, 52, 53, 54, 54},
+                                        {50, 50, 51, 52, 53, 54, 54},
+                                        {55, 55, 56, 57, 58, 59, 59},
+                                        {60, 60, 61, 62, 63, 64, 64},
+                                        {65, 65, 66, 67, 68, 69, 69},
+                                        {70, 70, 71, 72, 73, 74, 74},
+                                        {70, 70, 71, 72, 73, 74, 74}}},
+                                      {{{75, 75, 76, 77, 78, 79, 79},
+                                        {75, 75, 76, 77, 78, 79, 79},
+                                        {80, 80, 81, 82, 83, 84, 84},
+                                        {85, 85, 86, 87, 88, 89, 89},
+                                        {90, 90, 91, 92, 93, 94, 94},
+                                        {95, 95, 96, 97, 98, 99, 99},
+                                        {95, 95, 96, 97, 98, 99, 99}},
+
+                                       {{100, 100, 101, 102, 103, 104, 104},
+                                        {100, 100, 101, 102, 103, 104, 104},
+                                        {105, 105, 106, 107, 108, 109, 109},
+                                        {110, 110, 111, 112, 113, 114, 114},
+                                        {115, 115, 116, 117, 118, 119, 119},
+                                        {120, 120, 121, 122, 123, 124, 124},
+                                        {120, 120, 121, 122, 123, 124, 124}},
+
+                                       {{125, 125, 126, 127, 128, 129, 129},
+                                        {125, 125, 126, 127, 128, 129, 129},
+                                        {130, 130, 131, 132, 133, 134, 134},
+                                        {135, 135, 136, 137, 138, 139, 139},
+                                        {140, 140, 141, 142, 143, 144, 144},
+                                        {145, 145, 146, 147, 148, 149, 149},
+                                        {145, 145, 146, 147, 148, 149, 149}}}}});
+
+        myPad->getOperator()->associateInput(0, myInput);
         myPad->getOperator()->computeOutputDims();
         myPad->forward();
         // myPad->getOperator()->getOutput(0)->print();
         REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
     }
 
-    SECTION("Pad Reflect") {
-        std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Reflect);
-        myPad->getOperator()->setDatatype(DataType::Int32);
+    SECTION("Pad Reflect")
+    {
+        std::shared_ptr<Node> myPad
+            = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Reflect);
+        myPad->getOperator()->setDataType(DataType::Int32);
         myPad->getOperator()->setBackend("cpu");
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
-            {
-                {
-                    {{  0,   1,   2,   3,   4},
-                    {  5,   6,   7,   8,   9},
-                    { 10,  11,  12,  13,  14},
-                    { 15,  16,  17,  18,  19},
-                    { 20,  21,  22,  23,  24}},
-
-                    {{ 25,  26,  27,  28,  29},
-                    { 30,  31,  32,  33,  34},
-                    { 35,  36,  37,  38,  39},
-                    { 40,  41,  42,  43,  44},
-                    { 45,  46,  47,  48,  49}},
-
-                    {{ 50,  51,  52,  53,  54},
-                    { 55,  56,  57,  58,  59},
-                    { 60,  61,  62,  63,  64},
-                    { 65,  66,  67,  68,  69},
-                    { 70,  71,  72,  73,  74}}
-                },
-                {
-                    {{ 75,  76,  77,  78,  79},
-                    { 80,  81,  82,  83,  84},
-                    { 85,  86,  87,  88,  89},
-                    { 90,  91,  92,  93,  94},
-                    { 95,  96,  97,  98,  99}},
-
-                    {{100, 101, 102, 103, 104},
-                    {105, 106, 107, 108, 109},
-                    {110, 111, 112, 113, 114},
-                    {115, 116, 117, 118, 119},
-                    {120, 121, 122, 123, 124}},
-
-                    {{125, 126, 127, 128, 129},
-                    {130, 131, 132, 133, 134},
-                    {135, 136, 137, 138, 139},
-                    {140, 141, 142, 143, 144},
-                    {145, 146, 147, 148, 149}}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
-            {
-                {
-                    {
-                    { 6, 5, 6, 7, 8, 9, 5},
-                    { 1, 0, 1, 2, 3, 4, 0},
-                    { 6, 5, 6, 7, 8, 9, 5},
-                    { 11, 10, 11, 12, 13, 14, 10},
-                    { 16, 15, 16, 17, 18, 19, 15},
-                    { 21, 20, 21, 22, 23, 24, 20},
-                    { 1, 0, 1, 2, 3, 4, 0}
-                    },
-                    {
-                    { 31, 30, 31, 32, 33, 34, 30},
-                    { 26, 25, 26, 27, 28, 29, 25},
-                    { 31, 30, 31, 32, 33, 34, 30},
-                    { 36, 35, 36, 37, 38, 39, 35},
-                    { 41, 40, 41, 42, 43, 44, 40},
-                    { 46, 45, 46, 47, 48, 49, 45},
-                    { 26, 25, 26, 27, 28, 29, 25}
-                    },
-                    {
-                    { 56, 55, 56, 57, 58, 59, 55},
-                    { 51, 50, 51, 52, 53, 54, 50},
-                    { 56, 55, 56, 57, 58, 59, 55},
-                    { 61, 60, 61, 62, 63, 64, 60},
-                    { 66, 65, 66, 67, 68, 69, 65},
-                    { 71, 70, 71, 72, 73, 74, 70},
-                    { 51, 50, 51, 52, 53, 54, 50}
-                    }
-                },
-                {
-                    {
-                    { 81, 80, 81, 82, 83, 84, 80},
-                    { 76, 75, 76, 77, 78, 79, 75},
-                    { 81, 80, 81, 82, 83, 84, 80},
-                    { 86, 85, 86, 87, 88, 89, 85},
-                    { 91, 90, 91, 92, 93, 94, 90},
-                    { 96, 95, 96, 97, 98, 99, 95},
-                    { 76, 75, 76, 77, 78, 79, 75}
-                    },
-                    {
-                    { 106, 105, 106, 107, 108, 109, 105},
-                    { 101, 100, 101, 102, 103, 104, 100},
-                    { 106, 105, 106, 107, 108, 109, 105},
-                    { 111, 110, 111, 112, 113, 114, 110},
-                    { 116, 115, 116, 117, 118, 119, 115},
-                    { 121, 120, 121, 122, 123, 124, 120},
-                    { 101, 100, 101, 102, 103, 104, 100}
-                    },
-                    {
-                    { 131, 130, 131, 132, 133, 134, 130},
-                    { 126, 125, 126, 127, 128, 129, 125},
-                    { 131, 130, 131, 132, 133, 134, 130},
-                    { 136, 135, 136, 137, 138, 139, 135},
-                    { 141, 140, 141, 142, 143, 144, 140},
-                    { 146, 145, 146, 147, 148, 149, 145},
-                    { 126, 125, 126, 127, 128, 129, 125}
-                    }
-                    }
-                }
-        });
-
-        myPad->getOperator()->associateInput(0,myInput);
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+            Array4D<int, 2, 3, 5, 5>{// NCHW
+                                     {{{{0, 1, 2, 3, 4},
+                                        {5, 6, 7, 8, 9},
+                                        {10, 11, 12, 13, 14},
+                                        {15, 16, 17, 18, 19},
+                                        {20, 21, 22, 23, 24}},
+
+                                       {{25, 26, 27, 28, 29},
+                                        {30, 31, 32, 33, 34},
+                                        {35, 36, 37, 38, 39},
+                                        {40, 41, 42, 43, 44},
+                                        {45, 46, 47, 48, 49}},
+
+                                       {{50, 51, 52, 53, 54},
+                                        {55, 56, 57, 58, 59},
+                                        {60, 61, 62, 63, 64},
+                                        {65, 66, 67, 68, 69},
+                                        {70, 71, 72, 73, 74}}},
+                                      {{{75, 76, 77, 78, 79},
+                                        {80, 81, 82, 83, 84},
+                                        {85, 86, 87, 88, 89},
+                                        {90, 91, 92, 93, 94},
+                                        {95, 96, 97, 98, 99}},
+
+                                       {{100, 101, 102, 103, 104},
+                                        {105, 106, 107, 108, 109},
+                                        {110, 111, 112, 113, 114},
+                                        {115, 116, 117, 118, 119},
+                                        {120, 121, 122, 123, 124}},
+
+                                       {{125, 126, 127, 128, 129},
+                                        {130, 131, 132, 133, 134},
+                                        {135, 136, 137, 138, 139},
+                                        {140, 141, 142, 143, 144},
+                                        {145, 146, 147, 148, 149}}}}});
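+        // Expected 7x7 output for Reflect padding as computed by the CPU kernel.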
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(
+            Array4D<int, 2, 3, 7, 7>{// NCHW
+                                     {{{{6, 5, 6, 7, 8, 9, 5},
+                                        {1, 0, 1, 2, 3, 4, 0},
+                                        {6, 5, 6, 7, 8, 9, 5},
+                                        {11, 10, 11, 12, 13, 14, 10},
+                                        {16, 15, 16, 17, 18, 19, 15},
+                                        {21, 20, 21, 22, 23, 24, 20},
+                                        {1, 0, 1, 2, 3, 4, 0}},
+                                       {{31, 30, 31, 32, 33, 34, 30},
+                                        {26, 25, 26, 27, 28, 29, 25},
+                                        {31, 30, 31, 32, 33, 34, 30},
+                                        {36, 35, 36, 37, 38, 39, 35},
+                                        {41, 40, 41, 42, 43, 44, 40},
+                                        {46, 45, 46, 47, 48, 49, 45},
+                                        {26, 25, 26, 27, 28, 29, 25}},
+                                       {{56, 55, 56, 57, 58, 59, 55},
+                                        {51, 50, 51, 52, 53, 54, 50},
+                                        {56, 55, 56, 57, 58, 59, 55},
+                                        {61, 60, 61, 62, 63, 64, 60},
+                                        {66, 65, 66, 67, 68, 69, 65},
+                                        {71, 70, 71, 72, 73, 74, 70},
+                                        {51, 50, 51, 52, 53, 54, 50}}},
+                                      {{{81, 80, 81, 82, 83, 84, 80},
+                                        {76, 75, 76, 77, 78, 79, 75},
+                                        {81, 80, 81, 82, 83, 84, 80},
+                                        {86, 85, 86, 87, 88, 89, 85},
+                                        {91, 90, 91, 92, 93, 94, 90},
+                                        {96, 95, 96, 97, 98, 99, 95},
+                                        {76, 75, 76, 77, 78, 79, 75}},
+                                       {{106, 105, 106, 107, 108, 109, 105},
+                                        {101, 100, 101, 102, 103, 104, 100},
+                                        {106, 105, 106, 107, 108, 109, 105},
+                                        {111, 110, 111, 112, 113, 114, 110},
+                                        {116, 115, 116, 117, 118, 119, 115},
+                                        {121, 120, 121, 122, 123, 124, 120},
+                                        {101, 100, 101, 102, 103, 104, 100}},
+                                       {{131, 130, 131, 132, 133, 134, 130},
+                                        {126, 125, 126, 127, 128, 129, 125},
+                                        {131, 130, 131, 132, 133, 134, 130},
+                                        {136, 135, 136, 137, 138, 139, 135},
+                                        {141, 140, 141, 142, 143, 144, 140},
+                                        {146, 145, 146, 147, 148, 149, 145},
+                                        {126, 125, 126, 127, 128, 129, 125}}}}});
+
+        myPad->getOperator()->associateInput(0, myInput);
         myPad->getOperator()->computeOutputDims();
         myPad->forward();
-         myPad->getOperator()->getOutput(0)->print();
+        // myPad->getOperator()->getOutput(0)->print();
         REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
     }
 
-    SECTION("Pad Wrap") {
+    SECTION("Pad Wrap")
+    {
         std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Wrap);
-        myPad->getOperator()->setDatatype(DataType::Int32);
+        myPad->getOperator()->setDataType(DataType::Int32);
         myPad->getOperator()->setBackend("cpu");
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
-            {
-                {
-                    {{  0,   1,   2,   3,   4},
-                    {  5,   6,   7,   8,   9},
-                    { 10,  11,  12,  13,  14},
-                    { 15,  16,  17,  18,  19},
-                    { 20,  21,  22,  23,  24}},
-
-                    {{ 25,  26,  27,  28,  29},
-                    { 30,  31,  32,  33,  34},
-                    { 35,  36,  37,  38,  39},
-                    { 40,  41,  42,  43,  44},
-                    { 45,  46,  47,  48,  49}},
-
-                    {{ 50,  51,  52,  53,  54},
-                    { 55,  56,  57,  58,  59},
-                    { 60,  61,  62,  63,  64},
-                    { 65,  66,  67,  68,  69},
-                    { 70,  71,  72,  73,  74}}
-                },
-                {
-                    {{ 75,  76,  77,  78,  79},
-                    { 80,  81,  82,  83,  84},
-                    { 85,  86,  87,  88,  89},
-                    { 90,  91,  92,  93,  94},
-                    { 95,  96,  97,  98,  99}},
-
-                    {{100, 101, 102, 103, 104},
-                    {105, 106, 107, 108, 109},
-                    {110, 111, 112, 113, 114},
-                    {115, 116, 117, 118, 119},
-                    {120, 121, 122, 123, 124}},
-
-                    {{125, 126, 127, 128, 129},
-                    {130, 131, 132, 133, 134},
-                    {135, 136, 137, 138, 139},
-                    {140, 141, 142, 143, 144},
-                    {145, 146, 147, 148, 149}}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
-            {
-                {
-                    {{ 24,  20,  21,  22,  23,  24,  20},
-                    { 4,   0,   1,   2,   3,   4,  0},
-                    { 9,   5,   6,   7,   8,   9,  5},
-                    { 14,  10,  11,  12,  13,  14,  10},
-                    { 19,  15,  16,  17,  18,  19,  15},
-                    { 24,  20,  21,  22,  23,  24,  20},
-                    { 4,   0,   1,   2,   3,   4,  0}},
-
-                    {{ 49,  45,  46,  47,  48,  49, 45},
-                    { 29,  25,  26,  27,  28,  29,  25},
-                    { 34,  30,  31,  32,  33,  34,  30},
-                    { 39,  35,  36,  37,  38,  39,  35},
-                    { 44,  40,  41,  42,  43,  44,  40},
-                    { 49,  45,  46,  47,  48,  49,  45},
-                    { 29,  25,  26,  27,  28,  29,  25}},
-
-                    {{ 74,  70,  71,  72,  73,  74,  70},
-                    { 54,  50,  51,  52,  53,  54,  50},
-                    { 59,  55,  56,  57,  58,  59,  55},
-                    { 64,  60,  61,  62,  63,  64,  60},
-                    { 69,  65,  66,  67,  68,  69,  65},
-                    { 74,  70,  71,  72,  73,  74,  70},
-                    { 54,  50,  51,  52,  53,  54,  50}}
-                },
-                {
-                    {{ 99,  95,  96,  97,  98,  99,  95},
-                    { 79,  75,  76,  77,  78,  79,  75},
-                    { 84,  80,  81,  82,  83,  84,  80},
-                    { 89,  85,  86,  87,  88,  89,  85},
-                    { 94,  90,  91,  92,  93,  94,  90},
-                    { 99,  95,  96,  97,  98,  99,  95},
-                    { 79,  75,  76,  77,  78,  79,  75}},
-
-                    {{124,  120, 121, 122, 123, 124,  120},
-                    {104,  100, 101, 102, 103, 104,  100},
-                    {109,  105, 106, 107, 108, 109, 105},
-                    {114,  110, 111, 112, 113, 114,  110},
-                    {119,  115, 116, 117, 118, 119,  115},
-                    {124,  120, 121, 122, 123, 124,  120},
-                    {104,  100, 101, 102, 103, 104,  100}},
-
-                    {{149,  145, 146, 147, 148, 149,  145},
-                    {129,  125, 126, 127, 128, 129,  125},
-                    {134,  130, 131, 132, 133, 134,  130},
-                    {139,  135, 136, 137, 138, 139,  135},
-                    {144,  140, 141, 142, 143, 144,  140},
-                    {149,  145, 146, 147, 148, 149,  145},
-                    {129,  125, 126, 127, 128, 129,  125}}
-                }
-            }
-        });
-
-        myPad->getOperator()->associateInput(0,myInput);
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+            Array4D<int, 2, 3, 5, 5>{// NCHW
+                                     {{{{0, 1, 2, 3, 4},
+                                        {5, 6, 7, 8, 9},
+                                        {10, 11, 12, 13, 14},
+                                        {15, 16, 17, 18, 19},
+                                        {20, 21, 22, 23, 24}},
+
+                                       {{25, 26, 27, 28, 29},
+                                        {30, 31, 32, 33, 34},
+                                        {35, 36, 37, 38, 39},
+                                        {40, 41, 42, 43, 44},
+                                        {45, 46, 47, 48, 49}},
+
+                                       {{50, 51, 52, 53, 54},
+                                        {55, 56, 57, 58, 59},
+                                        {60, 61, 62, 63, 64},
+                                        {65, 66, 67, 68, 69},
+                                        {70, 71, 72, 73, 74}}},
+                                      {{{75, 76, 77, 78, 79},
+                                        {80, 81, 82, 83, 84},
+                                        {85, 86, 87, 88, 89},
+                                        {90, 91, 92, 93, 94},
+                                        {95, 96, 97, 98, 99}},
+
+                                       {{100, 101, 102, 103, 104},
+                                        {105, 106, 107, 108, 109},
+                                        {110, 111, 112, 113, 114},
+                                        {115, 116, 117, 118, 119},
+                                        {120, 121, 122, 123, 124}},
+
+                                       {{125, 126, 127, 128, 129},
+                                        {130, 131, 132, 133, 134},
+                                        {135, 136, 137, 138, 139},
+                                        {140, 141, 142, 143, 144},
+                                        {145, 146, 147, 148, 149}}}}});
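+        // Wrap padding tiles the input periodically: each added border
+        // row/column is copied from the opposite edge.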
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(
+            Array4D<int, 2, 3, 7, 7>{// NCHW
+                                     {{{{24, 20, 21, 22, 23, 24, 20},
+                                        {4, 0, 1, 2, 3, 4, 0},
+                                        {9, 5, 6, 7, 8, 9, 5},
+                                        {14, 10, 11, 12, 13, 14, 10},
+                                        {19, 15, 16, 17, 18, 19, 15},
+                                        {24, 20, 21, 22, 23, 24, 20},
+                                        {4, 0, 1, 2, 3, 4, 0}},
+
+                                       {{49, 45, 46, 47, 48, 49, 45},
+                                        {29, 25, 26, 27, 28, 29, 25},
+                                        {34, 30, 31, 32, 33, 34, 30},
+                                        {39, 35, 36, 37, 38, 39, 35},
+                                        {44, 40, 41, 42, 43, 44, 40},
+                                        {49, 45, 46, 47, 48, 49, 45},
+                                        {29, 25, 26, 27, 28, 29, 25}},
+
+                                       {{74, 70, 71, 72, 73, 74, 70},
+                                        {54, 50, 51, 52, 53, 54, 50},
+                                        {59, 55, 56, 57, 58, 59, 55},
+                                        {64, 60, 61, 62, 63, 64, 60},
+                                        {69, 65, 66, 67, 68, 69, 65},
+                                        {74, 70, 71, 72, 73, 74, 70},
+                                        {54, 50, 51, 52, 53, 54, 50}}},
+                                      {{{99, 95, 96, 97, 98, 99, 95},
+                                        {79, 75, 76, 77, 78, 79, 75},
+                                        {84, 80, 81, 82, 83, 84, 80},
+                                        {89, 85, 86, 87, 88, 89, 85},
+                                        {94, 90, 91, 92, 93, 94, 90},
+                                        {99, 95, 96, 97, 98, 99, 95},
+                                        {79, 75, 76, 77, 78, 79, 75}},
+
+                                       {{124, 120, 121, 122, 123, 124, 120},
+                                        {104, 100, 101, 102, 103, 104, 100},
+                                        {109, 105, 106, 107, 108, 109, 105},
+                                        {114, 110, 111, 112, 113, 114, 110},
+                                        {119, 115, 116, 117, 118, 119, 115},
+                                        {124, 120, 121, 122, 123, 124, 120},
+                                        {104, 100, 101, 102, 103, 104, 100}},
+
+                                       {{149, 145, 146, 147, 148, 149, 145},
+                                        {129, 125, 126, 127, 128, 129, 125},
+                                        {134, 130, 131, 132, 133, 134, 130},
+                                        {139, 135, 136, 137, 138, 139, 135},
+                                        {144, 140, 141, 142, 143, 144, 140},
+                                        {149, 145, 146, 147, 148, 149, 145},
+                                        {129, 125, 126, 127, 128, 129, 125}}}}});
+
+        myPad->getOperator()->associateInput(0, myInput);
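+        // With the input associated, output dimensions can be derived before the forward pass.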
         myPad->getOperator()->computeOutputDims();
         myPad->forward();
         // myPad->getOperator()->getOutput(0)->print();
diff --git a/unit_tests/operator/Test_PaddedConv.cpp b/unit_tests/operator/Test_PaddedConv.cpp
index e41be85ab00faae1af7239c43b74a34f558a663c..367ee3f51d64db24f62adfdff92886e10a37bc48 100644
--- a/unit_tests/operator/Test_PaddedConv.cpp
+++ b/unit_tests/operator/Test_PaddedConv.cpp
@@ -21,296 +21,202 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] PaddedConv(forward)") {
-    SECTION("Classic Conv") {
-        std::shared_ptr<Node> myConv = PaddedConv(3,4,{3,3}, "myconv");
-        myConv->getOperator()->setDatatype(DataType::Int32);
+TEST_CASE("[cpu/operator] PaddedConv(forward)")
+{
+    SECTION("Classic Conv")
+    {
+        std::shared_ptr<Node> myConv = PaddedConv(3, 4, {3, 3}, "myconv");
+        myConv->getOperator()->setDataType(DataType::Int32);
         myConv->getOperator()->setBackend("cpu");
-        std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
-            {
-                {
-                    {{  0,   1,   2},
-                    {  3,   4,   5},
-                    {  6,   7,   8}},
-                    {{  9,  10,  11},
-                    { 12,  13,  14},
-                    { 15,  16,  17}},
-                    {{ 18,  19,  20},
-                    { 21,  22,  23},
-                    { 24,  25,  26}}
-                },
-                {
-                    {{ 27,  28,  29},
-                    { 30,  31,  32},
-                    { 33,  34,  35}},
-                    {{ 36,  37,  38},
-                    { 39,  40,  41},
-                    { 42,  43,  44}},
-                    {{ 45,  46,  47},
-                    { 48,  49,  50},
-                    { 51,  52,  53}}
-                },
-                {
-                    {{ 54,  55,  56},
-                    { 57,  58,  59},
-                    { 60,  61,  62}},
-                    {{ 63,  64,  65},
-                    { 66,  67,  68},
-                    { 69,  70,  71}},
-                    {{ 72,  73,  74},
-                    { 75,  76,  77},
-                    { 78,  79,  80}}
-                },
-                {
-                    {{ 81,  82,  83},
-                    { 84,  85,  86},
-                    { 87,  88,  89}},
-                    {{ 90,  91,  92},
-                    { 93,  94,  95},
-                    { 96,  97,  98}},
-                    {{ 99, 100, 101},
-                    {102, 103, 104},
-                    {105, 106, 107}}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
-            {
-                {
-                    {{  0,   1,   2,   3,   4},
-                    {  5,   6,   7,   8,   9},
-                    { 10,  11,  12,  13,  14},
-                    { 15,  16,  17,  18,  19},
-                    { 20,  21,  22,  23,  24}},
-
-                    {{ 25,  26,  27,  28,  29},
-                    { 30,  31,  32,  33,  34},
-                    { 35,  36,  37,  38,  39},
-                    { 40,  41,  42,  43,  44},
-                    { 45,  46,  47,  48,  49}},
-
-                    {{ 50,  51,  52,  53,  54},
-                    { 55,  56,  57,  58,  59},
-                    { 60,  61,  62,  63,  64},
-                    { 65,  66,  67,  68,  69},
-                    { 70,  71,  72,  73,  74}}
-                },
-                {
-                    {{ 75,  76,  77,  78,  79},
-                    { 80,  81,  82,  83,  84},
-                    { 85,  86,  87,  88,  89},
-                    { 90,  91,  92,  93,  94},
-                    { 95,  96,  97,  98,  99}},
-
-                    {{100, 101, 102, 103, 104},
-                    {105, 106, 107, 108, 109},
-                    {110, 111, 112, 113, 114},
-                    {115, 116, 117, 118, 119},
-                    {120, 121, 122, 123, 124}},
-
-                    {{125, 126, 127, 128, 129},
-                    {130, 131, 132, 133, 134},
-                    {135, 136, 137, 138, 139},
-                    {140, 141, 142, 143, 144},
-                    {145, 146, 147, 148, 149}}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> { 
-            {
-                {
-                    {{ 15226,  15577,  15928},
-                    { 16981,  17332,  17683},
-                    { 18736,  19087,  19438}},
-                    {{ 37818,  38898,  39978},
-                    { 43218,  44298,  45378},
-                    { 48618,  49698,  50778}},
-                    {{ 60426,  62235,  64044},
-                    { 69471,  71280,  73089},
-                    { 78516,  80325,  82134}},
-                    {{ 83016,  85554,  88092},
-                    { 95706,  98244, 100782},
-                    {108396, 110934, 113472}}
-                },
-                {
-                    {{ 41551,  41902,  42253},
-                    { 43306,  43657,  44008},
-                    { 45061,  45412,  45763}},
-                    {{118818, 119898, 120978},
-                    {124218, 125298, 126378},
-                    {129618, 130698, 131778}},
-                    {{196101, 197910, 199719},
-                    {205146, 206955, 208764},
-                    {214191, 216000, 217809}},
-                    {{273366, 275904, 278442},
-                    {286056, 288594, 291132},
-                    {298746, 301284, 303822}}
-                }
-            }
-        });
-
-        myConv->getOperator()->associateInput(0,myInput);
-        myConv->getOperator()->associateInput(1,myWeights);
-        myConv->getOperator()->associateInput(2,myBias);
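+        // Weights for Conv(3 -> 4): 4 output channels x 3 input channels, 3x3 kernels.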
+        std::shared_ptr<Tensor> myWeights
+            = std::make_shared<Tensor>(Array4D<int, 4, 3, 3, 3>{
+                {{{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}},
+                  {{9, 10, 11}, {12, 13, 14}, {15, 16, 17}},
+                  {{18, 19, 20}, {21, 22, 23}, {24, 25, 26}}},
+                 {{{27, 28, 29}, {30, 31, 32}, {33, 34, 35}},
+                  {{36, 37, 38}, {39, 40, 41}, {42, 43, 44}},
+                  {{45, 46, 47}, {48, 49, 50}, {51, 52, 53}}},
+                 {{{54, 55, 56}, {57, 58, 59}, {60, 61, 62}},
+                  {{63, 64, 65}, {66, 67, 68}, {69, 70, 71}},
+                  {{72, 73, 74}, {75, 76, 77}, {78, 79, 80}}},
+                 {{{81, 82, 83}, {84, 85, 86}, {87, 88, 89}},
+                  {{90, 91, 92}, {93, 94, 95}, {96, 97, 98}},
+                  {{99, 100, 101}, {102, 103, 104}, {105, 106, 107}}}}});
+        std::shared_ptr<Tensor> myBias
+            = std::make_shared<Tensor>(Array1D<int, 4>{{7, 0, 9, 0}});
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+            Array4D<int, 2, 3, 5, 5>{// NCHW
+                                     {{{{0, 1, 2, 3, 4},
+                                        {5, 6, 7, 8, 9},
+                                        {10, 11, 12, 13, 14},
+                                        {15, 16, 17, 18, 19},
+                                        {20, 21, 22, 23, 24}},
+
+                                       {{25, 26, 27, 28, 29},
+                                        {30, 31, 32, 33, 34},
+                                        {35, 36, 37, 38, 39},
+                                        {40, 41, 42, 43, 44},
+                                        {45, 46, 47, 48, 49}},
+
+                                       {{50, 51, 52, 53, 54},
+                                        {55, 56, 57, 58, 59},
+                                        {60, 61, 62, 63, 64},
+                                        {65, 66, 67, 68, 69},
+                                        {70, 71, 72, 73, 74}}},
+                                      {{{75, 76, 77, 78, 79},
+                                        {80, 81, 82, 83, 84},
+                                        {85, 86, 87, 88, 89},
+                                        {90, 91, 92, 93, 94},
+                                        {95, 96, 97, 98, 99}},
+
+                                       {{100, 101, 102, 103, 104},
+                                        {105, 106, 107, 108, 109},
+                                        {110, 111, 112, 113, 114},
+                                        {115, 116, 117, 118, 119},
+                                        {120, 121, 122, 123, 124}},
+
+                                       {{125, 126, 127, 128, 129},
+                                        {130, 131, 132, 133, 134},
+                                        {135, 136, 137, 138, 139},
+                                        {140, 141, 142, 143, 144},
+                                        {145, 146, 147, 148, 149}}}}});
+        std::shared_ptr<Tensor> myOutput
+            = std::make_shared<Tensor>(Array4D<int, 2, 4, 3, 3>{
+                {{{{15226, 15577, 15928}, {16981, 17332, 17683}, {18736, 19087, 19438}},
+                  {{37818, 38898, 39978}, {43218, 44298, 45378}, {48618, 49698, 50778}},
+                  {{60426, 62235, 64044}, {69471, 71280, 73089}, {78516, 80325, 82134}},
+                  {{83016, 85554, 88092},
+                   {95706, 98244, 100782},
+                   {108396, 110934, 113472}}},
+                 {{{41551, 41902, 42253}, {43306, 43657, 44008}, {45061, 45412, 45763}},
+                  {{118818, 119898, 120978},
+                   {124218, 125298, 126378},
+                   {129618, 130698, 131778}},
+                  {{196101, 197910, 199719},
+                   {205146, 206955, 208764},
+                   {214191, 216000, 217809}},
+                  {{273366, 275904, 278442},
+                   {286056, 288594, 291132},
+                   {298746, 301284, 303822}}}}});
+
+        myConv->getOperator()->associateInput(0, myInput);
+        myConv->getOperator()->associateInput(1, myWeights);
+        myConv->getOperator()->associateInput(2, myBias);
         myConv->getOperator()->computeOutputDims();
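+        // Run the forward pass and check the result against the precomputed reference.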
         myConv->forward();
 
         REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
     }
-    SECTION("test Padding") {
-        std::shared_ptr<Node> myConv = PaddedConv(3,4,{3,3}, "myconv", {1,1}, {1,1,1,1});
-        myConv->getOperator()->setDatatype(DataType::Int32);
+    SECTION("test Padding")
+    {
+        std::shared_ptr<Node> myConv
+            = PaddedConv(3, 4, {3, 3}, "myconv", {1, 1}, {1, 1, 1, 1});
+        myConv->getOperator()->setDataType(DataType::Int32);
         myConv->getOperator()->setBackend("cpu");
-        std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
-            {
-                {
-                    {{  0,   1,   2},
-                    {  3,   4,   5},
-                    {  6,   7,   8}},
-                    {{  9,  10,  11},
-                    { 12,  13,  14},
-                    { 15,  16,  17}},
-                    {{ 18,  19,  20},
-                    { 21,  22,  23},
-                    { 24,  25,  26}}
-                },
-                {
-                    {{ 27,  28,  29},
-                    { 30,  31,  32},
-                    { 33,  34,  35}},
-                    {{ 36,  37,  38},
-                    { 39,  40,  41},
-                    { 42,  43,  44}},
-                    {{ 45,  46,  47},
-                    { 48,  49,  50},
-                    { 51,  52,  53}}
-                },
-                {
-                    {{ 54,  55,  56},
-                    { 57,  58,  59},
-                    { 60,  61,  62}},
-                    {{ 63,  64,  65},
-                    { 66,  67,  68},
-                    { 69,  70,  71}},
-                    {{ 72,  73,  74},
-                    { 75,  76,  77},
-                    { 78,  79,  80}}
-                },
-                {
-                    {{ 81,  82,  83},
-                    { 84,  85,  86},
-                    { 87,  88,  89}},
-                    {{ 90,  91,  92},
-                    { 93,  94,  95},
-                    { 96,  97,  98}},
-                    {{ 99, 100, 101},
-                    {102, 103, 104},
-                    {105, 106, 107}}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
-            {
-                {
-                    {{  0,   1,   2,   3,   4},
-                    {  5,   6,   7,   8,   9},
-                    { 10,  11,  12,  13,  14},
-                    { 15,  16,  17,  18,  19},
-                    { 20,  21,  22,  23,  24}},
-
-                    {{ 25,  26,  27,  28,  29},
-                    { 30,  31,  32,  33,  34},
-                    { 35,  36,  37,  38,  39},
-                    { 40,  41,  42,  43,  44},
-                    { 45,  46,  47,  48,  49}},
-
-                    {{ 50,  51,  52,  53,  54},
-                    { 55,  56,  57,  58,  59},
-                    { 60,  61,  62,  63,  64},
-                    { 65,  66,  67,  68,  69},
-                    { 70,  71,  72,  73,  74}}
-                },
-                {
-                    {{ 75,  76,  77,  78,  79},
-                    { 80,  81,  82,  83,  84},
-                    { 85,  86,  87,  88,  89},
-                    { 90,  91,  92,  93,  94},
-                    { 95,  96,  97,  98,  99}},
-
-                    {{100, 101, 102, 103, 104},
-                    {105, 106, 107, 108, 109},
-                    {110, 111, 112, 113, 114},
-                    {115, 116, 117, 118, 119},
-                    {120, 121, 122, 123, 124}},
-
-                    {{125, 126, 127, 128, 129},
-                    {130, 131, 132, 133, 134},
-                    {135, 136, 137, 138, 139},
-                    {140, 141, 142, 143, 144},
-                    {145, 146, 147, 148, 149}}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,5,5> { 
-            {
-                {
-                    {{  6895,  10225,  10486,  10747,   7063},
-                     { 10303,  15226,  15577,  15928,  10429},
-                     { 11518,  16981,  17332,  17683,  11554},
-                     { 12733,  18736,  19087,  19438,  12679},
-                     {  8047,  11791,  11998,  12205,   7927}},
-
-                    {{ 15960,  24069,  24816,  25563,  17100},
-                     { 25119,  37818,  38898,  39978,  26703},
-                     { 28764,  43218,  44298,  45378,  30258},
-                     { 32409,  48618,  49698,  50778,  33813},
-                     { 21972,  32925,  33618,  34311,  22824}},
-
-                    {{ 25041,  37929,  39162,  40395,  27153},
-                     { 39951,  60426,  62235,  64044,  42993},
-                     { 46026,  69471,  71280,  73089,  48978},
-                     { 52101,  78516,  80325,  82134,  54963},
-                     { 35913,  54075,  55254,  56433,  37737}},
-
-                    {{ 34104,  51771,  53490,  55209,  37188},
-                     { 54765,  83016,  85554,  88092,  59265},
-                     { 63270,  95706,  98244, 100782,  67680},
-                     { 71775, 108396, 110934, 113472,  76095},
-                     { 49836,  75207,  76872,  78537,  52632}}
-                },
-                {
-                    {{ 20395,  29800,  30061,  30322,  19663},
-                     { 28528,  41551,  41902,  42253,  27304},
-                     { 29743,  43306,  43657,  44008,  28429},
-                     { 30958,  45061,  45412,  45763,  29554},
-                     { 18847,  27316,  27523,  27730,  17827}},
-
-                    {{ 53760,  80094,  80841,  81588,  54000},
-                     { 79794, 118818, 119898, 120978,  80028},
-                     { 83439, 124218, 125298, 126378,  83583},
-                     { 87084, 129618, 130698, 131778,  87138},
-                     { 57072,  84900,  85593,  86286,  57024}},
-
-                    {{ 87141, 130404, 131637, 132870,  88353},
-                     {131076, 196101, 197910, 199719, 132768},
-                     {137151, 205146, 206955, 208764, 138753},
-                     {143226, 214191, 216000, 217809, 144738},
-                     { 95313, 142500, 143679, 144858,  96237}},
-
-                    {{120504, 180696, 182415, 184134, 122688},
-                     {182340, 273366, 275904, 278442, 185490},
-                     {190845, 286056, 288594, 291132, 193905},
-                     {199350, 298746, 301284, 303822, 202320},
-                     {133536, 200082, 201747, 203412, 135432}}
-                }
-            }
-        });
-
-        myConv->getOperator()->associateInput(0,myInput);
-        myConv->getOperator()->associateInput(1,myWeights);
-        myConv->getOperator()->associateInput(2,myBias);
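+        // Same weights and input as the classic case; padding {1, 1, 1, 1} with
+        // stride {1, 1} preserves the 5x5 spatial size in the output.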
+        std::shared_ptr<Tensor> myWeights
+            = std::make_shared<Tensor>(Array4D<int, 4, 3, 3, 3>{
+                {{{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}},
+                  {{9, 10, 11}, {12, 13, 14}, {15, 16, 17}},
+                  {{18, 19, 20}, {21, 22, 23}, {24, 25, 26}}},
+                 {{{27, 28, 29}, {30, 31, 32}, {33, 34, 35}},
+                  {{36, 37, 38}, {39, 40, 41}, {42, 43, 44}},
+                  {{45, 46, 47}, {48, 49, 50}, {51, 52, 53}}},
+                 {{{54, 55, 56}, {57, 58, 59}, {60, 61, 62}},
+                  {{63, 64, 65}, {66, 67, 68}, {69, 70, 71}},
+                  {{72, 73, 74}, {75, 76, 77}, {78, 79, 80}}},
+                 {{{81, 82, 83}, {84, 85, 86}, {87, 88, 89}},
+                  {{90, 91, 92}, {93, 94, 95}, {96, 97, 98}},
+                  {{99, 100, 101}, {102, 103, 104}, {105, 106, 107}}}}});
+        std::shared_ptr<Tensor> myBias
+            = std::make_shared<Tensor>(Array1D<int, 4>{{7, 0, 9, 0}});
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+            Array4D<int, 2, 3, 5, 5>{// NCHW
+                                     {{{{0, 1, 2, 3, 4},
+                                        {5, 6, 7, 8, 9},
+                                        {10, 11, 12, 13, 14},
+                                        {15, 16, 17, 18, 19},
+                                        {20, 21, 22, 23, 24}},
+
+                                       {{25, 26, 27, 28, 29},
+                                        {30, 31, 32, 33, 34},
+                                        {35, 36, 37, 38, 39},
+                                        {40, 41, 42, 43, 44},
+                                        {45, 46, 47, 48, 49}},
+
+                                       {{50, 51, 52, 53, 54},
+                                        {55, 56, 57, 58, 59},
+                                        {60, 61, 62, 63, 64},
+                                        {65, 66, 67, 68, 69},
+                                        {70, 71, 72, 73, 74}}},
+                                      {{{75, 76, 77, 78, 79},
+                                        {80, 81, 82, 83, 84},
+                                        {85, 86, 87, 88, 89},
+                                        {90, 91, 92, 93, 94},
+                                        {95, 96, 97, 98, 99}},
+
+                                       {{100, 101, 102, 103, 104},
+                                        {105, 106, 107, 108, 109},
+                                        {110, 111, 112, 113, 114},
+                                        {115, 116, 117, 118, 119},
+                                        {120, 121, 122, 123, 124}},
+
+                                       {{125, 126, 127, 128, 129},
+                                        {130, 131, 132, 133, 134},
+                                        {135, 136, 137, 138, 139},
+                                        {140, 141, 142, 143, 144},
+                                        {145, 146, 147, 148, 149}}}}});
+        std::shared_ptr<Tensor> myOutput
+            = std::make_shared<Tensor>(Array4D<int, 2, 4, 5, 5>{
+                {{{{6895, 10225, 10486, 10747, 7063},
+                   {10303, 15226, 15577, 15928, 10429},
+                   {11518, 16981, 17332, 17683, 11554},
+                   {12733, 18736, 19087, 19438, 12679},
+                   {8047, 11791, 11998, 12205, 7927}},
+
+                  {{15960, 24069, 24816, 25563, 17100},
+                   {25119, 37818, 38898, 39978, 26703},
+                   {28764, 43218, 44298, 45378, 30258},
+                   {32409, 48618, 49698, 50778, 33813},
+                   {21972, 32925, 33618, 34311, 22824}},
+
+                  {{25041, 37929, 39162, 40395, 27153},
+                   {39951, 60426, 62235, 64044, 42993},
+                   {46026, 69471, 71280, 73089, 48978},
+                   {52101, 78516, 80325, 82134, 54963},
+                   {35913, 54075, 55254, 56433, 37737}},
+
+                  {{34104, 51771, 53490, 55209, 37188},
+                   {54765, 83016, 85554, 88092, 59265},
+                   {63270, 95706, 98244, 100782, 67680},
+                   {71775, 108396, 110934, 113472, 76095},
+                   {49836, 75207, 76872, 78537, 52632}}},
+                 {{{20395, 29800, 30061, 30322, 19663},
+                   {28528, 41551, 41902, 42253, 27304},
+                   {29743, 43306, 43657, 44008, 28429},
+                   {30958, 45061, 45412, 45763, 29554},
+                   {18847, 27316, 27523, 27730, 17827}},
+
+                  {{53760, 80094, 80841, 81588, 54000},
+                   {79794, 118818, 119898, 120978, 80028},
+                   {83439, 124218, 125298, 126378, 83583},
+                   {87084, 129618, 130698, 131778, 87138},
+                   {57072, 84900, 85593, 86286, 57024}},
+
+                  {{87141, 130404, 131637, 132870, 88353},
+                   {131076, 196101, 197910, 199719, 132768},
+                   {137151, 205146, 206955, 208764, 138753},
+                   {143226, 214191, 216000, 217809, 144738},
+                   {95313, 142500, 143679, 144858, 96237}},
+
+                  {{120504, 180696, 182415, 184134, 122688},
+                   {182340, 273366, 275904, 278442, 185490},
+                   {190845, 286056, 288594, 291132, 193905},
+                   {199350, 298746, 301284, 303822, 202320},
+                   {133536, 200082, 201747, 203412, 135432}}}}});
+
+        myConv->getOperator()->associateInput(0, myInput);
+        myConv->getOperator()->associateInput(1, myWeights);
+        myConv->getOperator()->associateInput(2, myBias);
         myConv->getOperator()->computeOutputDims();
         myConv->forward();
 
diff --git a/unit_tests/operator/Test_PowImpl.cpp b/unit_tests/operator/Test_PowImpl.cpp
index 37f9724b64b3e4676134a4e1738aff98927b5827..3c434d3cea0639fa64a4ead800220d8bbef6b5a1 100644
--- a/unit_tests/operator/Test_PowImpl.cpp
+++ b/unit_tests/operator/Test_PowImpl.cpp
@@ -32,7 +32,7 @@ TEST_CASE("[cpu/operator] Pow(forward)")
             Array2D<float, 2, 2>{{{0.17757183, 0.26547423}, {0.72671247, 0.01804400}}});
 
         std::shared_ptr<Node> myPow = Pow();
-        myPow->getOperator()->setDatatype(DataType::Float32);
+        myPow->getOperator()->setDataType(DataType::Float32);
         myPow->getOperator()->setBackend("cpu");
         myPow->getOperator()->associateInput(0, input_1);
         myPow->getOperator()->associateInput(1, input_2);
@@ -68,7 +68,7 @@ TEST_CASE("[cpu/operator] Pow(forward)")
                   {0.97706109, 0.93867886, 0.84118503}}}});
 
         std::shared_ptr<Node> myPow = Pow();
-        myPow->getOperator()->setDatatype(DataType::Float32);
+        myPow->getOperator()->setDataType(DataType::Float32);
         myPow->getOperator()->setBackend("cpu");
         myPow->getOperator()->associateInput(0, input_1);
         myPow->getOperator()->associateInput(1, input_2);
@@ -94,7 +94,7 @@ TEST_CASE("[cpu/operator] Pow(forward)")
             Array2D<float, 2, 2>{{{0.87504572, 0.57271165}, {0.92909741, 0.96922028}}});
 
         std::shared_ptr<Node> myPow = Pow();
-        myPow->getOperator()->setDatatype(DataType::Float32);
+        myPow->getOperator()->setDataType(DataType::Float32);
         myPow->getOperator()->setBackend("cpu");
         myPow->getOperator()->associateInput(0, input_1);
         myPow->getOperator()->associateInput(1, input_2);
@@ -160,7 +160,7 @@ TEST_CASE("[cpu/operator] Pow(forward)")
                    {6.55772448e-01, 3.32630165e-02, 3.81309800e-02}}}}});
 
         std::shared_ptr<Node> myPow = Pow();
-        myPow->getOperator()->setDatatype(DataType::Float32);
+        myPow->getOperator()->setDataType(DataType::Float32);
         myPow->getOperator()->setBackend("cpu");
         myPow->getOperator()->associateInput(0, input_1);
         myPow->getOperator()->associateInput(1, input_2);
diff --git a/unit_tests/operator/Test_ReLUImpl.cpp b/unit_tests/operator/Test_ReLUImpl.cpp
index b568e32f33f423bb583a700a8a597bbfda92b7a7..a1aebee34afc17e8dbad3b5c7ba99286a567f1bc 100644
--- a/unit_tests/operator/Test_ReLUImpl.cpp
+++ b/unit_tests/operator/Test_ReLUImpl.cpp
@@ -30,7 +30,7 @@ TEST_CASE("[cpu/operator] ReLU(forward)")
             = std::make_shared<Tensor>(Array1D<int, 10>{{0, 1, 2, 0, 4, 0, 0, 7, 8, 9}});
 
         std::shared_ptr<Node> myReLU = ReLU();
-        myReLU->getOperator()->setDatatype(DataType::Int32);
+        myReLU->getOperator()->setDataType(DataType::Int32);
         myReLU->getOperator()->setBackend("cpu");
         myReLU->getOperator()->associateInput(0, input0);
         myReLU->getOperator()->computeOutputDims();
@@ -47,7 +47,7 @@ TEST_CASE("[cpu/operator] ReLU(forward)")
                 {{0, 1, 2, 0, 4, 0, 0, 7, 8, 9}, {0, 4, 2, 0, 4, 0, 0, 7, 0, 10}}});
 
         std::shared_ptr<Node> myReLU = ReLU();
-        myReLU->getOperator()->setDatatype(DataType::Int32);
+        myReLU->getOperator()->setDataType(DataType::Int32);
         myReLU->getOperator()->setBackend("cpu");
         myReLU->getOperator()->associateInput(0, input0);
         myReLU->getOperator()->computeOutputDims();
@@ -66,7 +66,7 @@ TEST_CASE("[cpu/operator] ReLU(forward)")
                  {{0, 1, 2, 0, 4, 0, 0, 7, 8, 9}, {0, 4, 2, 0, 4, 0, 0, 7, 0, 10}}}});
 
         std::shared_ptr<Node> myReLU = ReLU();
-        myReLU->getOperator()->setDatatype(DataType::Int32);
+        myReLU->getOperator()->setDataType(DataType::Int32);
         myReLU->getOperator()->setBackend("cpu");
         myReLU->getOperator()->associateInput(0, input0);
         myReLU->getOperator()->computeOutputDims();
@@ -91,7 +91,7 @@ TEST_CASE("[cpu/operator] ReLU(forward)")
                   {{0, 1, 2, 0, 4, 0, 0, 7, 8, 9}, {0, 4, 2, 0, 4, 0, 0, 7, 0, 10}}}}});
 
         std::shared_ptr<Node> myReLU = ReLU();
-        myReLU->getOperator()->setDatatype(DataType::Int32);
+        myReLU->getOperator()->setDataType(DataType::Int32);
         myReLU->getOperator()->setBackend("cpu");
         myReLU->getOperator()->associateInput(0, input0);
         myReLU->getOperator()->computeOutputDims();
diff --git a/unit_tests/operator/Test_SoftmaxImpl.cpp b/unit_tests/operator/Test_SoftmaxImpl.cpp
index 255f72a09b30174b2e6dc9b6a0f2eea4fa170444..67ebb84a5571705c06abcfc0f1f5c94b8ef1369c 100644
--- a/unit_tests/operator/Test_SoftmaxImpl.cpp
+++ b/unit_tests/operator/Test_SoftmaxImpl.cpp
@@ -69,7 +69,7 @@ TEST_CASE("[cpu/operator] Softmax(forward)")
                   0.16429459}}});
 
         std::shared_ptr<Node> mySoftmax = Softmax();
-        mySoftmax->getOperator()->setDatatype(DataType::Float32);
+        mySoftmax->getOperator()->setDataType(DataType::Float32);
         mySoftmax->getOperator()->setBackend("cpu");
         mySoftmax->getOperator()->associateInput(0, input);
         mySoftmax->getOperator()->computeOutputDims();
@@ -127,7 +127,7 @@ TEST_CASE("[cpu/operator] Softmax(forward)")
                    {0.34566763, 0.32462072, 0.48979440}}}}});
 
         std::shared_ptr<Node> mySoftmax = Softmax();
-        mySoftmax->getOperator()->setDatatype(DataType::Float32);
+        mySoftmax->getOperator()->setDataType(DataType::Float32);
         mySoftmax->getOperator()->setBackend("cpu");
         mySoftmax->getOperator()->associateInput(0, input);
         mySoftmax->getOperator()->computeOutputDims();
diff --git a/unit_tests/operator/Test_SplitConv.cpp b/unit_tests/operator/Test_SplitConv.cpp
index bfbd5e2f9c269a1ed07ba09af2e8ca44af6edb4e..a6df000d00b398f267ef819e33f3d0d31b8e02d9 100644
--- a/unit_tests/operator/Test_SplitConv.cpp
+++ b/unit_tests/operator/Test_SplitConv.cpp
@@ -25,7 +25,7 @@ TEST_CASE("[cpu/operator] SplitConv(forward)")
     SECTION("Splitted Conv")
     {
         std::shared_ptr<Node> myConv = Conv(3, 4, {3, 3}, "myconv");
-        myConv->getOperator()->setDatatype(DataType::Int32);
+        myConv->getOperator()->setDataType(DataType::Int32);
         myConv->getOperator()->setBackend("cpu");
         std::shared_ptr<Tensor> myWeights
             = std::make_shared<Tensor>(Array4D<int, 4, 3, 3, 3>{
@@ -108,7 +108,7 @@ TEST_CASE("[cpu/operator] SplitConv(forward)")
     SECTION("Point-wise splitted conv")
     {
         std::shared_ptr<Node> myConv = Conv(3, 4, {1, 1}, "myconv", {1, 1});
-        myConv->getOperator()->setDatatype(DataType::Float32);
+        myConv->getOperator()->setDataType(DataType::Float32);
         myConv->getOperator()->setBackend("cpu");
         myConv->getOperator()->input(0) = Array4D<float, 2, 3, 3, 3>{
             {{{{-1.38467371F, -0.87123615F, -0.22336592F},
diff --git a/unit_tests/operator/Test_SqrtImpl.cpp b/unit_tests/operator/Test_SqrtImpl.cpp
index 1464f0569c9a34397e8fba3f64ad7c09d8859240..c47a6825887b3d23529b30e72a4deb8492942112 100644
--- a/unit_tests/operator/Test_SqrtImpl.cpp
+++ b/unit_tests/operator/Test_SqrtImpl.cpp
@@ -30,7 +30,7 @@ TEST_CASE("[cpu/operator] Sqrt(forward)")
             Array2D<float, 2, 2>{{{4.00000000, 0.78883994}, {0.00000000, 1.35845140}}});
 
         std::shared_ptr<Node> mySqrt = Sqrt();
-        mySqrt->getOperator()->setDatatype(DataType::Float32);
+        mySqrt->getOperator()->setDataType(DataType::Float32);
         mySqrt->getOperator()->setBackend("cpu");
         mySqrt->getOperator()->associateInput(0, input);
         mySqrt->getOperator()->computeOutputDims();
@@ -90,7 +90,7 @@ TEST_CASE("[cpu/operator] Sqrt(forward)")
                    {0.3608653, 0.8571328, 0.16447252}}}}});
 
         std::shared_ptr<Node> mySqrt = Sqrt();
-        mySqrt->getOperator()->setDatatype(DataType::Float32);
+        mySqrt->getOperator()->setDataType(DataType::Float32);
         mySqrt->getOperator()->setBackend("cpu");
         mySqrt->getOperator()->associateInput(0, input);
         mySqrt->getOperator()->computeOutputDims();
diff --git a/unit_tests/operator/Test_SubImpl.cpp b/unit_tests/operator/Test_SubImpl.cpp
index eee61fcb02a1f18b6536f4d78e1dd4be1d3b492b..e706b55f034cc3c813af8a8481825a43bea1059f 100644
--- a/unit_tests/operator/Test_SubImpl.cpp
+++ b/unit_tests/operator/Test_SubImpl.cpp
@@ -33,7 +33,7 @@ TEST_CASE("[cpu/operator] Sub(forward)")
                 {{-2.15765429, -1.57187295}, {-1.76293385, -1.80046117}}});
 
         std::shared_ptr<Node> mySub = Sub();
-        mySub->getOperator()->setDatatype(DataType::Float32);
+        mySub->getOperator()->setDataType(DataType::Float32);
         mySub->getOperator()->setBackend("cpu");
         mySub->getOperator()->associateInput(0, input_1);
         mySub->getOperator()->associateInput(1, input_2);
@@ -59,7 +59,7 @@ TEST_CASE("[cpu/operator] Sub(forward)")
             Array2D<float, 2, 2>{{{-0.27494568, 0.09808338}, {0.01704526, 0.17480034}}});
 
         std::shared_ptr<Node> mySub = Sub();
-        mySub->getOperator()->setDatatype(DataType::Float32);
+        mySub->getOperator()->setDataType(DataType::Float32);
         mySub->getOperator()->setBackend("cpu");
         mySub->getOperator()->associateInput(0, input_1);
         mySub->getOperator()->associateInput(1, input_2);
@@ -95,7 +95,7 @@ TEST_CASE("[cpu/operator] Sub(forward)")
                   {0.35178328, -0.24023145, -0.05397654}}}});
 
         std::shared_ptr<Node> mySub = Sub();
-        mySub->getOperator()->setDatatype(DataType::Float32);
+        mySub->getOperator()->setDataType(DataType::Float32);
         mySub->getOperator()->setBackend("cpu");
         mySub->getOperator()->associateInput(0, input_1);
         mySub->getOperator()->associateInput(1, input_2);
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
index 78ab8d5b149e8f702558658fef0442f225de3813..07d83d64c18109be51b04aac2c72b8b93332d882 100644
--- a/unit_tests/scheduler/Test_Scheduler.cpp
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -14,8 +14,8 @@
 #include <string>
 
 #include "aidge/data/Tensor.hpp"
-#include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 
@@ -23,71 +23,77 @@
 
 using namespace Aidge;
 
-
-TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
-    std::shared_ptr<Tensor> inputTensor =
-            std::make_shared<Tensor>(Array4D<int, 2, 1, 5, 5>{{{{{0, 1, 2, 3, 4},
-                                                                 {5, 6, 7, 8, 9},
-                                                                 {10, 11, 12, 13, 14},
-                                                                 {15, 16, 17, 18, 19},
-                                                                 {20, 21, 22, 23, 24}}},
-                                                               {{{25, 26, 27, 28, 29},
-                                                                 {30, 31, 32, 33, 34},
-                                                                 {35, 36, 37, 38, 39},
-                                                                 {40, 41, 42, 43, 44},
-                                                                 {45, 46, 47, 48, 49}}}}});
-
-    std::shared_ptr<Tensor> weight1 = std::make_shared<Tensor>(
-            Array4D<int, 3, 1, 3, 3>{{{{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}},
-                                      {{{10, 11, 12}, {13, 14, 15}, {16, 17, 18}}},
-                                      {{{19, 20, 21}, {22, 23, 24}, {25, 26, 27}}}}});
+TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)")
+{
+    std::shared_ptr<Tensor> inputTensor
+        = std::make_shared<Tensor>(Array4D<int, 2, 1, 5, 5>{
+            {{{{0, 1, 2, 3, 4},
+               {5, 6, 7, 8, 9},
+               {10, 11, 12, 13, 14},
+               {15, 16, 17, 18, 19},
+               {20, 21, 22, 23, 24}}},
+             {{{25, 26, 27, 28, 29},
+               {30, 31, 32, 33, 34},
+               {35, 36, 37, 38, 39},
+               {40, 41, 42, 43, 44},
+               {45, 46, 47, 48, 49}}}}});
+
+    std::shared_ptr<Tensor> weight1 = std::make_shared<Tensor>(Array4D<int, 3, 1, 3, 3>{
+        {{{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}},
+         {{{10, 11, 12}, {13, 14, 15}, {16, 17, 18}}},
+         {{{19, 20, 21}, {22, 23, 24}, {25, 26, 27}}}}});
 
     std::shared_ptr<Tensor> bias1 = std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});
 
-    SECTION("Test Sequential graph") {
-        std::shared_ptr<GraphView> g =
-                Sequential({
-                    Conv(1, 3, {3, 3}, "conv1"),
-                    Conv(3, 4, {1, 1}, "conv2"),
-                    Conv(4, 3, {1, 1}, "conv3"),
-                    FC(5, false, "fc")});
-        g->setDatatype(Aidge::DataType::Int32);
+    SECTION("Test Sequential graph")
+    {
+        std::shared_ptr<GraphView> g = Sequential(
+            {Conv(1, 3, {3, 3}, "conv1"),
+             Conv(3, 4, {1, 1}, "conv2"),
+             Conv(4, 3, {1, 1}, "conv3"),
+             FC(5, false, "fc")});
+        g->setDataType(Aidge::DataType::Int32);
         g->setBackend("cpu");
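+        // Pipeline under test: conv1 (1 -> 3, 3x3) -> conv2 (3 -> 4, 1x1)
+        // -> conv3 (4 -> 3, 1x1) -> FC with 5 outputs.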
 
         g->getNode("conv1")->getOperator()->input(0) = *inputTensor;
         g->getNode("conv1")->getOperator()->input(1) = *weight1;
         g->getNode("conv1")->getOperator()->input(2) = *bias1;
 
-        std::shared_ptr<Tensor> weight2 =
-                std::make_shared<Tensor>(Array4D<int, 4, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}},
-                                                                   {{{4}}, {{5}}, {{6}}},
-                                                                   {{{7}}, {{8}}, {{9}}},
-                                                                   {{{10}}, {{11}}, {{12}}}}});
-        std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int, 4>{{1, 2, 3, 4}});
+        std::shared_ptr<Tensor> weight2
+            = std::make_shared<Tensor>(Array4D<int, 4, 3, 1, 1>{
+                {{{{1}}, {{2}}, {{3}}},
+                 {{{4}}, {{5}}, {{6}}},
+                 {{{7}}, {{8}}, {{9}}},
+                 {{{10}}, {{11}}, {{12}}}}});
+        std::shared_ptr<Tensor> bias2
+            = std::make_shared<Tensor>(Array1D<int, 4>{{1, 2, 3, 4}});
         g->getNode("conv2")->getOperator()->input(1) = *weight2;
         g->getNode("conv2")->getOperator()->input(2) = *bias2;
         // *(g->getNode("conv2")->getOperator()->input(1, weight2);
 
-        std::shared_ptr<Tensor> weight3 = std::make_shared<Tensor>(
-                Array4D<int, 3, 4, 1, 1>{{{{{1}}, {{2}}, {{3}}, {{4}}},
-                                          {{{5}}, {{6}}, {{7}}, {{8}}},
-                                          {{{9}}, {{10}}, {{11}}, {{12}}}}});
-        std::shared_ptr<Tensor> bias3 = std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});
+        std::shared_ptr<Tensor> weight3
+            = std::make_shared<Tensor>(Array4D<int, 3, 4, 1, 1>{
+                {{{{1}}, {{2}}, {{3}}, {{4}}},
+                 {{{5}}, {{6}}, {{7}}, {{8}}},
+                 {{{9}}, {{10}}, {{11}}, {{12}}}}});
+        std::shared_ptr<Tensor> bias3
+            = std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});
         g->getNode("conv3")->getOperator()->input(1) = *weight3;
         g->getNode("conv3")->getOperator()->input(2) = *bias3;
 
-        std::shared_ptr<Tensor> weightfc = std::make_shared<Tensor>(
-                Array2D<int, 5, 27>{{{1,  2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
-                                      15, 1, 2, 3, 4, 5, 6, 7, 8, 9,  10, 11, 12},
-                                     {13, 14, 15, 1,  2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
-                                      12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9},
-                                     {10, 11, 12, 13, 14, 15, 1,  2, 3, 4, 5, 6, 7, 8,
-                                      9,  10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6},
-                                     {7, 8, 9, 10, 11, 12, 13, 14, 15, 1,  2, 3, 4, 5,
-                                      6, 7, 8, 9,  10, 11, 12, 13, 14, 15, 1, 2, 3},
-                                     {4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2,
-                                      3, 4, 5, 6, 7, 8, 9,  10, 11, 12, 13, 14, 15}}});
-        std::shared_ptr<Tensor> biasfc = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
+        std::shared_ptr<Tensor> weightfc = std::make_shared<Tensor>(Array2D<int, 5, 27>{
+            {{1,  2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+              15, 1, 2, 3, 4, 5, 6, 7, 8, 9,  10, 11, 12},
+             {13, 14, 15, 1,  2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+              12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+             {10, 11, 12, 13, 14, 15, 1,  2, 3, 4, 5, 6, 7, 8,
+              9,  10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6},
+             {7, 8, 9, 10, 11, 12, 13, 14, 15, 1,  2, 3, 4, 5,
+              6, 7, 8, 9,  10, 11, 12, 13, 14, 15, 1, 2, 3},
+             {4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2,
+              3, 4, 5, 6, 7, 8, 9,  10, 11, 12, 13, 14, 15}}});
+        std::shared_ptr<Tensor> biasfc
+            = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
         g->getNode("fc")->getOperator()->input(1) = *weightfc;
         g->getNode("fc")->getOperator()->input(2) = *biasfc;
 
@@ -97,7 +103,8 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
         REQUIRE_NOTHROW(scheduler.forward());
         scheduler.saveSchedulingDiagram("schedulingSequential");
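+        // Every layer's intermediate output is compared against a precomputed reference.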
 
-        std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
+        std::shared_ptr<Tensor> expectedOutput1
+            = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
                 {{{{367, 412, 457}, {592, 637, 682}, {817, 862, 907}},
                   {{854, 980, 1106}, {1484, 1610, 1736}, {2114, 2240, 2366}},
                   {{1341, 1548, 1755}, {2376, 2583, 2790}, {3411, 3618, 3825}}},
@@ -105,27 +112,45 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
                   {{4004, 4130, 4256}, {4634, 4760, 4886}, {5264, 5390, 5516}},
                   {{6516, 6723, 6930}, {7551, 7758, 7965}, {8586, 8793, 9000}}}}});
 
-        std::shared_ptr<Tensor> expectedOutput2 = std::make_shared<Tensor>(Array4D<int, 2, 4, 3, 3>{
+        std::shared_ptr<Tensor> expectedOutput2
+            = std::make_shared<Tensor>(Array4D<int, 2, 4, 3, 3>{
                 {{{{6099, 7017, 7935}, {10689, 11607, 12525}, {15279, 16197, 17115}},
                   {{13786, 15838, 17890}, {24046, 26098, 28150}, {34306, 36358, 38410}},
                   {{21473, 24659, 27845}, {37403, 40589, 43775}, {53333, 56519, 59705}},
                   {{29160, 33480, 37800}, {50760, 55080, 59400}, {72360, 76680, 81000}}},
                  {{{29049, 29967, 30885}, {33639, 34557, 35475}, {38229, 39147, 40065}},
                   {{65086, 67138, 69190}, {75346, 77398, 79450}, {85606, 87658, 89710}},
-                  {{101123, 104309, 107495}, {117053, 120239, 123425}, {132983, 136169, 139355}},
-                  {{137160, 141480, 145800}, {158760, 163080, 167400}, {180360, 184680, 189000}}}}});
-
-        std::shared_ptr<Tensor> expectedOutput3 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
-                {{{{214731, 246591, 278451}, {374031, 405891, 437751}, {533331, 565191, 597051}},
-                  {{496804, 570568, 644332}, {865624, 939388, 1013152}, {1234444, 1308208, 1381972}},
-                  {{778877, 894545, 1010213}, {1357217, 1472885, 1588553}, {1935557, 2051225, 2166893}}},
-                 {{{1011231, 1043091, 1074951}, {1170531, 1202391, 1234251}, {1329831, 1361691, 1393551}},
-                  {{2340904, 2414668, 2488432}, {2709724, 2783488, 2857252}, {3078544, 3152308, 3226072}},
-                  {{3670577, 3786245, 3901913}, {4248917, 4364585, 4480253}, {4827257, 4942925, 5058593}}}}});
+                  {{101123, 104309, 107495},
+                   {117053, 120239, 123425},
+                   {132983, 136169, 139355}},
+                  {{137160, 141480, 145800},
+                   {158760, 163080, 167400},
+                   {180360, 184680, 189000}}}}});
+
+        std::shared_ptr<Tensor> expectedOutput3
+            = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
+                {{{{214731, 246591, 278451},
+                   {374031, 405891, 437751},
+                   {533331, 565191, 597051}},
+                  {{496804, 570568, 644332},
+                   {865624, 939388, 1013152},
+                   {1234444, 1308208, 1381972}},
+                  {{778877, 894545, 1010213},
+                   {1357217, 1472885, 1588553},
+                   {1935557, 2051225, 2166893}}},
+                 {{{1011231, 1043091, 1074951},
+                   {1170531, 1202391, 1234251},
+                   {1329831, 1361691, 1393551}},
+                  {{2340904, 2414668, 2488432},
+                   {2709724, 2783488, 2857252},
+                   {3078544, 3152308, 3226072}},
+                  {{3670577, 3786245, 3901913},
+                   {4248917, 4364585, 4480253},
+                   {4827257, 4942925, 5058593}}}}});
 
         Tensor expectedOutput4 = Array2D<int, 2, 5>{
-                {{205050376, 198925904, 181355097, 196978090, 238868348},
-                {598467376, 561797804, 560823897, 593043790, 698672948}}};
+            {{205050376, 198925904, 181355097, 196978090, 238868348},
+             {598467376, 561797804, 560823897, 593043790, 698672948}}};
         Tensor other1 = g->getNode("conv1")->getOperator()->output(0);
         bool equal1 = (other1 == *expectedOutput1);
         REQUIRE(equal1);
@@ -140,70 +165,83 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
         REQUIRE(equal4);
     }
 
-    SECTION("Test Parallel graph") {
-        std::shared_ptr<GraphView> g =
-                Sequential({Conv(1, 3, {3, 3}, "inputConv"),
-                            Parallel({
-                                Conv(3, 3, {1, 1}, "conv1.1"),
-                                Conv(3, 3, {1, 1}, "conv1.2"),
-                                Conv(3, 3, {1, 1}, "conv1.3")}),
-                            Add<3>("add1"),
-                            Conv(3, 2, {1, 1}, "conv2"),
-                            FC(5, false, "out")});
+    SECTION("Test Parallel graph")
+    {
+        std::shared_ptr<GraphView> g = Sequential(
+            {Conv(1, 3, {3, 3}, "inputConv"),
+             Parallel(
+                 {Conv(3, 3, {1, 1}, "conv1.1"),
+                  Conv(3, 3, {1, 1}, "conv1.2"),
+                  Conv(3, 3, {1, 1}, "conv1.3")}),
+             Add<3>("add1"),
+             Conv(3, 2, {1, 1}, "conv2"),
+             FC(5, false, "out")});
         g->setBackend("cpu");
-        g->setDatatype(Aidge::DataType::Int32);
+        g->setDataType(Aidge::DataType::Int32);
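+        // Three parallel 1x1 convolutions consume inputConv's output and are
+        // summed by Add<3> before reaching conv2.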
 
         g->getNode("inputConv")->getOperator()->input(0) = *inputTensor;
         g->getNode("inputConv")->getOperator()->input(1) = *weight1;
         g->getNode("inputConv")->getOperator()->input(2) = *bias1;
 
-        std::shared_ptr<Tensor> conv11Weight = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
+        std::shared_ptr<Tensor> conv11Weight
+            = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
                 {{{{1}}, {{2}}, {{3}}}, {{{4}}, {{5}}, {{6}}}, {{{7}}, {{8}}, {{9}}}}});
         g->getNode("conv1.1")->getOperator()->input(1) = *conv11Weight;
         g->getNode("conv1.1")->getOperator()->input(2) = *bias1;
 
-        std::shared_ptr<Tensor> conv12Weight = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
-                {{{{11}}, {{12}}, {{13}}}, {{{14}}, {{15}}, {{16}}}, {{{17}}, {{18}}, {{19}}}}});
+        std::shared_ptr<Tensor> conv12Weight
+            = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
+                {{{{11}}, {{12}}, {{13}}},
+                 {{{14}}, {{15}}, {{16}}},
+                 {{{17}}, {{18}}, {{19}}}}});
         g->getNode("conv1.2")->getOperator()->input(1) = *conv12Weight;
         g->getNode("conv1.2")->getOperator()->input(2) = *bias1;
 
-        std::shared_ptr<Tensor> conv13Weight = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
-                {{{{21}}, {{22}}, {{23}}}, {{{24}}, {{25}}, {{26}}}, {{{27}}, {{28}}, {{29}}}}});
+        std::shared_ptr<Tensor> conv13Weight
+            = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
+                {{{{21}}, {{22}}, {{23}}},
+                 {{{24}}, {{25}}, {{26}}},
+                 {{{27}}, {{28}}, {{29}}}}});
         g->getNode("conv1.3")->getOperator()->input(1) = *conv13Weight;
         g->getNode("conv1.3")->getOperator()->input(2) = *bias1;
 
         std::shared_ptr<Tensor> conv2Weight = std::make_shared<Tensor>(
-                Array4D<int, 2, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}}, {{{4}}, {{5}}, {{6}}}}});
+            Array4D<int, 2, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}}, {{{4}}, {{5}}, {{6}}}}});
         std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int, 2>{{1, 2}});
         g->getNode("conv2")->getOperator()->input(1) = *conv2Weight;
         g->getNode("conv2")->getOperator()->input(2) = *bias2;
 
-        std::shared_ptr<Tensor> fcWeight = std::make_shared<Tensor>(
-                Array2D<int, 5, 18>{{{1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3},
-                                     {4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1},
-                                     {2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4},
-                                     {5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2},
-                                     {3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}}});
-        std::shared_ptr<Tensor> fcBias = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
+        std::shared_ptr<Tensor> fcWeight = std::make_shared<Tensor>(Array2D<int, 5, 18>{
+            {{1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3},
+             {4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1},
+             {2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4},
+             {5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2},
+             {3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}}});
+        std::shared_ptr<Tensor> fcBias
+            = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
         g->getNode("out")->getOperator()->input(1) = *fcWeight;
         g->getNode("out")->getOperator()->input(2) = *fcBias;
 
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(
-                Array2D<int, 2, 5>{{{124324368, 130692907, 133325056, 125044620, 142843879},
-                                    {369195468, 394615207, 382643056, 379441320, 416291779}}});
+        std::shared_ptr<Tensor> expectedOutput
+            = std::make_shared<Tensor>(Array2D<int, 2, 5>{
+                {{124324368, 130692907, 133325056, 125044620, 142843879},
+                 {369195468, 394615207, 382643056, 379441320, 416291779}}});
 
         g->forwardDims();
         SequentialScheduler scheduler(g);
         REQUIRE_NOTHROW(scheduler.forward());
         scheduler.saveSchedulingDiagram("schedulingSequential");
-        std::shared_ptr<Tensor> result =
-                std::static_pointer_cast<Tensor>(g->getNode("out")->getOperator()->getOutput(0));
+        std::shared_ptr<Tensor> result = std::static_pointer_cast<Tensor>(
+            g->getNode("out")->getOperator()->getOutput(0));
         bool equal = (*result == *expectedOutput);
         REQUIRE(equal);
     }
 
-    SECTION("Test Residual graph") {
+    SECTION("Test Residual graph")
+    {
     }
 
-    SECTION("Test Recurrent graph") {}
+    SECTION("Test Recurrent graph")
+    {
+    }
 }
\ No newline at end of file