From 871f666de0d52c85c1b90ac2010a91c8c4d8a23a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20KUBLER?= <gregoire.kubler@proton.me>
Date: Wed, 6 Nov 2024 17:30:47 +0100
Subject: [PATCH] chore: format cpp files with clang-format

---
 .../aidge/backend/opencv/data/DataUtils.hpp   |   5 +-
 .../aidge/backend/opencv/data/TensorImpl.hpp  | 320 +++++++++++-------
 .../aidge/backend/opencv/database/MNIST.hpp   |  68 ++--
 .../stimuli/StimulusImpl_opencv_imread.hpp    |  35 +-
 include/aidge/backend/opencv/utils/Utils.hpp  |  21 +-
 python_binding/database/pybind_MNIST.cpp      |  30 +-
 python_binding/pybind_opencv.cpp              |   9 +-
 src/database/MNIST.cpp                        |  64 ++--
 src/stimuli/StimulusImpl_opencv_imread.cpp    |   8 +-
 src/utils/Utils.cpp                           | 154 +++++----
 unit_tests/Test_Stimulus.cpp                  |  39 ++-
 .../Test_StimulusImpl_opencv_imread.cpp       |  39 ++-
 unit_tests/Test_TensorImpl.cpp                |  93 ++---
 unit_tests/Tests_Utils.cpp                    |  40 ++-
 unit_tests/Tools.hpp                          |   3 +-
 15 files changed, 523 insertions(+), 405 deletions(-)

diff --git a/include/aidge/backend/opencv/data/DataUtils.hpp b/include/aidge/backend/opencv/data/DataUtils.hpp
index b58267a..c8c2922 100644
--- a/include/aidge/backend/opencv/data/DataUtils.hpp
+++ b/include/aidge/backend/opencv/data/DataUtils.hpp
@@ -12,7 +12,6 @@
 #ifndef AIDGE_OPENCV_DATA_DATAUTILS_H_
 #define AIDGE_OPENCV_DATA_DATAUTILS_H_
 
-
 #include "opencv2/core.hpp"
 
 #include <cstdint>
@@ -20,7 +19,9 @@
 namespace Aidge {
 
 namespace detail {
-template <typename T> struct CV_C1_CPP { static constexpr int value = -1; };
+template <typename T> struct CV_C1_CPP {
+    static constexpr int value = -1;
+};
 template <> struct CV_C1_CPP<std::int8_t> {
     static constexpr int value = CV_8SC1;
 };
diff --git a/include/aidge/backend/opencv/data/TensorImpl.hpp b/include/aidge/backend/opencv/data/TensorImpl.hpp
index 4b548d2..9a480ce 100644
--- a/include/aidge/backend/opencv/data/TensorImpl.hpp
+++ b/include/aidge/backend/opencv/data/TensorImpl.hpp
@@ -15,38 +15,36 @@
 #include "opencv2/core.hpp"
 
 #include "aidge/backend/TensorImpl.hpp"
+#include "aidge/backend/opencv/data/DataUtils.hpp"
 #include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/future_std/span.hpp"
-#include "aidge/backend/opencv/data/DataUtils.hpp"
-
 
 namespace Aidge {
 
 class TensorImpl_opencv_ {
-public:
-    virtual const cv::Mat& data() const = 0;
-    virtual void setCvMat(const cv::Mat& mat ) = 0;
+  public:
+    virtual const cv::Mat &data() const = 0;
+    virtual void setCvMat(const cv::Mat &mat) = 0;
 };
 
 template <class T>
 class TensorImpl_opencv : public TensorImpl, public TensorImpl_opencv_ {
-private:
+  private:
     // Stores the cv::Mat
     cv::Mat mData;
 
-protected:
+  protected:
     std::vector<DimSize_t> mDims;
 
-public:
+  public:
     static constexpr const char *Backend = "opencv";
 
     TensorImpl_opencv() = delete;
     TensorImpl_opencv(DeviceIdx_t device, std::vector<DimSize_t> dims)
-    : TensorImpl(Backend, device, dims)
-    {
+        : TensorImpl(Backend, device, dims) {
         mDims = dims;
     }
 
@@ -54,7 +52,8 @@ public:
         // Create iterators for both matrices
         cv::MatConstIterator_<T> it1 = mData.begin<T>();
 
-        const cv::Mat & otherData = reinterpret_cast<const TensorImpl_opencv<T> &>(otherImpl).data();
+        const cv::Mat &otherData =
+            reinterpret_cast<const TensorImpl_opencv<T> &>(otherImpl).data();
         cv::MatConstIterator_<T> it2 = otherData.begin<T>();
 
         // Iterate over the elements and compare them
@@ -66,11 +65,12 @@ public:
         return true;
     }
 
-    static std::unique_ptr<TensorImpl_opencv> create(DeviceIdx_t device, std::vector<DimSize_t> dims) {
+    static std::unique_ptr<TensorImpl_opencv>
+    create(DeviceIdx_t device, std::vector<DimSize_t> dims) {
         return std::make_unique<TensorImpl_opencv<T>>(device, dims);
     }
 
-    void resize(std::vector<DimSize_t> dims) override{
+    void resize(std::vector<DimSize_t> dims) override {
         mDims = dims;
         size_t product = 1;
         for (size_t num : dims) {
@@ -80,99 +80,157 @@ public:
     }
 
     // native interface
-    const cv::Mat & data() const override { return mData; }
+    const cv::Mat &data() const override {
+        return mData;
+    }
 
-    inline std::size_t capacity() const noexcept override { return (mData.total() * mData.channels()); }
+    inline std::size_t capacity() const noexcept override {
+        return (mData.total() * mData.channels());
+    }
 
-    inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
+    inline std::size_t scalarSize() const noexcept override final {
+        return sizeof(T);
+    }
 
     void zeros() override final {
         mData.setTo(cv::Scalar::all(T(0)));
     }
 
-    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
-        const T* srcT = static_cast<const T *>(src);
-        T* dstT = static_cast<T *>(rawPtr(offset));
-
-        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "TensorImpl_opencv<{}>::copy(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
-        AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "TensorImpl_opencv<{}>::copy(): overlapping copy is not supported", typeid(T).name());
+    void copy(const void *src,
+              NbElts_t length,
+              NbElts_t offset = 0) override final {
+        const T *srcT = static_cast<const T *>(src);
+        T *dstT = static_cast<T *>(rawPtr(offset));
+
+        AIDGE_ASSERT(
+            length <= (mData.total() * mData.channels()) || length <= mNbElts,
+            "TensorImpl_opencv<{}>::copy(): copy length ({}) is above "
+            "capacity ({})",
+            typeid(T).name(),
+            length,
+            mNbElts);
+        AIDGE_ASSERT(
+            dstT < srcT || dstT >= srcT + length,
+            "TensorImpl_opencv<{}>::copy(): overlapping copy is not supported",
+            typeid(T).name());
         std::copy(srcT, srcT + length, dstT);
-
     }
 
-    void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final{
+    void copyCast(const void *src,
+                  const DataType srcDt,
+                  NbElts_t length,
+                  NbElts_t offset = 0) override final {
         if (length == 0) {
             return;
         }
 
-        T* dstT = static_cast<T *>(rawPtr(offset));
-        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "TensorImpl_opencv<{}>::copyCast(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
-        switch (srcDt)
-        {
-            case DataType::Float64:
-                std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
-                        dstT);
-                break;
-            case DataType::Float32:
-                std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
-                        dstT);
-                break;
-            case DataType::Float16:
-                std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
-                        dstT);
-                break;
-            case DataType::Int64:
-                std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::UInt64:
-                std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::Int32:
-                std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::UInt32:
-                std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::Int16:
-                std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::UInt16:
-                std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::Int8:
-                std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::UInt8:
-                std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
-                        dstT);
-                break;
-            default:
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "TensorImpl_opencv<{}>::copyCast(): unsupported data type {}.", typeid(T).name(), srcDt);
-                break;
+        T *dstT = static_cast<T *>(rawPtr(offset));
+        AIDGE_ASSERT(
+            length <= (mData.total() * mData.channels()) || length <= mNbElts,
+            "TensorImpl_opencv<{}>::copyCast(): copy length ({}) is above "
+            "capacity ({})",
+            typeid(T).name(),
+            length,
+            mNbElts);
+        switch (srcDt) {
+        case DataType::Float64:
+            std::copy(static_cast<const double *>(src),
+                      static_cast<const double *>(src) + length,
+                      dstT);
+            break;
+        case DataType::Float32:
+            std::copy(static_cast<const float *>(src),
+                      static_cast<const float *>(src) + length,
+                      dstT);
+            break;
+        case DataType::Float16:
+            std::copy(static_cast<const half_float::half *>(src),
+                      static_cast<const half_float::half *>(src) + length,
+                      dstT);
+            break;
+        case DataType::Int64:
+            std::copy(static_cast<const int64_t *>(src),
+                      static_cast<const int64_t *>(src) + length,
+                      dstT);
+            break;
+        case DataType::UInt64:
+            std::copy(static_cast<const uint64_t *>(src),
+                      static_cast<const uint64_t *>(src) + length,
+                      dstT);
+            break;
+        case DataType::Int32:
+            std::copy(static_cast<const int32_t *>(src),
+                      static_cast<const int32_t *>(src) + length,
+                      dstT);
+            break;
+        case DataType::UInt32:
+            std::copy(static_cast<const uint32_t *>(src),
+                      static_cast<const uint32_t *>(src) + length,
+                      dstT);
+            break;
+        case DataType::Int16:
+            std::copy(static_cast<const int16_t *>(src),
+                      static_cast<const int16_t *>(src) + length,
+                      dstT);
+            break;
+        case DataType::UInt16:
+            std::copy(static_cast<const uint16_t *>(src),
+                      static_cast<const uint16_t *>(src) + length,
+                      dstT);
+            break;
+        case DataType::Int8:
+            std::copy(static_cast<const int8_t *>(src),
+                      static_cast<const int8_t *>(src) + length,
+                      dstT);
+            break;
+        case DataType::UInt8:
+            std::copy(static_cast<const uint8_t *>(src),
+                      static_cast<const uint8_t *>(src) + length,
+                      dstT);
+            break;
+        default:
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "TensorImpl_opencv<{}>::copyCast(): unsupported data type {}.",
+                typeid(T).name(),
+                srcDt);
+            break;
         }
     }
 
-
-    void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
-        AIDGE_ASSERT(device.first == Backend, "TensorImpl_opencv<{}>::copyFromDevice(): backend must match", typeid(T).name());
-        AIDGE_ASSERT(device.second == 0, "TensorImpl_opencv<{}>::copyFromDevice(): device ({}) cannot be != 0 for CPU backend", typeid(T).name(), device.second);
+    void copyFromDevice(const void *src,
+                        const std::pair<std::string, DeviceIdx_t> &device,
+                        NbElts_t length,
+                        NbElts_t offset = 0) override final {
+        AIDGE_ASSERT(
+            device.first == Backend,
+            "TensorImpl_opencv<{}>::copyFromDevice(): backend must match",
+            typeid(T).name());
+        AIDGE_ASSERT(device.second == 0,
+                     "TensorImpl_opencv<{}>::copyFromDevice(): device ({}) "
+                     "cannot be != 0 for CPU backend",
+                     typeid(T).name(),
+                     device.second);
         copy(src, length, offset);
     }
 
-    void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+    void copyFromHost(const void *src,
+                      NbElts_t length,
+                      NbElts_t offset = 0) override final {
         copy(src, length, offset);
     }
 
-    void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
-        const T* src = static_cast<const T*>(rawPtr(offset));
-        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "TensorImpl_opencv<{}>::copyToHost(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
+    void copyToHost(void *dst,
+                    NbElts_t length,
+                    NbElts_t offset = 0) const override final {
+        const T *src = static_cast<const T *>(rawPtr(offset));
+        AIDGE_ASSERT(
+            length <= (mData.total() * mData.channels()) || length <= mNbElts,
+            "TensorImpl_opencv<{}>::copyToHost(): copy length ({}) is above "
+            "capacity ({})",
+            typeid(T).name(),
+            length,
+            mNbElts);
         std::copy(src, src + length, static_cast<T *>(dst));
     }
 
@@ -182,7 +240,10 @@ public:
     };
 
     const void *rawPtr(NbElts_t offset = 0) const override final {
-        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "TensorImpl_opencv<{}>::rawPtr(): accessing uninitialized const rawPtr", typeid(T).name());
+        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts,
+                     "TensorImpl_opencv<{}>::rawPtr(): accessing "
+                     "uninitialized const rawPtr",
+                     typeid(T).name());
         return (mData.ptr<T>() + offset);
     };
 
@@ -192,59 +253,76 @@ public:
     };
 
     const void *hostPtr(NbElts_t offset = 0) const override {
-        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "TensorImpl_opencv<{}>::hostPtr(): accessing uninitialized const hostPtr", typeid(T).name());
-        AIDGE_ASSERT(mData.isContinuous(), "TensorImpl_opencv<{}>::hostPtr(): CV Matrix not continuous", typeid(T).name());
+        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts,
+                     "TensorImpl_opencv<{}>::hostPtr(): accessing "
+                     "uninitialized const hostPtr",
+                     typeid(T).name());
+        AIDGE_ASSERT(
+            mData.isContinuous(),
+            "TensorImpl_opencv<{}>::hostPtr(): CV Matrix not continuous",
+            typeid(T).name());
         return (mData.ptr<T>() + offset);
     };
 
-    void setCvMat(const cv::Mat& mat) override {mData=mat;}
-
-
-  virtual ~TensorImpl_opencv() = default;
-
-private:
-
-  void lazyInit() {
-    if ((mData.total() * mData.channels()) < mNbElts) {
-        // Need more data, a re-allocation will occur
-        AIDGE_ASSERT(mData.empty(), "TensorImpl_opencv<{}>: trying to enlarge non-owned data", typeid(T).name());
-
-        if (mDims.size() < 3) {
-            mData = cv::Mat(((mDims.size() > 1) ? static_cast<int>(mDims[0])
-                                                    : (mDims.size() > 0) ? 1
-                                                                            : 0),
-                            (mDims.size() > 0) ? static_cast<int>(mDims[1]) : 0,
-                            detail::CV_C1_CPP_v<T>);
-        } else {
-            std::vector<cv::Mat> channels;
+    void setCvMat(const cv::Mat &mat) override {
+        mData = mat;
+    }
 
-            for (std::size_t k = 0; k < mDims[2]; ++k) {
-                channels.push_back(cv::Mat(static_cast<int>(mDims[0]),
-                                        static_cast<int>(mDims[1]),
-                                        detail::CV_C1_CPP_v<T>));
+    virtual ~TensorImpl_opencv() = default;
+
+  private:
+    void lazyInit() {
+        if ((mData.total() * mData.channels()) < mNbElts) {
+            // Need more data, a re-allocation will occur
+            AIDGE_ASSERT(
+                mData.empty(),
+                "TensorImpl_opencv<{}>: trying to enlarge non-owned data",
+                typeid(T).name());
+
+            if (mDims.size() < 3) {
+                mData = cv::Mat(
+                    ((mDims.size() > 1)   ? static_cast<int>(mDims[0])
+                     : (mDims.size() > 0) ? 1
+                                          : 0),
+                    (mDims.size() > 0) ? static_cast<int>(mDims[1]) : 0,
+                    detail::CV_C1_CPP_v<T>);
+            } else {
+                std::vector<cv::Mat> channels;
+
+                for (std::size_t k = 0; k < mDims[2]; ++k) {
+                    channels.push_back(cv::Mat(static_cast<int>(mDims[0]),
+                                               static_cast<int>(mDims[1]),
+                                               detail::CV_C1_CPP_v<T>));
+                }
+
+                cv::merge(channels, mData);
             }
-
-            cv::merge(channels, mData);
         }
     }
-  }
 };
 
 namespace {
 static Registrar<Tensor> registrarTensorImpl_opencv_Float64(
-        {"opencv", DataType::Float64}, Aidge::TensorImpl_opencv<double>::create);
+    {"opencv", DataType::Float64},
+    Aidge::TensorImpl_opencv<double>::create);
 static Registrar<Tensor> registrarTensorImpl_opencv_Float32(
-        {"opencv", DataType::Float32}, Aidge::TensorImpl_opencv<float>::create);
+    {"opencv", DataType::Float32},
+    Aidge::TensorImpl_opencv<float>::create);
 static Registrar<Tensor> registrarTensorImpl_opencv_Int32(
-        {"opencv", DataType::Int32}, Aidge::TensorImpl_opencv<std::int32_t>::create);
+    {"opencv", DataType::Int32},
+    Aidge::TensorImpl_opencv<std::int32_t>::create);
 static Registrar<Tensor> registrarTensorImpl_opencv_Int16(
-        {"opencv", DataType::Int16}, Aidge::TensorImpl_opencv<std::int16_t>::create);
+    {"opencv", DataType::Int16},
+    Aidge::TensorImpl_opencv<std::int16_t>::create);
 static Registrar<Tensor> registrarTensorImpl_opencv_UInt16(
-        {"opencv", DataType::UInt16}, Aidge::TensorImpl_opencv<std::uint16_t>::create);
+    {"opencv", DataType::UInt16},
+    Aidge::TensorImpl_opencv<std::uint16_t>::create);
 static Registrar<Tensor> registrarTensorImpl_opencv_Int8(
-        {"opencv", DataType::Int8}, Aidge::TensorImpl_opencv<std::int8_t>::create);
+    {"opencv", DataType::Int8},
+    Aidge::TensorImpl_opencv<std::int8_t>::create);
 static Registrar<Tensor> registrarTensorImpl_opencv_UInt8(
-        {"opencv", DataType::UInt8}, Aidge::TensorImpl_opencv<std::uint8_t>::create);
+    {"opencv", DataType::UInt8},
+    Aidge::TensorImpl_opencv<std::uint8_t>::create);
 } // namespace
 } // namespace Aidge
 
diff --git a/include/aidge/backend/opencv/database/MNIST.hpp b/include/aidge/backend/opencv/database/MNIST.hpp
index af77e67..60fa30c 100644
--- a/include/aidge/backend/opencv/database/MNIST.hpp
+++ b/include/aidge/backend/opencv/database/MNIST.hpp
@@ -12,11 +12,11 @@
 #ifndef AIDGE_OPENCV_DATABASE_MNIST_H_
 #define AIDGE_OPENCV_DATABASE_MNIST_H_
 
-#include <algorithm>  // std::reverse
-#include <cstddef>    // std:size_t
-#include <cstdint>    // std::uint32_t
+#include <algorithm> // std::reverse
+#include <cstddef>   // std:size_t
+#include <cstdint>   // std::uint32_t
 #include <string>
-#include <tuple>      // std::tuple_size
+#include <tuple>     // std::tuple_size
 #include <vector>
 
 #include "aidge/data/Database.hpp"
@@ -29,14 +29,12 @@
 
 namespace Aidge {
 
-template <class T> void swapEndian(T& obj)
-{
-    std::uint8_t* memp = reinterpret_cast<unsigned char*>(&obj);
+template <class T> void swapEndian(T &obj) {
+    std::uint8_t *memp = reinterpret_cast<unsigned char *>(&obj);
     std::reverse(memp, memp + sizeof(T));
 }
 
-inline bool isBigEndian()
-{
+inline bool isBigEndian() {
     const union {
         uint32_t i;
         char c[4];
@@ -45,9 +43,8 @@ inline bool isBigEndian()
     return bint.c[0] == 1;
 }
 
-
 class MNIST : public Database {
-public:
+  public:
     union MagicNumber {
         std::uint32_t value;
         std::uint8_t byte[4];
@@ -62,7 +59,7 @@ public:
         Double = 0x0E
     };
 
-protected:
+  protected:
     /// Stimulus data path
     const std::string mDataPath;
 
@@ -75,43 +72,44 @@ protected:
     /// Stimulus data
     // Each index of the vector is one item of the database
     // One item of the MNIST database is the tuple <Image,label>
-    // First stimuli of the tuple is a gray scale image stimuli of a writen digit
-    // Second stimuli of the tuple is the label associated to the digit : unsigned integer 0-9
-    mutable std::vector<std::tuple<Stimulus,Stimulus>> mStimuli;
+    // First stimuli of the tuple is a gray scale image stimuli of a writen
+    // digit Second stimuli of the tuple is the label associated to the digit :
+    // unsigned integer 0-9
+    mutable std::vector<std::tuple<Stimulus, Stimulus>> mStimuli;
 
     /// Data Transformations
     // Data transformations use the GraphView mecanism
-    // Transformations are a sequential graph with each operator of the graph being one transformation
-    // GraphView mDataTransformations;
+    // Transformations are a sequential graph with each operator of the graph
+    // being one transformation GraphView mDataTransformations;
 
     // Scheduler to run the graph of data transformations
     // Scheduler mScheduler;
 
-public:
-    MNIST(const std::string& dataPath,
-            // const GraphView transformations,
-            bool train,
-            bool loadDataInMemory = false)
-    : Database(),
-      mDataPath(dataPath),
-    // mDataTransformations(transformations),
-      mTrain(train),
-      mLoadDataInMemory(loadDataInMemory)
-    {
+  public:
+    MNIST(const std::string &dataPath,
+          // const GraphView transformations,
+          bool train,
+          bool loadDataInMemory = false)
+        : Database(),
+          mDataPath(dataPath),
+          // mDataTransformations(transformations),
+          mTrain(train),
+          mLoadDataInMemory(loadDataInMemory) {
         // Uncompress train database
         if (mTrain) {
             uncompress(mDataPath + "/train-images-idx3-ubyte",
-                            dataPath + "/train-labels-idx1-ubyte");
+                       dataPath + "/train-labels-idx1-ubyte");
         } else { // Uncompress test database
             uncompress(mDataPath + "/t10k-images-idx3-ubyte",
-                            dataPath + "/t10k-labels-idx1-ubyte");
+                       dataPath + "/t10k-labels-idx1-ubyte");
         }
     }
 
     ~MNIST() noexcept;
 
-public:
-    std::vector<std::shared_ptr<Tensor>> getItem(const std::size_t index) const override final;
+  public:
+    std::vector<std::shared_ptr<Tensor>>
+    getItem(const std::size_t index) const override final;
 
     inline std::size_t getLen() const noexcept override final {
         return mStimuli.size();
@@ -121,9 +119,9 @@ public:
         return std::tuple_size<decltype(mStimuli)::value_type>::value;
     }
 
-private:
-    void uncompress(const std::string& dataPath, const std::string& labelPath);
+  private:
+    void uncompress(const std::string &dataPath, const std::string &labelPath);
 };
-}
+} // namespace Aidge
 
 #endif // AIDGE_OPENCV_DATABASE_MNIST_H_
diff --git a/include/aidge/backend/opencv/stimuli/StimulusImpl_opencv_imread.hpp b/include/aidge/backend/opencv/stimuli/StimulusImpl_opencv_imread.hpp
index ab55621..aba433d 100644
--- a/include/aidge/backend/opencv/stimuli/StimulusImpl_opencv_imread.hpp
+++ b/include/aidge/backend/opencv/stimuli/StimulusImpl_opencv_imread.hpp
@@ -12,49 +12,50 @@
 #ifndef AIDGE_OPENCV_STIMULI_STIMULUSIMPLOPENCVIMREAD_H_
 #define AIDGE_OPENCV_STIMULI_STIMULUSIMPLOPENCVIMREAD_H_
 
-#include <string>
 #include <memory>
+#include <string>
 
-#include <opencv2/imgcodecs.hpp>   // cv::IMREAD_COLOR
+#include <opencv2/imgcodecs.hpp> // cv::IMREAD_COLOR
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/backend/StimulusImpl.hpp"
-#include "aidge/stimuli/Stimulus.hpp"
 #include "aidge/backend/opencv/data/TensorImpl.hpp"
-
+#include "aidge/data/Tensor.hpp"
+#include "aidge/stimuli/Stimulus.hpp"
 
 namespace Aidge {
 class StimulusImpl_opencv_imread : public StimulusImpl {
-private:
+  private:
     /// Stimulus data path
     const std::string mDataPath;
     const int mReadMode;
 
-public:
-    StimulusImpl_opencv_imread(const std::string& dataPath="", int readMode=cv::IMREAD_UNCHANGED)
-    : mDataPath(dataPath),
-      mReadMode(readMode)
-    {
+  public:
+    StimulusImpl_opencv_imread(const std::string &dataPath = "",
+                               int readMode = cv::IMREAD_UNCHANGED)
+        : mDataPath(dataPath), mReadMode(readMode) {
         // ctor
     }
 
     ~StimulusImpl_opencv_imread() noexcept;
 
-public:
-    static std::unique_ptr<StimulusImpl_opencv_imread> create(const std::string& dataPath) {
+  public:
+    static std::unique_ptr<StimulusImpl_opencv_imread>
+    create(const std::string &dataPath) {
         return std::make_unique<StimulusImpl_opencv_imread>(dataPath);
     }
 
-public:
+  public:
     std::shared_ptr<Tensor> load() const override;
 };
 
 namespace {
 static Registrar<Aidge::Stimulus> registrarStimulusImpl_opencv_png(
-        {"opencv", "png"}, Aidge::StimulusImpl_opencv_imread::create);
+    {"opencv", "png"},
+    Aidge::StimulusImpl_opencv_imread::create);
 static Registrar<Aidge::Stimulus> registrarStimulusImpl_opencv_pgm(
-        {"opencv", "pgm"}, Aidge::StimulusImpl_opencv_imread::create);
-}  // namespace
+    {"opencv", "pgm"},
+    Aidge::StimulusImpl_opencv_imread::create);
+} // namespace
 
 } // namespace Aidge
 
diff --git a/include/aidge/backend/opencv/utils/Utils.hpp b/include/aidge/backend/opencv/utils/Utils.hpp
index 8825226..55a53d1 100644
--- a/include/aidge/backend/opencv/utils/Utils.hpp
+++ b/include/aidge/backend/opencv/utils/Utils.hpp
@@ -12,12 +12,12 @@
 #ifndef AIDGE_OPENCV_UTILS_UTILS_H_
 #define AIDGE_OPENCV_UTILS_UTILS_H_
 
-#include <opencv2/core/mat.hpp>  // cv::Mat
+#include <cstring>
 #include <memory>
+#include <opencv2/core/mat.hpp> // cv::Mat
+#include <string>
 #include <tuple>
 #include <vector>
-#include <cstring>
-#include <string> 
 
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
@@ -26,7 +26,8 @@
 namespace Aidge {
 
 /**
- * @brief Instanciate an aidge tensor with backend "opencv" from an opencv matrix
+ * @brief Instanciate an aidge tensor with backend "opencv" from an opencv
+ * matrix
  *
  * @param mat the cv::mat to instanciate the tensor from
  * @return std::shared_ptr<Tensor> aidge tensor
@@ -34,7 +35,8 @@ namespace Aidge {
 std::shared_ptr<Tensor> tensorOpencv(cv::Mat mat);
 
 /**
- * @brief Copy the data from a source 2D cv::mat to a destination pointer with an offset
+ * @brief Copy the data from a source 2D cv::mat to a destination pointer with
+ * an offset
  *
  * @tparam CV_T The standard type corresponding to the opencv data type
  * @param mat opencv 2D mat to copy the data from
@@ -42,8 +44,7 @@ std::shared_ptr<Tensor> tensorOpencv(cv::Mat mat);
  * @param offset offset an the destination data pointer
  */
 template <class CV_T>
-void convert(const cv::Mat& mat, void* data, std::size_t offset);
-
+void convert(const cv::Mat &mat, void *data, std::size_t offset);
 
 /**
  * @brief Convert a tensor backend opencv into a tensor backend cpu
@@ -51,9 +52,9 @@ void convert(const cv::Mat& mat, void* data, std::size_t offset);
  * @param tensorOpencv tensor with backend opencv (contains a cv::mat)
  * @return std::shared_ptr<Tensor> tensor backend cpu (contains a std::vector)
  */
-std::shared_ptr<Tensor> convertCpu(std::shared_ptr<Aidge::Tensor> tensorOpencv);
-
+std::shared_ptr<Tensor>
+convertCpu(std::shared_ptr<Aidge::Tensor> tensorOpencv);
 
-}  // namespace
+} // namespace Aidge
 
 #endif // AIDGE_OPENCV_UTILS_UTILS_H_
\ No newline at end of file
diff --git a/python_binding/database/pybind_MNIST.cpp b/python_binding/database/pybind_MNIST.cpp
index 3c73a5f..6fbd104 100644
--- a/python_binding/database/pybind_MNIST.cpp
+++ b/python_binding/database/pybind_MNIST.cpp
@@ -1,33 +1,39 @@
+#include "aidge/backend/opencv/database/MNIST.hpp"
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
-#include "aidge/backend/opencv/database/MNIST.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
-void init_MNIST(py::module& m){
+void init_MNIST(py::module &m) {
 
     py::class_<MNIST, std::shared_ptr<MNIST>, Database>(m, "MNIST")
-          .def(py::init<const std::string&, bool, bool>(), py::arg("dataPath"), py::arg("train"), py::arg("load_data_in_memory")=false)
-          .def("get_item", &MNIST::getItem, py::arg("index"),
-          R"mydelimiter(
+        .def(py::init<const std::string &, bool, bool>(),
+             py::arg("dataPath"),
+             py::arg("train"),
+             py::arg("load_data_in_memory") = false)
+        .def("get_item",
+             &MNIST::getItem,
+             py::arg("index"),
+             R"mydelimiter(
           Return samples of each data modality for the given index.
 
           :param index: Database index corresponding to one item
           :type index: int
           )mydelimiter")
-          
-          .def("get_len", &MNIST::getLen,
-          R"mydelimiter(
+
+        .def("get_len",
+             &MNIST::getLen,
+             R"mydelimiter(
           Return the number of items in the database.
 
           )mydelimiter")
 
-          .def("get_nb_modalities", &MNIST::getNbModalities,
-          R"mydelimiter(
+        .def("get_nb_modalities",
+             &MNIST::getNbModalities,
+             R"mydelimiter(
           Return the number of modalities in one item of the database.
           
           )mydelimiter");
-    
-}
 }
+} // namespace Aidge
diff --git a/python_binding/pybind_opencv.cpp b/python_binding/pybind_opencv.cpp
index 276467d..c64faaf 100644
--- a/python_binding/pybind_opencv.cpp
+++ b/python_binding/pybind_opencv.cpp
@@ -1,14 +1,15 @@
 #include <pybind11/pybind11.h>
-// Need to call this header to register tensorImpl when initializing opencv python module
-#include "aidge/backend/opencv.hpp" 
+// Need to call this header to register tensorImpl when initializing opencv
+// python module
+#include "aidge/backend/opencv.hpp"
 
 namespace py = pybind11;
 
 namespace Aidge {
 
-void init_MNIST(py::module&);
+void init_MNIST(py::module &);
 
 PYBIND11_MODULE(aidge_backend_opencv, m) {
     init_MNIST(m);
 }
-}
+} // namespace Aidge
diff --git a/src/database/MNIST.cpp b/src/database/MNIST.cpp
index fd92f57..a06efd0 100644
--- a/src/database/MNIST.cpp
+++ b/src/database/MNIST.cpp
@@ -16,16 +16,14 @@
 #include <iomanip>
 #include <tuple>
 
+#include "aidge/backend/opencv/utils/Utils.hpp"
 #include "opencv2/core.hpp"
 #include <opencv2/opencv.hpp>
-#include "aidge/backend/opencv/utils/Utils.hpp"
-
 
 Aidge::MNIST::~MNIST() noexcept = default;
 
-void Aidge::MNIST::uncompress(const std::string& dataPath,
-                            const std::string& labelPath)
-{
+void Aidge::MNIST::uncompress(const std::string &dataPath,
+                              const std::string &labelPath) {
     // Images
     std::ifstream images(dataPath.c_str(), std::fstream::binary);
 
@@ -37,11 +35,11 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
     std::uint32_t nbRows;
     std::uint32_t nbColumns;
 
-    images.read(reinterpret_cast<char*>(&magicNumber.value),
+    images.read(reinterpret_cast<char *>(&magicNumber.value),
                 sizeof(magicNumber));
-    images.read(reinterpret_cast<char*>(&nbImages), sizeof(nbImages));
-    images.read(reinterpret_cast<char*>(&nbRows), sizeof(nbRows));
-    images.read(reinterpret_cast<char*>(&nbColumns), sizeof(nbColumns));
+    images.read(reinterpret_cast<char *>(&nbImages), sizeof(nbImages));
+    images.read(reinterpret_cast<char *>(&nbRows), sizeof(nbRows));
+    images.read(reinterpret_cast<char *>(&nbColumns), sizeof(nbColumns));
 
     if (!Aidge::isBigEndian()) {
         Aidge::swapEndian(magicNumber.value);
@@ -53,8 +51,8 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
     // if (magicNumber.byte[3] != 0 || magicNumber.byte[2] != 0
     //     || magicNumber.byte[1] != Unsigned || magicNumber.byte[0] != 3) {
     if (magicNumber.value != 0x00000803) { // 0, 0, unisgned, 3
-        throw std::runtime_error("Wrong file format for images file: "
-                                 + dataPath);
+        throw std::runtime_error("Wrong file format for images file: " +
+                                 dataPath);
     }
 
     // Labels
@@ -66,9 +64,10 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
     MagicNumber magicNumberLabels;
     std::uint32_t nbItemsLabels;
 
-    labels.read(reinterpret_cast<char*>(&magicNumberLabels.value),
+    labels.read(reinterpret_cast<char *>(&magicNumberLabels.value),
                 sizeof(magicNumberLabels));
-    labels.read(reinterpret_cast<char*>(&nbItemsLabels), sizeof(nbItemsLabels));
+    labels.read(reinterpret_cast<char *>(&nbItemsLabels),
+                sizeof(nbItemsLabels));
 
     if (!Aidge::isBigEndian()) {
         Aidge::swapEndian(magicNumberLabels);
@@ -79,8 +78,8 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
     //     || magicNumberLabels.byte[1] != Unsigned
     //     || magicNumberLabels.byte[0] != 1) {
     if (magicNumberLabels.value != 0x00000801) { // 0, 0, unsigned, 1
-        throw std::runtime_error("Wrong file format for labels file: "
-                                 + labelPath);
+        throw std::runtime_error("Wrong file format for labels file: " +
+                                 labelPath);
     }
 
     if (nbImages != nbItemsLabels)
@@ -101,14 +100,14 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
 
             for (std::uint32_t y = 0; y < nbRows; ++y) {
                 for (std::uint32_t x = 0; x < nbColumns; ++x) {
-                    images.read(reinterpret_cast<char*>(&buff), sizeof(buff));
+                    images.read(reinterpret_cast<char *>(&buff), sizeof(buff));
                     frame.at<std::uint8_t>(y, x) = buff;
                 }
             }
 
             if (!cv::imwrite(nameStr.str(), frame))
-                throw std::runtime_error("Unable to write image: "
-                                         + nameStr.str());
+                throw std::runtime_error("Unable to write image: " +
+                                         nameStr.str());
         } else {
             // Skip image data (to grab labels only)
             images.seekg(nbColumns * nbRows, images.cur);
@@ -118,11 +117,13 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
         Aidge::Stimulus StimulusImg(nameStr.str(), mLoadDataInMemory);
         StimulusImg.setBackend("opencv");
 
-        // Create the stimulus of the corresponding label by filing integer to the stimulus directly
-        labels.read(reinterpret_cast<char*>(&buff), sizeof(buff));
+        // Create the stimulus of the corresponding label by filling the
+        // integer into the stimulus directly
+        labels.read(reinterpret_cast<char *>(&buff), sizeof(buff));
         const std::int32_t label = std::move(static_cast<std::int32_t>(buff));
 
-        std::shared_ptr<Tensor> lbl = std::make_shared<Tensor>(Array1D<int, 1>{label});
+        std::shared_ptr<Tensor> lbl =
+            std::make_shared<Tensor>(Array1D<int, 1>{label});
         Aidge::Stimulus StimulusLabel(lbl);
 
         // Push back the corresponding image & label in the vector
@@ -135,25 +136,28 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
     else if (!images.good())
         throw std::runtime_error("Error while reading data file: " + dataPath);
     else if (images.get() != std::fstream::traits_type::eof())
-        throw std::runtime_error("Data file size larger than expected: "
-                                 + dataPath);
+        throw std::runtime_error("Data file size larger than expected: " +
+                                 dataPath);
 
     if (labels.eof())
         throw std::runtime_error(
             "End-of-file reached prematurely in data file: " + labelPath);
     else if (!labels.good())
-        throw std::runtime_error("Error while reading data file: " + labelPath);
+        throw std::runtime_error("Error while reading data file: " +
+                                 labelPath);
     else if (labels.get() != std::fstream::traits_type::eof())
-        throw std::runtime_error("Data file size larger than expected: "
-                                 + labelPath);
+        throw std::runtime_error("Data file size larger than expected: " +
+                                 labelPath);
 }
 
-
-std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::MNIST::getItem(const std::size_t index) const {
+std::vector<std::shared_ptr<Aidge::Tensor>>
+Aidge::MNIST::getItem(const std::size_t index) const {
     std::vector<std::shared_ptr<Tensor>> item;
     // Load the digit tensor
-    // TODO : Currently converts the tensor Opencv but this operation will be carried by a convert operator in the preprocessing graph
-    item.push_back(Aidge::convertCpu((std::get<0>(mStimuli.at(index))).load()));
+    // TODO : Currently converts the OpenCV tensor, but this operation will be
+    // carried out by a convert operator in the preprocessing graph
+    item.push_back(
+        Aidge::convertCpu((std::get<0>(mStimuli.at(index))).load()));
     // item.push_back((std::get<0>(mStimuli.at(index))).load());
     // Load the label tensor
     item.push_back((std::get<1>(mStimuli.at(index))).load());
diff --git a/src/stimuli/StimulusImpl_opencv_imread.cpp b/src/stimuli/StimulusImpl_opencv_imread.cpp
index ff0b1da..6c532a6 100644
--- a/src/stimuli/StimulusImpl_opencv_imread.cpp
+++ b/src/stimuli/StimulusImpl_opencv_imread.cpp
@@ -17,12 +17,14 @@
 
 #include "opencv2/core.hpp"
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/backend/opencv/utils/Utils.hpp"
+#include "aidge/data/Tensor.hpp"
 
-Aidge::StimulusImpl_opencv_imread::~StimulusImpl_opencv_imread() noexcept = default;
+Aidge::StimulusImpl_opencv_imread::~StimulusImpl_opencv_imread() noexcept =
+    default;
 
-std::shared_ptr<Aidge::Tensor> Aidge::StimulusImpl_opencv_imread::load() const {
+std::shared_ptr<Aidge::Tensor>
+Aidge::StimulusImpl_opencv_imread::load() const {
     cv::Mat cvImg = cv::imread(mDataPath, mReadMode);
     if (cvImg.empty()) {
         throw std::runtime_error("Could not open images file: " + mDataPath);
diff --git a/src/utils/Utils.cpp b/src/utils/Utils.cpp
index 9cfe4f1..1185732 100644
--- a/src/utils/Utils.cpp
+++ b/src/utils/Utils.cpp
@@ -9,61 +9,62 @@
  *
  ********************************************************************************/
 
-#include <opencv2/core.hpp>  // cv::Mat, cv::split
 #include <cstddef>
 #include <cstdint>
-#include <cstring>   // std::memcpy, std::strcmp
-#include <stdexcept> // std::runtime_error
+#include <cstring>          // std::memcpy, std::strcmp
 #include <memory>
+#include <opencv2/core.hpp> // cv::Mat, cv::split
+#include <stdexcept>        // std::runtime_error
 #include <vector>
 
-#include "aidge/backend/opencv/utils/Utils.hpp"
-#include "aidge/backend/opencv/data/DataUtils.hpp"  // detail::CvtoAidge
 #include "aidge/backend/cpu/data/TensorImpl.hpp"
+#include "aidge/backend/opencv/data/DataUtils.hpp" // detail::CvtoAidge
 #include "aidge/backend/opencv/data/TensorImpl.hpp"
+#include "aidge/backend/opencv/utils/Utils.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
-#include "aidge/utils/Types.h"                      // DimSize_t
+#include "aidge/utils/Types.h" // DimSize_t
 
 static Aidge::DataType CVtoAidge(const int matDepth) {
     Aidge::DataType res;
     switch (matDepth) {
-        case CV_8U:
-            res = Aidge::DataType::UInt8;
-            break;
-        case CV_8S:
-            res = Aidge::DataType::Int8;
-            break;
-        case CV_16U:
-            res = Aidge::DataType::UInt16;
-            break;
-        case CV_16S:
-            res = Aidge::DataType::Int16;
-            break;
-        case CV_16F:
-            res = Aidge::DataType::Float16;
-            break;
-        case CV_32S:
-            res = Aidge::DataType::Int32;
-            break;
-        case CV_32F:
-            res = Aidge::DataType::Float32;
-            break;
-        case CV_64F:
-            res = Aidge::DataType::Float64;
-            break;
-        default:
-            throw std::runtime_error(
-                "Cannot convert cv::Mat to Tensor: incompatible types.");
-        }
+    case CV_8U:
+        res = Aidge::DataType::UInt8;
+        break;
+    case CV_8S:
+        res = Aidge::DataType::Int8;
+        break;
+    case CV_16U:
+        res = Aidge::DataType::UInt16;
+        break;
+    case CV_16S:
+        res = Aidge::DataType::Int16;
+        break;
+    case CV_16F:
+        res = Aidge::DataType::Float16;
+        break;
+    case CV_32S:
+        res = Aidge::DataType::Int32;
+        break;
+    case CV_32F:
+        res = Aidge::DataType::Float32;
+        break;
+    case CV_64F:
+        res = Aidge::DataType::Float64;
+        break;
+    default:
+        throw std::runtime_error(
+            "Cannot convert cv::Mat to Tensor: incompatible types.");
+    }
     return res;
 }
 
 std::shared_ptr<Aidge::Tensor> Aidge::tensorOpencv(cv::Mat mat) {
     // Get Mat dims
-    const std::vector<DimSize_t> matDims = std::vector<DimSize_t>({static_cast<DimSize_t>(mat.channels()),
-                                                            static_cast<DimSize_t>(mat.rows),
-                                                            static_cast<DimSize_t>(mat.cols)});
+    const std::vector<DimSize_t> matDims =
+        std::vector<DimSize_t>({static_cast<DimSize_t>(mat.channels()),
+                                static_cast<DimSize_t>(mat.rows),
+                                static_cast<DimSize_t>(mat.cols)});
     // Get the correct Data Type
     Aidge::DataType type;
     type = CVtoAidge(mat.depth());
@@ -75,34 +76,41 @@ std::shared_ptr<Aidge::Tensor> Aidge::tensorOpencv(cv::Mat mat) {
     tensor->setDataType(type);
 
     // Cast the tensorImpl to access setCvMat function
-    TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor->getImpl().get());
+    TensorImpl_opencv_ *tImpl_opencv =
+        dynamic_cast<TensorImpl_opencv_ *>(tensor->getImpl().get());
     tImpl_opencv->setCvMat(mat);
     return tensor;
 }
 
-
 template <class CV_T>
-void Aidge::convert(const cv::Mat& mat, void* data, std::size_t offset)
-{
+void Aidge::convert(const cv::Mat &mat, void *data, std::size_t offset) {
     if (mat.isContinuous())
-        std::memcpy(reinterpret_cast<void*>(reinterpret_cast<CV_T*>(data) + offset), mat.ptr<CV_T>(), sizeof(CV_T)*(mat.cols*mat.rows));
+        std::memcpy(
+            reinterpret_cast<void *>(reinterpret_cast<CV_T *>(data) + offset),
+            mat.ptr<CV_T>(),
+            sizeof(CV_T) * (mat.cols * mat.rows));
     else {
         throw std::runtime_error(
-                "Poui pwoup convert not support if matrix not contiguous");
+            "Poui pwoup convert not support if matrix not contiguous");
     }
-
 }
 
-
-std::shared_ptr<Aidge::Tensor> Aidge::convertCpu(std::shared_ptr<Aidge::Tensor> tensorOpencv){
+std::shared_ptr<Aidge::Tensor>
+Aidge::convertCpu(std::shared_ptr<Aidge::Tensor> tensorOpencv) {
     // Assert the tensorOpencv is backend Opencv
-    AIDGE_ASSERT(tensorOpencv->getImpl()->backend() == "opencv", "Cannot convert tensor backend from opencv to cpu : tensor is not backend opencv.");
+    AIDGE_ASSERT(tensorOpencv->getImpl()->backend() == "opencv",
+                 "Cannot convert tensor backend from opencv to cpu : tensor "
+                 "is not backend opencv.");
 
-    //  Create a tensor backend cpu from the dimensions of the tensor backend opencv
-    std::shared_ptr<Aidge::Tensor> tensorCpu = std::make_shared<Aidge::Tensor>(tensorOpencv->dims());
+    //  Create a cpu-backend tensor from the dimensions of the opencv-backend
+    //  tensor
+    std::shared_ptr<Aidge::Tensor> tensorCpu =
+        std::make_shared<Aidge::Tensor>(tensorOpencv->dims());
 
     // Get the cv::Mat from the tensor backend Opencv
-    Aidge::TensorImpl_opencv_* tImplOpencv = dynamic_cast<Aidge::TensorImpl_opencv_*>(tensorOpencv->getImpl().get());
+    Aidge::TensorImpl_opencv_ *tImplOpencv =
+        dynamic_cast<Aidge::TensorImpl_opencv_ *>(
+            tensorOpencv->getImpl().get());
     cv::Mat dataOpencv = tImplOpencv->data();
 
     // Convert the cv::Mat into a vector of cv::Mat (vector of channels)
@@ -115,33 +123,57 @@ std::shared_ptr<Aidge::Tensor> Aidge::convertCpu(std::shared_ptr<Aidge::Tensor>
     // Set backend cpu
     tensorCpu->setBackend("cpu");
 
-    // Convert & copy the cv::Mat into the tensor using the rawPtr of tensor cpu
+    // Convert & copy the cv::Mat into the tensor using the rawPtr of tensor
+    // cpu
     std::size_t count = 0;
     for (std::vector<cv::Mat>::const_iterator itChannel = channels.cbegin();
-        itChannel != channels.cend();
-        ++itChannel)
-    {
+         itChannel != channels.cend();
+         ++itChannel) {
         switch ((*itChannel).depth()) {
         case CV_8U:
-            convert<unsigned char>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
+            convert<unsigned char>(
+                *itChannel,
+                tensorCpu->getImpl()->rawPtr(),
+                count * static_cast<std::size_t>((*itChannel).rows *
+                                                 (*itChannel).cols));
             break;
         case CV_8S:
-            convert<char>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
+            convert<char>(*itChannel,
+                          tensorCpu->getImpl()->rawPtr(),
+                          count * static_cast<std::size_t>((*itChannel).rows *
+                                                           (*itChannel).cols));
             break;
         case CV_16U:
-            convert<unsigned short>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
+            convert<unsigned short>(
+                *itChannel,
+                tensorCpu->getImpl()->rawPtr(),
+                count * static_cast<std::size_t>((*itChannel).rows *
+                                                 (*itChannel).cols));
             break;
         case CV_16S:
-            convert<short>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
+            convert<short>(*itChannel,
+                           tensorCpu->getImpl()->rawPtr(),
+                           count * static_cast<std::size_t>(
+                                       (*itChannel).rows * (*itChannel).cols));
             break;
         case CV_32S:
-            convert<int>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
+            convert<int>(*itChannel,
+                         tensorCpu->getImpl()->rawPtr(),
+                         count * static_cast<std::size_t>((*itChannel).rows *
+                                                          (*itChannel).cols));
             break;
         case CV_32F:
-            convert<float>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
+            convert<float>(*itChannel,
+                           tensorCpu->getImpl()->rawPtr(),
+                           count * static_cast<std::size_t>(
+                                       (*itChannel).rows * (*itChannel).cols));
             break;
         case CV_64F:
-            convert<double>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
+            convert<double>(
+                *itChannel,
+                tensorCpu->getImpl()->rawPtr(),
+                count * static_cast<std::size_t>((*itChannel).rows *
+                                                 (*itChannel).cols));
             break;
         default:
             throw std::runtime_error(
diff --git a/unit_tests/Test_Stimulus.cpp b/unit_tests/Test_Stimulus.cpp
index a02a711..f8c515b 100644
--- a/unit_tests/Test_Stimulus.cpp
+++ b/unit_tests/Test_Stimulus.cpp
@@ -9,15 +9,15 @@
  *
  ********************************************************************************/
 
-#include <catch2/catch_test_macros.hpp>
 #include "opencv2/core.hpp"
-#include <opencv2/imgcodecs.hpp>
-#include <memory>
+#include <catch2/catch_test_macros.hpp>
 #include <iostream>
+#include <memory>
+#include <opencv2/imgcodecs.hpp>
 
-#include "aidge/stimuli/Stimulus.hpp"
 #include "aidge/backend/opencv/data/TensorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
+#include "aidge/stimuli/Stimulus.hpp"
 
 #include "Tools.hpp"
 
@@ -26,18 +26,20 @@ using namespace Aidge;
 TEST_CASE("Stimulus creation", "[Stimulus][OpenCV]") {
     SECTION("Instanciation & load an image") {
         // //  Load image with imread
-        // cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm", cv::IMREAD_UNCHANGED);
-        // REQUIRE(true_mat.empty()==false);
+        // cv::Mat true_mat =
+        // cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm",
+        // cv::IMREAD_UNCHANGED); REQUIRE(true_mat.empty()==false);
 
         // // Create Stimulus
-        // Stimulus stimg("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm", true);
-        // stimg.setBackend("opencv");
-        
+        // Stimulus
+        // stimg("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm",
+        // true); stimg.setBackend("opencv");
+
         //  Generate random matrix and save it
         std::vector<cv::Mat> channels;
         cv::Mat true_mat;
 
-        for (int c = 0; c < 3; ++c){
+        for (int c = 0; c < 3; ++c) {
             // Create a random matrix
             cv::Mat randomMat = createRandomMat<unsigned char>(224, 224);
             // Add each random matrix to the vector
@@ -57,11 +59,13 @@ TEST_CASE("Stimulus creation", "[Stimulus][OpenCV]") {
         tensor_load = stimg.load();
 
         // Access the cv::Mat with the tensor
-        TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor_load->getImpl().get());
+        TensorImpl_opencv_ *tImpl_opencv =
+            dynamic_cast<TensorImpl_opencv_ *>(tensor_load->getImpl().get());
         auto mat_tensor = tImpl_opencv->data();
 
         // Check the dimensions
-        REQUIRE((mat_tensor.total() * mat_tensor.channels()) == (true_mat.total() * true_mat.channels()));
+        REQUIRE((mat_tensor.total() * mat_tensor.channels()) ==
+                (true_mat.total() * true_mat.channels()));
 
         // Split it in channels
         std::vector<cv::Mat> channels_tensor;
@@ -71,7 +75,7 @@ TEST_CASE("Stimulus creation", "[Stimulus][OpenCV]") {
 
         // Check the elements
         for (size_t i = 0; i < channels_tensor.size(); ++i) {
-            REQUIRE(cv::countNonZero(channels_tensor[i] != channels[i]) == 0);   
+            REQUIRE(cv::countNonZero(channels_tensor[i] != channels[i]) == 0);
         }
 
         // This time the tensor is already loaded in memory
@@ -79,11 +83,13 @@ TEST_CASE("Stimulus creation", "[Stimulus][OpenCV]") {
         tensor_load_2 = stimg.load();
 
         // Access the cv::Mat with the tensor
-        TensorImpl_opencv_* tImpl_opencv_2 = dynamic_cast<TensorImpl_opencv_*>(tensor_load_2->getImpl().get());
+        TensorImpl_opencv_ *tImpl_opencv_2 =
+            dynamic_cast<TensorImpl_opencv_ *>(tensor_load_2->getImpl().get());
         auto mat_tensor_2 = tImpl_opencv_2->data();
 
         // Check the dimensions
-        REQUIRE((mat_tensor_2.total() * mat_tensor_2.channels()) == (true_mat.total() * true_mat.channels()));
+        REQUIRE((mat_tensor_2.total() * mat_tensor_2.channels()) ==
+                (true_mat.total() * true_mat.channels()));
 
         // Split it in channels
         std::vector<cv::Mat> channels_tensor_2;
@@ -93,7 +99,8 @@ TEST_CASE("Stimulus creation", "[Stimulus][OpenCV]") {
 
         // Check the elements
         for (size_t i = 0; i < channels_tensor_2.size(); ++i) {
-            REQUIRE(cv::countNonZero(channels_tensor_2[i] != channels[i]) == 0);   
+            REQUIRE(cv::countNonZero(channels_tensor_2[i] != channels[i]) ==
+                    0);
         }
     }
 }
diff --git a/unit_tests/Test_StimulusImpl_opencv_imread.cpp b/unit_tests/Test_StimulusImpl_opencv_imread.cpp
index 4fd5a99..0377781 100644
--- a/unit_tests/Test_StimulusImpl_opencv_imread.cpp
+++ b/unit_tests/Test_StimulusImpl_opencv_imread.cpp
@@ -9,36 +9,41 @@
  *
  ********************************************************************************/
 
-#include <catch2/catch_test_macros.hpp>
 #include "opencv2/core.hpp"
-#include <opencv2/imgcodecs.hpp>
-#include <memory>
+#include <catch2/catch_test_macros.hpp>
 #include <iostream>
+#include <memory>
+#include <opencv2/imgcodecs.hpp>
 
-#include "aidge/backend/opencv/stimuli/StimulusImpl_opencv_imread.hpp"
 #include "aidge/backend/opencv/data/TensorImpl.hpp"
+#include "aidge/backend/opencv/stimuli/StimulusImpl_opencv_imread.hpp"
 #include "aidge/data/Tensor.hpp"
 
 #include "Tools.hpp"
 
 using namespace Aidge;
 
-TEST_CASE("StimulusImpl_opencv_imread creation", "[StimulusImpl_opencv_imread][OpenCV]") {
+TEST_CASE("StimulusImpl_opencv_imread creation",
+          "[StimulusImpl_opencv_imread][OpenCV]") {
     SECTION("Instanciation & load an image") {
         // //  Load image with imread
-        // // cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/Lenna.png");
-        // cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm", cv::IMREAD_UNCHANGED);
-        // REQUIRE(true_mat.empty()==false);
+        // // cv::Mat true_mat =
+        // cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/Lenna.png");
+        // cv::Mat true_mat =
+        // cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm",
+        // cv::IMREAD_UNCHANGED); REQUIRE(true_mat.empty()==false);
 
         // // Create StimulusImpl_opencv_imread
-        // // StimulusImpl_opencv_imread stImpl("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/Lenna.png");
-        // StimulusImpl_opencv_imread stImpl("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm");
-        
-         //  Generate random matrix and save it
+        // // StimulusImpl_opencv_imread
+        // stImpl("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/Lenna.png");
+        // StimulusImpl_opencv_imread
+        // stImpl("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm");
+
+        //  Generate random matrix and save it
         std::vector<cv::Mat> channels;
         cv::Mat true_mat;
 
-        for (int c = 0; c < 3; ++c){
+        for (int c = 0; c < 3; ++c) {
             // Create a random matrix
             cv::Mat randomMat = createRandomMat<unsigned char>(224, 224);
             // Add each random matrix to the vector
@@ -58,11 +63,13 @@ TEST_CASE("StimulusImpl_opencv_imread creation", "[StimulusImpl_opencv_imread][O
         tensor_load = stImpl.load();
 
         // Access the cv::Mat with the tensor
-        TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor_load->getImpl().get());
+        TensorImpl_opencv_ *tImpl_opencv =
+            dynamic_cast<TensorImpl_opencv_ *>(tensor_load->getImpl().get());
         auto mat_tensor = tImpl_opencv->data();
 
         // Check the dimensions
-        REQUIRE((mat_tensor.total() * mat_tensor.channels()) == (true_mat.total() * true_mat.channels()));
+        REQUIRE((mat_tensor.total() * mat_tensor.channels()) ==
+                (true_mat.total() * true_mat.channels()));
 
         // Split it in channels
         std::vector<cv::Mat> channels_tensor;
@@ -72,7 +79,7 @@ TEST_CASE("StimulusImpl_opencv_imread creation", "[StimulusImpl_opencv_imread][O
 
         // Check the elements
         for (size_t i = 0; i < channels_tensor.size(); ++i) {
-            REQUIRE(cv::countNonZero(channels_tensor[i] != channels[i]) == 0);   
+            REQUIRE(cv::countNonZero(channels_tensor[i] != channels[i]) == 0);
         }
     }
 }
diff --git a/unit_tests/Test_TensorImpl.cpp b/unit_tests/Test_TensorImpl.cpp
index 7ef7d5f..1ce323b 100644
--- a/unit_tests/Test_TensorImpl.cpp
+++ b/unit_tests/Test_TensorImpl.cpp
@@ -15,7 +15,6 @@
 
 #include "aidge/backend/opencv/data/TensorImpl.hpp"
 
-
 using namespace Aidge;
 
 TEST_CASE("Tensor creation opencv", "[Tensor][OpenCV]") {
@@ -23,46 +22,17 @@ TEST_CASE("Tensor creation opencv", "[Tensor][OpenCV]") {
         Tensor x;
         x.setDataType(Aidge::DataType::Int32);
         x.setBackend("opencv");
-        x = Array3D<int,2,2,2>{
-        {
-            {
-                {1, 2},
-                {3, 4}
-            },
-            {
-                {5, 6},
-                {7, 8}
-            }
-        }};
+        x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
 
         Tensor xCopy;
         xCopy.setDataType(Aidge::DataType::Int32);
         xCopy.setBackend("opencv");
-        xCopy = Array3D<int,2,2,2>{
-        {
-            {
-                {1, 2},
-                {3, 4}
-            },
-            {
-                {5, 6},
-                {7, 8}
-            }
-        }};
+        xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
 
         Tensor xFloat;
         xFloat.setBackend("opencv");
-        xFloat = Array3D<float,2,2,2>{
-        {
-            {
-                {1., 2.},
-                {3., 4.}
-            },
-            {
-                {5., 6.},
-                {7., 8.}
-            }
-        }};
+        xFloat = Array3D<float, 2, 2, 2>{
+            {{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
 
         SECTION("Tensor features") {
             REQUIRE(x.nbDims() == 3);
@@ -73,25 +43,35 @@ TEST_CASE("Tensor creation opencv", "[Tensor][OpenCV]") {
         }
 
         SECTION("OpenCV tensor features") {
-            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().rows == 2);
-            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().cols == 2);
-            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().dims == 2);
-            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().total() == 4);
-            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().channels() == 2);
+            REQUIRE(static_cast<TensorImpl_opencv<int> *>(x.getImpl().get())
+                        ->data()
+                        .rows == 2);
+            REQUIRE(static_cast<TensorImpl_opencv<int> *>(x.getImpl().get())
+                        ->data()
+                        .cols == 2);
+            REQUIRE(static_cast<TensorImpl_opencv<int> *>(x.getImpl().get())
+                        ->data()
+                        .dims == 2);
+            REQUIRE(static_cast<TensorImpl_opencv<int> *>(x.getImpl().get())
+                        ->data()
+                        .total() == 4);
+            REQUIRE(static_cast<TensorImpl_opencv<int> *>(x.getImpl().get())
+                        ->data()
+                        .channels() == 2);
         }
 
         SECTION("Access to array") {
-            REQUIRE(static_cast<int*>(x.getImpl()->rawPtr())[0] == 1);
-            REQUIRE(static_cast<int*>(x.getImpl()->rawPtr())[7] == 8);
+            REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
+            REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
         }
 
         SECTION("get function") {
-            REQUIRE(x.get<int>({0,0,0}) == 1);
-            REQUIRE(x.get<int>({0,0,1}) == 2);
-            REQUIRE(x.get<int>({0,1,1}) == 4);
-            REQUIRE(x.get<int>({1,1,0}) == 7);
+            REQUIRE(x.get<int>({0, 0, 0}) == 1);
+            REQUIRE(x.get<int>({0, 0, 1}) == 2);
+            REQUIRE(x.get<int>({0, 1, 1}) == 4);
+            REQUIRE(x.get<int>({1, 1, 0}) == 7);
             x.set<int>({1, 1, 1}, 36);
-            REQUIRE(x.get<int>({1,1,1}) == 36);
+            REQUIRE(x.get<int>({1, 1, 1}) == 36);
         }
 
         SECTION("Pretty printing for debug") {
@@ -105,17 +85,7 @@ TEST_CASE("Tensor creation opencv", "[Tensor][OpenCV]") {
     }
 
     SECTION("from const array before backend") {
-        Tensor x = Array3D<int,2,2,2>{
-        {
-            {
-                {1, 2},
-                {3, 4}
-            },
-            {
-                {5, 6},
-                {7, 8}
-            }
-        }};
+        Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
         x.setBackend("opencv");
 
         REQUIRE(x.nbDims() == 3);
@@ -124,10 +94,9 @@ TEST_CASE("Tensor creation opencv", "[Tensor][OpenCV]") {
         REQUIRE(x.dims()[2] == 2);
         REQUIRE(x.size() == 8);
 
-        REQUIRE(x.get<int>({0,0,0}) == 1);
-        REQUIRE(x.get<int>({0,0,1}) == 2);
-        REQUIRE(x.get<int>({0,1,1}) == 4);
-        REQUIRE(x.get<int>({1,1,1}) == 8);
+        REQUIRE(x.get<int>({0, 0, 0}) == 1);
+        REQUIRE(x.get<int>({0, 0, 1}) == 2);
+        REQUIRE(x.get<int>({0, 1, 1}) == 4);
+        REQUIRE(x.get<int>({1, 1, 1}) == 8);
     }
-
 }
\ No newline at end of file
diff --git a/unit_tests/Tests_Utils.cpp b/unit_tests/Tests_Utils.cpp
index 28a697b..de48c4f 100644
--- a/unit_tests/Tests_Utils.cpp
+++ b/unit_tests/Tests_Utils.cpp
@@ -9,25 +9,34 @@
  *
  ********************************************************************************/
 
-#include <catch2/catch_test_macros.hpp>
 #include <catch2/catch_template_test_macros.hpp>
+#include <catch2/catch_test_macros.hpp>
 #include <memory>
 #include <string>
 
 #include "opencv2/core.hpp"
 
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+#include "aidge/backend/opencv/data/TensorImpl.hpp"
 #include "aidge/backend/opencv/utils/Utils.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/backend/opencv/data/TensorImpl.hpp"
-#include "aidge/backend/cpu/data/TensorImpl.hpp"
 
 #include "Tools.hpp"
 
 using namespace Aidge;
 
-// TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", char, unsigned char, short, unsigned short, int, float, double) {
+// TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", char, unsigned char,
+// short, unsigned short, int, float, double) {
 // TODO : perform test for char and double
-TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", signed char, unsigned char, short, unsigned short, int, float, double) {
+TEMPLATE_TEST_CASE("Opencv Utils",
+                   "[Utils][OpenCV]",
+                   signed char,
+                   unsigned char,
+                   short,
+                   unsigned short,
+                   int,
+                   float,
+                   double) {
 
     constexpr int num_test_matrices = 50;
 
@@ -42,7 +51,7 @@ TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", signed char, unsigned char
             std::vector<cv::Mat> channels;
             cv::Mat mat;
 
-            for (int c = 0; c < ch; ++c){
+            for (int c = 0; c < ch; ++c) {
                 // Create a random matrix
                 cv::Mat randomMat = createRandomMat<TestType>(rows, cols);
                 // Add each random matrix to the vector
@@ -64,10 +73,10 @@ TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", signed char, unsigned char
             REQUIRE(mat.channels() == tensorOcv->dims()[0]);
             REQUIRE(mat.rows == tensorOcv->dims()[1]);
             REQUIRE(mat.cols == tensorOcv->dims()[2]);
-            
 
-            //Get the matrix inside the tensor
-            TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensorOcv->getImpl().get());
+            // Get the matrix inside the tensor
+            TensorImpl_opencv_ *tImpl_opencv =
+                dynamic_cast<TensorImpl_opencv_ *>(tensorOcv->getImpl().get());
             auto mat_tensor = tImpl_opencv->data();
             // Split the mat from tensor opencv into channels
             std::vector<cv::Mat> channels_split;
@@ -77,17 +86,20 @@ TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", signed char, unsigned char
             auto tensorCpu = convertCpu(tensorOcv);
 
             // Get the cpu ptr of the converted tensor
-            auto cpu_ptr = static_cast<TestType*>(tensorCpu->getImpl()->rawPtr());
+            auto cpu_ptr =
+                static_cast<TestType *>(tensorCpu->getImpl()->rawPtr());
 
-            // Compare the tensor cpu values with the cv mat in an elementwise fashion
-            // Loop over channels
+            // Compare the tensor cpu values with the cv mat elementwise.
+            // Loop over channels
             for (int c = 0; c < ch; ++c) {
                 // Loop over rows
                 for (int i = 0; i < rows; ++i) {
                     // Loop over columns
                     for (int j = 0; j < cols; ++j) {
-                        TestType elementValue = channels_split[c].at<TestType>(i, j);
-                        TestType elementValue_cpu = cpu_ptr[c*(rows*cols)+i*cols+j];
+                        TestType elementValue =
+                            channels_split[c].at<TestType>(i, j);
+                        TestType elementValue_cpu =
+                            cpu_ptr[c * (rows * cols) + i * cols + j];
                         REQUIRE(elementValue == elementValue_cpu);
                     }
                 }
diff --git a/unit_tests/Tools.hpp b/unit_tests/Tools.hpp
index 0ade0c5..cc8026b 100644
--- a/unit_tests/Tools.hpp
+++ b/unit_tests/Tools.hpp
@@ -14,8 +14,7 @@
 
 #include "opencv2/core.hpp"
 
-template <typename T>
-cv::Mat createRandomMat(int rows, int cols) {
+template <typename T> cv::Mat createRandomMat(int rows, int cols) {
     cv::Mat randomMat(rows, cols, cv::DataType<T>::type);
 
     cv::randu(randomMat, cv::Scalar::all(0), cv::Scalar::all(255));
-- 
GitLab