diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp
index 98015d5b67e139ec2f842f3ba50f278a578e3da6..539a3128c8c7afb8dad06799e657f70d22db1e9c 100644
--- a/include/aidge/backend/cpu.hpp
+++ b/include/aidge/backend/cpu.hpp
@@ -53,7 +53,7 @@
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
 #include "aidge/backend/cpu/operator/TanhImpl.hpp"
-#include "aidge/backend/cpu/operator/WeightInterleavingImpl.hpp"
+#include "aidge/backend/cpu/operator/WeightInterleavedImpl.hpp"
 
 #include "aidge/backend/cpu/data/TensorImpl.hpp"
 
diff --git a/include/aidge/backend/cpu/operator/WeightInterleavingImpl.hpp b/include/aidge/backend/cpu/operator/WeightInterleavedImpl.hpp
similarity index 87%
rename from include/aidge/backend/cpu/operator/WeightInterleavingImpl.hpp
rename to include/aidge/backend/cpu/operator/WeightInterleavedImpl.hpp
index 0b3b1c5765e4db42500645c314f8befe7cd9b182..ff5c4778f530912e8bdf97ffadb2f546789e2c48 100644
--- a/include/aidge/backend/cpu/operator/WeightInterleavingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/WeightInterleavedImpl.hpp
@@ -23,7 +23,7 @@
 
 namespace Aidge {
 // Operator implementation entry point for the backend
-using WeightInterleavingImpl_cpu = OperatorImpl_cpu<WeightInterleaving_Op,
+using WeightInterleavedImpl_cpu = OperatorImpl_cpu<WeightInterleaving_Op,
     void(const DimSize_t,
         const DimSize_t,
         const DimSize_t,
@@ -31,7 +31,7 @@ using WeightInterleavingImpl_cpu = OperatorImpl_cpu<WeightInterleaving_Op,
         void *)>;
 
 // Implementation entry point registration to Operator
-REGISTRAR(WeightInterleaving_Op, "cpu", Aidge::WeightInterleavingImpl_cpu::create);
+REGISTRAR(WeightInterleaving_Op, "cpu", Aidge::WeightInterleavedImpl_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_WeightInterleavingIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/WeightInterleavingImpl_kernels.hpp b/include/aidge/backend/cpu/operator/WeightInterleavedImpl_kernels.hpp
similarity index 57%
rename from include/aidge/backend/cpu/operator/WeightInterleavingImpl_kernels.hpp
rename to include/aidge/backend/cpu/operator/WeightInterleavedImpl_kernels.hpp
index f2347fd2d7ad3e9adfa134ce1413b6348e08c064..18557f8fb5fcdd31476904d273d4d2d7f37a66b5 100644
--- a/include/aidge/backend/cpu/operator/WeightInterleavingImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/WeightInterleavedImpl_kernels.hpp
@@ -1,23 +1,35 @@
-
-
-#ifndef AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVINGIMPL_KERNELS_H_
-#define AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVINGIMPL_KERNELS_H_
-
-#include <algorithm>
-
-#include "aidge/backend/cpu/operator/WeightInterleavingImpl.hpp"
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVEDIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVEDIMPL_KERNELS_H_
+
+#include <cstddef>  // std::size_t
+#include <cstdint>  // std::int8_t, std::uint8_t
+
+#include "aidge/backend/cpu/operator/WeightInterleavedImpl.hpp"
+#include "aidge/data/DataType.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 
 namespace Aidge {
 
     /**
      * @brief Compacts 8-bit data into a smaller bit-width representation.
-     * 
-     * This function takes an array of 8-bit data and compacts it into smaller chunks 
-     * based on the specified bit-width `nb_bits`. Each element in `compactData` will 
+     *
+     * This function takes an array of 8-bit data and compacts it into smaller chunks
+     * based on the specified bit-width `nb_bits`. Each element in `compactData` will
      * store multiple packed `nb_bits` segments extracted from `data`.
-     * 
+     *
      * @param data The input array of 8-bit values to be compacted.
      * @param dataSize The size of the input `data` array.
      * @param compactData The output array storing the compacted data.
@@ -39,14 +51,14 @@ namespace Aidge {
         std::uint8_t shift = 8 / nbSlot;
 
         const unsigned int nbFullCompactbytes = dataSize / nbSlot;
-        
+
         // Main loop to process data in groups of `nbSlot`
         for (std::size_t i = 0; i < nbFullCompactbytes; ++i) {
             T compact = 0;
-            
+
             for (unsigned int j = 0; j < nbSlot; ++j) {
                 compact |= (data[i * nbSlot + j] & mask);    // Apply mask to keep `nb_bits` only
-                
+
                 // Shift only if not on the last slot to make room for the next `nb_bits`
                 if (j < nbSlot - 1) {
                     compact <<= shift;
@@ -55,7 +67,7 @@ namespace Aidge {
             // Store the compacted value in the output array
             compactData[i] = compact;
         }
-        
+
 
         // Handle any remaining data elements (if dataSize is not a multiple of nbSlot).
         std::size_t remaining = dataSize % nbSlot;
@@ -63,7 +75,7 @@ namespace Aidge {
             std::int8_t compact = 0;
             for (std::size_t j = 0; j < remaining; ++j) {
                 compact |= (data[nbFullCompactbytes*nbSlot + j] & mask);
-                
+
                 if (j < remaining - 1) {
                     compact <<= shift;
                 }
@@ -75,7 +87,7 @@ namespace Aidge {
     }
 
 template <class I, class O, int nb_bits>
-void WeightInterleavingImpl_cpu_forward_kernel(const DimSize_t input_interleaving,
+void WeightInterleavedImpl_cpu_forward_kernel(const DimSize_t input_interleaving,
                             const DimSize_t nb_interleaving,
                             const DimSize_t output_interleaving,
                             const void* input_,
@@ -91,41 +103,41 @@ void WeightInterleavingImpl_cpu_forward_kernel(const DimSize_t input_interleavin
 }
 
 
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::Int4, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::Int4>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 4>, nullptr});
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::Int3, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::Int3>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 3>, nullptr});
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::Int2, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::Int2>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 2>, nullptr});
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::Binary, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::Binary>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 1>, nullptr});
-
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::UInt4, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::UInt4>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<uint8_t, uint8_t, 4>, nullptr});
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::UInt3, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::UInt3>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<uint8_t, uint8_t, 3>, nullptr});
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::UInt2, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::UInt2>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<uint8_t, uint8_t, 2>, nullptr});
-
-
-// REGISTRAR(WeightInterleavingImpl_cpu,
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::Int4, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::Int4>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 4>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::Int3, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::Int3>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 3>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::Int2, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::Int2>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 2>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::Binary, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::Binary>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 1>, nullptr});
+
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::UInt4, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::UInt4>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<uint8_t, uint8_t, 4>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::UInt3, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::UInt3>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<uint8_t, uint8_t, 3>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::UInt2, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::UInt2>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<uint8_t, uint8_t, 2>, nullptr});
+
+
+// REGISTRAR(WeightInterleavedImpl_cpu,
 //     {ImplSpec::IOSpec{DataType::Int4, DataFormat::NHWC}},
-//     {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 4>, nullptr});
-// REGISTRAR(WeightInterleavingImpl_cpu,
+//     {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 4>, nullptr});
+// REGISTRAR(WeightInterleavedImpl_cpu,
 //     {ImplSpec::IOSpec{DataType::Int3, DataFormat::NHWC}},
-//     {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 3>, nullptr});
-// REGISTRAR(WeightInterleavingImpl_cpu,
+//     {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 3>, nullptr});
+// REGISTRAR(WeightInterleavedImpl_cpu,
 //     {ImplSpec::IOSpec{DataType::Int2, DataFormat::NHWC}},
-//     {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 2>, nullptr});
+//     {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 2>, nullptr});
 
 
 }
 
-#endif /* AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVINGIMPL_KERNELS_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVEDIMPL_KERNELS_H_ */
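For reference, a minimal standalone sketch of the packing rule `compactData` applies for `nb_bits = 4` (two values per byte, `mask = 0x0F`, `shift = 4`). It reproduces the first vector of the unit tests further below, where {0x0F, 0xF5} compacts to 0xF5; the real kernel additionally handles the remainder tail when `dataSize` is not a multiple of `nbSlot`:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const std::int8_t data[2] = {static_cast<std::int8_t>(0x0F),
                                     static_cast<std::int8_t>(0xF5)};
        const std::uint8_t mask = 0x0F;   // (1 << 4) - 1: keep the low 4 bits
        std::int8_t compact = 0;
        compact |= (data[0] & mask);      // first slot:  0x0F
        compact <<= 4;                    // make room for the next 4 bits
        compact |= (data[1] & mask);      // second slot: 0x05 -> 0xF5
        std::printf("0x%02X\n", static_cast<std::uint8_t>(compact));  // prints 0xF5
        return 0;
    }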
diff --git a/src/operator/WeightInterleavingImpl.cpp b/src/operator/WeightInterleavedImpl.cpp
similarity index 84%
rename from src/operator/WeightInterleavingImpl.cpp
rename to src/operator/WeightInterleavedImpl.cpp
index afb79179512c8ed360387532b458e6bbe10a92b9..2c9f3a6e8df35616a4f7ffae86cbeacd841f44bf 100644
--- a/src/operator/WeightInterleavingImpl.cpp
+++ b/src/operator/WeightInterleavedImpl.cpp
@@ -9,7 +9,7 @@
  *
  ********************************************************************************/
 
-#include "aidge/backend/cpu/operator/WeightInterleavingImpl.hpp"
+#include "aidge/backend/cpu/operator/WeightInterleavedImpl.hpp"
 
 #include <cstddef>  // std::size_t
 #include <functional>
@@ -17,19 +17,19 @@
 #include <tuple>
 
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include "aidge/backend/cpu/operator/WeightInterleavingImpl_kernels.hpp"
+#include "aidge/backend/cpu/operator/WeightInterleavedImpl_kernels.hpp"
 #include "aidge/operator/WeightInterleaving.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
 
 template <>
-void Aidge::WeightInterleavingImpl_cpu::forward()
+void Aidge::WeightInterleavedImpl_cpu::forward()
 {
     const WeightInterleaving_Op& op_ = dynamic_cast<const WeightInterleaving_Op&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0");
 
-    const auto impl = Registrar<WeightInterleavingImpl_cpu>::create(getBestMatch(getRequiredSpec()));
+    const auto impl = Registrar<WeightInterleavedImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Convert input data (no overhead if not needed!)
     // TODO: right now, if needed, memory will be allocated/deallocated at each
@@ -38,14 +38,14 @@ void Aidge::WeightInterleavingImpl_cpu::forward()
     std::shared_ptr<Tensor> input0Fallback;
     const auto& input0 = op_.getInput(0)->refCastFrom(input0Fallback, *(op_.getOutput(0)));
 
-    // inputInterleaving is the number of consecutive input elements that will be compacted 
+    // inputInterleaving is the number of consecutive input elements that will be compacted
     // Here the interleaving is the last dimension (cf STM32 low bit kernels)
     std::size_t inputInterleaving = input0.dims().back();
 
     // The resulting compacted dimension was computed in forwardDims and the output tensor was resized
     std::size_t outputInterleaving = op_.getOutput(0)->dims().back();
 
-    // nb_interleaving is the number of compacted segments 
+    // nb_interleaving is the number of compacted segments
     std::size_t nbInterleaving;
 
     // Determine the number of segment to compact
@@ -65,11 +65,11 @@ void Aidge::WeightInterleavingImpl_cpu::forward()
         outputInterleaving,
         input0.getImpl()->rawPtr(),
         getCPUPtr(mOp.getRawOutput(0)));
-    
-    
+
+
 }
 
 template <>
-void Aidge::WeightInterleavingImpl_cpu::backward() {
+void Aidge::WeightInterleavedImpl_cpu::backward() {
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for WeightInterleaving_Op on backend cpu");
-}
\ No newline at end of file
+}
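The computation of `nbInterleaving` falls outside the hunk context above; a plausible reconstruction (an assumption, not the verbatim source) is the product of all dimensions except the last, i.e. the number of last-axis segments handed to the kernel:

    #include <cstddef>
    #include <functional>
    #include <numeric>
    #include <vector>

    // Assumed reconstruction: one compacted segment per run along the last axis.
    std::size_t segmentCount(const std::vector<std::size_t>& dims) {
        if (dims.size() <= 1) return 1;  // a 1-D tensor is a single segment
        return std::accumulate(dims.cbegin(), dims.cend() - 1, std::size_t(1),
                               std::multiplies<std::size_t>());
    }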
diff --git a/unit_tests/operator/Test_BitShift.cpp b/unit_tests/operator/Test_BitShift.cpp
index db97e8d30b5e7121b096f99f8722a69e6d4e367c..33ab932e296be717604be42716d7abe2b61f65ee 100644
--- a/unit_tests/operator/Test_BitShift.cpp
+++ b/unit_tests/operator/Test_BitShift.cpp
@@ -136,8 +136,8 @@ TEST_CASE("[cpu/operator] BitShift_TEST", "[BitShift][CPU]") {
 
 
             }
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {}μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {}μs\n", duration.count());
         }
         SECTION("Test BitShift kernels with Broadcasting") {
             std::size_t number_of_operation = 0;
@@ -236,8 +236,8 @@ TEST_CASE("[cpu/operator] BitShift_TEST", "[BitShift][CPU]") {
                 const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
                 number_of_operation += nb_elements;
             }
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {}μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {}μs\n", duration.count());
         }
 
 }
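The test files in this patch swap direct `fmt::print` calls for Aidge's logger, as in the Test_BitShift hunks above. Since the format strings carry over unchanged, `Log::info` evidently accepts fmt-style placeholders; a minimal usage sketch, assuming the logger is declared in `aidge/utils/Log.hpp` (the include path is an assumption, and may already be pulled in transitively by these tests):

    #include <chrono>
    #include "aidge/utils/Log.hpp"  // assumed header for Aidge::Log

    int main() {
        std::chrono::microseconds duration{42};
        // Same call shape as the converted tests: fmt-style "{}" placeholders.
        Aidge::Log::info("total time: {}μs\n", duration.count());
        return 0;
    }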
diff --git a/unit_tests/operator/Test_ClipImpl.cpp b/unit_tests/operator/Test_ClipImpl.cpp
index 1a7aa5e548a4e6b93c0052758fb9210fd8b14818..99147ac93bd659dd91897f6b7f1f3f33e5552ef6 100644
--- a/unit_tests/operator/Test_ClipImpl.cpp
+++ b/unit_tests/operator/Test_ClipImpl.cpp
@@ -119,8 +119,8 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
 
             REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
         }
-        fmt::print("INFO: multiplications over time spent: {}\n", totalComputation/duration.count());
-        fmt::print("INFO: total time: {}\n", duration.count());
+        Log::info("multiplications over time spent: {}\n", totalComputation/duration.count());
+        Log::info("total time: {}\n", duration.count());
     }
     SECTION("Clip test with min >= max [Forward]") {
         std::size_t totalComputation = 0;
@@ -179,8 +179,8 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
 
             REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
         }
-        fmt::print("INFO: multiplications over time spent: {}\n", totalComputation/duration.count());
-        fmt::print("INFO: total time: {}\n", duration.count());
+        Log::info("multiplications over time spent: {}\n", totalComputation/duration.count());
+        Log::info("total time: {}\n", duration.count());
     }
     SECTION("Clip with Clip Attr [Forward]")
     {
@@ -232,8 +232,8 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
 
             REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
         }
-        fmt::print("INFO: multiplications over time spent: {}\n", totalComputation/duration.count());
-        fmt::print("INFO: total time: {}\n", duration.count());
+        Log::info("multiplications over time spent: {}\n", totalComputation/duration.count());
+        Log::info("total time: {}\n", duration.count());
     }
     SECTION("Simple clip test [Backward]") {
         std::size_t totalComputation = 0;
@@ -311,8 +311,8 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
             duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
             REQUIRE(GT1 == BackwardTensorVec);
         }
-        fmt::print("INFO: multiplications over time spent: {}\n", totalComputation/duration.count());
-        fmt::print("INFO: total time: {}\n", duration.count());
+        Log::info("multiplications over time spent: {}\n", totalComputation/duration.count());
+        Log::info("total time: {}\n", duration.count());
     }
  }
 } // namespace Aidge
diff --git a/unit_tests/operator/Test_DivImpl.cpp b/unit_tests/operator/Test_DivImpl.cpp
index b03fe4aa91e96299f2a748026ee8ca5e5d57fb5c..4037b2ad4e117573279f07d0c1819d3435ee7ada 100644
--- a/unit_tests/operator/Test_DivImpl.cpp
+++ b/unit_tests/operator/Test_DivImpl.cpp
@@ -126,8 +126,8 @@ TEST_CASE("[cpu/operator] Div", "[Div][CPU]") {
 
                 // with broadcasting
             }
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {} μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {} μs\n", duration.count());
         }
 
         SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
@@ -221,8 +221,8 @@ TEST_CASE("[cpu/operator] Div", "[Div][CPU]") {
                 const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
                 number_of_operation += nb_elements;
             }
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {} μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {} μs\n", duration.count());
         }
         SECTION("+1-D Tensor / 1-D Tensor") {
             std::size_t number_of_operation = 0;
@@ -317,8 +317,8 @@ TEST_CASE("[cpu/operator] Div", "[Div][CPU]") {
                 number_of_operation += nb_elements;
             }
 
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {} μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {} μs\n", duration.count());
         }
     }
 }
diff --git a/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp b/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp
index 63f8d3269cdb25a6d84c3e936d8f124b0964962d..8e8536accadcb874f74d4d962aae435bc1351d6e 100644
--- a/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp
+++ b/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp
@@ -554,9 +554,9 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
           delete[] result;
         }
       }
-      fmt::print("INFO: GlobalAveragePooling total execution time: {}µs\n", duration.count());
-      fmt::print("INFO: Number of operations : {}\n", number_of_operation);
-      fmt::print("INFO: Operation / µs = {}\n", number_of_operation / duration.count());
+      Log::info("GlobalAveragePooling total execution time: {}µs\n", duration.count());
+      Log::info("Number of operations : {}\n", number_of_operation);
+      Log::info("Operation / µs = {}\n", number_of_operation / duration.count());
     }
   }
 }
diff --git a/unit_tests/operator/Test_MatMulImpl.cpp b/unit_tests/operator/Test_MatMulImpl.cpp
index daef47b32ffcca880a1bf2438e9ee9c35adbb2c8..f062f06cddfbd04217d63e1edcb6505914bc77e9 100644
--- a/unit_tests/operator/Test_MatMulImpl.cpp
+++ b/unit_tests/operator/Test_MatMulImpl.cpp
@@ -111,8 +111,8 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") {
             delete[] bigArray2;
             delete[] res;
         }
-        fmt::print("INFO: number of multiplications over time spent: {}\n", (totalComputation / duration.count()));
-        fmt::print("INFO: total time: {} μs\n", duration.count());
+        Log::info("number of multiplications over time spent: {}\n", (totalComputation / duration.count()));
+        Log::info("total time: {} μs\n", duration.count());
     }
 
     SECTION("3-D Tensors") {
@@ -179,8 +179,8 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") {
             delete[] bigArray2;
             delete[] res;
         }
-        fmt::print("INFO: number of multiplications over time spent: {}\n", (totalComputation / duration.count()));
-        fmt::print("INFO: total time: {} μs\n", duration.count());
+        Log::info("number of multiplications over time spent: {}\n", (totalComputation / duration.count()));
+        Log::info("total time: {} μs\n", duration.count());
     }
 
     SECTION("4-D Tensors") {
@@ -249,8 +249,8 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") {
             delete[] bigArray2;
             delete[] res;
         }
-        fmt::print("INFO: number of multiplications over time spent: {}\n", (totalComputation / duration.count()));
-        fmt::print("INFO: total time: {} μs\n", duration.count());
+        Log::info("number of multiplications over time spent: {}\n", (totalComputation / duration.count()));
+        Log::info("total time: {} μs\n", duration.count());
     }
 
     SECTION("+2-D / 1-D") {
diff --git a/unit_tests/operator/Test_MulImpl.cpp b/unit_tests/operator/Test_MulImpl.cpp
index 925b9f2059518d434b74a0e2fd0cde79b334c54e..b5f5172542ff560400d3033d190f48738b34035d 100644
--- a/unit_tests/operator/Test_MulImpl.cpp
+++ b/unit_tests/operator/Test_MulImpl.cpp
@@ -437,8 +437,8 @@ TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") {
                 delete[] array1;
                 delete[] result;
             }
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {} μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {} μs\n", duration.count());
         }
 
 
@@ -568,8 +568,8 @@ TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") {
                 const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
                 number_of_operation += nb_elements;
             }
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {} μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {} μs\n", duration.count());
         }
         SECTION("+1-D Tensor / 1-D Tensor") {
             std::size_t number_of_operation = 0;
@@ -664,8 +664,8 @@ TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") {
                 number_of_operation += nb_elements;
             }
 
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {} μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {} μs\n", duration.count());
         }
     }
 }
diff --git a/unit_tests/operator/Test_PowImpl.cpp b/unit_tests/operator/Test_PowImpl.cpp
index 8238da3970740f4b8d6095d7a28c000319ea004e..55a416c3f404506359e06f9937dd958503236901 100644
--- a/unit_tests/operator/Test_PowImpl.cpp
+++ b/unit_tests/operator/Test_PowImpl.cpp
@@ -126,8 +126,8 @@ TEST_CASE("[cpu/operator] Pow", "[Pow][CPU]") {
 
                 // with broadcasting
             }
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {} μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {} μs\n", duration.count());
         }
 
         SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
@@ -221,8 +221,8 @@ TEST_CASE("[cpu/operator] Pow", "[Pow][CPU]") {
                 const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
                 number_of_operation += nb_elements;
             }
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {} μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {} μs\n", duration.count());
         }
         SECTION("+1-D Tensor / 1-D Tensor") {
             std::size_t number_of_operation = 0;
@@ -317,8 +317,8 @@ TEST_CASE("[cpu/operator] Pow", "[Pow][CPU]") {
                 number_of_operation += nb_elements;
             }
 
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {} μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {} μs\n", duration.count());
         }
     }
 
diff --git a/unit_tests/operator/Test_RoundImpl.cpp b/unit_tests/operator/Test_RoundImpl.cpp
index 8b5dd53a79242a38063f178807d5b6b40f2c0e96..e658b0616683633ce19b2284abb9d4fae7942a23 100644
--- a/unit_tests/operator/Test_RoundImpl.cpp
+++ b/unit_tests/operator/Test_RoundImpl.cpp
@@ -108,8 +108,8 @@ TEST_CASE("[cpu/operator] Round_Test", "[Round][CPU]") {
 
 
             }
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {} μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {} μs\n", duration.count());
         }
     }
 } // namespace Aidge
diff --git a/unit_tests/operator/Test_SubImpl.cpp b/unit_tests/operator/Test_SubImpl.cpp
index 471ae560a35b480945d7e5c85fb93bbbc8d459f6..1317e88a371e9a6e7a3deae5b7f662a9cd879a60 100644
--- a/unit_tests/operator/Test_SubImpl.cpp
+++ b/unit_tests/operator/Test_SubImpl.cpp
@@ -126,8 +126,8 @@ TEST_CASE("[cpu/operator] Sub", "[Sub][CPU]") {
 
                 // with broadcasting
             }
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {}μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {}μs\n", duration.count());
         }
 
         SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
@@ -221,8 +221,8 @@ TEST_CASE("[cpu/operator] Sub", "[Sub][CPU]") {
                 const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
                 number_of_operation += nb_elements;
             }
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {}μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {}μs\n", duration.count());
         }
         SECTION("+1-D Tensor / 1-D Tensor") {
             std::size_t number_of_operation = 0;
@@ -317,8 +317,8 @@ TEST_CASE("[cpu/operator] Sub", "[Sub][CPU]") {
                 number_of_operation += nb_elements;
             }
 
-            fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
-            fmt::print("INFO: total time: {}μs\n", duration.count());
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {}μs\n", duration.count());
         }
     }
 }
diff --git a/unit_tests/operator/Test_WeightInterleavingImpl.cpp b/unit_tests/operator/Test_WeightInterleavingImpl.cpp
index 9bd9f14681115d8e7d77fbc651596eec714d7b1e..c95c8fca19eb79eb78fc19e93ded3383054383e7 100644
--- a/unit_tests/operator/Test_WeightInterleavingImpl.cpp
+++ b/unit_tests/operator/Test_WeightInterleavingImpl.cpp
@@ -23,7 +23,7 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
-    
+
     std::shared_ptr<Node> myWeightInterleaving = WeightInterleaving();
     auto opWeightInterleaving = std::static_pointer_cast<WeightInterleaving_Op>(myWeightInterleaving -> getOperator());
 
@@ -64,9 +64,9 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
 
     SECTION("CompactData - 4-bit compaction") {
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 4>{
-                                                                {static_cast<std::int8_t>(0x0F), 
-                                                                static_cast<std::int8_t>(0xF5), 
-                                                                static_cast<std::int8_t>(0xB3), 
+                                                                {static_cast<std::int8_t>(0x0F),
+                                                                static_cast<std::int8_t>(0xF5),
+                                                                static_cast<std::int8_t>(0xB3),
                                                                 static_cast<std::int8_t>(0x9C)}
                                                                 });
 
@@ -74,17 +74,17 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         weight->setDataType(Aidge::DataType::Int4);
 
         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array1D<std::int8_t, 2>{
-                                                                {static_cast<int8_t>(0xF5), 
+                                                                {static_cast<int8_t>(0xF5),
                                                                 static_cast<int8_t>(0x3C)}
                                                                 });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -93,8 +93,8 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
 
     SECTION("CompactData - 3-bit compaction") {
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 4>{
-                                                                {static_cast<int8_t>(0x0F), 
-                                                                static_cast<int8_t>(0x05), 
+                                                                {static_cast<int8_t>(0x0F),
+                                                                static_cast<int8_t>(0x05),
                                                                 static_cast<int8_t>(0x04),
                                                                 static_cast<int8_t>(0xD3)}
                                                                 });
@@ -103,17 +103,17 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         weight->setDataType(Aidge::DataType::Int3);
 
         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array1D<std::int8_t, 2>{
-                                                                {static_cast<int8_t>(0x75), 
+                                                                {static_cast<int8_t>(0x75),
                                                                 static_cast<int8_t>(0x43)}
                                                                 });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int3>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int3>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int3>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int3>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -124,7 +124,7 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 4>{
                                                                 {static_cast<std::int8_t>(0x03),
                                                                  static_cast<std::int8_t>(0x02),
-                                                                 static_cast<std::int8_t>(0x01), 
+                                                                 static_cast<std::int8_t>(0x01),
                                                                  static_cast<std::int8_t>(0x00)}
                                                                  });
 
@@ -136,12 +136,12 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                                                                 });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int2>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int2>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int2>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int2>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -161,12 +161,12 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                                                                 });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -175,8 +175,8 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
 
     SECTION("CompactData - Edge Cases - Non-divisible dataSize for nbSlot with nbbits=4") {
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 3>{
-                                                                {static_cast<int8_t>(0x0F), 
-                                                                static_cast<int8_t>(0xA5), 
+                                                                {static_cast<int8_t>(0x0F),
+                                                                static_cast<int8_t>(0xA5),
                                                                 static_cast<int8_t>(0x34)}
                                                                 });
 
@@ -184,17 +184,17 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         weight->setDataType(Aidge::DataType::Int4);
 
         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array1D<std::int8_t, 2>{
-                                                                {static_cast<int8_t>(0xF5), 
+                                                                {static_cast<int8_t>(0xF5),
                                                                 static_cast<int8_t>(0x40)}
                                                                 });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -205,8 +205,8 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
     SECTION("CompactData - Edge Cases - Non-divisible dataSize for nbSlot with nbbits=3") {
 
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 3>{
-                                                                {static_cast<int8_t>(0x0F), 
-                                                                static_cast<int8_t>(0x05), 
+                                                                {static_cast<int8_t>(0x0F),
+                                                                static_cast<int8_t>(0x05),
                                                                 static_cast<int8_t>(0x04)}
                                                                 });
 
@@ -214,17 +214,17 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         weight->setDataType(Aidge::DataType::Int3);
 
         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array1D<std::int8_t, 2>{
-                                                                {static_cast<int8_t>(0x75), 
+                                                                {static_cast<int8_t>(0x75),
                                                                 static_cast<int8_t>(0x40)}
                                                                 });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int3>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int3>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int3>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int3>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -271,9 +271,9 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                         {-1, -6, -3,  0}  // 'F' 'A' 'D' '0' in hexadecimal format
                     }
                 }
-            } 
+            }
         });
-        
+
         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array4D<std::int8_t,2,3,3,2> {
             {
                 {
@@ -310,19 +310,19 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                         {static_cast<int8_t>(0xFA), static_cast<int8_t>(0xD0)}  // 'F' 'A' 'D' '0' in hexadecimal format
                     }
                 }
-            } 
+            }
         });
 
         weight->setDataFormat(Aidge::DataFormat::NHWC);
         weight->setDataType(Aidge::DataType::Int4);
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -368,9 +368,9 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                         {-1, -6, -3,  0}  // 'F' 'A' 'D' '0' in hexadecimal format
                     }
                 }
-            } 
+            }
         });
-        
+
         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array4D<std::int8_t,2,3,3,2> {
             {
                 {
@@ -407,7 +407,7 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                         {static_cast<int8_t>(0xFA), static_cast<int8_t>(0xD0)}  // 'F' 'A' 'D' '0' in hexadecimal format
                     }
                 }
-            } 
+            }
         });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
@@ -415,12 +415,12 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
 
         // Create convolution node
         std::shared_ptr<Node> conv = Conv(4, 2, {3, 3}, "conv1");
-        
+
         // Place the weight tensor in the weight producer of the conv
         auto weightProducer = conv->getParent(1);
         weightProducer->getOperator()->setOutput(0, weight);
 
-        // Set dataType, dataformat and backend of convolution 
+        // Set dataType, dataFormat and backend of the convolution
         conv->getOperator()->setDataFormat(Aidge::DataFormat::NHWC);
         conv->getOperator()->setDataType(Aidge::DataType::Int4);
         conv->getOperator()->setBackend("cpu");
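Finally, the tests above track the trait rename from `WeightInterleavingType<DT>::type` to the `WeightInterleavedType_v<DT>` variable template. The actual definitions live in aidge_core; as a hedged, self-contained illustration of the `*_v` convention being adopted (the stand-in enum and the packed-type name below are assumptions):

    // Stand-in DataType enum; the real one comes from aidge/data/DataType.hpp.
    enum class DataType { Int4, Dual_Int4 /* ... */ };

    template <DataType DT>
    struct WeightInterleavedType;  // specialized per input type in the core library

    template <>
    struct WeightInterleavedType<DataType::Int4> {
        static constexpr DataType type = DataType::Dual_Int4;  // two Int4 per byte (assumed)
    };

    // Variable-template shorthand, mirroring std::is_same_v over std::is_same<...>::value.
    template <DataType DT>
    constexpr DataType WeightInterleavedType_v = WeightInterleavedType<DT>::type;

    static_assert(WeightInterleavedType_v<DataType::Int4> == DataType::Dual_Int4);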