diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp
index 98015d5b67e139ec2f842f3ba50f278a578e3da6..539a3128c8c7afb8dad06799e657f70d22db1e9c 100644
--- a/include/aidge/backend/cpu.hpp
+++ b/include/aidge/backend/cpu.hpp
@@ -53,7 +53,7 @@
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
 #include "aidge/backend/cpu/operator/TanhImpl.hpp"
-#include "aidge/backend/cpu/operator/WeightInterleavingImpl.hpp"
+#include "aidge/backend/cpu/operator/WeightInterleavedImpl.hpp"
 
 #include "aidge/backend/cpu/data/TensorImpl.hpp"
 
diff --git a/include/aidge/backend/cpu/operator/WeightInterleavingImpl.hpp b/include/aidge/backend/cpu/operator/WeightInterleavedImpl.hpp
similarity index 87%
rename from include/aidge/backend/cpu/operator/WeightInterleavingImpl.hpp
rename to include/aidge/backend/cpu/operator/WeightInterleavedImpl.hpp
index 0b3b1c5765e4db42500645c314f8befe7cd9b182..ff5c4778f530912e8bdf97ffadb2f546789e2c48 100644
--- a/include/aidge/backend/cpu/operator/WeightInterleavingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/WeightInterleavedImpl.hpp
@@ -23,7 +23,7 @@
 
 namespace Aidge {
 // Operator implementation entry point for the backend
-using WeightInterleavingImpl_cpu = OperatorImpl_cpu<WeightInterleaving_Op,
+using WeightInterleavedImpl_cpu = OperatorImpl_cpu<WeightInterleaving_Op,
     void(const DimSize_t,
         const DimSize_t,
         const DimSize_t,
@@ -31,7 +31,7 @@ using WeightInterleavingImpl_cpu = OperatorImpl_cpu<WeightInterleaving_Op,
         void *)>;
 
 // Implementation entry point registration to Operator
-REGISTRAR(WeightInterleaving_Op, "cpu", Aidge::WeightInterleavingImpl_cpu::create);
+REGISTRAR(WeightInterleaving_Op, "cpu", Aidge::WeightInterleavedImpl_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_WeightInterleavingIMPL_H_ */
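(The opaque `void(const DimSize_t, const DimSize_t, const DimSize_t, const void*, void*)` signature registered here corresponds, in order, to the `input_interleaving`, `nb_interleaving` and `output_interleaving` arguments plus the raw input/output pointers of the forward kernel defined in the companion `_kernels.hpp` header below.)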
diff --git a/include/aidge/backend/cpu/operator/WeightInterleavingImpl_kernels.hpp b/include/aidge/backend/cpu/operator/WeightInterleavedImpl_kernels.hpp
similarity index 57%
rename from include/aidge/backend/cpu/operator/WeightInterleavingImpl_kernels.hpp
rename to include/aidge/backend/cpu/operator/WeightInterleavedImpl_kernels.hpp
index f2347fd2d7ad3e9adfa134ce1413b6348e08c064..18557f8fb5fcdd31476904d273d4d2d7f37a66b5 100644
--- a/include/aidge/backend/cpu/operator/WeightInterleavingImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/WeightInterleavedImpl_kernels.hpp
@@ -1,23 +1,35 @@
-
-
-#ifndef AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVINGIMPL_KERNELS_H_
-#define AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVINGIMPL_KERNELS_H_
-
-#include <algorithm>
-
-#include "aidge/backend/cpu/operator/WeightInterleavingImpl.hpp"
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVEDIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVEDIMPL_KERNELS_H_
+
+#include <cstddef>  // std::size_t
+#include <cstdint>  // std::int8_t, std::uint8_t
+
+#include "aidge/backend/cpu/operator/WeightInterleavedImpl.hpp"
+#include "aidge/data/DataType.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 
 namespace Aidge {
 
     /**
      * @brief Compacts 8-bit data into a smaller bit-width representation.
-     * 
-     * This function takes an array of 8-bit data and compacts it into smaller chunks 
-     * based on the specified bit-width `nb_bits`. Each element in `compactData` will 
+     *
+     * This function takes an array of 8-bit data and compacts it into smaller chunks
+     * based on the specified bit-width `nb_bits`. Each element in `compactData` will
      * store multiple packed `nb_bits` segments extracted from `data`.
-     * 
+     *
      * @param data The input array of 8-bit values to be compacted.
      * @param dataSize The size of the input `data` array.
      * @param compactData The output array storing the compacted data.
@@ -39,14 +51,14 @@ namespace Aidge {
         std::uint8_t shift = 8 / nbSlot;
 
         const unsigned int nbFullCompactbytes = dataSize / nbSlot;
-        
+
         // Main loop to process data in groups of `nbSlot`
         for (std::size_t i = 0; i < nbFullCompactbytes; ++i) {
             T compact = 0;
-            
+
             for (unsigned int j = 0; j < nbSlot; ++j) {
                 compact |= (data[i * nbSlot + j] & mask);    // Apply mask to keep `nb_bits` only
-                
+
                 // Shift only if not on the last slot to make room for the next `nb_bits`
                 if (j < nbSlot - 1) {
                     compact <<= shift;
@@ -55,7 +67,7 @@ namespace Aidge {
             // Store the compacted value in the output array
             compactData[i] = compact;
         }
-        
+
 
         // Handle any remaining data elements (if dataSize is not a multiple of nbSlot).
         std::size_t remaining = dataSize % nbSlot;
@@ -63,7 +75,7 @@ namespace Aidge {
             std::int8_t compact = 0;
             for (std::size_t j = 0; j < remaining; ++j) {
                 compact |= (data[nbFullCompactbytes*nbSlot + j] & mask);
-                
+
                 if (j < remaining - 1) {
                     compact <<= shift;
                 }
@@ -75,7 +87,7 @@ namespace Aidge {
     }
 
 template <class I, class O, int nb_bits>
-void WeightInterleavingImpl_cpu_forward_kernel(const DimSize_t input_interleaving,
+void WeightInterleavedImpl_cpu_forward_kernel(const DimSize_t input_interleaving,
                             const DimSize_t nb_interleaving,
                             const DimSize_t output_interleaving,
                             const void* input_,
@@ -91,41 +103,41 @@ void WeightInterleavingImpl_cpu_forward_kernel(const DimSize_t input_interleavin
 }
 
 
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::Int4, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::Int4>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 4>, nullptr});
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::Int3, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::Int3>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 3>, nullptr});
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::Int2, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::Int2>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 2>, nullptr});
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::Binary, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::Binary>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 1>, nullptr});
-
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::UInt4, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::UInt4>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<uint8_t, uint8_t, 4>, nullptr});
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::UInt3, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::UInt3>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<uint8_t, uint8_t, 3>, nullptr});
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::UInt2, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::UInt2>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<uint8_t, uint8_t, 2>, nullptr});
-
-
-// REGISTRAR(WeightInterleavingImpl_cpu,
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::Int4, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::Int4>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 4>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::Int3, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::Int3>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 3>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::Int2, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::Int2>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 2>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::Binary, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::Binary>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 1>, nullptr});
+
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::UInt4, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::UInt4>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<uint8_t, uint8_t, 4>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::UInt3, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::UInt3>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<uint8_t, uint8_t, 3>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::UInt2, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::UInt2>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<uint8_t, uint8_t, 2>, nullptr});
+
+
+// REGISTRAR(WeightInterleavedImpl_cpu,
 //     {ImplSpec::IOSpec{DataType::Int4, DataFormat::NHWC}},
-//     {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 4>, nullptr});
-// REGISTRAR(WeightInterleavingImpl_cpu,
+//     {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 4>, nullptr});
+// REGISTRAR(WeightInterleavedImpl_cpu,
 //     {ImplSpec::IOSpec{DataType::Int3, DataFormat::NHWC}},
-//     {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 3>, nullptr});
-// REGISTRAR(WeightInterleavingImpl_cpu,
+//     {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 3>, nullptr});
+// REGISTRAR(WeightInterleavedImpl_cpu,
 //     {ImplSpec::IOSpec{DataType::Int2, DataFormat::NHWC}},
-//     {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 2>, nullptr});
+//     {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 2>, nullptr});
 
 
 }
 
-#endif /* AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVINGIMPL_KERNELS_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVEDIMPL_KERNELS_H_ */
\ No newline at end of file
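The `mask` and `nbSlot` definitions fall outside the visible hunks, but the unit tests below pin the behavior down: `mask` keeps the low `nb_bits` of each value and the first value of a pair lands in the high slot. A minimal standalone sketch of the 4-bit path under those assumptions (`packInt4` is a hypothetical name, not part of the Aidge API):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Pack 4-bit values (stored one per byte) two per output byte: even indices
// fill the high nibble, odd indices the low nibble; a trailing odd element
// leaves the low nibble zero-padded.
std::vector<std::uint8_t> packInt4(const std::vector<std::int8_t>& data) {
    std::vector<std::uint8_t> out((data.size() + 1) / 2, 0);
    for (std::size_t i = 0; i < data.size(); ++i) {
        const std::uint8_t nibble = static_cast<std::uint8_t>(data[i]) & 0x0F;
        out[i / 2] |= (i % 2 == 0) ? static_cast<std::uint8_t>(nibble << 4)
                                   : nibble;
    }
    return out;
}

int main() {
    // Mirrors the "CompactData - 4-bit compaction" test: the low nibbles of
    // {0x0F, 0xF5, 0xB3, 0x9C} pack to {0xF5, 0x3C}.
    const auto packed = packInt4({0x0F, static_cast<std::int8_t>(0xF5),
                                  static_cast<std::int8_t>(0xB3),
                                  static_cast<std::int8_t>(0x9C)});
    for (const std::uint8_t byte : packed)
        std::printf("0x%02X ", static_cast<unsigned>(byte));
    std::printf("\n");
}
```

The same scheme generalizes via `nbSlot = 8 / nb_bits`: Int3 also packs two values per byte (hence the shapes in the Int3 tests match the Int4 ones), Int2 packs four, and Binary packs eight, which is exactly the set of kernel instantiations registered above.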
diff --git a/src/operator/WeightInterleavingImpl.cpp b/src/operator/WeightInterleavedImpl.cpp
similarity index 84%
rename from src/operator/WeightInterleavingImpl.cpp
rename to src/operator/WeightInterleavedImpl.cpp
index afb79179512c8ed360387532b458e6bbe10a92b9..2c9f3a6e8df35616a4f7ffae86cbeacd841f44bf 100644
--- a/src/operator/WeightInterleavingImpl.cpp
+++ b/src/operator/WeightInterleavedImpl.cpp
@@ -9,7 +9,7 @@
  *
  ********************************************************************************/
 
-#include "aidge/backend/cpu/operator/WeightInterleavingImpl.hpp"
+#include "aidge/backend/cpu/operator/WeightInterleavedImpl.hpp"
 
 #include <cstddef>  // std::size_t
 #include <functional>
@@ -17,19 +17,19 @@
 #include <tuple>
 
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include "aidge/backend/cpu/operator/WeightInterleavingImpl_kernels.hpp"
+#include "aidge/backend/cpu/operator/WeightInterleavedImpl_kernels.hpp"
 #include "aidge/operator/WeightInterleaving.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
 
 template <>
-void Aidge::WeightInterleavingImpl_cpu::forward()
+void Aidge::WeightInterleavedImpl_cpu::forward()
 {
     const WeightInterleaving_Op& op_ = dynamic_cast<const WeightInterleaving_Op&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0");
 
-    const auto impl = Registrar<WeightInterleavingImpl_cpu>::create(getBestMatch(getRequiredSpec()));
+    const auto impl = Registrar<WeightInterleavedImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Convert input data (no overhead if not needed!)
     // TODO: right now, if needed, memory will be allocated/deallocated at each
@@ -38,14 +38,14 @@ void Aidge::WeightInterleavingImpl_cpu::forward()
     std::shared_ptr<Tensor> input0Fallback;
     const auto& input0 = op_.getInput(0)->refCastFrom(input0Fallback, *(op_.getOutput(0)));
 
-    // inputInterleaving is the number of consecutive input elements that will be compacted 
+    // inputInterleaving is the number of consecutive input elements that will be compacted
     // Here the interleaving is the last dimension (cf STM32 low bit kernels)
     std::size_t inputInterleaving = input0.dims().back();
 
     // The resulting compacted dimension was computed in forwardDims and the output tensor was resized
     std::size_t outputInterleaving = op_.getOutput(0)->dims().back();
 
-    // nb_interleaving is the number of compacted segments 
+    // nb_interleaving is the number of compacted segments
     std::size_t nbInterleaving;
 
     // Determine the number of segments to compact
@@ -65,11 +65,11 @@ void Aidge::WeightInterleavingImpl_cpu::forward()
         outputInterleaving,
         input0.getImpl()->rawPtr(),
         getCPUPtr(mOp.getRawOutput(0)));
-    
-    
+
+
 }
 
 template <>
-void Aidge::WeightInterleavingImpl_cpu::backward() {
+void Aidge::WeightInterleavedImpl_cpu::backward() {
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for WeightInterleaving_Op on backend cpu");
 }
\ No newline at end of file
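How `nbInterleaving` is derived sits outside the visible hunks; a reading consistent with every tensor shape in the tests is the total element count divided by the last (interleaved) dimension. A sketch under that assumption, using the conv-weight shapes from the test file below:

```cpp
#include <cstddef>
#include <cstdio>
#include <functional>
#include <numeric>
#include <vector>

int main() {
    const std::vector<std::size_t> inDims  = {2, 3, 3, 4};  // NHWC Int4 weights
    const std::vector<std::size_t> outDims = {2, 3, 3, 2};  // packed: two nibbles per byte

    const std::size_t inputInterleaving  = inDims.back();   // 4 values compacted together
    const std::size_t outputInterleaving = outDims.back();  // 2 bytes after packing
    const std::size_t nbInterleaving =                      // 18 independent segments
        std::accumulate(inDims.cbegin(), inDims.cend(), std::size_t{1},
                        std::multiplies<std::size_t>()) / inputInterleaving;

    std::printf("%zu -> %zu over %zu segments\n",
                inputInterleaving, outputInterleaving, nbInterleaving);
}
```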
diff --git a/unit_tests/operator/Test_WeightInterleavingImpl.cpp b/unit_tests/operator/Test_WeightInterleavingImpl.cpp
index 9bd9f14681115d8e7d77fbc651596eec714d7b1e..c95c8fca19eb79eb78fc19e93ded3383054383e7 100644
--- a/unit_tests/operator/Test_WeightInterleavingImpl.cpp
+++ b/unit_tests/operator/Test_WeightInterleavingImpl.cpp
@@ -23,7 +23,7 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
-    
+
     std::shared_ptr<Node> myWeightInterleaving = WeightInterleaving();
     auto opWeightInterleaving = std::static_pointer_cast<WeightInterleaving_Op>(myWeightInterleaving -> getOperator());
 
@@ -64,9 +64,9 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
 
     SECTION("CompactData - 4-bit compaction") {
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 4>{
-                                                                {static_cast<std::int8_t>(0x0F), 
-                                                                static_cast<std::int8_t>(0xF5), 
-                                                                static_cast<std::int8_t>(0xB3), 
+                                                                {static_cast<std::int8_t>(0x0F),
+                                                                static_cast<std::int8_t>(0xF5),
+                                                                static_cast<std::int8_t>(0xB3),
                                                                 static_cast<std::int8_t>(0x9C)}
                                                                 });
 
@@ -74,17 +74,17 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         weight->setDataType(Aidge::DataType::Int4);
 
         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array1D<std::int8_t, 2>{
-                                                                {static_cast<int8_t>(0xF5), 
+                                                                {static_cast<int8_t>(0xF5),
                                                                 static_cast<int8_t>(0x3C)}
                                                                 });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -93,8 +93,8 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
 
     SECTION("CompactData - 3-bit compaction") {
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 4>{
-                                                                {static_cast<int8_t>(0x0F), 
-                                                                static_cast<int8_t>(0x05), 
+                                                                {static_cast<int8_t>(0x0F),
+                                                                static_cast<int8_t>(0x05),
                                                                 static_cast<int8_t>(0x04),
                                                                 static_cast<int8_t>(0xD3)}
                                                                 });
@@ -103,17 +103,17 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         weight->setDataType(Aidge::DataType::Int3);
 
         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array1D<std::int8_t, 2>{
-                                                                {static_cast<int8_t>(0x75), 
+                                                                {static_cast<int8_t>(0x75),
                                                                 static_cast<int8_t>(0x43)}
                                                                 });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int3>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int3>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int3>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int3>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -124,7 +124,7 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 4>{
                                                                 {static_cast<std::int8_t>(0x03),
                                                                  static_cast<std::int8_t>(0x02),
-                                                                 static_cast<std::int8_t>(0x01), 
+                                                                 static_cast<std::int8_t>(0x01),
                                                                  static_cast<std::int8_t>(0x00)}
                                                                  });
 
@@ -136,12 +136,12 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                                                                 });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int2>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int2>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int2>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int2>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -161,12 +161,12 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                                                                 });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -175,8 +175,8 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
 
     SECTION("CompactData - Edge Cases - Non-divisible dataSize for nbSlot with nbbits=4") {
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 3>{
-                                                                {static_cast<int8_t>(0x0F), 
-                                                                static_cast<int8_t>(0xA5), 
+                                                                {static_cast<int8_t>(0x0F),
+                                                                static_cast<int8_t>(0xA5),
                                                                 static_cast<int8_t>(0x34)}
                                                                 });
 
@@ -184,17 +184,17 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         weight->setDataType(Aidge::DataType::Int4);
 
         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array1D<std::int8_t, 2>{
-                                                                {static_cast<int8_t>(0xF5), 
+                                                                {static_cast<int8_t>(0xF5),
                                                                 static_cast<int8_t>(0x40)}
                                                                 });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -205,8 +205,8 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
     SECTION("CompactData - Edge Cases - Non-divisible dataSize for nbSlot with nbbits=3") {
 
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 3>{
-                                                                {static_cast<int8_t>(0x0F), 
-                                                                static_cast<int8_t>(0x05), 
+                                                                {static_cast<int8_t>(0x0F),
+                                                                static_cast<int8_t>(0x05),
                                                                 static_cast<int8_t>(0x04)}
                                                                 });
 
@@ -214,17 +214,17 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         weight->setDataType(Aidge::DataType::Int3);
 
         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array1D<std::int8_t, 2>{
-                                                                {static_cast<int8_t>(0x75), 
+                                                                {static_cast<int8_t>(0x75),
                                                                 static_cast<int8_t>(0x40)}
                                                                 });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int3>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int3>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int3>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int3>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -271,9 +271,9 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                         {-1, -6, -3,  0}  // 'F' 'A' 'D' '0' in hexadecimal format
                     }
                 }
-            } 
+            }
         });
-        
+
         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array4D<std::int8_t,2,3,3,2> {
             {
                 {
@@ -310,19 +310,19 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                         {static_cast<int8_t>(0xFA), static_cast<int8_t>(0xD0)}  // 'F' 'A' 'D' '0' in hexadecimal format
                     }
                 }
-            } 
+            }
         });
 
         weight->setDataFormat(Aidge::DataFormat::NHWC);
         weight->setDataType(Aidge::DataType::Int4);
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
 
         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -368,9 +368,9 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                         {-1, -6, -3,  0}  // 'F' 'A' 'D' '0' in hexadecimal format
                     }
                 }
-            } 
+            }
         });
-        
+
         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array4D<std::int8_t,2,3,3,2> {
             {
                 {
@@ -407,7 +407,7 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                         {static_cast<int8_t>(0xFA), static_cast<int8_t>(0xD0)}  // 'F' 'A' 'D' '0' in hexadecimal format
                     }
                 }
-            } 
+            }
         });
 
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
@@ -415,12 +415,12 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
 
         // Create convolution node
         std::shared_ptr<Node> conv = Conv(4, 2, {3, 3}, "conv1");
-        
+
         // Place the weight tensor in the weight producer of the conv
         auto weightProducer = conv->getParent(1);
         weightProducer->getOperator()->setOutput(0, weight);
 
-        // Set dataType, dataformat and backend of convolution 
+        // Set dataType, dataFormat and backend of convolution
         conv->getOperator()->setDataFormat(Aidge::DataFormat::NHWC);
         conv->getOperator()->setDataType(Aidge::DataType::Int4);
         conv->getOperator()->setBackend("cpu");
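`forwardDims` (not part of this diff) must already have resized the output before the kernel runs; every expected shape above is consistent with a compacted last dimension of `ceil(dim / nbSlot)` where `nbSlot = 8 / nb_bits`. A hypothetical helper capturing that rule (the name is illustrative, not Aidge API):

```cpp
#include <cstddef>

// Compacted size of a dimension once nbSlot values are packed per byte.
constexpr std::size_t compactedDim(std::size_t dim, unsigned nbBits) {
    const std::size_t nbSlot = 8 / nbBits;   // values packed per output byte
    return (dim + nbSlot - 1) / nbSlot;      // ceil(dim / nbSlot)
}

static_assert(compactedDim(4, 4) == 2, "Int4: two values per byte");
static_assert(compactedDim(3, 4) == 2, "odd trailing value is zero-padded");
static_assert(compactedDim(4, 3) == 2, "Int3 still packs two per byte (8/3 == 2)");
static_assert(compactedDim(4, 2) == 1, "Int2 packs four per byte");
```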