diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 8a6684b22b4c4659353fa5b5dee2b0820c46a11f..9710c028aa6b18c5f5324b898b96e93f1a7912cf 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -12,13 +12,16 @@
 #ifndef AIDGE_BACKEND_OPERATORIMPL_H_
 #define AIDGE_BACKEND_OPERATORIMPL_H_
 
+#include <functional>  // std::function
+#include <memory>
 #include <string>
 #include <vector>
-#include <functional>
+#include <utility>  // std::pair
 
 #include "aidge/utils/Types.h"
 #include "aidge/utils/DynamicAttributes.hpp"
-#include "aidge/data/Data.hpp"
+#include "aidge/data/DataFormat.hpp"
+#include "aidge/data/DataType.hpp"
 #include "aidge/data/Elts.hpp"
 #include "aidge/scheduler/ProdConso.hpp"
 
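
The include edits above narrow OperatorImpl.hpp's dependency from the broad aidge/data/Data.hpp header to the split aidge/data/DataType.hpp and aidge/data/DataFormat.hpp headers it actually uses; the same substitution recurs in the TensorImpl.hpp, Tensor.hpp, pybind_Data.cpp and StaticAnalysis.cpp hunks below. A rough sketch of the kind of declarations such split headers expose, with enumerator lists abbreviated and purely illustrative (the real sets may differ):

namespace Aidge {
// DataType.hpp (abridged, illustrative sketch)
enum class DataType { Float64, Float32, Float16, Int64, Int32, Int8, UInt8, Any };
// DataFormat.hpp (abridged, illustrative sketch)
enum class DataFormat { Default, NCHW, NHWC, CHWN, NCDHW, NDHWC, CDHWN, Any };
} // namespace Aidge

Keeping the enums in their own headers lets interface headers such as OperatorImpl.hpp compile without pulling in the full Data/Tensor machinery.
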
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 6ddb29c8d97d4812434e741a1baa49fe051e5b27..05674c44d5a97e89bfd6154e956d52e7d22920ff 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -19,7 +19,7 @@
 #include <vector>
 #include <string>
 
-#include "aidge/data/Data.hpp"
+#include "aidge/data/DataType.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 4f7079e59c4328885969e7dc7181395d1333d0af..cc0b2e6655e64633c77db09334ee9edee2ad8cb2 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -13,6 +13,7 @@
 #define AIDGE_CPU_DATA_TENSORIMPL_H_
 
 #include "aidge/backend/TensorImpl.hpp"
+#include "aidge/data/DataType.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
diff --git a/include/aidge/data/Elts.hpp b/include/aidge/data/Elts.hpp
index bc4a225fc87a44e10a96da85f5b26048fbfad98c..b2df11968e8c688729c93747cdf953d288b26aef 100644
--- a/include/aidge/data/Elts.hpp
+++ b/include/aidge/data/Elts.hpp
@@ -14,6 +14,7 @@
 
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/logger/EnumString.hpp"
 
 namespace Aidge {
 /**
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 3f609e54d1d160c4228c06b00e1a11e6b027fcbd..c8df815bbe294e90e86f125a804bde2db82a739b 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -27,6 +27,8 @@
 
 #include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/data/DataType.hpp"
+#include "aidge/data/DataFormat.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -85,11 +87,11 @@ class Tensor : public Data,
              typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
     Tensor(T val)
         : Data(Type),
-          mDataType(NativeType<VT>::type),
+          mDataType(NativeType_v<VT>),
           mDataFormat(DataFormat::Default),
           mDims({}),
           mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<VT>})(0, std::vector<std::size_t>())),
           mSize(1)
     {
         *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
@@ -114,10 +116,10 @@ class Tensor : public Data,
     template <typename T>
     Tensor(Vector<T> &&arr)
         : Data(Type),
-          mDataType(NativeType<T>::type),
+          mDataType(NativeType_v<T>),
           mDims({arr.data.size()}),
           mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {arr.data.size()})),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {arr.data.size()})),
           mSize(arr.data.size())
     {
         mImpl->copyFromHost(&arr.data[0], arr.data.size());
@@ -131,11 +133,11 @@ class Tensor : public Data,
     template <typename T, std::size_t SIZE_0>
     constexpr Tensor(Array1D<T, SIZE_0> &&arr)
         : Data(Type),
-          mDataType(NativeType<T>::type),
+          mDataType(NativeType_v<T>),
           mDataFormat(DataFormat::Default),
           mDims({SIZE_0}),
           mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0})),
           mSize(SIZE_0)
     {
         mImpl->copyFromHost(&arr.data[0], SIZE_0);
@@ -150,11 +152,11 @@ class Tensor : public Data,
     template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
     constexpr Tensor(Array2D<T, SIZE_0, SIZE_1> &&arr)
         : Data(Type),
-          mDataType(NativeType<T>::type),
+          mDataType(NativeType_v<T>),
           mDataFormat(DataFormat::Default),
           mDims({SIZE_0, SIZE_1}),
           mStrides({SIZE_1, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1})),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1})),
           mSize(SIZE_0 * SIZE_1) {
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
     }
@@ -169,11 +171,11 @@ class Tensor : public Data,
     template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
     constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr)
         : Data(Type),
-          mDataType(NativeType<T>::type),
+          mDataType(NativeType_v<T>),
           mDataFormat(DataFormat::Default),
           mDims({SIZE_0, SIZE_1, SIZE_2}),
           mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2})),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2})),
           mSize(SIZE_0 * SIZE_1 * SIZE_2) {
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
     }
@@ -189,11 +191,11 @@ class Tensor : public Data,
     template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
     constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr)
         : Data(Type),
-          mDataType(NativeType<T>::type),
+          mDataType(NativeType_v<T>),
           mDataFormat(DataFormat::Default),
           mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
           mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
           mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) {
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
     }
@@ -615,7 +617,7 @@ public:
 
     template <typename expectedType>
     const expectedType& get(std::size_t idx) const {
-        AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "Tensor::get<>({}): wrong data type, expected {}, got {}", idx, mDataType, NativeType<expectedType>::type);
+        AIDGE_ASSERT(NativeType_v<expectedType> == mDataType, "Tensor::get<>({}): wrong data type, expected {}, got {}", idx, mDataType, NativeType_v<expectedType>);
         AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "Tensor::get<>({}): can only be used for backends providing a valid host pointer.", idx);
         AIDGE_ASSERT(idx < mSize, "Tensor::get<>({}): idx out of range, tensor size {}", idx, mSize);
         return *reinterpret_cast<expectedType *>(mImpl->hostPtr(mImplOffset + idx));
@@ -628,7 +630,7 @@ public:
 
     template <typename expectedType>
     void set(std::size_t idx, expectedType value){
-        AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
+        AIDGE_ASSERT(NativeType_v<expectedType> == mDataType, "wrong data type");
         AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "set() can only be used for backends providing a valid host pointer");
         AIDGE_ASSERT(idx < mSize, "idx out of range");
         expectedType* dataPtr = static_cast<expectedType*>(mImpl->hostPtr(mImplOffset + idx));
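
The NativeType<T>::type to NativeType_v<T> substitutions throughout this file follow the standard C++ _v variable-template convention (as in std::is_same_v). A minimal sketch of how such an alias is typically declared next to the existing trait; the specializations shown are illustrative, not the actual contents of aidge/data/DataType.hpp:

#include <cstdint>

namespace Aidge {
// Trait mapping C++ scalar types to DataType values (sketch; one specialization per supported type).
template <typename T> struct NativeType;
template <> struct NativeType<float>        { static constexpr DataType type = DataType::Float32; };
template <> struct NativeType<double>       { static constexpr DataType type = DataType::Float64; };
template <> struct NativeType<std::int32_t> { static constexpr DataType type = DataType::Int32;   };

// Variable template used by the new code: NativeType_v<T> is shorthand for NativeType<T>::type.
template <typename T>
constexpr DataType NativeType_v = NativeType<T>::type;
} // namespace Aidge

At the call sites this is purely a change of spelling: the constructors and the get()/set() assertions compare the same DataType values as before.
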
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index dc2b2059e75711572e0f7fa94cc6ccb9f58c970b..fbf1be0716c9210606efab51f747fc4aa47a48a1 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -19,9 +19,9 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/logger/EnumString.hpp"
 
 namespace Aidge {
 
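
GridSample.hpp drops the unused Attributes.hpp include and instead pulls in aidge/utils/logger/EnumString.hpp, presumably so its mode/padding enums can be printed through the EnumStrings/format_as machinery (the same header Elts.hpp starts including above). A compilable sketch of that pattern under assumed names; the enum and its string table are invented for illustration and are not the real GridSample definitions:

#include <fmt/format.h>

namespace demo {
enum class Mode { Linear, Nearest };                      // hypothetical stand-in enum

template <typename E> struct EnumStrings;                 // specialized once per printable enum
template <> struct EnumStrings<Mode> {
    static constexpr const char* data[] = {"Linear", "Nearest"};
};

constexpr const char* format_as(Mode m) {                 // fmt finds this overload via ADL
    return EnumStrings<Mode>::data[static_cast<int>(m)];
}
} // namespace demo

int main() {
    fmt::print("{}\n", demo::Mode::Nearest);              // prints "Nearest"
}

This relies on fmt's format_as extension point, available in recent fmt releases.
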
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index cdf8cc23250366c62a4102118a95e68cec28ec3d..02a692dea47a1ba270df5b7c710db0c07da2043a 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -13,6 +13,8 @@
 #include <pybind11/stl.h>
 
 #include "aidge/data/Data.hpp"
+#include "aidge/data/DataType.hpp"
+#include "aidge/data/DataFormat.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
@@ -66,7 +68,7 @@ void init_Data(py::module& m){
 
     m.def("format_as", (const char* (*)(DataType)) &format_as, py::arg("dt"));
     m.def("format_as", (const char* (*)(DataFormat)) &format_as, py::arg("df"));
-    m.def("get_data_format_transpose", &getDataFormatTranspose, py::arg("src"), py::arg("dst"));  
+    m.def("get_data_format_transpose", &getDataFormatTranspose, py::arg("src"), py::arg("dst"));
 
 }
 }
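
The two format_as bindings above disambiguate the DataType and DataFormat overloads with C-style function-pointer casts (the second line also loses its trailing whitespace). An equivalent, arguably clearer spelling inside init_Data would use pybind11's py::overload_cast, assuming both overloads return const char* as the casts indicate:

    m.def("format_as", py::overload_cast<DataType>(&format_as), py::arg("dt"));
    m.def("format_as", py::overload_cast<DataFormat>(&format_as), py::arg("df"));
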
diff --git a/src/graph/StaticAnalysis.cpp b/src/graph/StaticAnalysis.cpp
index 4309c5c37b72dea9f07f8e5a2e7ce7678090b2e2..b322450bf45ebb9f4e3430bc4a98475710a9c725 100644
--- a/src/graph/StaticAnalysis.cpp
+++ b/src/graph/StaticAnalysis.cpp
@@ -20,7 +20,7 @@
 #include <fmt/format.h>
 #include <fmt/ranges.h>
 
-#include "aidge/data/Data.hpp"  // Aidge::isDataTypeFloatingPoint
+#include "aidge/data/DataType.hpp"  // Aidge::isFloatingPoint
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
@@ -38,7 +38,7 @@ Aidge::OperatorStats::~OperatorStats() = default;
 std::size_t Aidge::OperatorStats::getNbArithmIntOps() const {
     const auto opTensor = dynamic_cast<const OperatorTensor*>(&mOp);
     if (opTensor) {
-        if (!isDataTypeFloatingPoint(opTensor->getOutput(0)->dataType())) {
+        if (!isFloatingPoint(opTensor->getOutput(0)->dataType())) {
             return getNbArithmOps();
         }
     }
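
The final hunk swaps the old isDataTypeFloatingPoint helper for isFloatingPoint, now provided by aidge/data/DataType.hpp (as the updated include comment notes). A minimal sketch of such a predicate, assuming the floating-point members of DataType are Float64, Float32, Float16 and BFloat16 (the exact set is an assumption):

constexpr bool isFloatingPoint(const DataType type) noexcept {
    return type == DataType::Float64 || type == DataType::Float32
        || type == DataType::Float16 || type == DataType::BFloat16;
}

With such a predicate, getNbArithmIntOps() counts an operator's arithmetic ops as integer ops only when its first output is not a floating-point tensor, which is exactly how the surrounding code reads.
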