diff --git a/include/aidge/backend/cpu/data/Broadcasting.hpp b/include/aidge/backend/cpu/data/Broadcasting.hpp
index cb969cb54806a204072763a1672ee5266fb6347e..06b72dac830dd5f8201cc056e2849e13e66a72c9 100644
--- a/include/aidge/backend/cpu/data/Broadcasting.hpp
+++ b/include/aidge/backend/cpu/data/Broadcasting.hpp
@@ -12,38 +12,27 @@
 #ifndef AIDGE_CPU_DATA_BROADCASTING_H_
 #define AIDGE_CPU_DATA_BROADCASTING_H_
 
+#include <aidge/utils/Types.h>
 #include <vector>
 
 namespace Aidge {
 
-// Function to broadCast an input dims vector into the same size as an outputDims vector
-
-    /**
-     * @brief  Broadcast an input dims vector into the same size as an outputDims vector
-     * @details The missing dimensions would be completed by 1
-     * @param outputDims The vector of dimensions to follow 
-     * @param dimsToBroadcast The vecotr of dimensions to braodcast
-     * @return std::vector<std::size_t> a broadcasted vector by addding 1 on the missing dimensions.
-     */
-    std::vector<std::size_t> getBroadcastedDims(const std::vector<std::size_t>& outputDims, const std::vector<std::size_t>& dimsToBroadcast);
-
-    /**
-     * @brief Get a vector of indexes along the dimensions vector from a flattened index
-     * @param dimensions The vector of dimensions we want the indexes on
-     * @param idx The flattened index
-     * @return std::vector<std::size_t> vector of indexes along dimensions.
-     */
-    std::vector<std::size_t> getMultiDimIndices(const std::vector<std::size_t>& dimensions, std::size_t idx);
-
-    // Function to get a flattened index from multi-dimensional indices
-    /**
-     * @brief Get a flattened index the dimensions vector from a given vector of indices on a broadcasted vector
-     * @param dimensions The vector of dimensions we want the flattened index on
-     * @param indices The vector of indices we want to flatten
-     * @return std::size_t The flattened index on the dimensions vector
-     */
-    std::size_t getFlattenedIndex(const std::vector<std::size_t>& dimensions, const std::vector<std::size_t>& indices);
+// Function to broadcast an input dims vector into the same size as an
+// outputDims vector
+
+/**
+ * @brief  Broadcast an input dims vector into the same size as an outputDims
+ * vector
+ * @details The missing dimensions would be completed by 1
+ * @param outputDims The vector of dimensions to follow
+ * @param dimsToBroadcast The vector of dimensions to broadcast
+ * @return std::vector<std::size_t> a broadcasted vector by adding 1 on the
+ * missing dimensions.
+ */
+std::vector<std::size_t>
+getBroadcastedDims(const std::vector<std::size_t> &outputDims,
+                   const std::vector<std::size_t> &dimsToBroadcast);
 
 } // namespace Aidge
 
-#endif // AIDGE_CPU_DATA_BROADCASTING_H_
\ No newline at end of file
+#endif // AIDGE_CPU_DATA_BROADCASTING_H_
diff --git a/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp b/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp
index 4a4ba2a8999c4dc33fc743b5a3a7dad023f9e0dd..e1832509f42fa99f0183b67f2be51ec18d614aa3 100644
--- a/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp
@@ -14,6 +14,7 @@
 
 #include "aidge/utils/Registrar.hpp"
 
+#include <aidge/data/Tensor.hpp>
 #include <cstdint>     // std::int32_t, std::int64_t
 
 #include "aidge/backend/cpu/data/Broadcasting.hpp"
@@ -33,9 +34,9 @@ void AddImpl_cpu_forward_kernel(const std::vector<const void*> inputs_, const st
 	for (std::size_t oIndex = 0; oIndex < outputLength; ++oIndex)
 	{
         output[oIndex] = 0;
-		std::vector<size_t> indexes = getMultiDimIndices(outDims, oIndex);
+		std::vector<size_t> indexes = Tensor::getCoord(outDims, oIndex);
 		for(std::size_t iIndex = 0; iIndex < inputs.size(); ++iIndex) {
-			std::size_t idx = getFlattenedIndex(inputDims[iIndex], indexes);
+			std::size_t idx = Tensor::getIdx(inputDims[iIndex], indexes);
             output[oIndex] += inputs[iIndex][idx];
 		}
 	}
@@ -56,4 +57,4 @@ REGISTRAR(AddImpl_cpu,
     {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<std::int64_t, std::int64_t>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_CPU_KERNELS_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_CPU_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/AndImpl_kernels.hpp b/include/aidge/backend/cpu/operator/AndImpl_kernels.hpp
index 197e829f3527ce2f36c3ef5ee812a26477633703..f50ce178633271d9b359fb4b462ed92c662361da 100644
--- a/include/aidge/backend/cpu/operator/AndImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AndImpl_kernels.hpp
@@ -36,10 +36,10 @@ void AndImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
 
 	for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex)
 	{
-		std::vector<size_t> indexes = getMultiDimIndices(outputDims, oIndex);
+		std::vector<size_t> indexes = Tensor::getCoord(outputDims, oIndex);
 
-		std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
-		std::size_t idx2 = getFlattenedIndex(input2Dims, indexes);
+		std::size_t idx1 = Tensor::getIdx(input1Dims, indexes);
+		std::size_t idx2 = Tensor::getIdx(input2Dims, indexes);
 
         output[oIndex] = static_cast<O>(input_1[idx1] == input_2[idx2]);
     }
diff --git a/include/aidge/backend/cpu/operator/BitShiftImpl_kernels.hpp b/include/aidge/backend/cpu/operator/BitShiftImpl_kernels.hpp
index f815e946ea2e4abaff48a6e5155368d564e88e8c..be128f0db818c17f70a8a416bfaa7a580594b661 100644
--- a/include/aidge/backend/cpu/operator/BitShiftImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/BitShiftImpl_kernels.hpp
@@ -17,12 +17,10 @@
 #include <cstdint>     // std::int32_t, std::int64_t
 #include "aidge/operator/BitShift.hpp"
 
-#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/operator/BitShiftImpl.hpp"
 
-
-
 namespace Aidge {
+
 template <class I1, class I2, class O>
 void BitShiftImpl_cpu_forward_kernel(
                                 const BitShift_Op::BitShiftDirection direction,
@@ -38,13 +36,13 @@ void BitShiftImpl_cpu_forward_kernel(
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
 
-    const size_t totalElements = std::accumulate(outputDims.begin(), outputDims.end(), std::size_t(1), std::multiplies<std::size_t>());
+    const size_t totalElements = std::accumulate(outputDims.begin(), outputDims.end(), static_cast<std::size_t>(1), std::multiplies<std::size_t>());
     
     for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex)
     {
-        std::vector<size_t> indexes = getMultiDimIndices(outputDims, oIndex);
-        std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
-        std::size_t idx2 = getFlattenedIndex(input2Dims, indexes);
+        std::vector<size_t> indexes = Tensor::getCoord(outputDims, oIndex);
+        std::size_t idx1 = Tensor::getIdx(input1Dims, indexes);
+        std::size_t idx2 = Tensor::getIdx(input2Dims, indexes);
         if(direction == BitShift_Op::BitShiftDirection::right)
 
         {
@@ -67,4 +65,4 @@ REGISTRAR(BitShiftImpl_cpu,
 
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_BitShiftIMPL_KERNELS_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_BitShiftIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/MulImpl_kernels.hpp b/include/aidge/backend/cpu/operator/MulImpl_kernels.hpp
index c015b8f0182608fecd3da94220e9411decfd186c..3ec95df7a55706cc895b8f51bc24606216bad0a5 100644
--- a/include/aidge/backend/cpu/operator/MulImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl_kernels.hpp
@@ -39,10 +39,10 @@ void MulImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
 
 	for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex)
 	{
-		std::vector<size_t> indexes = getMultiDimIndices(outputDims, oIndex);
+		std::vector<size_t> indexes = Tensor::getCoord(outputDims, oIndex);
 
-		std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
-		std::size_t idx2 = getFlattenedIndex(input2Dims, indexes);
+		std::size_t idx1 = Tensor::getIdx(input1Dims, indexes);
+		std::size_t idx2 = Tensor::getIdx(input2Dims, indexes);
 
         output[oIndex] = input_1[idx1] * input_2[idx2];
     }
@@ -73,16 +73,16 @@ void MulImpl_cpu_backward_kernel(const std::size_t input0Length,
 
         for(auto i = 0U; i < input0Length; ++i)
         {
-            const auto indices = getMultiDimIndices(input1Dims, i);
-            const auto flattenedIndex = getFlattenedIndex(input1Dims, indices);
+            const auto indices = Tensor::getCoord(input1Dims, i);
+            const auto flattenedIndex = Tensor::getIdx(input1Dims, indices);
 
             grad_input_0[i] = input1[flattenedIndex] * grad_output[i];
         }
 
         for(std::size_t i = 0 ; i < grad0Length; ++i)
         {
-            const auto indices = getMultiDimIndices(input1Dims, i);
-            const auto flattenedIndex = getFlattenedIndex(input1Dims, indices);
+            const auto indices = Tensor::getCoord(input1Dims, i);
+            const auto flattenedIndex = Tensor::getIdx(input1Dims, indices);
 
             grad_input_1[flattenedIndex] += input0[i] * grad_output[i];
         }
@@ -92,16 +92,16 @@ void MulImpl_cpu_backward_kernel(const std::size_t input0Length,
 
         for(auto i = 0U; i < input1Length; ++i)
         {
-            const auto indices = getMultiDimIndices(input0Dims, i);
-            const auto flattenedIndex = getFlattenedIndex(input0Dims, indices);
+            const auto indices = Tensor::getCoord(input0Dims, i);
+            const auto flattenedIndex = Tensor::getIdx(input0Dims, indices);
 
             grad_input_1[i] = input0[flattenedIndex] * grad_output[i];
         }
 
         for(std::size_t i = 0 ; i < grad0Length; ++i)
         {
-            const auto indices = getMultiDimIndices(input0Dims, i);
-            const auto flattenedIndex = getFlattenedIndex(input0Dims, indices);
+            const auto indices = Tensor::getCoord(input0Dims, i);
+            const auto flattenedIndex = Tensor::getIdx(input0Dims, indices);
 
             grad_input_0[flattenedIndex] += input1[i] * grad_output[i];
         }
diff --git a/include/aidge/backend/cpu/operator/PowImpl_kernels.hpp b/include/aidge/backend/cpu/operator/PowImpl_kernels.hpp
index ab9b2ccc7b823842decd044b90a5c6364cedc9c9..5ab96d37fa084dcd836670cf38fff7594bf8d876 100644
--- a/include/aidge/backend/cpu/operator/PowImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl_kernels.hpp
@@ -13,9 +13,9 @@
 #define AIDGE_CPU_OPERATOR_POWIMPL_KERNELS_H_
 
 #include "aidge/utils/Registrar.hpp"
+#include <aidge/data/Tensor.hpp>
 #include <cmath>
 
-#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
 
 namespace Aidge {
@@ -31,13 +31,13 @@ void PowImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
 
-    std::size_t totalElements = std::accumulate(outputDims.cbegin(), outputDims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+    std::size_t totalElements = std::accumulate(outputDims.cbegin(), outputDims.cend(), static_cast<std::size_t>(1), std::multiplies<std::size_t>());
 	for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex) 
 	{
-		std::vector<std::size_t> indexes = getMultiDimIndices(outputDims, oIndex);
+		std::vector<std::size_t> indexes = Tensor::getCoord(outputDims, oIndex);
 
-		std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
-		std::size_t idx2 = getFlattenedIndex(input2Dims, indexes);
+		std::size_t idx1 = Tensor::getIdx(input1Dims, indexes);
+		std::size_t idx2 = Tensor::getIdx(input2Dims, indexes);
 		
         output[oIndex] = std::pow(input_1[idx1], input_2[idx2]);
 	}
@@ -59,18 +59,18 @@ void PowImpl_cpu_backward_kernel(const std::vector<std::size_t>& input0Dims,
     const O* gradOut = static_cast<const O*>(gradOutput_);
 
     // Fill input grads with zeros
-	std::size_t input0Elements = std::accumulate(input0Dims.cbegin(), input0Dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+	std::size_t input0Elements = std::accumulate(input0Dims.cbegin(), input0Dims.cend(), static_cast<std::size_t>(1), std::multiplies<std::size_t>());
 	std::fill(grad0, grad0 + input0Elements, I1(0));
-	std::size_t input1Elements = std::accumulate(input1Dims.cbegin(), input1Dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+	std::size_t input1Elements = std::accumulate(input1Dims.cbegin(), input1Dims.cend(), static_cast<std::size_t>(1), std::multiplies<std::size_t>());
 	std::fill(grad1, grad1 + input1Elements, I2(0));
 
-	std::size_t totalElements = std::accumulate(outputDims.cbegin(), outputDims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+	std::size_t totalElements = std::accumulate(outputDims.cbegin(), outputDims.cend(), static_cast<std::size_t>(1), std::multiplies<std::size_t>());
     for (size_t oIndex = 0; oIndex < totalElements; ++oIndex)
     {
         // Compute indexes in inputs 0 and 1 to support broadcasting
-        std::vector<std::size_t> indexes = getMultiDimIndices(outputDims, oIndex);
-        std::size_t idx0 = getFlattenedIndex(input0Dims, indexes);
-        std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
+        std::vector<std::size_t> indexes = Tensor::getCoord(outputDims, oIndex);
+        std::size_t idx0 = Tensor::getIdx(input0Dims, indexes);
+        std::size_t idx1 = Tensor::getIdx(input1Dims, indexes);
 
         // grad0 = grad_output * (input1 * pow(input0, (input1 -1)))
         grad0[idx0] += gradOut[oIndex]*input1[idx1]* std::pow(input0[idx0], input1[idx1]-1);
diff --git a/include/aidge/backend/cpu/operator/SubImpl_kernels.hpp b/include/aidge/backend/cpu/operator/SubImpl_kernels.hpp
index 0486ed2105b23e95f9cdfcda578e14900fcb2c8e..bb85a1abb959c54795bed6d41f1010d1d35f673f 100644
--- a/include/aidge/backend/cpu/operator/SubImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SubImpl_kernels.hpp
@@ -42,9 +42,9 @@ void SubImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
 
 	for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex)
 	{
-		std::vector<size_t> indexes = getMultiDimIndices(outputDims, oIndex);
-		std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
-		std::size_t idx2 = getFlattenedIndex(input2Dims, indexes);
+		std::vector<size_t> indexes = Tensor::getCoord(outputDims, oIndex);
+		std::size_t idx1 = Tensor::getIdx(input1Dims, indexes);
+		std::size_t idx2 = Tensor::getIdx(input2Dims, indexes);
         output[oIndex] = input_1[idx1] - input_2[idx2];
 	}
 }
diff --git a/src/data/Broadcasting.cpp b/src/data/Broadcasting.cpp
index 22977aa772e3f3f4810a59ff1fc024cc21c66bd1..80fc3aa5e8c67190e295fcc80e695b2da4e70536 100644
--- a/src/data/Broadcasting.cpp
+++ b/src/data/Broadcasting.cpp
@@ -10,37 +10,19 @@
  ********************************************************************************/
 
 #include "aidge/backend/cpu/data/Broadcasting.hpp"
+#include <aidge/utils/Types.h>
 
-std::vector<std::size_t> Aidge::getBroadcastedDims(const std::vector<std::size_t>& outputDims, const std::vector<std::size_t>& dimsToBroadcast){
-    std::vector<std::size_t> broadcastedDims(outputDims.size(), 1);
-		for(int j=dimsToBroadcast.size()-1; j>=0; --j)
-		{
-			std::size_t idx = outputDims.size() - (dimsToBroadcast.size()-j);
-			broadcastedDims[idx] = dimsToBroadcast[j];
-		}
-    return broadcastedDims;
-}
-
-std::vector<std::size_t> Aidge::getMultiDimIndices(const std::vector<std::size_t>& dimensions, std::size_t idx){
-    std::vector<std::size_t> indices(dimensions.size(), 0);
-
-    for (int i = dimensions.size() - 1; i >= 0; --i) {
-        indices[i] = idx % dimensions[i];
-        idx /= dimensions[i];
-    }
+namespace Aidge {
 
-    return indices;
-}
-
-std::size_t Aidge::getFlattenedIndex(const std::vector<std::size_t>& dimensions, const std::vector<std::size_t>& indices){
-    std::size_t flattenedIdx = 0;
-    std::size_t stride = 1;
-
-    for (int i = dimensions.size() - 1; i >= 0; --i) {
-        std::size_t idx = dimensions[i]>1 ? indices[i] : 0;
-        flattenedIdx += idx * stride;
-        stride *= dimensions[i];
+std::vector<std::size_t>
+getBroadcastedDims(const std::vector<std::size_t> &outputDims,
+                   const std::vector<std::size_t> &dimsToBroadcast) {
+    std::vector<std::size_t> broadcastedDims(outputDims.size(), 1);
+    for (int j = dimsToBroadcast.size() - 1; j >= 0; --j) {
+        std::size_t idx = outputDims.size() - (dimsToBroadcast.size() - j);
+        broadcastedDims[idx] = dimsToBroadcast[j];
     }
-    return flattenedIdx;
+    return broadcastedDims;
 }
 
+} // namespace Aidge