diff --git a/unit_tests/operator/Test_MatMulImpl.cpp b/unit_tests/operator/Test_MatMulImpl.cpp
index 5df0528b5d24be04b324cd05d1f964a57c35b3ea..168418372d94a7de2aee7ed2e6a41d90c68531af 100644
--- a/unit_tests/operator/Test_MatMulImpl.cpp
+++ b/unit_tests/operator/Test_MatMulImpl.cpp
@@ -54,26 +54,22 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") {
             totalComputation += dim0*dim1*dim2;
 
             // Create and populate the array with random float values
-            float bigArray1[dim0][dim1];
-            for (int i = 0; i < dim0; ++i) {
-                for (int j = 0; j < dim1; ++j) {
-                    bigArray1[i][j] = dis(gen); // Generate random float value
-                }
+            float* bigArray1 = new float[dim0*dim1];
+            for (int i = 0; i < dim0*dim1; ++i) {
+                bigArray1[i] = dis(gen); // Generate random float value
             }
-            float bigArray2[dim1][dim2];
-            for (int i = 0; i < dim1; ++i) {
-                for (int j = 0; j < dim2; ++j) {
-                    bigArray2[i][j] = dis(gen); // Generate random float value
-                }
+            float* bigArray2 = new float[dim1*dim2];
+            for (int i = 0; i < dim1*dim2; ++i) {
+                bigArray2[i] = dis(gen); // Generate random float value
             }
-            float res[dim0][dim2];
+            float* res = new float[dim0*dim2];
             for (int i = 0; i < dim0; ++i) {
                 for (int j = 0; j < dim2; ++j) {
                     float sum = 0.0;
                     for (int k = 0; k < dim1; ++k) {
-                        sum += bigArray1[i][k] * bigArray2[k][j];
+                        sum += bigArray1[i*dim1+k] * bigArray2[k*dim2+j];
                     }
-                    res[i][j] = sum;
+                    res[i*dim2+j] = sum;
                 }
             }
 
@@ -82,17 +78,17 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") {
             std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(DataType::Float32);
             T1 -> resize({dim0,dim1});
             T1 -> setBackend("cpu");
-            T1 -> getImpl() -> setRawPtr(&bigArray1[0][0], dim0*dim1);
+            T1 -> getImpl() -> setRawPtr(bigArray1, dim0*dim1);
             // Convert bigArray2 to Tensor
             std::shared_ptr<Tensor> T2 = std::make_shared<Tensor>(DataType::Float32);
             T2 -> resize({dim1,dim2});
             T2 -> setBackend("cpu");
-            T2 -> getImpl() -> setRawPtr(&bigArray2[0][0], dim1*dim2);
+            T2 -> getImpl() -> setRawPtr(bigArray2, dim1*dim2);
             // convert res to Tensor
             std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32);
             Tres -> resize({dim0,dim2});
             Tres -> setBackend("cpu");
-            Tres -> getImpl() -> setRawPtr(&res[0][0], dim0*dim2);
+            Tres -> getImpl() -> setRawPtr(res, dim0*dim2);
 
             op->associateInput(0, T1);
             op->associateInput(1, T2);
@@ -122,31 +118,23 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") {
             totalComputation += dim0*dim1*dim2*dimNb;
 
             // Create and populate the array with random float values
-            float bigArray1[dimNb][dim0][dim1];
-            for (std::size_t n = 0; n < dimNb; ++n) {
-                for (std::size_t i = 0; i < dim0; ++i) {
-                    for (std::size_t j = 0; j < dim1; ++j) {
-                        bigArray1[n][i][j] = dis(gen); // Generate random float value
-                    }
-                }
+            float* bigArray1 = new float[dimNb*dim0*dim1];
+            for (std::size_t i = 0; i < dimNb*dim0*dim1; ++i) {
+                bigArray1[i] = dis(gen); // Generate random float value
             }
-            float bigArray2[dimNb][dim1][dim2];
-            for (std::size_t n = 0; n < dimNb; ++n) {
-                for (int i = 0; i < dim1; ++i) {
-                    for (int j = 0; j < dim2; ++j) {
-                        bigArray2[n][i][j] = dis(gen); // Generate random float value
-                    }
-                }
+            float* bigArray2 = new float[dimNb*dim1*dim2];
+            for (std::size_t i = 0; i < dimNb*dim1*dim2; ++i) {
+                bigArray2[i] = dis(gen); // Generate random float value
             }
-            float res[dimNb][dim0][dim2];
+            float* res = new float[dimNb*dim0*dim2];
             for (std::size_t n = 0; n < dimNb; ++n) {
                 for (int i = 0; i < dim0; ++i) {
                     for (int j = 0; j < dim2; ++j) {
                         float sum = 0.0;
                         for (int k = 0; k < dim1; ++k) {
-                            sum += bigArray1[n][i][k] * bigArray2[n][k][j];
+                            sum += bigArray1[n*dim0*dim1 + i*dim1 + k] * bigArray2[n*dim1*dim2 + k*dim2 + j];
                         }
-                        res[n][i][j] = sum;
+                        res[n*dim0*dim2+i*dim2+j] = sum;
                     }
                 }
             }
@@ -154,17 +142,17 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") {
             std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(DataType::Float32);
             T1 -> resize({dimNb,dim0,dim1});
             T1 -> setBackend("cpu");
-            T1 -> getImpl() -> setRawPtr(&bigArray1[0][0], dimNb*dim0*dim1);
+            T1 -> getImpl() -> setRawPtr(bigArray1, dimNb*dim0*dim1);
             // Convert bigArray2 to Tensor
             std::shared_ptr<Tensor> T2 = std::make_shared<Tensor>(DataType::Float32);
             T2 -> resize({dimNb,dim1,dim2});
             T2 -> setBackend("cpu");
-            T2 -> getImpl() -> setRawPtr(&bigArray2[0][0], dimNb*dim1*dim2);
+            T2 -> getImpl() -> setRawPtr(bigArray2, dimNb*dim1*dim2);
             // convert res to Tensor
             std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32);
             Tres -> resize({dimNb,dim0,dim2});
             Tres -> setBackend("cpu");
-            Tres -> getImpl() -> setRawPtr(&res[0][0], dimNb*dim0*dim2);
+            Tres -> getImpl() -> setRawPtr(res, dimNb*dim0*dim2);
 
             op->associateInput(0, T1);
             op->associateInput(1, T2);
@@ -195,36 +183,24 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") {
             totalComputation += dim0*dim1*dim2*dimNb1*dimNb2;
 
             // Create and populate the array with random float values
-            float bigArray1[dimNb1][dimNb2][dim0][dim1];
-            for (std::size_t n1 = 0; n1 < dimNb1; ++n1) {
-                for (std::size_t n2 = 0; n2 < dimNb2; ++n2) {
-                    for (std::size_t i = 0; i < dim0; ++i) {
-                        for (std::size_t j = 0; j < dim1; ++j) {
-                            bigArray1[n1][n2][i][j] = dis(gen); // Generate random float value
-                        }
-                    }
-                }
+            float* bigArray1 = new float[dimNb1*dimNb2*dim0*dim1];
+            for (std::size_t i = 0; i < dimNb1*dimNb2*dim0*dim1; ++i) {
+                bigArray1[i] = dis(gen); // Generate random float value
             }
-            float bigArray2[dimNb1][dimNb2][dim1][dim2];
-            for (std::size_t n1 = 0; n1 < dimNb1; ++n1) {
-                for (std::size_t n2 = 0; n2 < dimNb2; ++n2) {
-                    for (std::size_t i = 0; i < dim1; ++i) {
-                        for (std::size_t j = 0; j < dim2; ++j) {
-                            bigArray2[n1][n2][i][j] = dis(gen); // Generate random float value
-                        }
-                    }
-                }
+            float* bigArray2 = new float[dimNb1*dimNb2*dim1*dim2];
+            for (std::size_t i = 0; i < dimNb1*dimNb2*dim1*dim2; ++i) {
+                bigArray2[i] = dis(gen); // Generate random float value
             }
-            float res[dimNb1][dimNb2][dim0][dim2];
+            float* res = new float[dimNb1*dimNb2*dim0*dim2];
             for (std::size_t n1 = 0; n1 < dimNb1; ++n1) {
                 for (std::size_t n2 = 0; n2 < dimNb2; ++n2) {
                     for (int i = 0; i < dim0; ++i) {
                         for (int j = 0; j < dim2; ++j) {
                             float sum = 0.0;
                             for (int k = 0; k < dim1; ++k) {
-                                sum += bigArray1[n1][n2][i][k] * bigArray2[n1][n2][k][j];
+                                sum += bigArray1[n1*dimNb2*dim0*dim1+n2*dim0*dim1+i*dim1+k] * bigArray2[n1*dimNb2*dim1*dim2+n2*dim1*dim2+k*dim2+j];
                             }
-                            res[n1][n2][i][j] = sum;
+                            res[n1*dimNb2*dim0*dim2+n2*dim0*dim2+i*dim2+j] = sum;
                         }
                     }
                 }
@@ -233,17 +209,17 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") {
             std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(DataType::Float32);
             T1 -> resize({dimNb1,dimNb2,dim0,dim1});
             T1 -> setBackend("cpu");
-            T1 -> getImpl() -> setRawPtr(&bigArray1[0][0], dimNb1*dimNb2*dim0*dim1);
+            T1 -> getImpl() -> setRawPtr(bigArray1, dimNb1*dimNb2*dim0*dim1);
             // Convert bigArray2 to Tensor
             std::shared_ptr<Tensor> T2 = std::make_shared<Tensor>(DataType::Float32);
             T2 -> resize({dimNb1,dimNb2,dim1,dim2});
             T2 -> setBackend("cpu");
-            T2 -> getImpl() -> setRawPtr(&bigArray2[0][0], dimNb1*dimNb2*dim1*dim2);
+            T2 -> getImpl() -> setRawPtr(bigArray2, dimNb1*dimNb2*dim1*dim2);
             // convert res to Tensor
             std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32);
             Tres -> resize({dimNb1,dimNb2,dim0,dim2});
             Tres -> setBackend("cpu");
-            Tres -> getImpl() -> setRawPtr(&res[0][0], dimNb1*dimNb2*dim0*dim2);
+            Tres -> getImpl() -> setRawPtr(res, dimNb1*dimNb2*dim0*dim2);
 
             op->associateInput(0, T1);
             op->associateInput(1, T2);
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
index 0bbe59643df050759c209878135da67a0c94d6ce..953f291d107e8ea99c25b9aa1f06def6b3e381b2 100644
--- a/unit_tests/scheduler/Test_Scheduler.cpp
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -23,8 +23,8 @@
 #include "aidge/backend/cpu.hpp"
 #include "aidge/recipes/GraphViewHelper.hpp"
 
-using namespace Aidge;
 
+namespace Aidge {
 
 TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
     std::shared_ptr<Tensor> inputTensor =
@@ -433,4 +433,5 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(backward)", "[scheduler][backward
                                                                  {7.0f, 7.0f, 7.0f, 7.0f, 7.0f}}}}});
 
     REQUIRE_NOTHROW(scheduler.backward({targetOutput}));
-}
\ No newline at end of file
+}
+} // namespace Aidge
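
For reference, a minimal standalone sketch of the flattened-index batched matmul that the updated tests compute, assuming std::vector-managed storage in place of the raw new float[...] buffers; the batchedMatMul helper name is hypothetical and is not part of the Aidge test API:

#include <cstddef>
#include <random>
#include <vector>

// Batched matrix multiply on flat buffers, mirroring the index arithmetic
// used in the patched tests: element (n, i, j) of an N x R x C tensor lives
// at offset n*R*C + i*C + j in the flat array.
// batchedMatMul is a hypothetical helper used only for this sketch.
std::vector<float> batchedMatMul(const std::vector<float>& A,
                                 const std::vector<float>& B,
                                 std::size_t dimNb, std::size_t dim0,
                                 std::size_t dim1, std::size_t dim2) {
    std::vector<float> res(dimNb * dim0 * dim2, 0.0f);
    for (std::size_t n = 0; n < dimNb; ++n) {
        for (std::size_t i = 0; i < dim0; ++i) {
            for (std::size_t j = 0; j < dim2; ++j) {
                float sum = 0.0f;
                for (std::size_t k = 0; k < dim1; ++k) {
                    sum += A[n*dim0*dim1 + i*dim1 + k] * B[n*dim1*dim2 + k*dim2 + j];
                }
                res[n*dim0*dim2 + i*dim2 + j] = sum;
            }
        }
    }
    return res;
}

int main() {
    std::mt19937 gen(42);
    std::uniform_real_distribution<float> dis(0.0f, 1.0f);

    const std::size_t dimNb = 2, dim0 = 3, dim1 = 4, dim2 = 5;
    std::vector<float> A(dimNb * dim0 * dim1);
    std::vector<float> B(dimNb * dim1 * dim2);
    for (auto& v : A) v = dis(gen);
    for (auto& v : B) v = dis(gen);

    // A.data() / B.data() could be handed to an API expecting a raw float*;
    // the vectors release their memory automatically at end of scope.
    std::vector<float> res = batchedMatMul(A, B, dimNb, dim0, dim1, dim2);
    return res.empty() ? 1 : 0;
}

The flat offsets here are identical to those written in the patch; using RAII containers this way would also avoid a leak if the matching delete[] calls for the new float[...] buffers are not added elsewhere in the tests.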