diff --git a/unit_tests/optimizer/Test_Adam.cpp b/unit_tests/optimizer/Test_Adam.cpp
index 5f516728057df42cfb55ad2fb0172dfd611cfdeb..ef5482d448f49e3c44f7fa97fb8ae67f40e9cb44 100644
--- a/unit_tests/optimizer/Test_Adam.cpp
+++ b/unit_tests/optimizer/Test_Adam.cpp
@@ -23,9 +23,6 @@
 #include "aidge/learning/learningRate/LRSchedulerList.hpp"
 #include "aidge/learning/optimizer/Optimizer.hpp"
 #include "aidge/learning/optimizer/Adam.hpp"
-//#include "aidge/backend/cpu/operator/AddImpl.hpp"
-//#include "aidge/backend/cpu/operator/MulImpl.hpp"
-//#include "aidge/backend/cpu/operator/SubImpl.hpp"
 #include "aidge/utils/TensorUtils.hpp"
 
 namespace Aidge {
@@ -118,15 +115,15 @@ TEST_CASE("[learning/Adam] update", "[Optimizer][Adam]") {
         }
 
         // truth
-		for (std::size_t step = 0; step < 10; ++step) {
+        for (std::size_t step = 0; step < 10; ++step) {
             for (std::size_t t = 0; t < nb_tensors; ++t) {
                 for (std::size_t i = 0; i < size_tensors[t]; ++i) {
                     val_momentum1_tensors[t][i] = beta1 * val_momentum1_tensors[t][i] + (1.0f - beta1) * val_grad_tensors[t][i];
-				    val_momentum2_tensors[t][i] = beta2 * val_momentum2_tensors[t][i] + (1.0f - beta2) * val_grad_tensors[t][i] * val_grad_tensors[t][i];
+                    val_momentum2_tensors[t][i] = beta2 * val_momentum2_tensors[t][i] + (1.0f - beta2) * val_grad_tensors[t][i] * val_grad_tensors[t][i];
                     val_tensors[t][i] = val_tensors[t][i]
                                       - lr * val_momentum1_tensors[t][i] / (1.0f -  std::pow(beta1, step + 1))
                                            / (std::sqrt(val_momentum2_tensors[t][i] / (1.0f - std::pow(beta2, step + 1))) + epsilon);
-				}
+                }
             }
             // optimizer
             opt.update();
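
Note for reviewers: the "truth" loop in this hunk reproduces the standard bias-corrected Adam update that `opt.update()` is expected to match. Below is a minimal standalone sketch of the same arithmetic for reference; the function name and signature are illustrative only and are not part of the Aidge API.

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// Illustrative sketch of one bias-corrected Adam step, mirroring the test's truth loop.
// `step` is 0-based, as in the loop above.
void adamStep(std::vector<float>& w, const std::vector<float>& grad,
              std::vector<float>& m, std::vector<float>& v,
              std::size_t step, float lr, float beta1, float beta2, float epsilon) {
    for (std::size_t i = 0; i < w.size(); ++i) {
        m[i] = beta1 * m[i] + (1.0f - beta1) * grad[i];            // first moment estimate
        v[i] = beta2 * v[i] + (1.0f - beta2) * grad[i] * grad[i];  // second moment estimate
        const float mHat = m[i] / (1.0f - std::pow(beta1, step + 1));  // bias correction
        const float vHat = v[i] / (1.0f - std::pow(beta2, step + 1));
        w[i] -= lr * mHat / (std::sqrt(vHat) + epsilon);
    }
}
```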