From 1040710ab837242b47eb2e8e3a31cc2145705ead Mon Sep 17 00:00:00 2001
From: Antoni Olivier <olivier.antoni@cea.fr>
Date: Tue, 25 Jun 2024 09:11:08 +0200
Subject: [PATCH] Make clean

---
 unit_tests/optimizer/Test_Adam.cpp | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/unit_tests/optimizer/Test_Adam.cpp b/unit_tests/optimizer/Test_Adam.cpp
index 5f51672..ef5482d 100644
--- a/unit_tests/optimizer/Test_Adam.cpp
+++ b/unit_tests/optimizer/Test_Adam.cpp
@@ -23,9 +23,6 @@
 #include "aidge/learning/learningRate/LRSchedulerList.hpp"
 #include "aidge/learning/optimizer/Optimizer.hpp"
 #include "aidge/learning/optimizer/Adam.hpp"
-//#include "aidge/backend/cpu/operator/AddImpl.hpp"
-//#include "aidge/backend/cpu/operator/MulImpl.hpp"
-//#include "aidge/backend/cpu/operator/SubImpl.hpp"
 #include "aidge/utils/TensorUtils.hpp"
 
 namespace Aidge {
@@ -118,15 +115,15 @@ TEST_CASE("[learning/Adam] update", "[Optimizer][Adam]") {
     }
 
     // truth
-    for (std::size_t step = 0; step < 10; ++step) {
+    for (std::size_t step = 0; step < 10; ++step) {
         for (std::size_t t = 0; t < nb_tensors; ++t) {
             for (std::size_t i = 0; i < size_tensors[t]; ++i) {
                 val_momentum1_tensors[t][i] = beta1 * val_momentum1_tensors[t][i] + (1.0f - beta1) * val_grad_tensors[t][i];
-                val_momentum2_tensors[t][i] = beta2 * val_momentum2_tensors[t][i] + (1.0f - beta2) * val_grad_tensors[t][i] * val_grad_tensors[t][i];
+                val_momentum2_tensors[t][i] = beta2 * val_momentum2_tensors[t][i] + (1.0f - beta2) * val_grad_tensors[t][i] * val_grad_tensors[t][i];
                 val_tensors[t][i] = val_tensors[t][i] - lr * val_momentum1_tensors[t][i] / (1.0f - std::pow(beta1, step + 1)) / (std::sqrt(val_momentum2_tensors[t][i] / (1.0f - std::pow(beta2, step + 1))) + epsilon);
-            }
+            }
         }
         // optimizer
         opt.update();
--
GitLab
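
Note (not part of the patch): the "truth" loop touched by the second hunk computes the standard bias-corrected Adam update that the test compares against opt.update(). Below is a minimal, self-contained C++ sketch of that same update for a single scalar weight; the variable names (w, g, m1, m2, lr, beta1, beta2, epsilon) and the initial values are chosen here for illustration and are not taken from Test_Adam.cpp.

    // Illustrative sketch of the bias-corrected Adam update, assuming a
    // single scalar weight w with a constant gradient g. All names and
    // values are hypothetical, not from the Aidge test.
    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    int main() {
        float w = 0.5f, g = 0.1f;      // weight and its gradient
        float m1 = 0.0f, m2 = 0.0f;    // first and second moment estimates
        const float lr = 0.01f, beta1 = 0.9f, beta2 = 0.999f, epsilon = 1e-8f;

        for (std::size_t step = 0; step < 10; ++step) {
            // exponential moving averages of the gradient and squared gradient
            m1 = beta1 * m1 + (1.0f - beta1) * g;
            m2 = beta2 * m2 + (1.0f - beta2) * g * g;
            // bias correction, then the parameter update
            const float m1_hat = m1 / (1.0f - std::pow(beta1, step + 1));
            const float m2_hat = m2 / (1.0f - std::pow(beta2, step + 1));
            w = w - lr * m1_hat / (std::sqrt(m2_hat) + epsilon);
            std::printf("step %zu: w = %f\n", step, w);
        }
        return 0;
    }

The test's loop is the same computation applied element-wise over every tensor, with the bias correction folded directly into the update expression instead of using separate m1_hat/m2_hat temporaries.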