diff --git a/unit_tests/optimizer/Test_Adam.cpp b/unit_tests/optimizer/Test_Adam.cpp
index caacb9ce581ae82729f7b3e08a6b7ba91519c025..cd171e3e856c9cc11c4575ef2b85207638d7f3af 100644
--- a/unit_tests/optimizer/Test_Adam.cpp
+++ b/unit_tests/optimizer/Test_Adam.cpp
@@ -130,8 +130,8 @@ TEST_CASE("[learning/Adam] update", "[Optimizer][Adam]") {
     for (std::size_t step = 0; step < 10; ++step) {
         // truth
-        float lr2 = lr * std::sqrt(1.0f - std::pow(beta2, step + 1)) / (1.0f - std::pow(beta1, step + 1));
-        float epsilon2 = epsilon * std::sqrt(1.0f - std::pow(beta2, step + 1));
+        float lr2 = lr * std::sqrt(1.0f - std::pow(beta2, static_cast<float>(step + 1))) / (1.0f - std::pow(beta1, static_cast<float>(step + 1)));
+        float epsilon2 = epsilon * std::sqrt(1.0f - std::pow(beta2, static_cast<float>(step + 1)));
         for (std::size_t t = 0; t < nb_tensors; ++t) {
             for (std::size_t i = 0; i < size_tensors[t]; ++i) {
                 val_momentum1_tensors[t][i] = beta1 * val_momentum1_tensors[t][i] + (1.0f - beta1) * val_grad_tensors[t][i];
@@ -146,7 +146,7 @@ TEST_CASE("[learning/Adam] update", "[Optimizer][Adam]") {
     for (std::size_t t = 0; t < nb_tensors; ++t) {
         const Tensor tmpt1= *(opt.parameters().at(t));
         const Tensor tmpt2= *tensors[t];
-        REQUIRE(approxEq<float,float>(tmpt2, tmpt1, 1e-5f, 1e-7f));
+        REQUIRE(approxEq<float,float>(tmpt2, tmpt1, 1e-5f, 1e-8f));
     }
 }
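
Context for reviewers. The `lr2` expression is the folded Adam bias correction, alpha_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t), from Kingma & Ba's Adam paper. The `static_cast<float>` matters because `std::pow` with an integral exponent promotes both arguments to double and returns double, so the test's reference values were computed at a higher precision than a float-only Adam implementation; casting the exponent to float selects the float overload and keeps the whole expression in single precision, which is presumably what allowed the absolute tolerance to be tightened from 1e-7f to 1e-8f. Below is a minimal standalone sketch of that overload difference; it is illustrative only and not part of the patch, with `beta2` and `step` chosen to mirror the test's variables.

#include <cmath>    // std::pow
#include <cstddef>  // std::size_t
#include <iostream>

int main() {
    const float beta2 = 0.999f;  // a typical Adam default, assumed here
    const std::size_t step = 9;  // last iteration of the test's 10-step loop

    // Integral exponent: std::pow promotes both arguments to double and
    // returns double, so this reference value carries double precision.
    const double viaDouble = std::pow(beta2, step + 1);

    // float exponent: selects the float overload, keeping the computation
    // in single precision, like a float-only Adam implementation.
    const float viaFloat = std::pow(beta2, static_cast<float>(step + 1));

    std::cout.precision(17);
    std::cout << "double path: " << viaDouble << '\n'
              << "float  path: " << static_cast<double>(viaFloat) << '\n';
}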