diff --git a/unit_tests/learningRate/Test_LRScheduler.cpp b/unit_tests/learningRate/Test_LRScheduler.cpp
index 206f23e1e5ae273faad4391b621e9a2d56212a2b..12c7f698bcf67b2f53f2d98adebbe304cf1bc198 100644
--- a/unit_tests/learningRate/Test_LRScheduler.cpp
+++ b/unit_tests/learningRate/Test_LRScheduler.cpp
@@ -16,11 +16,11 @@
 #include <vector>
 
 // #include "aidge/data/Tensor.hpp"
-#include "aidge/optimizer/LR/LRScheduler.hpp"
-#include "aidge/optimizer/LR/LRSchedulerList.hpp"
+#include "aidge/learning/learningRate/LRScheduler.hpp"
+#include "aidge/learning/learningRate/LRSchedulerList.hpp"
 
 namespace Aidge {
-TEST_CASE("[core/optimizer/LR] LRSchduler(computeOutputDims)", "[LRScheduler]") {
+TEST_CASE("[learning/LR] Construction & evolution", "[LRScheduler]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
@@ -37,7 +37,7 @@ TEST_CASE("[core/optimizer/LR] LRSchduler(computeOutputDims)", "[LRScheduler]")
         const float truth = initValDist(gen);
 
         // create learning rate scheduler
-        LRScheduler myLR = ConstantLR(truth);
+        LRScheduler myLR = learning::ConstantLR(truth);
 
         // prediction
         std::vector<float> profile = myLR.lr_profiling(nbSteps);
@@ -61,7 +61,7 @@ TEST_CASE("[core/optimizer/LR] LRSchduler(computeOutputDims)", "[LRScheduler]")
         const std::size_t nbSteps = nbStepsDist(gen);
         const float gamma = gammaDist(gen);
         const std::size_t stepSize = stepSizeDist(gen);
-        LRScheduler myLR = StepLR(initialLR, stepSize, gamma);
+        LRScheduler myLR = learning::StepLR(initialLR, stepSize, gamma);
 
         // truth
         std::vector<float> truth(nbSteps);
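
For reference, here is a minimal, self-contained usage sketch of the relocated API. It assumes only what is visible in the hunks above: the new header paths, the move of the ConstantLR and StepLR factories from the global Aidge namespace into Aidge::learning, and an lr_profiling(nbSteps) method returning std::vector<float>. It is an illustration of the post-patch API, not part of the patch itself.

    // Hypothetical standalone example; signatures taken from the diff above.
    #include <cstddef>
    #include <iostream>
    #include <vector>

    #include "aidge/learning/learningRate/LRScheduler.hpp"
    #include "aidge/learning/learningRate/LRSchedulerList.hpp"

    int main() {
        // Constant schedule: every step keeps the initial learning rate.
        Aidge::LRScheduler constant = Aidge::learning::ConstantLR(0.001f);

        // Step schedule: per the StepLR(initialLR, stepSize, gamma) call in
        // the test, the rate is scaled by gamma after each stepSize steps.
        Aidge::LRScheduler stepped = Aidge::learning::StepLR(0.001f, 10, 0.5f);

        // lr_profiling(n) returns the learning-rate values for the next n
        // steps, as used by the test's "prediction" check.
        const std::vector<float> profile = stepped.lr_profiling(25);
        for (std::size_t i = 0; i < profile.size(); ++i) {
            std::cout << "step " << i << ": lr = " << profile[i] << '\n';
        }
        return 0;
    }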