diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt index 5984524fdc8c596641e505897d16e12de78024cc..7e63fb2be1a8aa9e7955b768c736302e9f9e3920 100644 --- a/unit_tests/CMakeLists.txt +++ b/unit_tests/CMakeLists.txt @@ -1,12 +1,19 @@ -Include(FetchContent) +find_package(Catch2 QUIET) -FetchContent_Declare( - Catch2 - GIT_REPOSITORY https://github.com/catchorg/Catch2.git - GIT_TAG v3.7.1 # or a later release -) +if(NOT Catch2_FOUND) + message(STATUS "Catch2 not found in system, retrieving from git") + Include(FetchContent) -FetchContent_MakeAvailable(Catch2) + FetchContent_Declare( + Catch2 + GIT_REPOSITORY https://github.com/catchorg/Catch2.git + GIT_TAG v3.7.1 # or a later release + ) + + FetchContent_MakeAvailable(Catch2) +else() + message(STATUS "Found system Catch2 version ${Catch2_VERSION}") +endif() file(GLOB_RECURSE src_files "*.cpp") diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp index fd938f10a947d1520600a1d00022eeb970cd76e6..2bc1e7d4c6f8a7cfbae8807e3021f9c5dd89fff6 100644 --- a/unit_tests/data/Test_TensorImpl.cpp +++ b/unit_tests/data/Test_TensorImpl.cpp @@ -9,19 +9,23 @@ * ********************************************************************************/ -#include <catch2/catch_test_macros.hpp> -#include <cstddef> // std::size_t -#include <cstdint> // std::uint16_t -#include <chrono> -#include <iostream> +#include <chrono> // std::micro, std::chrono::time_point, + // std::chrono::system_clock +#include <cstddef> // std::size_t +#include <cstdint> // std::int32_t, std::uint16_t #include <memory> -#include <numeric> // std::accumulate -#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution +#include <random> // std::random_device, std::mt19937 + // std::uniform_int_distribution, std::uniform_real_distribution +#include <vector> + +#include <catch2/catch_test_macros.hpp> +#include <fmt/core.h> -#include "aidge/data/Tensor.hpp" #include "aidge/backend/cpu/data/TensorImpl.hpp" 
-#include "aidge/operator/Add.hpp" #include "aidge/backend/cpu/operator/AddImpl.hpp" +#include "aidge/data/Data.hpp" +#include "aidge/operator/Add.hpp" +#include "aidge/utils/ArrayHelpers.hpp" namespace Aidge { @@ -35,8 +39,7 @@ TEST_CASE("Test addition of Tensors","[TensorImpl][Add][Data]") { std::uniform_int_distribution<int> boolDist(0,1); // Create MatMul Operator - std::shared_ptr<Node> mySub = Add(); - auto op = std::static_pointer_cast<OperatorTensor>(mySub-> getOperator()); + std::shared_ptr<Add_Op> op = std::make_shared<Add_Op>(); op->setDataType(DataType::Float32); op->setBackend("cpu"); diff --git a/unit_tests/operator/Test_AddImpl.cpp b/unit_tests/operator/Test_AddImpl.cpp index bca4025705cb1c851dcf3e9accbf016c4535120a..720c4ca2aa59ec3c265cc416871fa76c83dfc7fe 100644 --- a/unit_tests/operator/Test_AddImpl.cpp +++ b/unit_tests/operator/Test_AddImpl.cpp @@ -9,12 +9,16 @@ * ********************************************************************************/ +#include <memory> + #include <catch2/catch_test_macros.hpp> +#include "aidge/backend/cpu/operator/AddImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" +#include "aidge/graph/Node.hpp" #include "aidge/operator/Add.hpp" - -#include "aidge/backend/cpu.hpp" +#include "aidge/utils/ArrayHelpers.hpp" using namespace Aidge; diff --git a/unit_tests/operator/Test_AndImpl.cpp b/unit_tests/operator/Test_AndImpl.cpp index 053bb3ea4ed913bd388f3ae049c4d6402ad58d59..c2309dce5f32862ad9aeceaf98430b75ab7be6ef 100644 --- a/unit_tests/operator/Test_AndImpl.cpp +++ b/unit_tests/operator/Test_AndImpl.cpp @@ -9,13 +9,19 @@ * ********************************************************************************/ +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t +#include <memory> +#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution, std::uniform_real_distribution + #include <catch2/catch_test_macros.hpp> -#include <random> // std::random_device, 
std::mt19937, std::uniform_real_distribution +#include "aidge/backend/cpu/operator/AndImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" +#include "aidge/graph/Node.hpp" #include "aidge/operator/And.hpp" - -#include "aidge/backend/cpu.hpp" +#include "aidge/utils/ArrayHelpers.hpp" using namespace Aidge; @@ -180,7 +186,7 @@ TEST_CASE("[cpu/operator] And(forward)", "[And][CPU]") { } // }); // - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<int,2> {{10, 20}}); + std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<int,2> {{10, 20}}); std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,1,3,3,2> { { // { // diff --git a/unit_tests/operator/Test_ArgMaxImpl.cpp b/unit_tests/operator/Test_ArgMaxImpl.cpp index 9915d90423e976db1bdd2a694a2cfd7beb380cee..894697f65a6f73af27a568b994c1dd2dc6b118f3 100644 --- a/unit_tests/operator/Test_ArgMaxImpl.cpp +++ b/unit_tests/operator/Test_ArgMaxImpl.cpp @@ -9,17 +9,20 @@ * ********************************************************************************/ -#include <catch2/catch_test_macros.hpp> +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t #include <memory> -#include <numeric> // std::accumulate -#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution +#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution, std::uniform_real_distribution + +#include <catch2/catch_test_macros.hpp> +#include <fmt/core.h> +#include "aidge/backend/cpu/operator/ArgMaxImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" +#include "aidge/graph/Node.hpp" #include "aidge/operator/ArgMax.hpp" -#include "aidge/operator/Conv.hpp" - -#include "aidge/backend/cpu.hpp" -#include "aidge/utils/TensorUtils.hpp" +#include "aidge/utils/ArrayHelpers.hpp" using namespace Aidge; @@ -118,8 +121,8 @@ TEST_CASE("[cpu/operator] ArgMax(forward)", "[ArgMax][CPU]") { SECTION("Axis 2") { Tensor 
myOutput = Tensor(Array3D<float,2,3, 1> { - { - { + { + { {3.0}, {2.0}, {1.0} @@ -144,7 +147,7 @@ TEST_CASE("[cpu/operator] ArgMax(forward)", "[ArgMax][CPU]") { SECTION("Axis 2 with keep_dims false") { Tensor myOutput = Tensor(Array2D<float,2,3> { - { + { { 3.0, 2.0, 1.0 }, { 2.0, 1.0, 0.0 } } @@ -196,10 +199,11 @@ TEST_CASE("[cpu/operator] ArgMax(forward)", "[ArgMax][CPU]") { op->associateInput(0,myInput); op->setDataType(DataType::Float32); op->setBackend("cpu"); - std::cout << " ............... "<< std::endl; + fmt::print("{:.^20}\n", "forward"); myArgMax->forward(); + fmt::print("{:.^20}\n", "result"); op->getOutput(0)->print(); - std::cout <<"------"<<std::endl; + fmt::print("{:.^20}\n", "truth"); myOutput.print(); REQUIRE(*(op->getOutput(0)) == myOutput); diff --git a/unit_tests/operator/Test_Atan.cpp b/unit_tests/operator/Test_Atan.cpp index 9548e35d81b0423125424a4198d82558c4e57df4..b9438db0b38642e8c49e46451544a68714ac4de6 100644 --- a/unit_tests/operator/Test_Atan.cpp +++ b/unit_tests/operator/Test_Atan.cpp @@ -9,14 +9,18 @@ * ********************************************************************************/ +#include <cmath> // std::abs +#include <cstddef> // std::size_t +#include <memory> + #include <catch2/catch_test_macros.hpp> +#include "aidge/backend/cpu/operator/AtanImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" +#include "aidge/graph/Node.hpp" #include "aidge/operator/Atan.hpp" - -#include "aidge/backend/cpu.hpp" - -#include <memory> +#include "aidge/utils/ArrayHelpers.hpp" using namespace Aidge; @@ -32,7 +36,7 @@ TEST_CASE("[cpu/operator] Atan(forward)") { 0.09486303, 0.16007232, 0.40421187, 0.4102045, 0.39055911}}); std::shared_ptr<Node> myAtan = Atan(); - auto op = std::static_pointer_cast<OperatorTensor>(myAtan->getOperator()); + auto op = std::static_pointer_cast<Atan_Op>(myAtan->getOperator()); op->associateInput(0, input0); op->setDataType(DataType::Float32); op->setBackend("cpu"); @@ -61,7 +65,7 @@ 
TEST_CASE("[cpu/operator] Atan(forward)") { {0.75377332, 0.77411225, 0.32928031}}}}); std::shared_ptr<Node> myAtan = Atan(); - auto op = std::static_pointer_cast<OperatorTensor>(myAtan->getOperator()); + auto op = std::static_pointer_cast<Atan_Op>(myAtan->getOperator()); op->associateInput(0, input0); op->setDataType(DataType::Float32); op->setBackend("cpu"); diff --git a/unit_tests/operator/Test_AvgPoolingImpl.cpp b/unit_tests/operator/Test_AvgPoolingImpl.cpp index aaa2757830c245275d02792a7a5a2eb1db32d7b8..372febc61d04c2ba983dd33f009fe5bf1d2908a0 100644 --- a/unit_tests/operator/Test_AvgPoolingImpl.cpp +++ b/unit_tests/operator/Test_AvgPoolingImpl.cpp @@ -9,14 +9,18 @@ * ********************************************************************************/ -#include <catch2/catch_test_macros.hpp> +#include <cmath> // std::abs +#include <cstddef> // std::size_t #include <memory> -#include <cstdlib> +#include <catch2/catch_test_macros.hpp> + +#include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" +#include "aidge/graph/Node.hpp" #include "aidge/operator/AvgPooling.hpp" - -#include "aidge/backend/cpu.hpp" +#include "aidge/utils/ArrayHelpers.hpp" using namespace Aidge; @@ -53,7 +57,7 @@ TEST_CASE("[cpu/operator] AvgPooling(forward)", "[AvgPooling][CPU]") { }); SECTION("Stride") { std::shared_ptr<Node> myAvgPool = AvgPooling({2,2}, "mycdw", {2,2}); - auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool -> getOperator()); + auto op = std::static_pointer_cast<AvgPooling_Op<2>>(myAvgPool -> getOperator()); std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,2,2,2> { { @@ -90,7 +94,7 @@ TEST_CASE("[cpu/operator] AvgPooling(forward)", "[AvgPooling][CPU]") { } }); std::shared_ptr<Node> myAvgPool = AvgPooling({3,3}, "mycdw", {3,3}); - auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool -> getOperator()); + auto op = std::static_pointer_cast<AvgPooling_Op<2>>(myAvgPool -> 
getOperator()); Tensor myOutput = Array4D<float,1,1,1,1> { {{{{(0.3745 + 0.9507 + 0.7320 + 0.5987 + 0.1560 + 0.1560 + 0.0581 + 0.8662 + 0.6011)/9.0}}}} diff --git a/unit_tests/operator/Test_BatchNormImpl.cpp b/unit_tests/operator/Test_BatchNormImpl.cpp index 1b42c90dd09d63cd319f19bd29751da816db06c0..26e964f9386e19a6070d75a4106b6b46a29e455d 100644 --- a/unit_tests/operator/Test_BatchNormImpl.cpp +++ b/unit_tests/operator/Test_BatchNormImpl.cpp @@ -9,20 +9,24 @@ * ********************************************************************************/ -#include <catch2/catch_test_macros.hpp> +#include <cmath> // std::abs +#include <cstddef> // std::size_t #include <memory> +#include <catch2/catch_test_macros.hpp> + +#include "aidge/backend/cpu/operator/BatchNormImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" +#include "aidge/graph/Node.hpp" #include "aidge/operator/BatchNorm.hpp" -#include "aidge/scheduler/SequentialScheduler.hpp" - -#include "aidge/backend/cpu.hpp" +#include "aidge/utils/ArrayHelpers.hpp" using namespace Aidge; TEST_CASE("[cpu/operator] BatchNorm(forward)", "[BatchNorm][CPU]") { std::shared_ptr<Node> myBatchNorm = BatchNorm<2>(3, 0.00001F, 0.1F, "mybatchnorm"); - auto op = std::static_pointer_cast<OperatorTensor>(myBatchNorm -> getOperator()); + auto op = std::static_pointer_cast<BatchNorm_Op<2>>(myBatchNorm -> getOperator()); std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array1D<float,3> {{0.9044, 0.3028, 0.0218}}); std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<float,3> {{0.1332, 0.7503, 0.0878}}); std::shared_ptr<Tensor> myMean = std::make_shared<Tensor>(Array1D<float,3> {{0.9931, 0.8421, 0.9936}}); diff --git a/unit_tests/operator/Test_BitShift.cpp b/unit_tests/operator/Test_BitShift.cpp index a52990bc7991a325ce151cf6634b0d5a831992c8..db97e8d30b5e7121b096f99f8722a69e6d4e367c 100644 --- a/unit_tests/operator/Test_BitShift.cpp +++ b/unit_tests/operator/Test_BitShift.cpp @@ -9,15 +9,20 @@ * 
********************************************************************************/ -#include <catch2/catch_test_macros.hpp> +#include <chrono> // std::micro, std::chrono::time_point, + // std::chrono::system_clock #include <cstddef> // std::size_t #include <cstdint> // std::uint16_t #include <chrono> -#include <iostream> #include <memory> -#include <numeric> +#include <numeric> #include <random> // std::random_device, std::mt19937, std::uniform_real_distribution -#include <iomanip> + +#include <catch2/catch_test_macros.hpp> +#include <fmt/core.h> + +#include "aidge/backend/cpu/data/TensorImpl.hpp" +#include "aidge/backend/cpu/operator/BitShiftImpl.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/operator/BitShift.hpp" #include "aidge/utils/TensorUtils.hpp" @@ -29,7 +34,7 @@ TEST_CASE("[cpu/operator] BitShift_TEST", "[BitShift][CPU]") { // Create a random number generator std::random_device rd; std::mt19937 gen(rd()); - std::uniform_int_distribution<int> valueDist(-15, 15); + std::uniform_int_distribution<int> valueDist(-15, 15); std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(5)); std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(3)); std::uniform_int_distribution<int> boolDist(0,1); @@ -131,8 +136,8 @@ TEST_CASE("[cpu/operator] BitShift_TEST", "[BitShift][CPU]") { } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {}μs\n", duration.count()); } SECTION("Test BitShift kernels with Broadcasting") { std::size_t number_of_operation = 0; @@ -194,7 +199,7 @@ TEST_CASE("[cpu/operator] BitShift_TEST", "[BitShift][CPU]") { } else { - result[idx_out + d] = array0[idx0] >> array1[idx1]; + result[idx_out + d] = array0[idx0] >> array1[idx1]; 
} } } @@ -222,12 +227,7 @@ TEST_CASE("[cpu/operator] BitShift_TEST", "[BitShift][CPU]") { duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); // comparison between truth and computed result - bool equiv = (approxEq<int>(*(op->getOutput(0)), *Tres)); - if(equiv == false) - { - std::cout << "Problem\n"; - } - REQUIRE(equiv); + REQUIRE(approxEq<int>(*(op->getOutput(0)), *Tres)); delete[] array0; delete[] array1; @@ -236,8 +236,8 @@ TEST_CASE("[cpu/operator] BitShift_TEST", "[BitShift][CPU]") { const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>()); number_of_operation += nb_elements; } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {}μs\n", duration.count()); } } diff --git a/unit_tests/operator/Test_ClipImpl.cpp b/unit_tests/operator/Test_ClipImpl.cpp index 45c8da5bf7ecc84fad6b3e694fe204540f579af3..1a7aa5e548a4e6b93c0052758fb9210fd8b14818 100644 --- a/unit_tests/operator/Test_ClipImpl.cpp +++ b/unit_tests/operator/Test_ClipImpl.cpp @@ -9,36 +9,37 @@ * ********************************************************************************/ -#include <catch2/catch_test_macros.hpp> +#include <algorithm> // std::max, std::min +#include <chrono> #include <cstddef> // std::size_t #include <cstdint> // std::uint16_t -#include <chrono> -#include <iostream> -#include <vector> -#include <algorithm> -#include <iomanip> #include <memory> -#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution +#include <random> // std::random_device, std::mt19937 + // std::uniform_int_distribution, std::uniform_real_distribution +#include <vector> + +#include <catch2/catch_test_macros.hpp> +#include 
<fmt/core.h> +#include "aidge/backend/cpu/operator/ClipImpl.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/operator/Clip.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/TensorUtils.hpp" -#include "aidge/backend/cpu.hpp" void ComputeClipBackward(const std::vector<float>& vec1, std::vector<float>& vec2, float min, float max) { if (vec1.size() != vec2.size()) { - std::cerr << "Vectors should have the same sizes." << std::endl; + fmt::print(stderr, "Vectors should have the same sizes.\n"); return; } - for (size_t i = 0; i < vec1.size(); ++i) { + for (std::size_t i = 0; i < vec1.size(); ++i) { if (vec1[i] < min || vec1[i] > max) { vec2[i] = 0.0f; } } } -namespace Aidge +namespace Aidge { TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") { @@ -47,8 +48,8 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<float> dis(0.0, 10.0); - std::uniform_real_distribution<float> dismin(0.0, 4.5); - std::uniform_real_distribution<float> dismax(5.5, 10.0); + std::uniform_real_distribution<float> dismin(0.0, 4.5); + std::uniform_real_distribution<float> dismax(5.5, 10.0); std::uniform_int_distribution<std::size_t> distDims(5,15); std::uniform_int_distribution<std::size_t> distNbMatrix(1, 5); @@ -71,7 +72,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") // Create and populate the array with random float values float* Array = new float[dim0*dim1]; - for (int i = 0; i < dim0*dim1; ++i) { + for (std::size_t i = 0; i < dim0*dim1; ++i) { Array[i] = dis(gen); // Generate random float value } @@ -80,7 +81,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") TInput -> resize({dim0,dim1}); TInput -> setBackend("cpu"); TInput -> getImpl() -> setRawPtr(Array, dim0*dim1); - + float min = dismin(gen); std::shared_ptr<Tensor> Tmin = std::make_shared<Tensor>(DataType::Float32); Tmin -> resize({}); @@ -109,7 +110,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") 
op->setDataType(DataType::Float32); op->setBackend("cpu"); op->forwardDims(true); - + start = std::chrono::system_clock::now(); myClip->forward(); end = std::chrono::system_clock::now(); @@ -118,9 +119,9 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); } - std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl; - std::cout << "total time: " << duration.count() << std::endl; - } + fmt::print("INFO: multiplications over time spent: {}\n", totalComputation/duration.count()); + fmt::print("INFO: total time: {}\n", duration.count()); + } SECTION("Clip test with min >= max [Forward]") { std::size_t totalComputation = 0; for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { @@ -131,7 +132,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") // Create and populate the array with random float values float* Array = new float[dim0*dim1]; - for (int i = 0; i < dim0*dim1; ++i) { + for (std::size_t i = 0; i < dim0*dim1; ++i) { Array[i] = dis(gen); // Generate random float value } @@ -140,7 +141,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") TInput -> resize({dim0,dim1}); TInput -> setBackend("cpu"); TInput -> getImpl() -> setRawPtr(Array, dim0*dim1); - + float min = dismax(gen); std::shared_ptr<Tensor> Tmin = std::make_shared<Tensor>(DataType::Float32); Tmin -> resize({}); @@ -169,7 +170,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") op->setDataType(DataType::Float32); op->setBackend("cpu"); op->forwardDims(true); - + start = std::chrono::system_clock::now(); myClip->forward(); end = std::chrono::system_clock::now(); @@ -178,13 +179,13 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); } - std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl; - std::cout << "total time: " << duration.count() << std::endl; - } + fmt::print("INFO: multiplications over time spent: 
{}\n", totalComputation/duration.count()); + fmt::print("INFO: total time: {}\n", duration.count()); + } SECTION("Clip with Clip Attr [Forward]") { std::size_t totalComputation = 0; - for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { float min = dismin(gen); @@ -200,7 +201,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") // Create and populate the array with random float values float* Array = new float[dim0*dim1]; - for (int i = 0; i < dim0*dim1; ++i) { + for (std::size_t i = 0; i < dim0*dim1; ++i) { Array[i] = dis(gen); // Generate random float value } // Convert Input to Tensor @@ -231,8 +232,8 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); } - std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl; - std::cout << "total time: " << duration.count() << std::endl; + fmt::print("INFO: multiplications over time spent: {}\n", totalComputation/duration.count()); + fmt::print("INFO: total time: {}\n", duration.count()); } SECTION("Simple clip test [Backward]") { std::size_t totalComputation = 0; @@ -243,13 +244,13 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") // generate Tensors dimensions const std::size_t dim0 = distDims(gen); const std::size_t dim1 = distDims(gen); - + totalComputation += dim0*dim1; // Create and populate the array with random float values float* Array = new float[dim0*dim1]; float* gradArray = new float[dim0*dim1]; - for (int i = 0; i < dim0*dim1; ++i) { + for (std::size_t i = 0; i < dim0*dim1; ++i) { Array[i] = dis(gen); // Generate random float value gradArray[i] = dis(gen); } @@ -264,7 +265,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") TInput -> resize({dim0,dim1}); TInput -> setBackend("cpu"); TInput -> getImpl() -> setRawPtr(Array, dim0*dim1); - + float min = dismin(gen); std::shared_ptr<Tensor> Tmin = std::make_shared<Tensor>(DataType::Float32); Tmin -> 
resize({}); @@ -296,7 +297,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") myClip->forward(); op->getOutput(0)->setGrad(TGrad); - + start = std::chrono::system_clock::now(); REQUIRE_NOTHROW(myClip->backward()); end = std::chrono::system_clock::now(); @@ -310,9 +311,9 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]") duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); REQUIRE(GT1 == BackwardTensorVec); } - std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl; - std::cout << "total time: " << duration.count() << std::endl; + fmt::print("INFO: multiplications over time spent: {}\n", totalComputation/duration.count()); + fmt::print("INFO: total time: {}\n", duration.count()); } } -} // namespace Aidge +} // namespace Aidge } \ No newline at end of file diff --git a/unit_tests/operator/Test_ConstantOfShapeImpl.cpp b/unit_tests/operator/Test_ConstantOfShapeImpl.cpp index 42505d385fde7e72e09531f1607287ffc6978f75..8ec1669b92a5116999413cf55a8c5113363ef330 100644 --- a/unit_tests/operator/Test_ConstantOfShapeImpl.cpp +++ b/unit_tests/operator/Test_ConstantOfShapeImpl.cpp @@ -9,32 +9,27 @@ * ********************************************************************************/ -#include <algorithm> -#include <chrono> -#include <cmath> -#include <cstddef> // std::size_t -#include <cstdint> // std::uint16_t -#include <iostream> +#include <chrono> // std::micro, std::chrono::time_point, + // std::chrono::system_clock +#include <cstddef> // std::size_t +#include <cstdint> // std::int64_t, std::uint16_t #include <memory> -#include <numeric> // std::accumulate -#include <ostream> -#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution +#include <random> // std::random_device, std::mt19937 + // std::uniform_int_distribution, std::uniform_real_distribution +#include <vector> -#include "catch2/internal/catch_compiler_capabilities.hpp" -#include "catch2/internal/catch_enforce.hpp" 
#include <catch2/catch_test_macros.hpp> #include <catch2/generators/catch_generators_random.hpp> +#include "aidge/backend/cpu/data/TensorImpl.hpp" +#include "aidge/backend/cpu/operator/ConstantOfShapeImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" +#include "aidge/filler/Filler.hpp" #include "aidge/operator/ConstantOfShape.hpp" +#include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/TensorUtils.hpp" -#include <aidge/data/Data.hpp> -#include <aidge/data/half.hpp> -#include <aidge/filler/Filler.hpp> -#include <aidge/operator/OperatorTensor.hpp> -#include <aidge/operator/Reshape.hpp> -#include <aidge/utils/TensorUtils.hpp> -#include <aidge/utils/Types.h> +#include "aidge/utils/Types.h" namespace Aidge { TEST_CASE("[cpu/operator] ConstantOfShape", "[ConstantOfShape][CPU]") { @@ -62,7 +57,7 @@ TEST_CASE("[cpu/operator] ConstantOfShape", "[ConstantOfShape][CPU]") { result->setDataType(DataType::Int64); result->setBackend("cpu"); for (DimSize_t i = 0; i < result->size(); ++i) { - result->set<int64_t>(i, input_tensor_values_dist(gen)); + result->set<std::int64_t>(i, input_tensor_values_dist(gen)); } return result; }; diff --git a/unit_tests/operator/Test_DivImpl.cpp b/unit_tests/operator/Test_DivImpl.cpp index 5d7dfdf12032d4c444e38cda6d2a4298fc552b14..b03fe4aa91e96299f2a748026ee8ca5e5d57fb5c 100644 --- a/unit_tests/operator/Test_DivImpl.cpp +++ b/unit_tests/operator/Test_DivImpl.cpp @@ -9,17 +9,26 @@ * ********************************************************************************/ -#include <catch2/catch_test_macros.hpp> -#include <cstddef> // std::size_t -#include <cstdint> // std::uint16_t -#include <chrono> -#include <iostream> +#include <chrono> // std::micro, std::chrono::time_point, + // std::chrono::system_clock +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t +#include <functional> // std::multiplies #include <memory> -#include <numeric> // std::accumulate -#include <random> // std::random_device, 
std::mt19937, std::uniform_real_distribution +#include <numeric> // std::accumulate +#include <random> // std::random_device, std::mt19937 + // std::uniform_int_distribution, std::uniform_real_distribution +#include <vector> + +#include <catch2/catch_test_macros.hpp> +#include <fmt/core.h> +#include "aidge/backend/cpu/data/TensorImpl.hpp" +#include "aidge/backend/cpu/operator/DivImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/operator/Div.hpp" +#include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/TensorUtils.hpp" namespace Aidge { @@ -117,8 +126,8 @@ TEST_CASE("[cpu/operator] Div", "[Div][CPU]") { // with broadcasting } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {} μs\n", duration.count()); } SECTION("+1-D Tensor / +1-D Tensor - broadcasting") { @@ -212,8 +221,8 @@ TEST_CASE("[cpu/operator] Div", "[Div][CPU]") { const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>()); number_of_operation += nb_elements; } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {} μs\n", duration.count()); } SECTION("+1-D Tensor / 1-D Tensor") { std::size_t number_of_operation = 0; @@ -308,8 +317,8 @@ TEST_CASE("[cpu/operator] Div", "[Div][CPU]") { number_of_operation += nb_elements; } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: 
" << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {} μs\n", duration.count()); } } } diff --git a/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp b/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp index 43af544871ad6c2ac319de09f3c6fce5065e60d5..63f8d3269cdb25a6d84c3e936d8f124b0964962d 100644 --- a/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp +++ b/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp @@ -9,34 +9,29 @@ * ********************************************************************************/ -#include <aidge/utils/Types.h> -#include <catch2/catch_test_macros.hpp> #include <chrono> -#include <cmath> #include <cstddef> // std::size_t #include <cstdint> // std::uint16_t -#include <iostream> +#include <functional> // std::multiplies #include <memory> #include <numeric> // std::accumulate -#include <ostream> -#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution +#include <random> // std::random_device, std::mt19937 + // std::uniform_int_distribution, std::uniform_real_distribution +#include <vector> + +#include <catch2/catch_test_macros.hpp> +#include <fmt/core.h> +#include "aidge/backend/cpu/data/TensorImpl.hpp" +#include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/operator/GlobalAveragePooling.hpp" #include "aidge/utils/TensorUtils.hpp" - -// debug print function -void print_tensor(Aidge::Tensor &T) { - // Print tensors - std::cout << "Tensor : size =  ["; - for (auto &dim : T.dims()) { - std::cout << dim << " , "; - } - std::cout << "]" << std::endl; - T.print(); -} +#include "aidge/utils/Types.h" namespace Aidge { + TEST_CASE("[cpu/operator] GlobalAveragePooling", "[GlobalAveragePooling][CPU]") { constexpr std::uint16_t NBTRIALS = 10; @@ -54,9 +49,7 @@ TEST_CASE("[cpu/operator] 
GlobalAveragePooling", std::size_t(7)); // Create MatGlobalAveragePooling Operator - std::shared_ptr<Node> globAvgPool = GlobalAveragePooling(); - auto op = - std::static_pointer_cast<OperatorTensor>(globAvgPool->getOperator()); + std::shared_ptr<GlobalAveragePooling_Op> op = std::make_shared<GlobalAveragePooling_Op>(); op->setDataType(DataType::Float32); op->setBackend("cpu"); @@ -99,7 +92,7 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling", T0->resize(dims); T0->getImpl()->setRawPtr(array0, nb_elements); - REQUIRE_THROWS(globAvgPool->forward()); + REQUIRE_THROWS(op->forward()); delete[] array0; } @@ -158,7 +151,7 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling", op->forwardDims(); start = std::chrono::system_clock::now(); - REQUIRE_NOTHROW(globAvgPool->forward()); + REQUIRE_NOTHROW(op->forward()); end = std::chrono::system_clock::now(); duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); @@ -231,7 +224,7 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling", op->forwardDims(); start = std::chrono::system_clock::now(); - REQUIRE_NOTHROW(globAvgPool->forward()); + REQUIRE_NOTHROW(op->forward()); end = std::chrono::system_clock::now(); duration += std::chrono::duration_cast<std::chrono::microseconds>( end - start); @@ -358,7 +351,7 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling", Tres->getImpl()->setRawPtr(result, out_nb_elems); op->forwardDims(); start = std::chrono::system_clock::now(); - REQUIRE_NOTHROW(globAvgPool->forward()); + REQUIRE_NOTHROW(op->forward()); end = std::chrono::system_clock::now(); duration += std::chrono::duration_cast<std::chrono::microseconds>( end - start); @@ -547,7 +540,7 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling", Tres->getImpl()->setRawPtr(result, out_nb_elems); op->forwardDims(); start = std::chrono::system_clock::now(); - REQUIRE_NOTHROW(globAvgPool->forward()); + REQUIRE_NOTHROW(op->forward()); end = std::chrono::system_clock::now(); duration += 
std::chrono::duration_cast<std::chrono::microseconds>( end - start); @@ -561,12 +554,9 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling", delete[] result; } } - std::cout << "GlobalAveragePooling total execution time : " - << duration.count() << "µs" << std::endl; - std::cout << "Number of operations : " << number_of_operation - << std::endl; - std::cout << "Operation / µs = " << number_of_operation / duration.count() - << std::endl; + fmt::print("INFO: GlobalAveragePooling total execution time: {}µs\n", duration.count()); + fmt::print("INFO: Number of operations : {}\n", number_of_operation); + fmt::print("INFO: Operation / µs = {}\n", number_of_operation / duration.count()); } } } diff --git a/unit_tests/operator/Test_MatMulImpl.cpp b/unit_tests/operator/Test_MatMulImpl.cpp index d6e934b4dc8d84e8a595eb74d1af9d2c68c892d1..daef47b32ffcca880a1bf2438e9ee9c35adbb2c8 100644 --- a/unit_tests/operator/Test_MatMulImpl.cpp +++ b/unit_tests/operator/Test_MatMulImpl.cpp @@ -9,21 +9,26 @@ * ********************************************************************************/ -#include <catch2/catch_test_macros.hpp> -#include <cstddef> // std::size_t -#include <cstdint> // std::uint16_t -#include <chrono> -#include <iostream> +#include <chrono> // std::micro, std::chrono::time_point, + // std::chrono::system_clock, std::chrono::duration +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t #include <memory> -#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution +#include <random> // std::random_device, std::mt19937 + // std::uniform_int_distribution, std::uniform_real_distribution +#include <vector> + +#include <catch2/catch_test_macros.hpp> +#include <fmt/core.h> +#include "aidge/backend/cpu/data/TensorImpl.hpp" +#include "aidge/backend/cpu/operator/MatMulImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/operator/MatMul.hpp" #include "aidge/operator/OperatorTensor.hpp" #include 
"aidge/utils/TensorUtils.hpp" -#include "aidge/backend/cpu/operator/MatMulImpl.hpp" - namespace Aidge { TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") { @@ -106,8 +111,8 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") { delete[] bigArray2; delete[] res; } - std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl; - std::cout << "total time: " << duration.count() << std::endl; + fmt::print("INFO: number of multiplications over time spent: {}\n", (totalComputation / duration.count())); + fmt::print("INFO: total time: {} μs\n", duration.count()); } SECTION("3-D Tensors") { @@ -174,8 +179,8 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") { delete[] bigArray2; delete[] res; } - std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl; - std::cout << "total time: " << duration.count() << std::endl; + fmt::print("INFO: number of multiplications over time spent: {}\n", (totalComputation / duration.count())); + fmt::print("INFO: total time: {} μs\n", duration.count()); } SECTION("4-D Tensors") { @@ -244,8 +249,8 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") { delete[] bigArray2; delete[] res; } - std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl; - std::cout << "total time: " << duration.count() << std::endl; + fmt::print("INFO: number of multiplications over time spent: {}\n", (totalComputation / duration.count())); + fmt::print("INFO: total time: {} μs\n", duration.count()); } SECTION("+2-D / 1-D") { diff --git a/unit_tests/operator/Test_MulImpl.cpp b/unit_tests/operator/Test_MulImpl.cpp index 3378861d0d3d7e74e7867c2765a0b09069fa8caf..925b9f2059518d434b74a0e2fd0cde79b334c54e 100644 --- a/unit_tests/operator/Test_MulImpl.cpp +++ b/unit_tests/operator/Test_MulImpl.cpp @@ -9,351 +9,338 @@ * ********************************************************************************/ -#include 
<catch2/catch_test_macros.hpp> -#include <cstddef> // std::size_t -#include <cstdint> // std::uint16_t -#include <chrono> -#include <iostream> +#include <chrono> // std::micro, std::chrono::time_point, + // std::chrono::system_clock, +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t +#include <functional> // std::multiplies #include <memory> -#include <numeric> // std::accumulate -#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution +#include <numeric> // std::accumulate +#include <random> // std::random_device, std::mt19937 + // std::uniform_int_distribution, std::uniform_real_distribution +#include <vector> + +#include <catch2/catch_test_macros.hpp> +#include <fmt/core.h> +#include "aidge/backend/cpu/data/TensorImpl.hpp" +#include "aidge/backend/cpu/operator/MulImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/operator/Mul.hpp" +#include "aidge/utils/ArrayHelpers.hpp" +#include "aidge/utils/Log.hpp" #include "aidge/utils/TensorUtils.hpp" namespace Aidge { - TEST_CASE("[CPU/Operator] Mul Backward", "[Mul][CPU][Backward]") - { - std::shared_ptr<Node> myMul = Mul(); - auto op = std::static_pointer_cast<OperatorTensor>(myMul->getOperator()); - op->setDataType(DataType::Float32); - op->setBackend("cpu"); +TEST_CASE("[CPU/Operator] Mul Backward", "[Mul][CPU][Backward]") +{ + using aif32 = cpptype_t<DataType::Float32>; + std::shared_ptr<Mul_Op> op = std::make_shared<Mul_Op>(); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); - SECTION("Case 1: 2D and 1D tensors") { - const auto T0 = std::make_shared<Tensor>(Array2D<float,2,3>( + SECTION("Case 1: 2D and 1D tensors") { + const auto T0 = std::make_shared<Tensor>(Array2D<aif32,2,3>( + { { - { - {1,2,3},{4,5,6} - } + {1,2,3},{4,5,6} } - )); - - const auto T1 = std::make_shared<Tensor>(Array1D<float,3>( - {0.1,0.2,0.3} - )); + } + )); - T0->setDataType(DataType::Float32); - T0->setBackend("cpu"); - 
T1->setDataType(DataType::Float32); - T1->setBackend("cpu"); + const auto T1 = std::make_shared<Tensor>(Array1D<aif32,3>( + {0.1,0.2,0.3} + )); - op->getOutput(0)->setGrad(std::make_shared<Tensor>(Array2D<float,2,3>({{{1.0,1.0,1.0},{1.0,1.0,1.0}}}))); + op->getOutput(0)->setGrad(std::make_shared<Tensor>(Array2D<aif32,2,3>({{{1.0,1.0,1.0},{1.0,1.0,1.0}}}))); - op->associateInput(0,T0); - op->associateInput(1,T1); - op->forwardDims(); + op->associateInput(0,T0); + op->associateInput(1,T1); + op->forwardDims(); - myMul->forward(); - myMul->backward(); + op->forward(); + op->backward(); - auto T0Grad = std::make_shared<Tensor>(Array2D<float, 2,3>({{{0.1,0.2,0.3},{0.1, 0.2, 0.3}}})); - auto T1Grad = std::make_shared<Tensor>(Array1D<float, 3>({5,7,9})); + const Tensor T0Grad = Array2D<aif32, 2, 3>({{{0.1,0.2,0.3},{0.1, 0.2, 0.3}}}); + const Tensor T1Grad = Array1D<aif32, 3>({5,7,9}); - REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *T0Grad)); - REQUIRE(approxEq<float>(*(op->getInput(1)->grad()), *T1Grad)); - } + REQUIRE(approxEq<aif32>(*(op->getInput(0)->grad()), T0Grad)); + REQUIRE(approxEq<aif32>(*(op->getInput(1)->grad()), T1Grad)); + } - SECTION("Case 2: 3D and 1D tensors") { - const auto T0 = std::make_shared<Tensor>(Array3D<float,2,2,3>( + SECTION("Case 2: 3D and 1D tensors") { + const auto T0 = std::make_shared<Tensor>(Array3D<aif32,2,2,3>( + { { { - { - {1.0, 2.0, 3.0}, - {4.0, 5.0, 6.0} - }, - { - {7.0, 8.0, 9.0}, - {10.0, 11.0, 12.0} - } + {1.0, 2.0, 3.0}, + {4.0, 5.0, 6.0} + }, + { + {7.0, 8.0, 9.0}, + {10.0, 11.0, 12.0} } } - )); - - const auto T1 = std::make_shared<Tensor>(Array1D<float, 3>({0.3,0.2,0.1})); + } + )); - const auto newGrad = std::make_shared<Tensor>(Array3D<float,2,2,3>( - { - { - { - {1, 1, 1}, - {1, 1, 1} - }, - { - {1, 1, 1}, - {1, 1, 1} - } - } - } - )); + const auto T1 = std::make_shared<Tensor>(Array1D<aif32, 3>({0.3,0.2,0.1})); - const auto expectedGrad0 = std::make_shared<Tensor>(Array3D<float,2,2,3>( + const auto newGrad = 
std::make_shared<Tensor>(Array3D<aif32,2,2,3>( { { { - {0.3, 0.2, 0.1}, - {0.3, 0.2, 0.1} + {1, 1, 1}, + {1, 1, 1} }, { - {0.3, 0.2, 0.1}, - {0.3, 0.2, 0.1} + {1, 1, 1}, + {1, 1, 1} } } } )); - const auto expectedGrad1 = std::make_shared<Tensor>(Array1D<float,3>( - {22.0, 26.0, 30.0} - )); - - for(auto T: {T0, T1, newGrad, expectedGrad0, expectedGrad1}) + const Tensor expectedGrad0 = Array3D<aif32,2,2,3>( { - T->setBackend("cpu") ; - T->setDataType(DataType::Float32); + { + { + {0.3, 0.2, 0.1}, + {0.3, 0.2, 0.1} + }, + { + {0.3, 0.2, 0.1}, + {0.3, 0.2, 0.1} + } + } } + ); - op->associateInput(0, T0); - op->associateInput(1, T1); - op->getOutput(0)->setGrad(newGrad); - op->forwardDims(); + const Tensor expectedGrad1 = Array1D<aif32,3>( + {22.0, 26.0, 30.0} + ); - myMul->backward(); + op->associateInput(0, T0); + op->associateInput(1, T1); + op->getOutput(0)->setGrad(newGrad); + op->forwardDims(); - REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad0)); - REQUIRE(approxEq<float>(*(op->getInput(1)->grad()), *expectedGrad1)); - } + op->backward(); + + REQUIRE(approxEq<aif32>(*(op->getInput(0)->grad()), expectedGrad0)); + REQUIRE(approxEq<aif32>(*(op->getInput(1)->grad()), expectedGrad1)); + } - SECTION("Case 3: 4D and 2D tensors") { - const auto T0 = std::make_shared<Tensor>(Array4D<float,2, 2, 3, 3>( + SECTION("Case 3: 4D and 2D tensors") { + const auto T0 = std::make_shared<Tensor>(Array4D<aif32,2, 2, 3, 3>( + { { { { - { - {1.0, 2.0, 3.0}, - {4.0, 5.0, 6.0}, - {7.0, 8.0, 9.0} - }, - { - {10.0, 11.0, 12.0}, - {13.0, 14.0, 15.0}, - {16.0, 17.0, 18.0} - } + {1.0, 2.0, 3.0}, + {4.0, 5.0, 6.0}, + {7.0, 8.0, 9.0} }, { - { - {19.0, 20.0, 21.0}, - {22.0, 23.0, 24.0}, - {25.0, 26.0, 27.0} - }, - { - {28.0, 29.0, 30.0}, - {31.0, 32.0, 33.0}, - {34.0, 35.0, 36.0} - } + {10.0, 11.0, 12.0}, + {13.0, 14.0, 15.0}, + {16.0, 17.0, 18.0} + } + }, + { + { + {19.0, 20.0, 21.0}, + {22.0, 23.0, 24.0}, + {25.0, 26.0, 27.0} + }, + { + {28.0, 29.0, 30.0}, + {31.0, 32.0, 
33.0}, + {34.0, 35.0, 36.0} } } } - )); + } + )); - const auto T1 = std::make_shared<Tensor>(Array2D<float, 3,3>( + const auto T1 = std::make_shared<Tensor>(Array2D<aif32, 3,3>( + { { - { - {0.5,0.3,0.1}, - {0.4,0.2,0.6}, - {0.7,0.8,0.9} - } + {0.5,0.3,0.1}, + {0.4,0.2,0.6}, + {0.7,0.8,0.9} } - )); + } + )); - const auto newGrad = std::make_shared<Tensor>(Array4D<float,2, 2, 3, 3>( + const auto newGrad = std::make_shared<Tensor>(Array4D<aif32,2, 2, 3, 3>( + { { { { - { - {1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0} - }, - { - {1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0} - } + {1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0} }, { - { - {1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0} - }, - { - {1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0} - } + {1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0} } - } - } - )); - - const auto expectedGrad0 = std::make_shared<Tensor>(Array4D<float,2,2,3,3>( - { + }, { { - { - {0.5, 0.3, 0.1}, - {0.4, 0.2, 0.6}, - {0.7, 0.8, 0.9} - }, - { - {0.5, 0.3, 0.1}, - {0.4, 0.2, 0.6}, - {0.7, 0.8, 0.9} - } + {1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0} }, { - { - {0.5, 0.3, 0.1}, - {0.4, 0.2, 0.6}, - {0.7, 0.8, 0.9} - }, - { - {0.5, 0.3, 0.1}, - {0.4, 0.2, 0.6}, - {0.7, 0.8, 0.9} - } + {1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0} } } } - )); + } + )); - const auto expectedGrad1 = std::make_shared<Tensor>(Array2D<float,3, 3>( + const Tensor expectedGrad0 = Array4D<aif32,2,2,3,3>( + { { { - {58.0, 62.0, 66.0}, - {70.0, 74.0, 78.0}, - {82.0, 86.0, 90.0} + { + {0.5, 0.3, 0.1}, + {0.4, 0.2, 0.6}, + {0.7, 0.8, 0.9} + }, + { + {0.5, 0.3, 0.1}, + {0.4, 0.2, 0.6}, + {0.7, 0.8, 0.9} + } + }, + { + { + {0.5, 0.3, 0.1}, + {0.4, 0.2, 0.6}, + {0.7, 0.8, 0.9} + }, + { + {0.5, 0.3, 0.1}, + {0.4, 0.2, 0.6}, + {0.7, 0.8, 0.9} + } } } - )); + } + ); - for(const auto T: {T0, T1, newGrad, expectedGrad0, expectedGrad1}) + const Tensor expectedGrad1 = Array2D<aif32,3, 3>( { - T->setBackend("cpu") ; - 
T->setDataType(DataType::Float32); + { + {58.0, 62.0, 66.0}, + {70.0, 74.0, 78.0}, + {82.0, 86.0, 90.0} + } } + ); - op->associateInput(0, T0); - op->associateInput(1, T1); - op->getOutput(0)->setGrad(newGrad); - op->forwardDims(); + op->associateInput(0, T0); + op->associateInput(1, T1); + op->getOutput(0)->setGrad(newGrad); + op->forwardDims(); - myMul->backward(); + op->backward(); - REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad0)); - REQUIRE(approxEq<float>(*(op->getInput(1)->grad()), *expectedGrad1)); - } + REQUIRE(approxEq<aif32>(*(op->getInput(0)->grad()), expectedGrad0)); + REQUIRE(approxEq<aif32>(*(op->getInput(1)->grad()), expectedGrad1)); + } - SECTION("Case 4: 3D and 2D tensors") { - const auto T0 = std::make_shared<Tensor>(Array3D<float, 2, 3, 4>( + SECTION("Case 4: 3D and 2D tensors") { + const auto T0 = std::make_shared<Tensor>(Array3D<aif32, 2, 3, 4>( + { { { - { - {1.0, 2.0, 3.0, 4.0}, - {5.0, 6.0, 7.0, 8.0}, - {9.0, 10.0, 11.0, 12.0}, - }, - { - {13.0, 14.0, 15.0, 16.0}, - {17.0, 18.0, 19.0, 20.0}, - {21.0, 22.0, 23.0, 24.0}, - } - } - } - )); - - const auto T1 = std::make_shared<Tensor>(Array2D<float, 3, 4>( - { + {1.0, 2.0, 3.0, 4.0}, + {5.0, 6.0, 7.0, 8.0}, + {9.0, 10.0, 11.0, 12.0}, + }, { - {0.1, 0.2, 0.3, 0.4}, - {0.5, 0.6, 0.7, 0.8}, - {0.9, 1.0, 1.1, 1.2} + {13.0, 14.0, 15.0, 16.0}, + {17.0, 18.0, 19.0, 20.0}, + {21.0, 22.0, 23.0, 24.0}, } } - )); + } + )); - const auto newGrad = std::make_shared<Tensor>(Array3D<float, 2,3,4>( + const auto T1 = std::make_shared<Tensor>(Array2D<aif32, 3, 4>( + { { - { - { - {1.0, 1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0, 1.0}, - }, - { - {1.0, 1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0, 1.0}, - } - } + {0.1, 0.2, 0.3, 0.4}, + {0.5, 0.6, 0.7, 0.8}, + {0.9, 1.0, 1.1, 1.2} } - )); + } + )); - const auto expectedGrad0 = std::make_shared<Tensor>(Array3D<float,2,3,4>( + const auto newGrad = std::make_shared<Tensor>(Array3D<aif32, 2,3,4>( + { { { - { - {0.1, 0.2, 
0.3, 0.4}, - {0.5, 0.6, 0.7, 0.8}, - {0.9, 1.0, 1.1, 1.2} - }, - { - {0.1, 0.2, 0.3, 0.4}, - {0.5, 0.6, 0.7, 0.8}, - {0.9, 1.0, 1.1, 1.2} - } + {1.0, 1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0, 1.0}, + }, + { + {1.0, 1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0, 1.0}, } } - )); + } + )); - const auto expectedGrad1 = std::make_shared<Tensor>(Array2D<float,3, 4>( + const Tensor expectedGrad0 = Array3D<aif32,2,3,4>( + { { { - {14.0, 16.0, 18.0, 20.0}, - {22.0, 24.0, 26.0, 28.0}, - {30.0, 32.0, 34.0, 36.0} + {0.1, 0.2, 0.3, 0.4}, + {0.5, 0.6, 0.7, 0.8}, + {0.9, 1.0, 1.1, 1.2} + }, + { + {0.1, 0.2, 0.3, 0.4}, + {0.5, 0.6, 0.7, 0.8}, + {0.9, 1.0, 1.1, 1.2} } } - )); + } + ); - for(const auto T: {T0, T1, newGrad, expectedGrad0, expectedGrad1}) + const Tensor expectedGrad1 = Array2D<aif32,3,4>( { - T->setBackend("cpu") ; - T->setDataType(DataType::Float32); + { + {14.0, 16.0, 18.0, 20.0}, + {22.0, 24.0, 26.0, 28.0}, + {30.0, 32.0, 34.0, 36.0} + } } + ); - op->associateInput(0, T0); - op->associateInput(1, T1); - op->getOutput(0)->setGrad(newGrad); - op->forwardDims(); + op->associateInput(0, T0); + op->associateInput(1, T1); + op->getOutput(0)->setGrad(newGrad); + op->forwardDims(); - myMul->backward(); + op->backward(); - REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad0)); - REQUIRE(approxEq<float>(*(op->getInput(1)->grad()), *expectedGrad1)); - } + REQUIRE(approxEq<aif32>(*(op->getInput(0)->grad()), expectedGrad0)); + REQUIRE(approxEq<aif32>(*(op->getInput(1)->grad()), expectedGrad1)); } +} TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") { constexpr std::uint16_t NBTRIALS = 10; @@ -366,8 +353,7 @@ TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") { std::uniform_int_distribution<int> boolDist(0,1); // Create MatMul Operator - std::shared_ptr<Node> myMul = Mul(); - auto op = std::static_pointer_cast<OperatorTensor>(myMul-> getOperator()); + std::shared_ptr<Mul_Op> op = std::make_shared<Mul_Op>(); op->setDataType(DataType::Float32); 
op->setBackend("cpu"); @@ -441,7 +427,7 @@ TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") { op->forwardDims(); start = std::chrono::system_clock::now(); - myMul->forward(); + op->forward(); end = std::chrono::system_clock::now(); duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); @@ -451,8 +437,8 @@ TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") { delete[] array1; delete[] result; } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {} μs\n", duration.count()); } @@ -568,7 +554,7 @@ TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") { // compute result op->forwardDims(); start = std::chrono::system_clock::now(); - myMul->forward(); + op->forward(); end = std::chrono::system_clock::now(); duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); @@ -582,8 +568,8 @@ TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") { const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>()); number_of_operation += nb_elements; } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {} μs\n", duration.count()); } SECTION("+1-D Tensor / 1-D Tensor") { std::size_t number_of_operation = 0; @@ -663,7 +649,7 @@ TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") { // compute result op->forwardDims(); start = std::chrono::system_clock::now(); - myMul->forward(); + op->forward(); end = std::chrono::system_clock::now(); duration += 
std::chrono::duration_cast<std::chrono::microseconds>(end - start); @@ -678,8 +664,8 @@ TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") { number_of_operation += nb_elements; } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {} μs\n", duration.count()); } } } diff --git a/unit_tests/operator/Test_PowImpl.cpp b/unit_tests/operator/Test_PowImpl.cpp index cb5d8872c9c7242bb4aa4efca388d53b578417f9..8238da3970740f4b8d6095d7a28c000319ea004e 100644 --- a/unit_tests/operator/Test_PowImpl.cpp +++ b/unit_tests/operator/Test_PowImpl.cpp @@ -9,18 +9,26 @@ * ********************************************************************************/ -#include <catch2/catch_test_macros.hpp> -#include <cmath> -#include <cstddef> // std::size_t -#include <cstdint> // std::uint16_t -#include <chrono> -#include <iostream> +#include <chrono> // std::micro, std::chrono::time_point, + // std::chrono::system_clock, std::chrono::duration +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t +#include <functional> // std::multiplies #include <memory> -#include <numeric> // std::accumulate -#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution +#include <numeric> // std::accumulate +#include <random> // std::random_device, std::mt19937 + // std::uniform_int_distribution, std::uniform_real_distribution +#include <vector> + +#include <catch2/catch_test_macros.hpp> +#include <fmt/core.h> +#include "aidge/backend/cpu/data/TensorImpl.hpp" +#include "aidge/backend/cpu/operator/PowImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/operator/Pow.hpp" +#include "aidge/utils/ArrayHelpers.hpp" #include "aidge/utils/TensorUtils.hpp" namespace Aidge { @@ -118,8 
+126,8 @@ TEST_CASE("[cpu/operator] Pow", "[Pow][CPU]") { // with broadcasting } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {} μs\n", duration.count()); } SECTION("+1-D Tensor / +1-D Tensor - broadcasting") { @@ -213,8 +221,8 @@ TEST_CASE("[cpu/operator] Pow", "[Pow][CPU]") { const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>()); number_of_operation += nb_elements; } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {} μs\n", duration.count()); } SECTION("+1-D Tensor / 1-D Tensor") { std::size_t number_of_operation = 0; @@ -309,8 +317,8 @@ TEST_CASE("[cpu/operator] Pow", "[Pow][CPU]") { number_of_operation += nb_elements; } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {} μs\n", duration.count()); } } @@ -440,7 +448,7 @@ TEST_CASE("[cpu/operator] Pow", "[Pow][CPU]") { } } )); - const auto expectedGrad0 = std::make_shared<Tensor>(Array3D<float, 2, 2, 3>( + const Tensor expectedGrad0 = Array3D<float, 2, 2, 3>( { { { @@ -453,18 +461,13 @@ TEST_CASE("[cpu/operator] Pow", "[Pow][CPU]") { } } } - )); - const auto expectedGrad1 = std::make_shared<Tensor>(Array1D<float, 3>( + ); + const 
Tensor expectedGrad1 = Array1D<float, 3>( { {14.14779854, 22.99299049, 33.56402588} } - )); + ); - for(const auto T: {input0, input1, gradOut, expectedGrad0, expectedGrad1}) - { - T->setBackend("cpu") ; - T->setDataType(DataType::Float32); - } std::shared_ptr<Node> powOp = Pow(); auto opr = std::static_pointer_cast<OperatorTensor>(powOp-> getOperator()); opr->setDataType(DataType::Float32); @@ -475,8 +478,8 @@ TEST_CASE("[cpu/operator] Pow", "[Pow][CPU]") { powOp->forward(); powOp->backward(); - REQUIRE(approxEq<float>(*(opr->getInput(0)->grad()), *expectedGrad0)); - REQUIRE(approxEq<float>(*(opr->getInput(1)->grad()), *expectedGrad1)); + REQUIRE(approxEq<float>(*(opr->getInput(0)->grad()), expectedGrad0)); + REQUIRE(approxEq<float>(*(opr->getInput(1)->grad()), expectedGrad1)); } } } diff --git a/unit_tests/operator/Test_RoundImpl.cpp b/unit_tests/operator/Test_RoundImpl.cpp index b4cf9ffbedc18b35b42ebbc05971f86e0fa584e3..8b5dd53a79242a38063f178807d5b6b40f2c0e96 100644 --- a/unit_tests/operator/Test_RoundImpl.cpp +++ b/unit_tests/operator/Test_RoundImpl.cpp @@ -9,15 +9,23 @@ * ********************************************************************************/ -#include <catch2/catch_test_macros.hpp> -#include <cstddef> // std::size_t -#include <cstdint> // std::uint16_t -#include <chrono> -#include <iostream> +#include <chrono> // std::micro, std::chrono::time_point, + // std::chrono::system_clock, std::chrono::duration +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t +#include <functional> // std::multiplies #include <memory> -#include <numeric> -#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution -#include <iomanip> +#include <numeric> // std::accumulate +#include <random> // std::random_device, std::mt19937 + // std::uniform_int_distribution, std::uniform_real_distribution +#include <vector> + +#include <catch2/catch_test_macros.hpp> +#include <fmt/core.h> + +#include "aidge/backend/cpu/data/TensorImpl.hpp" 
+#include "aidge/backend/cpu/operator/RoundImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/operator/Round.hpp" #include "aidge/utils/TensorUtils.hpp" @@ -29,7 +37,7 @@ TEST_CASE("[cpu/operator] Round_Test", "[Round][CPU]") { // Create a random number generator std::random_device rd; std::mt19937 gen(rd()); - std::uniform_real_distribution<float> valueDist(-15, 15); + std::uniform_real_distribution<float> valueDist(-15, 15); std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(5)); std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(3)); @@ -59,7 +67,7 @@ TEST_CASE("[cpu/operator] Round_Test", "[Round][CPU]") { std::size_t number_of_operation = 0; for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { - + // generate 2 random Tensors const std::size_t nbDims = nbDimsDist(gen); std::vector<std::size_t> dims; @@ -72,7 +80,7 @@ TEST_CASE("[cpu/operator] Round_Test", "[Round][CPU]") { // without broadcasting float* array0 = new float[nb_elements]; float* result = new float[nb_elements]; - + for (std::size_t i = 0; i < nb_elements; ++i) { array0[i] = valueDist(gen); result[i] = std::nearbyint(array0[i]); @@ -86,29 +94,22 @@ TEST_CASE("[cpu/operator] Round_Test", "[Round][CPU]") { // results Tres->resize(dims); Tres -> getImpl() -> setRawPtr(result, nb_elements); - + op->forwardDims(); start = std::chrono::system_clock::now(); myRound->forward(); end = std::chrono::system_clock::now(); duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); - bool is_eq = approxEq<float>(*(op->getOutput(0)), *Tres); - - auto Output = *(op->getOutput(0)); - - auto prt = Output.getImpl()->rawPtr(); - - REQUIRE(is_eq); - + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); delete[] array0; delete[] result; } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << 
duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {} μs\n", duration.count()); } } } // namespace Aidge diff --git a/unit_tests/operator/Test_SubImpl.cpp b/unit_tests/operator/Test_SubImpl.cpp index 44666ae631152c8898e24f7003b0c2ede8c67b84..471ae560a35b480945d7e5c85fb93bbbc8d459f6 100644 --- a/unit_tests/operator/Test_SubImpl.cpp +++ b/unit_tests/operator/Test_SubImpl.cpp @@ -9,17 +9,26 @@ * ********************************************************************************/ -#include <catch2/catch_test_macros.hpp> -#include <cstddef> // std::size_t -#include <cstdint> // std::uint16_t -#include <chrono> -#include <iostream> +#include <chrono> // std::micro, std::chrono::time_point, + // std::chrono::system_clock +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t +#include <functional> // std::multiplies #include <memory> -#include <numeric> // std::accumulate -#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution +#include <numeric> // std::accumulate +#include <random> // std::random_device, std::mt19937 + // std::uniform_int_distribution, std::uniform_real_distribution +#include <vector> + +#include <catch2/catch_test_macros.hpp> +#include <fmt/core.h> +#include "aidge/backend/cpu/data/TensorImpl.hpp" +#include "aidge/backend/cpu/operator/SubImpl.hpp" +#include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/operator/Sub.hpp" +#include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/TensorUtils.hpp" namespace Aidge { @@ -117,8 +126,8 @@ TEST_CASE("[cpu/operator] Sub", "[Sub][CPU]") { // with broadcasting } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", 
(number_of_operation / duration.count())); + fmt::print("INFO: total time: {}μs\n", duration.count()); } SECTION("+1-D Tensor / +1-D Tensor - broadcasting") { @@ -212,8 +221,8 @@ TEST_CASE("[cpu/operator] Sub", "[Sub][CPU]") { const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>()); number_of_operation += nb_elements; } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {}μs\n", duration.count()); } SECTION("+1-D Tensor / 1-D Tensor") { std::size_t number_of_operation = 0; @@ -308,8 +317,8 @@ TEST_CASE("[cpu/operator] Sub", "[Sub][CPU]") { number_of_operation += nb_elements; } - std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; - std::cout << "total time: " << duration.count() << "μs" << std::endl; + fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count())); + fmt::print("INFO: total time: {}μs\n", duration.count()); } } }