Commit 171e0702 authored by Maxence Naud

UPD: test includes

parent 9a4000ed
1 merge request: !132 [UPD] version 0.4.1 -> 0.5.0
......@@ -9,19 +9,23 @@
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstddef> // std::size_t
#include <cstdint> // std::uint16_t
#include <chrono>
#include <iostream>
#include <chrono> // std::micro, std::chrono::time_point,
// std::chrono::system_clock
#include <cstddef> // std::size_t
#include <cstdint> // std::int32_t, std::uint16_t
#include <memory>
#include <numeric> // std::accumulate
#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
#include <random> // std::random_device, std::mt19937
// std::uniform_int_distribution, std::uniform_real_distribution
#include <vector>
#include <catch2/catch_test_macros.hpp>
#include <fmt/core.h>
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/backend/cpu/operator/AddImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
namespace Aidge {
......@@ -35,8 +39,7 @@ TEST_CASE("Test addition of Tensors","[TensorImpl][Add][Data]") {
std::uniform_int_distribution<int> boolDist(0,1);
// Create MatMul Operator
std::shared_ptr<Node> mySub = Add();
auto op = std::static_pointer_cast<OperatorTensor>(mySub-> getOperator());
std::shared_ptr<Add_Op> op = std::make_shared<Add_Op>();
op->setDataType(DataType::Float32);
op->setBackend("cpu");
......
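For reference, the hunk above switches from casting the Node's operator to constructing the concrete Add_Op directly. A minimal, self-contained sketch of that pattern, using only calls that appear in these updated tests (the input values are illustrative):

#include <memory>

#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/backend/cpu/operator/AddImpl.hpp"  // registers the CPU kernel for Add
#include "aidge/data/Data.hpp"                      // DataType
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Add.hpp"                   // Add_Op
#include "aidge/utils/ArrayHelpers.hpp"             // Array1D

int main() {
    auto op = std::make_shared<Aidge::Add_Op>();
    op->setDataType(Aidge::DataType::Float32);
    op->setBackend("cpu");

    // two illustrative 1-D inputs
    auto in0 = std::make_shared<Aidge::Tensor>(Aidge::Array1D<float,2>{{1.0f, 2.0f}});
    auto in1 = std::make_shared<Aidge::Tensor>(Aidge::Array1D<float,2>{{3.0f, 4.0f}});
    op->associateInput(0, in0);
    op->associateInput(1, in1);

    op->forwardDims();
    op->forward();              // runs the CPU Add kernel
    op->getOutput(0)->print();  // expected element-wise sum: {4, 6}
    return 0;
}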
......@@ -9,12 +9,16 @@
*
********************************************************************************/
#include <memory>
#include <catch2/catch_test_macros.hpp>
#include "aidge/backend/cpu/operator/AddImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/backend/cpu.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
using namespace Aidge;
......
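This file shows the include layout the updated tests converge on: standard headers first (annotated with the symbols they provide), then third-party headers (catch2, fmt), then the Aidge headers the test actually uses, sorted alphabetically. A sketch of that preamble, assuming an Add-based test:

// sketch of the ordering only; the exact headers depend on the test
#include <cstdint>  // std::uint16_t
#include <memory>   // std::shared_ptr

#include <catch2/catch_test_macros.hpp>
#include <fmt/core.h>

#include "aidge/backend/cpu/operator/AddImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/utils/ArrayHelpers.hpp"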
......@@ -9,13 +9,19 @@
*
********************************************************************************/
#include <cstddef> // std::size_t
#include <cstdint> // std::uint16_t
#include <memory>
#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution, std::uniform_real_distribution
#include <catch2/catch_test_macros.hpp>
#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
#include "aidge/backend/cpu/operator/AndImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/And.hpp"
#include "aidge/backend/cpu.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
using namespace Aidge;
......@@ -180,7 +186,7 @@ TEST_CASE("[cpu/operator] And(forward)", "[And][CPU]") {
} //
}); //
std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<int,2> {{10, 20}});
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
{ //
{ //
......
......@@ -9,17 +9,20 @@
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstddef> // std::size_t
#include <cstdint> // std::uint16_t
#include <memory>
#include <numeric> // std::accumulate
#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution, std::uniform_real_distribution
#include <catch2/catch_test_macros.hpp>
#include <fmt/core.h>
#include "aidge/backend/cpu/operator/ArgMaxImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/ArgMax.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/backend/cpu.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
using namespace Aidge;
......@@ -118,8 +121,8 @@ TEST_CASE("[cpu/operator] ArgMax(forward)", "[ArgMax][CPU]") {
SECTION("Axis 2") {
Tensor myOutput = Tensor(Array3D<float,2,3, 1> {
{
{
{3.0},
{2.0},
{1.0}
......@@ -144,7 +147,7 @@ TEST_CASE("[cpu/operator] ArgMax(forward)", "[ArgMax][CPU]") {
SECTION("Axis 2 with keep_dims false") {
Tensor myOutput = Tensor(Array2D<float,2,3> {
{
{ 3.0, 2.0, 1.0 },
{ 2.0, 1.0, 0.0 }
}
......@@ -196,10 +199,11 @@ TEST_CASE("[cpu/operator] ArgMax(forward)", "[ArgMax][CPU]") {
op->associateInput(0,myInput);
op->setDataType(DataType::Float32);
op->setBackend("cpu");
std::cout << " ............... "<< std::endl;
fmt::print("{:.^20}\n", "forward");
myArgMax->forward();
fmt::print("{:.^20}\n", "result");
op->getOutput(0)->print();
std::cout <<"------"<<std::endl;
fmt::print("{:.^20}\n", "truth");
myOutput.print();
REQUIRE(*(op->getOutput(0)) == myOutput);
......
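The separators formerly printed with std::cout are now produced by fmt::print with the "{:.^20}" spec: '.' is the fill character, '^' requests centring, and 20 is the field width. A standalone sketch of what those calls output:

#include <fmt/core.h>

int main() {
    fmt::print("{:.^20}\n", "forward");  // "forward" centred in a 20-character field, padded with '.'
    fmt::print("{:.^20}\n", "result");
    fmt::print("{:.^20}\n", "truth");
    return 0;
}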
......@@ -9,14 +9,18 @@
*
********************************************************************************/
#include <cmath> // std::abs
#include <cstddef> // std::size_t
#include <memory>
#include <catch2/catch_test_macros.hpp>
#include "aidge/backend/cpu/operator/AtanImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Atan.hpp"
#include "aidge/backend/cpu.hpp"
#include <memory>
#include "aidge/utils/ArrayHelpers.hpp"
using namespace Aidge;
......@@ -32,7 +36,7 @@ TEST_CASE("[cpu/operator] Atan(forward)") {
0.09486303, 0.16007232, 0.40421187, 0.4102045, 0.39055911}});
std::shared_ptr<Node> myAtan = Atan();
auto op = std::static_pointer_cast<OperatorTensor>(myAtan->getOperator());
auto op = std::static_pointer_cast<Atan_Op>(myAtan->getOperator());
op->associateInput(0, input0);
op->setDataType(DataType::Float32);
op->setBackend("cpu");
......@@ -61,7 +65,7 @@ TEST_CASE("[cpu/operator] Atan(forward)") {
{0.75377332, 0.77411225, 0.32928031}}}});
std::shared_ptr<Node> myAtan = Atan();
auto op = std::static_pointer_cast<OperatorTensor>(myAtan->getOperator());
auto op = std::static_pointer_cast<Atan_Op>(myAtan->getOperator());
op->associateInput(0, input0);
op->setDataType(DataType::Float32);
op->setBackend("cpu");
......
......@@ -9,14 +9,18 @@
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cmath> // std::abs
#include <cstddef> // std::size_t
#include <memory>
#include <cstdlib>
#include <catch2/catch_test_macros.hpp>
#include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/AvgPooling.hpp"
#include "aidge/backend/cpu.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
using namespace Aidge;
......@@ -53,7 +57,7 @@ TEST_CASE("[cpu/operator] AvgPooling(forward)", "[AvgPooling][CPU]") {
});
SECTION("Stride") {
std::shared_ptr<Node> myAvgPool = AvgPooling({2,2}, "mycdw", {2,2});
auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool -> getOperator());
auto op = std::static_pointer_cast<AvgPooling_Op<2>>(myAvgPool -> getOperator());
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,2,2,2> {
{
......@@ -90,7 +94,7 @@ TEST_CASE("[cpu/operator] AvgPooling(forward)", "[AvgPooling][CPU]") {
}
});
std::shared_ptr<Node> myAvgPool = AvgPooling({3,3}, "mycdw", {3,3});
auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool -> getOperator());
auto op = std::static_pointer_cast<AvgPooling_Op<2>>(myAvgPool -> getOperator());
Tensor myOutput = Array4D<float,1,1,1,1> {
{{{{(0.3745 + 0.9507 + 0.7320 + 0.5987 + 0.1560 + 0.1560 + 0.0581 + 0.8662 + 0.6011)/9.0}}}}
......
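The expected value in the 3x3, stride-3 case above is simply the arithmetic mean of the nine window elements; a quick standalone check of that arithmetic:

#include <array>
#include <numeric>  // std::accumulate
#include <fmt/core.h>

int main() {
    // the nine input values averaged in the expected output above
    const std::array<float, 9> window{0.3745f, 0.9507f, 0.7320f,
                                      0.5987f, 0.1560f, 0.1560f,
                                      0.0581f, 0.8662f, 0.6011f};
    const float mean = std::accumulate(window.cbegin(), window.cend(), 0.0f)
                     / static_cast<float>(window.size());
    fmt::print("expected AvgPooling output: {}\n", mean);  // ~0.4993
    return 0;
}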
......@@ -9,20 +9,24 @@
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cmath> // std::abs
#include <cstddef> // std::size_t
#include <memory>
#include <catch2/catch_test_macros.hpp>
#include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/BatchNorm.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp"
#include "aidge/backend/cpu.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
using namespace Aidge;
TEST_CASE("[cpu/operator] BatchNorm(forward)", "[BatchNorm][CPU]") {
std::shared_ptr<Node> myBatchNorm = BatchNorm<2>(3, 0.00001F, 0.1F, "mybatchnorm");
auto op = std::static_pointer_cast<OperatorTensor>(myBatchNorm -> getOperator());
auto op = std::static_pointer_cast<BatchNorm_Op<2>>(myBatchNorm -> getOperator());
std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array1D<float,3> {{0.9044, 0.3028, 0.0218}});
std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<float,3> {{0.1332, 0.7503, 0.0878}});
std::shared_ptr<Tensor> myMean = std::make_shared<Tensor>(Array1D<float,3> {{0.9931, 0.8421, 0.9936}});
......
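For reference, the test exercises the standard inference-time BatchNorm formula, y = scale * (x - mean) / sqrt(variance + epsilon) + bias, applied per channel with epsilon = 0.00001F as passed above. A sketch for one channel; the variance and the input value below are hypothetical, since they are not visible in this excerpt:

#include <cmath>  // std::sqrt
#include <fmt/core.h>

int main() {
    // channel 0 parameters from the tensors above
    const float scale = 0.9044f;   // myWeights[0]
    const float bias  = 0.1332f;   // myBias[0]
    const float mean  = 0.9931f;   // myMean[0]
    const float eps   = 0.00001f;  // epsilon passed to BatchNorm<2>
    const float variance = 1.0f;   // hypothetical: the variance tensor is not shown in this excerpt
    const float x = 0.5f;          // hypothetical input value

    const float y = scale * (x - mean) / std::sqrt(variance + eps) + bias;
    fmt::print("normalised value: {}\n", y);
    return 0;
}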
......@@ -9,15 +9,20 @@
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <chrono> // std::micro, std::chrono::time_point,
// std::chrono::system_clock
#include <cstddef> // std::size_t
#include <cstdint> // std::uint16_t
#include <chrono>
#include <iostream>
#include <memory>
#include <numeric>
#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
#include <iomanip>
#include <catch2/catch_test_macros.hpp>
#include <fmt/core.h>
#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/backend/cpu/operator/BitShiftImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/BitShift.hpp"
#include "aidge/utils/TensorUtils.hpp"
......@@ -29,7 +34,7 @@ TEST_CASE("[cpu/operator] BitShift_TEST", "[BitShift][CPU]") {
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<int> valueDist(-15, 15);
std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(5));
std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(3));
std::uniform_int_distribution<int> boolDist(0,1);
......@@ -131,8 +136,8 @@ TEST_CASE("[cpu/operator] BitShift_TEST", "[BitShift][CPU]") {
}
std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
std::cout << "total time: " << duration.count() << "μs" << std::endl;
fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
fmt::print("INFO: total time: {}μs\n", duration.count());
}
SECTION("Test BitShift kernels with Broadcasting") {
std::size_t number_of_operation = 0;
......@@ -194,7 +199,7 @@ TEST_CASE("[cpu/operator] BitShift_TEST", "[BitShift][CPU]") {
}
else
{
result[idx_out + d] = array0[idx0] >> array1[idx1];
}
}
}
......@@ -222,12 +227,7 @@ TEST_CASE("[cpu/operator] BitShift_TEST", "[BitShift][CPU]") {
duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
// comparison between truth and computed result
bool equiv = (approxEq<int>(*(op->getOutput(0)), *Tres));
if(equiv == false)
{
std::cout << "Problem\n";
}
REQUIRE(equiv);
REQUIRE(approxEq<int>(*(op->getOutput(0)), *Tres));
delete[] array0;
delete[] array1;
......@@ -236,8 +236,8 @@ TEST_CASE("[cpu/operator] BitShift_TEST", "[BitShift][CPU]") {
const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
number_of_operation += nb_elements;
}
std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
std::cout << "total time: " << duration.count() << "μs" << std::endl;
fmt::print("INFO: number of elements over time spent: {}\n", (number_of_operation / duration.count()));
fmt::print("INFO: total time: {}μs\n", duration.count());
}
}
......
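Both BitShift sections time the kernel with std::chrono and now report throughput through fmt::print. A self-contained sketch of that measurement pattern, where the busy loop is a stand-in for the kernel under test:

#include <chrono>   // std::micro, std::chrono::system_clock
#include <cstddef>  // std::size_t
#include <fmt/core.h>

int main() {
    std::chrono::duration<double, std::micro> duration{};
    std::size_t number_of_operation = 0;

    volatile float acc = 0.0f;
    const auto start = std::chrono::system_clock::now();
    for (std::size_t i = 0; i < 1000000; ++i) { acc = acc + 1.0f; }  // stand-in for the kernel
    const auto end = std::chrono::system_clock::now();

    duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
    number_of_operation += 1000000;

    fmt::print("INFO: number of elements over time spent: {}\n",
               number_of_operation / duration.count());
    fmt::print("INFO: total time: {}μs\n", duration.count());
    return 0;
}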
......@@ -9,36 +9,37 @@
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <algorithm> // std::max, std::min
#include <chrono>
#include <cstddef> // std::size_t
#include <cstdint> // std::uint16_t
#include <chrono>
#include <iostream>
#include <vector>
#include <algorithm>
#include <iomanip>
#include <memory>
#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
#include <random> // std::random_device, std::mt19937
// std::uniform_int_distribution, std::uniform_real_distribution
#include <vector>
#include <catch2/catch_test_macros.hpp>
#include <fmt/core.h>
#include "aidge/backend/cpu/operator/ClipImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Clip.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/backend/cpu.hpp"
void ComputeClipBackward(const std::vector<float>& vec1, std::vector<float>& vec2, float min, float max) {
if (vec1.size() != vec2.size()) {
std::cerr << "Vectors should have the same sizes." << std::endl;
fmt::print(stderr, "Vectors should have the same sizes.\n");
return;
}
for (size_t i = 0; i < vec1.size(); ++i) {
for (std::size_t i = 0; i < vec1.size(); ++i) {
if (vec1[i] < min || vec1[i] > max) {
vec2[i] = 0.0f;
}
}
}
namespace Aidge
{
TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
{
......@@ -47,8 +48,8 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<float> dis(0.0, 10.0);
std::uniform_real_distribution<float> dismin(0.0, 4.5);
std::uniform_real_distribution<float> dismax(5.5, 10.0);
std::uniform_int_distribution<std::size_t> distDims(5,15);
std::uniform_int_distribution<std::size_t> distNbMatrix(1, 5);
......@@ -71,7 +72,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
// Create and populate the array with random float values
float* Array = new float[dim0*dim1];
for (int i = 0; i < dim0*dim1; ++i) {
for (std::size_t i = 0; i < dim0*dim1; ++i) {
Array[i] = dis(gen); // Generate random float value
}
......@@ -80,7 +81,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
TInput -> resize({dim0,dim1});
TInput -> setBackend("cpu");
TInput -> getImpl() -> setRawPtr(Array, dim0*dim1);
float min = dismin(gen);
std::shared_ptr<Tensor> Tmin = std::make_shared<Tensor>(DataType::Float32);
Tmin -> resize({});
......@@ -109,7 +110,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
op->setDataType(DataType::Float32);
op->setBackend("cpu");
op->forwardDims(true);
start = std::chrono::system_clock::now();
myClip->forward();
end = std::chrono::system_clock::now();
......@@ -118,9 +119,9 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
}
std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl;
std::cout << "total time: " << duration.count() << std::endl;
}
fmt::print("INFO: multiplications over time spent: {}\n", totalComputation/duration.count());
fmt::print("INFO: total time: {}\n", duration.count());
}
SECTION("Clip test with min >= max [Forward]") {
std::size_t totalComputation = 0;
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
......@@ -131,7 +132,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
// Create and populate the array with random float values
float* Array = new float[dim0*dim1];
for (int i = 0; i < dim0*dim1; ++i) {
for (std::size_t i = 0; i < dim0*dim1; ++i) {
Array[i] = dis(gen); // Generate random float value
}
......@@ -140,7 +141,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
TInput -> resize({dim0,dim1});
TInput -> setBackend("cpu");
TInput -> getImpl() -> setRawPtr(Array, dim0*dim1);
float min = dismax(gen);
std::shared_ptr<Tensor> Tmin = std::make_shared<Tensor>(DataType::Float32);
Tmin -> resize({});
......@@ -169,7 +170,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
op->setDataType(DataType::Float32);
op->setBackend("cpu");
op->forwardDims(true);
start = std::chrono::system_clock::now();
myClip->forward();
end = std::chrono::system_clock::now();
......@@ -178,13 +179,13 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
}
std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl;
std::cout << "total time: " << duration.count() << std::endl;
}
fmt::print("INFO: multiplications over time spent: {}\n", totalComputation/duration.count());
fmt::print("INFO: total time: {}\n", duration.count());
}
SECTION("Clip with Clip Attr [Forward]")
{
std::size_t totalComputation = 0;
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial)
{
float min = dismin(gen);
......@@ -200,7 +201,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
// Create and populate the array with random float values
float* Array = new float[dim0*dim1];
for (int i = 0; i < dim0*dim1; ++i) {
for (std::size_t i = 0; i < dim0*dim1; ++i) {
Array[i] = dis(gen); // Generate random float value
}
// Convert Input to Tensor
......@@ -231,8 +232,8 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
}
std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl;
std::cout << "total time: " << duration.count() << std::endl;
fmt::print("INFO: multiplications over time spent: {}\n", totalComputation/duration.count());
fmt::print("INFO: total time: {}\n", duration.count());
}
SECTION("Simple clip test [Backward]") {
std::size_t totalComputation = 0;
......@@ -243,13 +244,13 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
// generate Tensors dimensions
const std::size_t dim0 = distDims(gen);
const std::size_t dim1 = distDims(gen);
totalComputation += dim0*dim1;
// Create and populate the array with random float values
float* Array = new float[dim0*dim1];
float* gradArray = new float[dim0*dim1];
for (int i = 0; i < dim0*dim1; ++i) {
for (std::size_t i = 0; i < dim0*dim1; ++i) {
Array[i] = dis(gen); // Generate random float value
gradArray[i] = dis(gen);
}
......@@ -264,7 +265,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
TInput -> resize({dim0,dim1});
TInput -> setBackend("cpu");
TInput -> getImpl() -> setRawPtr(Array, dim0*dim1);
float min = dismin(gen);
std::shared_ptr<Tensor> Tmin = std::make_shared<Tensor>(DataType::Float32);
Tmin -> resize({});
......@@ -296,7 +297,7 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
myClip->forward();
op->getOutput(0)->setGrad(TGrad);
start = std::chrono::system_clock::now();
REQUIRE_NOTHROW(myClip->backward());
end = std::chrono::system_clock::now();
......@@ -310,9 +311,9 @@ TEST_CASE("[cpu/operator] Clip", "[Clip][CPU]")
duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
REQUIRE(GT1 == BackwardTensorVec);
}
std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl;
std::cout << "total time: " << duration.count() << std::endl;
fmt::print("INFO: multiplications over time spent: {}\n", totalComputation/duration.count());
fmt::print("INFO: total time: {}\n", duration.count());
}
}
} // namespace Aidge
}
\ No newline at end of file
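The ComputeClipBackward helper defined at the top of this file encodes the clip gradient rule: the incoming gradient is zeroed wherever the corresponding input fell outside [min, max]. A self-contained usage sketch (the input and gradient values are illustrative):

#include <cstddef>  // std::size_t
#include <vector>
#include <fmt/core.h>

// same helper as in the test file above
static void ComputeClipBackward(const std::vector<float>& vec1, std::vector<float>& vec2,
                                float min, float max) {
    if (vec1.size() != vec2.size()) {
        fmt::print(stderr, "Vectors should have the same sizes.\n");
        return;
    }
    for (std::size_t i = 0; i < vec1.size(); ++i) {
        if (vec1[i] < min || vec1[i] > max) {
            vec2[i] = 0.0f;
        }
    }
}

int main() {
    const std::vector<float> input{-1.0f, 2.0f, 7.0f};  // illustrative inputs, clip range [0, 5]
    std::vector<float> grad{0.5f, 0.5f, 0.5f};          // illustrative incoming gradient
    ComputeClipBackward(input, grad, 0.0f, 5.0f);
    fmt::print("grad = [{}, {}, {}]\n", grad[0], grad[1], grad[2]);  // -> [0, 0.5, 0]
    return 0;
}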
......@@ -9,34 +9,29 @@
*
********************************************************************************/
#include <aidge/utils/Types.h>
#include <catch2/catch_test_macros.hpp>
#include <chrono>
#include <cmath>
#include <cstddef> // std::size_t
#include <cstdint> // std::uint16_t
#include <iostream>
#include <functional> // std::multiplies
#include <memory>
#include <numeric> // std::accumulate
#include <ostream>
#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
#include <random> // std::random_device, std::mt19937
// std::uniform_int_distribution, std::uniform_real_distribution
#include <vector>
#include <catch2/catch_test_macros.hpp>
#include <fmt/core.h>
#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/GlobalAveragePooling.hpp"
#include "aidge/utils/TensorUtils.hpp"
// debug print function
void print_tensor(Aidge::Tensor &T) {
// Print tensors
std::cout << "Tensor : size = [";
for (auto &dim : T.dims()) {
std::cout << dim << " , ";
}
std::cout << "]" << std::endl;
T.print();
}
#include "aidge/utils/Types.h"
namespace Aidge {
TEST_CASE("[cpu/operator] GlobalAveragePooling",
"[GlobalAveragePooling][CPU]") {
constexpr std::uint16_t NBTRIALS = 10;
......@@ -54,9 +49,7 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
std::size_t(7));
// Create MatGlobalAveragePooling Operator
std::shared_ptr<Node> globAvgPool = GlobalAveragePooling();
auto op =
std::static_pointer_cast<OperatorTensor>(globAvgPool->getOperator());
std::shared_ptr<GlobalAveragePooling_Op> op = std::make_shared<GlobalAveragePooling_Op>();
op->setDataType(DataType::Float32);
op->setBackend("cpu");
......@@ -99,7 +92,7 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
T0->resize(dims);
T0->getImpl()->setRawPtr(array0, nb_elements);
REQUIRE_THROWS(globAvgPool->forward());
REQUIRE_THROWS(op->forward());
delete[] array0;
}
......@@ -158,7 +151,7 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
op->forwardDims();
start = std::chrono::system_clock::now();
REQUIRE_NOTHROW(globAvgPool->forward());
REQUIRE_NOTHROW(op->forward());
end = std::chrono::system_clock::now();
duration +=
std::chrono::duration_cast<std::chrono::microseconds>(end - start);
......@@ -231,7 +224,7 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
op->forwardDims();
start = std::chrono::system_clock::now();
REQUIRE_NOTHROW(globAvgPool->forward());
REQUIRE_NOTHROW(op->forward());
end = std::chrono::system_clock::now();
duration += std::chrono::duration_cast<std::chrono::microseconds>(
end - start);
......@@ -358,7 +351,7 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
Tres->getImpl()->setRawPtr(result, out_nb_elems);
op->forwardDims();
start = std::chrono::system_clock::now();
REQUIRE_NOTHROW(globAvgPool->forward());
REQUIRE_NOTHROW(op->forward());
end = std::chrono::system_clock::now();
duration += std::chrono::duration_cast<std::chrono::microseconds>(
end - start);
......@@ -547,7 +540,7 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
Tres->getImpl()->setRawPtr(result, out_nb_elems);
op->forwardDims();
start = std::chrono::system_clock::now();
REQUIRE_NOTHROW(globAvgPool->forward());
REQUIRE_NOTHROW(op->forward());
end = std::chrono::system_clock::now();
duration += std::chrono::duration_cast<std::chrono::microseconds>(
end - start);
......@@ -561,12 +554,9 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
delete[] result;
}
}
std::cout << "GlobalAveragePooling total execution time : "
<< duration.count() << "µs" << std::endl;
std::cout << "Number of operations : " << number_of_operation
<< std::endl;
std::cout << "Operation / µs = " << number_of_operation / duration.count()
<< std::endl;
fmt::print("INFO: GlobalAveragePooling total execution time: {}µs\n", duration.count());
fmt::print("INFO: Number of operations : {}\n", number_of_operation);
fmt::print("INFO: Operation / µs = {}\n", number_of_operation / duration.count());
}
}
}
......
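The hand-built reference results in this test follow the usual GlobalAveragePooling definition: for each (batch, channel) pair, average over all spatial positions. A standalone sketch with a hypothetical NCHW input:

#include <cstddef>  // std::size_t
#include <numeric>  // std::accumulate
#include <vector>
#include <fmt/core.h>

int main() {
    // hypothetical NCHW input: 1 batch, 2 channels, 2x2 spatial
    const std::size_t C = 2, spatial = 4;
    const std::vector<float> input{ 1.f,  2.f,  3.f,  4.f,    // channel 0
                                   10.f, 20.f, 30.f, 40.f};   // channel 1
    std::vector<float> output(C);
    for (std::size_t c = 0; c < C; ++c) {
        const float* begin = input.data() + c * spatial;
        output[c] = std::accumulate(begin, begin + spatial, 0.0f) / static_cast<float>(spatial);
    }
    fmt::print("global average pooling -> [{}, {}]\n", output[0], output[1]);  // [2.5, 25]
    return 0;
}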