/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <aidge/utils/Types.h>
#include <catch2/catch_test_macros.hpp>
#include <chrono>
#include <cmath>
#include <cstddef> // std::size_t
#include <cstdint> // std::uint16_t
#include <iostream>
#include <memory>
#include <numeric> // std::accumulate
#include <ostream>
#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/GlobalAveragePooling.hpp"
#include "aidge/utils/TensorUtils.hpp"
namespace Aidge {
TEST_CASE("[cpu/operator] GlobalAveragePooling",
"[GlobalAveragePooling][CPU]") {
constexpr std::uint16_t NBTRIALS = 10;
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<float> valueDist(
0.1f, 1.1f); // Random float distribution between 0.1 and 1.1
std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2),
std::size_t(10));
std::uniform_int_distribution<std::size_t> nbLowDimsDist(std::size_t(1),
std::size_t(2));
std::uniform_int_distribution<std::size_t> nbHighDimsDist(std::size_t(3),
std::size_t(7));
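// nbLowDimsDist draws 1-2 dims (too few for GlobalAveragePooling, expected to
// fail), while nbHighDimsDist draws 3-7 dims ([N, C, spatial...], valid input)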
// Create GlobalAveragePooling Operator
std::shared_ptr<Node> globAvgPool = GlobalAveragePooling();
auto op =
std::static_pointer_cast<OperatorTensor>(globAvgPool->getOperator());
op->setDataType(DataType::Float32);
op->setBackend("cpu");
// Create the input Tensor
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
op->associateInput(0, T0);
T0->setDataType(DataType::Float32);
T0->setBackend("cpu");
// Create results Tensor
std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>();
Tres->setDataType(DataType::Float32);
Tres->setBackend("cpu");
// To measure execution time of 'GlobalAveragePooling_Op::forward()' member
// function call
std::chrono::time_point<std::chrono::system_clock> start;
std::chrono::time_point<std::chrono::system_clock> end;
std::chrono::duration<double, std::micro> duration{};
int number_of_operation{0};
SECTION("GlobalAveragePoolingImpl_cpu::forward()") {
SECTION(
"1-2Dim > not enough dimensions leads to function throwing an error") {
// generate a random tensor with too few dimensions
const std::size_t nbDims = nbLowDimsDist(gen);
std::vector<std::size_t> dims;
for (std::size_t i = 0; i < nbDims; ++i) {
dims.push_back(dimSizeDist(gen));
}
const std::size_t nb_elements =
std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1),
std::multiplies<std::size_t>());
float *array0 = new float[nb_elements];
for (std::size_t i = 0; i < nb_elements; ++i) {
array0[i] = valueDist(gen);
}
// bind the raw buffer to the input tensor so forward() sees the 1-2D dims
T0->resize(dims);
T0->getImpl()->setRawPtr(array0, nb_elements);
REQUIRE_THROWS(globAvgPool->forward());
delete[] array0;
}
SECTION("3+Dim") {
SECTION("Fill a tensor with all values set as N will result with every "
"output being N") {
// generate the tensor
const std::size_t nbDims = nbHighDimsDist(gen);
std::vector<std::size_t> dims_in;
for (std::size_t i = 0; i < nbDims; ++i) {
dims_in.push_back(dimSizeDist(gen));
}
// create in nb_elems
const std::size_t in_nb_elems =
std::accumulate(dims_in.cbegin(), dims_in.cend(), std::size_t(1),
std::multiplies<std::size_t>());
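// dims_in[0] is the batch size and dims_in[1] the channel count; the
// remaining dims are the spatial dims averaged over by the pooling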
const DimSize_t in_batch_nb_elems = in_nb_elems / dims_in[0];
const DimSize_t in_channel_nb_elems = in_batch_nb_elems / dims_in[1];
// create out nb_elems
std::vector<std::size_t> dims_out{dims_in[0], dims_in[1]};
const std::size_t out_nb_elems =
std::accumulate(dims_out.cbegin(), dims_out.cend(), std::size_t(1),
std::multiplies<std::size_t>());
const DimSize_t out_batch_nb_elems = out_nb_elems / dims_out[0];
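// the [N, C] output holds one averaged value per channel in each batch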
// iterate over each batch/channel
float *array0 = new float[in_nb_elems];
float *result = new float[out_nb_elems];
float val = valueDist(gen);
std::cout << "val = " << val << std::endl;
for (std::size_t batch = 0; batch < dims_in[0]; ++batch) {
for (std::size_t channel = 0; channel < dims_in[1]; ++channel) {
for (std::size_t i = 0; i < in_channel_nb_elems; ++i) {
array0[batch * in_batch_nb_elems + channel * in_channel_nb_elems + i] = val;
}
result[batch * out_batch_nb_elems + channel] = val;
}
}
// input0
T0->resize(dims_in);
T0->getImpl()->setRawPtr(array0, in_nb_elems);
// results
Tres->resize(dims_out);
Tres->getImpl()->setRawPtr(result, out_nb_elems);
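// propagate the input dims to the output tensor before running forward()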
op->computeOutputDims();
start = std::chrono::system_clock::now();
REQUIRE_NOTHROW(globAvgPool->forward());
end = std::chrono::system_clock::now();
duration +=
std::chrono::duration_cast<std::chrono::microseconds>(end - start);
// Print tensors
std::cout << "input : size = [";
for (auto &dim : op->getInput(0)->dims()) {
std::cout << dim << " , ";
}
std::cout << "]" << std::endl;
// T0->print();
std::cout << "output : size = [";
for (auto &dim : op->getOutput(0)->dims()) {
std::cout << dim << " , ";
}
std::cout << "]" << std::endl;
op->getOutput(0)->print();
std::cout << "ref Tres : size = output size if no error occurred"
<< std::endl;
std::cout << "ref Tres: size = [";
for (auto &dim : Tres->dims()) {
std::cout << dim << " , ";
}
std::cout << "]" << std::endl;
CHECK(Tres->nbDims() == op->getOutput(0)->nbDims());
for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) {
CHECK(Tres->dims().at(i) == op->getOutput(0)->dims().at(i));
}
Tres->print();
CHECK(approxEq<float>(*(op->getOutput(0)), *Tres));
delete[] array0;
delete[] result;
}
SECTION("Using result from a pytorch function as groundtruth") {}
SECTION("random testing") {}
}
}
}
} // namespace Aidge