Commit 75c81956 authored by Houssem ROUIS

remove random tests

parent 291eaebe
Pipeline #42495 failed
@@ -160,140 +160,4 @@ TEST_CASE("[gpu/operator] AvgPooling(forward)", "[AvgPooling][GPU]") {
        delete[] computedOutput;
    }
    int number_of_operation{0};
    SECTION("Random Input") {
        constexpr std::uint16_t NBTRIALS = 10;
        std::size_t kernel = 2;
        std::size_t stride = 2;
        // Create a random number generator
        std::random_device rd;
        std::mt19937 gen(rd());
        std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0.1 and 1.1
        std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
        std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(4), std::size_t(4));
        // Create AveragePooling Operator
        std::shared_ptr<Node> myAvgPool = AvgPooling({kernel, kernel}, "myAvgPool", {stride, stride});
        auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool->getOperator());
        op->setDataType(DataType::Float32);
        op->setBackend("cpu");

        // Create the input Tensor
        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
        op->associateInput(0, T0);
        T0->setDataType(DataType::Float32);
        T0->setBackend("cpu");

        // To measure execution time of 'AveragePooling_Op::forward()'
        std::chrono::time_point<std::chrono::system_clock> start;
        std::chrono::time_point<std::chrono::system_clock> end;
        std::chrono::duration<double, std::micro> duration{};
        std::size_t number_of_operation = 0;

        SECTION("OutDims") {
            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
                // generate a random Tensor
                const std::size_t nbDims = nbDimsDist(gen);
                std::vector<std::size_t> dims;
                for (std::size_t i = 0; i < nbDims; ++i) {
                    dims.push_back(dimSizeDist(gen));
                }
                const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
                number_of_operation += nb_elements;

                // Fill input tensor
                float* array0 = new float[nb_elements];
                for (std::size_t i = 0; i < nb_elements; ++i) {
                    array0[i] = valueDist(gen);
                }
                T0->resize(dims);
                T0->getImpl()->setRawPtr(array0, nb_elements);

                // Run inference
                op->computeOutputDims();
                start = std::chrono::system_clock::now();
                myAvgPool->forward();
                end = std::chrono::system_clock::now();
                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);

                // Verify output dimensions
                REQUIRE(op->getOutput(0)->nbDims() == dims.size());
                for (size_t i = 0; i < op->getOutput(0)->nbDims(); ++i) {
                    if (i == 2 || i == 3)
                        REQUIRE(op->getOutput(0)->dims()[i] == (1 + static_cast<DimSize_t>(std::floor(static_cast<float>(dims[i] - kernel) / static_cast<float>(stride)))));
                    else
                        REQUIRE(op->getOutput(0)->dims()[i] == dims[i]);
                }
                delete[] array0;
            }
            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count()) << std::endl;
            std::cout << "total time: " << duration.count() << "μs" << std::endl;
        }

        SECTION("Values") {
            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
                // generate a random Tensor
                const std::size_t nbDims = nbDimsDist(gen);
                std::vector<std::size_t> dims;
                for (std::size_t i = 0; i < nbDims; ++i) {
                    dims.push_back(dimSizeDist(gen));
                }
                const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
                number_of_operation += nb_elements;

                // Fill input tensor
                float* array0 = new float[nb_elements];
                for (std::size_t i = 0; i < nb_elements; ++i) {
                    array0[i] = valueDist(gen);
                }
                T0->resize(dims);
                T0->getImpl()->setRawPtr(array0, nb_elements);

                // Fill expected output
                std::vector<float> result;
                std::size_t rows = dims[2], cols = dims[3], nbMat = dims[0] * dims[1], matSize = rows * cols;
                for (size_t i = 0; i < nbMat; i++) {
                    for (size_t r = 0; r < rows; r += stride) {
                        for (size_t c = 0; c < cols; c += stride) {
                            float sum = 0.0f;
                            for (size_t m = 0; m < kernel; m++) {
                                for (size_t n = 0; n < kernel; n++) {
                                    sum += array0[i * matSize + (r + m) * cols + c + n];
                                }
                            }
                            result.push_back(sum / (kernel * kernel));
                        }
                    }
                }

                // Run inference
                op->computeOutputDims();
                start = std::chrono::system_clock::now();
                myAvgPool->forward();
                end = std::chrono::system_clock::now();
                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);

                op->getOutput(0)->print();
                float* computedOutput = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
                for (size_t i = 0; i < op->getOutput(0)->size(); i++) {
                    REQUIRE(abs(computedOutput[i] - result[i]) < 1e-6);
                }
                delete[] array0;
            }
            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count()) << std::endl;
            std::cout << "total time: " << duration.count() << "μs" << std::endl;
        }
    }
}
\ No newline at end of file