Commit e3094ef9 authored by Houssem ROUIS

add forwardDims test for ReduceSum

parent 70b48051
Merge requests: !93 Release v0.3.0, !75 Learning backend cuda
@@ -22,6 +22,129 @@
using namespace Aidge;
TEST_CASE("[cpu/operator] ReduceSum(forward)", "[ReduceSum][CPU]") {
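    // Shape propagation: check that forwardDims() yields the expected output dimensions
    // for random input shapes and axes, across the keep_dims / noop_with_empty_axes variants.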
SECTION("ForwardDims")
{
constexpr std::uint16_t NBTRIALS = 10;
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0 and 1
std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
std::uniform_int_distribution<int> boolDist(0,1);
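        // keep_dims = true: each reduced axis is retained in the output shape with size 1.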
SECTION("KeepDims") {
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
DimSize_t nbDims = nbDimsDist(gen);
std::vector<DimSize_t> dims(nbDims);
std::vector<DimSize_t> expectedOutDims(nbDims);
std::vector<std::int32_t> axes;
for (std::size_t i = 0; i < nbDims; i++) {
dims[i] = dimSizeDist(gen);
expectedOutDims[i] = dims[i];
if(boolDist(gen)) {
axes.push_back(i);
expectedOutDims[i] = 1;
}
}
if (axes.empty()) { // Default behaviour if no axes are provided is to reduce all dimensions
std::fill(expectedOutDims.begin(), expectedOutDims.end(), 1);
}
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(dims);
myInput->setBackend("cpu");
myInput->setDataType(DataType::Float32);
myInput->zeros();
std::shared_ptr<Node> myReduceSum = ReduceSum(axes, true);
auto op = std::static_pointer_cast<OperatorTensor>(myReduceSum -> getOperator());
op->associateInput(0,myInput);
op->setDataType(DataType::Float32);
op->setBackend("cpu");
op->forwardDims();
const auto outputDims = op->getOutput(0)->dims();
REQUIRE(outputDims == expectedOutDims);
}
}
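        // keep_dims = false: reduced axes are removed from the output shape.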
SECTION("Not KeepDims") {
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
DimSize_t nbDims = nbDimsDist(gen);
std::vector<DimSize_t> dims(nbDims);
std::vector<DimSize_t> expectedOutDims;
std::vector<std::int32_t> axes;
for (std::size_t i = 0; i < nbDims; i++) {
dims[i] = dimSizeDist(gen);
if(boolDist(gen)) {
axes.push_back(i);
}
else {
expectedOutDims.push_back(dims[i]);
}
}
if (axes.empty() || expectedOutDims.empty()) { // Default behaviour if no axes are provided is to reduce all dimensions
expectedOutDims = std::vector<DimSize_t>{1};
}
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(dims);
myInput->setBackend("cpu");
myInput->setDataType(DataType::Float32);
std::shared_ptr<Node> myReduceSum = ReduceSum(axes, false);
auto op = std::static_pointer_cast<OperatorTensor>(myReduceSum -> getOperator());
op->associateInput(0,myInput);
op->setDataType(DataType::Float32);
op->setBackend("cpu");
op->forwardDims();
const auto outputDims = op->getOutput(0)->dims();
REQUIRE(outputDims == expectedOutDims);
}
}
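        // noop_with_empty_axes = true: an empty axes list turns ReduceSum into a no-op, leaving the shape unchanged.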
SECTION("NoopWithEmptyAxes") {
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
DimSize_t nbDims = nbDimsDist(gen);
std::vector<DimSize_t> dims(nbDims);
for (std::size_t i = 0; i < nbDims; i++) {
dims[i] = dimSizeDist(gen);
}
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(dims);
myInput->setBackend("cpu");
myInput->setDataType(DataType::Float32);
std::shared_ptr<Node> myReduceSum = ReduceSum(std::vector<int32_t>{}, false, true);
auto op = std::static_pointer_cast<OperatorTensor>(myReduceSum -> getOperator());
op->associateInput(0,myInput);
op->setDataType(DataType::Float32);
op->setBackend("cpu");
op->forwardDims();
const auto outputDims = op->getOutput(0)->dims();
REQUIRE(outputDims == dims);
}
}
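        // noop_with_empty_axes = false (default): an empty axes list reduces over every dimension.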
SECTION("Not NoopWithEmptyAxes") {
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
DimSize_t nbDims = nbDimsDist(gen);
std::vector<DimSize_t> dims(nbDims);
for (std::size_t i = 0; i < nbDims; i++) {
dims[i] = dimSizeDist(gen);
}
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(dims);
myInput->setBackend("cpu");
myInput->setDataType(DataType::Float32);
std::shared_ptr<Node> myReduceSum = ReduceSum({}, false, false);
auto op = std::static_pointer_cast<OperatorTensor>(myReduceSum -> getOperator());
op->associateInput(0,myInput);
op->setDataType(DataType::Float32);
op->setBackend("cpu");
op->forwardDims();
REQUIRE(op->getOutput(0)->nbDims() == 1);
REQUIRE(op->getOutput(0)->size() == 1);
}
}
}
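    // Value checks on fixed inputs follow.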
SECTION("KeepDims") {
SECTION("test 1") {
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
......