/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <memory>

#include "aidge/data/Tensor.hpp"
#include "aidge/operator/ReduceMean.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/backend/cpu.hpp"

using namespace Aidge;
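// Forward-pass tests for the CPU implementation of the ReduceMean operator:
// each section builds an input tensor, reduces it over a given set of axes,
// and checks the result against a hand-computed expected tensor.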
TEST_CASE("[cpu/operator] ReduceMean(forward)", "[ReduceMean][CPU]") {
SECTION("KeepDims") {
SECTION("test 1") {
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
{
{
{ 5.0, 1.0 },
{ 20.0, 2.0 }
},
{
{ 30.0, 1.0 },
{ 40.0, 2.0 }
},
{
{ 55.0, 1.0 },
{ 60.0, 2.0 }
}
}
});
Tensor myOutput = Tensor(Array3D<float,3,1,2> {
{
{{ 12.5, 1.5 }},
{{ 35.0, 1.5 }},
{{ 57.5, 1.5 }}
}
});
std::shared_ptr<Node> myReduceMean = ReduceMean({1}, 1);
auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean->getOperator());
op->associateInput(0, myInput);
op->setDataType(DataType::Float32);
op->setBackend("cpu");
op->computeOutputDims();
myReduceMean->forward();
op->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == myOutput);
}
SECTION("test 2") {
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,3,2> {
{
{
{ 0.0, 0.0 },
{ 1.0, 1.0 },
{ 2.0, 2.0 }
},
{
{ 3.0, 3.0 },
{ 4.0, 4.0 },
{ 5.0, 5.0 }
},
{
{ 6.0, 6.0 },
{ 7.0, 7.0 },
{ 8.0, 8.0 }
}
}
});
Tensor myOutput = Tensor(Array3D<float,3,1,1> {
{
{{ 1.0 }},
{{ 4.0 }},
{{ 7.0 }}
}
});
std::shared_ptr<Node> myReduceMean = ReduceMean({1, 2}, 1);
auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean->getOperator());
op->associateInput(0, myInput);
op->setDataType(DataType::Float32);
op->setBackend("cpu");
op->computeOutputDims();
myReduceMean->forward();
myOutput.print();
op->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == myOutput);
}
}
SECTION("not_KeepDims") {
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
{
{
{ 5.0, 1.0 },
{ 20.0, 2.0 }
},
{
{ 30.0, 1.0 },
{ 40.0, 2.0 }
},
{
{ 55.0, 1.0 },
{ 60.0, 2.0 }
}
}
});
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
{
{ 12.5, 1.5 },
{ 35.0, 1.5 },
{ 57.5, 1.5 }
}
});
std::shared_ptr<Node> myReduceMean = ReduceMean({1}, 0);
auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean->getOperator());
op->associateInput(0, myInput);
op->setDataType(DataType::Float32);
op->setBackend("cpu");
op->computeOutputDims();
myReduceMean->forward();
op->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *myOutput);
}
SECTION("all_axes") {
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
{
{
{ 5.0, 1.0 },
{ 20.0, 2.0 }
},
{
{ 30.0, 1.0 },
{ 40.0, 2.0 }
},
{
{ 55.0, 1.0 },
{ 60.0, 2.0 }
}
}
});
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> {
{18.25}
});
std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1, 2}, 0);
auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean->getOperator());
op->associateInput(0, myInput);
op->setDataType(DataType::Float32);
op->setBackend("cpu");
op->computeOutputDims();
myReduceMean->forward();
op->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *myOutput);
}
}