Skip to content
Snippets Groups Projects
Commit c46b0f76 authored by Michal Szczepanski's avatar Michal Szczepanski
Browse files

updated test resize op

parent 59ff6e33
No related branches found
No related tags found
No related merge requests found
Pipeline #53179 failed
...@@ -20,148 +20,60 @@ ...@@ -20,148 +20,60 @@
#include "aidge/operator/OperatorTensor.hpp"

namespace Aidge {
/**
* Test the resize operation of the given operator with the specified input dimensions,
* scales or sizes, and expected output dimensions.
*
* @param op The operator to test.
* @param input_dims The input dimensions to use for the test.
* @param scales_or_sizes The scales or sizes to use for the test.
* @param expected_dims The expected output dimensions for the test.
* @param use_scales A boolean flag indicating whether to use scales or sizes for the test.
*/
void test_resize(const std::shared_ptr<OperatorTensor>& op, const std::vector<long unsigned int>& input_dims, const std::vector<float>& scales_or_sizes, const std::vector<long unsigned int>& expected_dims, bool use_scales) {
std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>();
input_data->setBackend("cpu");
input_data->resize(input_dims);
input_data->zeros();
op->associateInput(0, input_data);
const std::shared_ptr<Tensor> tensor_values = std::make_shared<Tensor>();
tensor_values -> setDataType(DataType::Float32);
tensor_values -> setBackend("cpu");
tensor_values -> resize(std::vector<std::size_t>({scales_or_sizes.size()}));
tensor_values -> getImpl() -> copyFromHost(scales_or_sizes.data(), scales_or_sizes.size());
op->associateInput(use_scales ? 2 : 3, tensor_values);
REQUIRE_NOTHROW(op -> forwardDims(true));
REQUIRE(op -> getOutput(0)->dims() == expected_dims);
}
// Shape-inference tests for the Resize operator: each test_resize() call
// checks one (input shape, scales-or-sizes, expected output shape) triple.
// NOTE(review): this span was reconstructed from a fused two-column diff;
// the old per-SECTION boilerplate was replaced by the test_resize helper.
TEST_CASE("[core/operator] Resize_Op(forwardDims)", "[Resize][forwardDimsScales]") {
    fmt::print("Test unit_tests/operator/Test_Resize_Op.cpp Resize_Op(forwardDims).\n");
    std::shared_ptr<Node> myResize = Resize();
    auto op = std::static_pointer_cast<OperatorTensor>(myResize->getOperator());

    SECTION("Un-connected input leads to failure.") {
        // No data input associated: shape inference must throw.
        REQUIRE_THROWS(op->forwardDims());
    }

    SECTION("Connected and fixed Input and Scales") {
        // Upscale cases: output dim = input dim * scale.
        test_resize(op, {1,1,2,2}, {1, 1, 2, 2}, {1,1,4,4}, true);
        test_resize(op, {4,4,10,10}, {1, 1, 2, 3}, {4, 4, 20, 30}, true);
        // Downscale cases, including a fractional scale that floors to 1.
        test_resize(op, {4,2,10,10}, {1, 1, 0.5, 0.5}, {4, 2, 5, 5}, true);
        test_resize(op, {11, 11, 4, 4}, {1, 1, 0.3, 0.3}, {11, 11, 1, 1}, true);
    }

    SECTION("Connected and fixed Input and Sizes") {
        // With sizes, the last two values ARE the expected spatial dims.
        test_resize(op, {1,1,2,2}, {1, 1, 8, 8}, {1,1,8,8}, false);
        test_resize(op, {60,60,30,30}, {1, 1, 75, 75}, {60,60,75,75}, false);
        test_resize(op, {11,11,20,20}, {1, 1, 8, 8}, {11,11,8,8}, false);
        test_resize(op, {43,211,22,22}, {1, 1, 10, 10}, {43,211,10,10}, false);
    }
}
} // namespace Aidge
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment