// Test_PadImpl.cpp
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstdlib>
#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Pad.hpp"
#include "aidge/backend/cpu.hpp"
using namespace Aidge;
TEST_CASE("[cpu/operator] Pad(forward)", "[Pad][CPU]") {
    // Shared 2x3x5x5 NCHW input tensor. Catch2 re-runs the whole TEST_CASE
    // body for every SECTION, so each section receives a freshly built copy
    // of this tensor (Pad never mutates its input; this only removes the
    // five identical literal duplicates the file previously carried).
    std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
        {
            {
                {{  0,   1,   2,   3,   4},
                 {  5,   6,   7,   8,   9},
                 { 10,  11,  12,  13,  14},
                 { 15,  16,  17,  18,  19},
                 { 20,  21,  22,  23,  24}},
                {{ 25,  26,  27,  28,  29},
                 { 30,  31,  32,  33,  34},
                 { 35,  36,  37,  38,  39},
                 { 40,  41,  42,  43,  44},
                 { 45,  46,  47,  48,  49}},
                {{ 50,  51,  52,  53,  54},
                 { 55,  56,  57,  58,  59},
                 { 60,  61,  62,  63,  64},
                 { 65,  66,  67,  68,  69},
                 { 70,  71,  72,  73,  74}}
            },
            {
                {{ 75,  76,  77,  78,  79},
                 { 80,  81,  82,  83,  84},
                 { 85,  86,  87,  88,  89},
                 { 90,  91,  92,  93,  94},
                 { 95,  96,  97,  98,  99}},
                {{100, 101, 102, 103, 104},
                 {105, 106, 107, 108, 109},
                 {110, 111, 112, 113, 114},
                 {115, 116, 117, 118, 119},
                 {120, 121, 122, 123, 124}},
                {{125, 126, 127, 128, 129},
                 {130, 131, 132, 133, 134},
                 {135, 136, 137, 138, 139},
                 {140, 141, 142, 143, 144},
                 {145, 146, 147, 148, 149}}
            }
        }
    });

    SECTION("Symmetric Pad") {
        const int pv = 0; // pad value
        // One pixel of constant padding on all four borders: 5x5 -> 7x7.
        std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
        auto op = std::static_pointer_cast<OperatorTensor>(myPad->getOperator());
        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
            {
                {
                    {{ pv,  pv,  pv,  pv,  pv,  pv,  pv},
                     { pv,   0,   1,   2,   3,   4,  pv},
                     { pv,   5,   6,   7,   8,   9,  pv},
                     { pv,  10,  11,  12,  13,  14,  pv},
                     { pv,  15,  16,  17,  18,  19,  pv},
                     { pv,  20,  21,  22,  23,  24,  pv},
                     { pv,  pv,  pv,  pv,  pv,  pv,  pv}},
                    {{ pv,  pv,  pv,  pv,  pv,  pv,  pv},
                     { pv,  25,  26,  27,  28,  29,  pv},
                     { pv,  30,  31,  32,  33,  34,  pv},
                     { pv,  35,  36,  37,  38,  39,  pv},
                     { pv,  40,  41,  42,  43,  44,  pv},
                     { pv,  45,  46,  47,  48,  49,  pv},
                     { pv,  pv,  pv,  pv,  pv,  pv,  pv}},
                    {{ pv,  pv,  pv,  pv,  pv,  pv,  pv},
                     { pv,  50,  51,  52,  53,  54,  pv},
                     { pv,  55,  56,  57,  58,  59,  pv},
                     { pv,  60,  61,  62,  63,  64,  pv},
                     { pv,  65,  66,  67,  68,  69,  pv},
                     { pv,  70,  71,  72,  73,  74,  pv},
                     { pv,  pv,  pv,  pv,  pv,  pv,  pv}}
                },
                {
                    {{ pv,  pv,  pv,  pv,  pv,  pv,  pv},
                     { pv,  75,  76,  77,  78,  79,  pv},
                     { pv,  80,  81,  82,  83,  84,  pv},
                     { pv,  85,  86,  87,  88,  89,  pv},
                     { pv,  90,  91,  92,  93,  94,  pv},
                     { pv,  95,  96,  97,  98,  99,  pv},
                     { pv,  pv,  pv,  pv,  pv,  pv,  pv}},
                    {{ pv,  pv,  pv,  pv,  pv,  pv,  pv},
                     { pv, 100, 101, 102, 103, 104,  pv},
                     { pv, 105, 106, 107, 108, 109,  pv},
                     { pv, 110, 111, 112, 113, 114,  pv},
                     { pv, 115, 116, 117, 118, 119,  pv},
                     { pv, 120, 121, 122, 123, 124,  pv},
                     { pv,  pv,  pv,  pv,  pv,  pv,  pv}},
                    {{ pv,  pv,  pv,  pv,  pv,  pv,  pv},
                     { pv, 125, 126, 127, 128, 129,  pv},
                     { pv, 130, 131, 132, 133, 134,  pv},
                     { pv, 135, 136, 137, 138, 139,  pv},
                     { pv, 140, 141, 142, 143, 144,  pv},
                     { pv, 145, 146, 147, 148, 149,  pv},
                     { pv,  pv,  pv,  pv,  pv,  pv,  pv}}
                }
            }
        });
        op->associateInput(0, myInput);
        op->setDataType(DataType::Int32);
        op->setBackend("cpu");
        op->computeOutputDims();
        myPad->forward();
        // op->getOutput(0)->print();
        REQUIRE(*(op->getOutput(0)) == *myOutput);
    }

    SECTION("Asymmetric Pad") {
        const int pv = 0; // pad value
        // Constant padding only on top and right borders: 5x5 -> 6x6.
        std::shared_ptr<Node> myPad = Pad<2>({1, 0, 0, 1}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
        auto op = std::static_pointer_cast<OperatorTensor>(myPad->getOperator());
        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,6,6> { //NCHW
            {
                {
                    {{ pv,  pv,  pv,  pv,  pv,  pv},
                     {  0,   1,   2,   3,   4,  pv},
                     {  5,   6,   7,   8,   9,  pv},
                     { 10,  11,  12,  13,  14,  pv},
                     { 15,  16,  17,  18,  19,  pv},
                     { 20,  21,  22,  23,  24,  pv}},
                    {{ pv,  pv,  pv,  pv,  pv,  pv},
                     { 25,  26,  27,  28,  29,  pv},
                     { 30,  31,  32,  33,  34,  pv},
                     { 35,  36,  37,  38,  39,  pv},
                     { 40,  41,  42,  43,  44,  pv},
                     { 45,  46,  47,  48,  49,  pv}},
                    {{ pv,  pv,  pv,  pv,  pv,  pv},
                     { 50,  51,  52,  53,  54,  pv},
                     { 55,  56,  57,  58,  59,  pv},
                     { 60,  61,  62,  63,  64,  pv},
                     { 65,  66,  67,  68,  69,  pv},
                     { 70,  71,  72,  73,  74,  pv}}
                },
                {
                    {{ pv,  pv,  pv,  pv,  pv,  pv},
                     { 75,  76,  77,  78,  79,  pv},
                     { 80,  81,  82,  83,  84,  pv},
                     { 85,  86,  87,  88,  89,  pv},
                     { 90,  91,  92,  93,  94,  pv},
                     { 95,  96,  97,  98,  99,  pv}},
                    {{ pv,  pv,  pv,  pv,  pv,  pv},
                     {100, 101, 102, 103, 104,  pv},
                     {105, 106, 107, 108, 109,  pv},
                     {110, 111, 112, 113, 114,  pv},
                     {115, 116, 117, 118, 119,  pv},
                     {120, 121, 122, 123, 124,  pv}},
                    {{ pv,  pv,  pv,  pv,  pv,  pv},
                     {125, 126, 127, 128, 129,  pv},
                     {130, 131, 132, 133, 134,  pv},
                     {135, 136, 137, 138, 139,  pv},
                     {140, 141, 142, 143, 144,  pv},
                     {145, 146, 147, 148, 149,  pv}}
                }
            }
        });
        op->associateInput(0, myInput);
        op->setDataType(DataType::Int32);
        op->setBackend("cpu");
        op->computeOutputDims();
        myPad->forward();
        // op->getOutput(0)->print();
        REQUIRE(*(op->getOutput(0)) == *myOutput);
    }

    SECTION("Pad Edge") {
        // Edge padding replicates the nearest border value: 5x5 -> 7x7.
        std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Edge);
        auto op = std::static_pointer_cast<OperatorTensor>(myPad->getOperator());
        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
            {
                {
                    {{  0,   0,   1,   2,   3,   4,   4},
                     {  0,   0,   1,   2,   3,   4,   4},
                     {  5,   5,   6,   7,   8,   9,   9},
                     { 10,  10,  11,  12,  13,  14,  14},
                     { 15,  15,  16,  17,  18,  19,  19},
                     { 20,  20,  21,  22,  23,  24,  24},
                     { 20,  20,  21,  22,  23,  24,  24}},
                    {{ 25,  25,  26,  27,  28,  29,  29},
                     { 25,  25,  26,  27,  28,  29,  29},
                     { 30,  30,  31,  32,  33,  34,  34},
                     { 35,  35,  36,  37,  38,  39,  39},
                     { 40,  40,  41,  42,  43,  44,  44},
                     { 45,  45,  46,  47,  48,  49,  49},
                     { 45,  45,  46,  47,  48,  49,  49}},
                    {{ 50,  50,  51,  52,  53,  54,  54},
                     { 50,  50,  51,  52,  53,  54,  54},
                     { 55,  55,  56,  57,  58,  59,  59},
                     { 60,  60,  61,  62,  63,  64,  64},
                     { 65,  65,  66,  67,  68,  69,  69},
                     { 70,  70,  71,  72,  73,  74,  74},
                     { 70,  70,  71,  72,  73,  74,  74}}
                },
                {
                    {{ 75,  75,  76,  77,  78,  79,  79},
                     { 75,  75,  76,  77,  78,  79,  79},
                     { 80,  80,  81,  82,  83,  84,  84},
                     { 85,  85,  86,  87,  88,  89,  89},
                     { 90,  90,  91,  92,  93,  94,  94},
                     { 95,  95,  96,  97,  98,  99,  99},
                     { 95,  95,  96,  97,  98,  99,  99}},
                    {{100, 100, 101, 102, 103, 104, 104},
                     {100, 100, 101, 102, 103, 104, 104},
                     {105, 105, 106, 107, 108, 109, 109},
                     {110, 110, 111, 112, 113, 114, 114},
                     {115, 115, 116, 117, 118, 119, 119},
                     {120, 120, 121, 122, 123, 124, 124},
                     {120, 120, 121, 122, 123, 124, 124}},
                    {{125, 125, 126, 127, 128, 129, 129},
                     {125, 125, 126, 127, 128, 129, 129},
                     {130, 130, 131, 132, 133, 134, 134},
                     {135, 135, 136, 137, 138, 139, 139},
                     {140, 140, 141, 142, 143, 144, 144},
                     {145, 145, 146, 147, 148, 149, 149},
                     {145, 145, 146, 147, 148, 149, 149}}
                }
            }
        });
        op->associateInput(0, myInput);
        op->setDataType(DataType::Int32);
        op->setBackend("cpu");
        op->computeOutputDims();
        myPad->forward();
        // op->getOutput(0)->print();
        REQUIRE(*(op->getOutput(0)) == *myOutput);
    }

    SECTION("Pad Reflect") {
        // Reflect padding mirrors values around the border: 5x5 -> 7x7.
        std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Reflect);
        auto op = std::static_pointer_cast<OperatorTensor>(myPad->getOperator());
        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
            {
                {
                    {{  6,   5,   6,   7,   8,   9,   5},
                     {  1,   0,   1,   2,   3,   4,   0},
                     {  6,   5,   6,   7,   8,   9,   5},
                     { 11,  10,  11,  12,  13,  14,  10},
                     { 16,  15,  16,  17,  18,  19,  15},
                     { 21,  20,  21,  22,  23,  24,  20},
                     {  1,   0,   1,   2,   3,   4,   0}},
                    {{ 31,  30,  31,  32,  33,  34,  30},
                     { 26,  25,  26,  27,  28,  29,  25},
                     { 31,  30,  31,  32,  33,  34,  30},
                     { 36,  35,  36,  37,  38,  39,  35},
                     { 41,  40,  41,  42,  43,  44,  40},
                     { 46,  45,  46,  47,  48,  49,  45},
                     { 26,  25,  26,  27,  28,  29,  25}},
                    {{ 56,  55,  56,  57,  58,  59,  55},
                     { 51,  50,  51,  52,  53,  54,  50},
                     { 56,  55,  56,  57,  58,  59,  55},
                     { 61,  60,  61,  62,  63,  64,  60},
                     { 66,  65,  66,  67,  68,  69,  65},
                     { 71,  70,  71,  72,  73,  74,  70},
                     { 51,  50,  51,  52,  53,  54,  50}}
                },
                {
                    {{ 81,  80,  81,  82,  83,  84,  80},
                     { 76,  75,  76,  77,  78,  79,  75},
                     { 81,  80,  81,  82,  83,  84,  80},
                     { 86,  85,  86,  87,  88,  89,  85},
                     { 91,  90,  91,  92,  93,  94,  90},
                     { 96,  95,  96,  97,  98,  99,  95},
                     { 76,  75,  76,  77,  78,  79,  75}},
                    {{106, 105, 106, 107, 108, 109, 105},
                     {101, 100, 101, 102, 103, 104, 100},
                     {106, 105, 106, 107, 108, 109, 105},
                     {111, 110, 111, 112, 113, 114, 110},
                     {116, 115, 116, 117, 118, 119, 115},
                     {121, 120, 121, 122, 123, 124, 120},
                     {101, 100, 101, 102, 103, 104, 100}},
                    {{131, 130, 131, 132, 133, 134, 130},
                     {126, 125, 126, 127, 128, 129, 125},
                     {131, 130, 131, 132, 133, 134, 130},
                     {136, 135, 136, 137, 138, 139, 135},
                     {141, 140, 141, 142, 143, 144, 140},
                     {146, 145, 146, 147, 148, 149, 145},
                     {126, 125, 126, 127, 128, 129, 125}}
                }
            }
        });
        op->associateInput(0, myInput);
        op->setDataType(DataType::Int32);
        op->setBackend("cpu");
        op->computeOutputDims();
        myPad->forward();
        // Debug print left enabled by mistake in the original; keep it
        // commented out like every other section to avoid noisy test output.
        // op->getOutput(0)->print();
        REQUIRE(*(op->getOutput(0)) == *myOutput);
    }

    SECTION("Pad Wrap") {
        // Wrap padding tiles the tensor periodically: 5x5 -> 7x7.
        std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Wrap);
        auto op = std::static_pointer_cast<OperatorTensor>(myPad->getOperator());
        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
            {
                {
                    {{ 24,  20,  21,  22,  23,  24,  20},
                     {  4,   0,   1,   2,   3,   4,   0},
                     {  9,   5,   6,   7,   8,   9,   5},
                     { 14,  10,  11,  12,  13,  14,  10},
                     { 19,  15,  16,  17,  18,  19,  15},
                     { 24,  20,  21,  22,  23,  24,  20},
                     {  4,   0,   1,   2,   3,   4,   0}},
                    {{ 49,  45,  46,  47,  48,  49,  45},
                     { 29,  25,  26,  27,  28,  29,  25},
                     { 34,  30,  31,  32,  33,  34,  30},
                     { 39,  35,  36,  37,  38,  39,  35},
                     { 44,  40,  41,  42,  43,  44,  40},
                     { 49,  45,  46,  47,  48,  49,  45},
                     { 29,  25,  26,  27,  28,  29,  25}},
                    {{ 74,  70,  71,  72,  73,  74,  70},
                     { 54,  50,  51,  52,  53,  54,  50},
                     { 59,  55,  56,  57,  58,  59,  55},
                     { 64,  60,  61,  62,  63,  64,  60},
                     { 69,  65,  66,  67,  68,  69,  65},
                     { 74,  70,  71,  72,  73,  74,  70},
                     { 54,  50,  51,  52,  53,  54,  50}}
                },
                {
                    {{ 99,  95,  96,  97,  98,  99,  95},
                     { 79,  75,  76,  77,  78,  79,  75},
                     { 84,  80,  81,  82,  83,  84,  80},
                     { 89,  85,  86,  87,  88,  89,  85},
                     { 94,  90,  91,  92,  93,  94,  90},
                     { 99,  95,  96,  97,  98,  99,  95},
                     { 79,  75,  76,  77,  78,  79,  75}},
                    {{124, 120, 121, 122, 123, 124, 120},
                     {104, 100, 101, 102, 103, 104, 100},
                     {109, 105, 106, 107, 108, 109, 105},
                     {114, 110, 111, 112, 113, 114, 110},
                     {119, 115, 116, 117, 118, 119, 115},
                     {124, 120, 121, 122, 123, 124, 120},
                     {104, 100, 101, 102, 103, 104, 100}},
                    {{149, 145, 146, 147, 148, 149, 145},
                     {129, 125, 126, 127, 128, 129, 125},
                     {134, 130, 131, 132, 133, 134, 130},
                     {139, 135, 136, 137, 138, 139, 135},
                     {144, 140, 141, 142, 143, 144, 140},
                     {149, 145, 146, 147, 148, 149, 145},
                     {129, 125, 126, 127, 128, 129, 125}}
                }
            }
        });
        op->associateInput(0, myInput);
        op->setDataType(DataType::Int32);
        op->setBackend("cpu");
        op->computeOutputDims();
        myPad->forward();
        // op->getOutput(0)->print();
        REQUIRE(*(op->getOutput(0)) == *myOutput);
    }
}