Commit ede140a3 authored by Grégoire Kubler, committed by Olivier BICHLER

feat : [ADD] Padding3D

parent eff4347d
2 merge requests: !174 "0.6.1", !160 "feat : support for conv3D forward"
@@ -54,9 +54,19 @@ using PadImpl2D_cpu = OperatorImpl_cpu<Pad_Op<2>,
const void *,
void *)>;
using Pad3D_Op = Pad_Op<3>;
using PadImpl3D_cpu = OperatorImpl_cpu<Pad_Op<3>,
void(const std::array<DimSize_t, 6>&,
const PadBorderType,
const double,
const std::array<DimSize_t, 5> &,
const void *,
void *)>;
// Implementation entry point registration to Operator
REGISTRAR(Pad1D_Op, "cpu", Aidge::PadImpl1D_cpu::create);
REGISTRAR(Pad2D_Op, "cpu", Aidge::PadImpl2D_cpu::create);
REGISTRAR(Pad3D_Op, "cpu", Aidge::PadImpl3D_cpu::create);
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_PADIMPL_H_ */
@@ -12,16 +12,27 @@
#ifndef AIDGE_CPU_OPERATOR_PADIMPL_KERNELS_H_
#define AIDGE_CPU_OPERATOR_PADIMPL_KERNELS_H_
#include <aidge/operator/Pad.hpp>
#include <aidge/utils/ErrorHandling.hpp>
#include <algorithm> // std::max, std::min
#include <array>
#include <cmath>
#include <cstddef> // std::size_t
#include <cstdint> // std::int32_t
#include <fmt/base.h>
#include <stdexcept>
#include <type_traits>
#include "aidge/backend/cpu/operator/PadImpl.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
////////////////////////////////////////////////////////////////////////////////////////////////
// PAD 1D
////////////////////////////////////////////////////////////////////////////////////////////////
/**
* @brief Forward kernel for 1D Padding on CPU backend.
* @tparam I Input data type.
@@ -187,6 +198,368 @@ REGISTRAR(PadImpl2D_cpu,
REGISTRAR(PadImpl2D_cpu,
{{DataType::Int32, DataFormat::NCHW}, {DataType::Int32, DataFormat::NCHW}},
{Pad_ProdConso_cpu::defaultModel, Aidge::PadImpl2D_cpu_forward_kernel<cpptype_t<DataType::Int32>, cpptype_t<DataType::Int32>>, nullptr});
////////////////////////////////////////////////////////////////////////////////////////////////
// PAD 3D
////////////////////////////////////////////////////////////////////////////////////////////////
template <typename I, typename O>
static inline void
pad3DForwardConstant(const std::array<DimSize_t, 6> &beginEndBorders,
const O borderValue,
const std::array<DimSize_t, 5> &iDims,
const std::array<DimSize_t, 4> &iStrides,
std::array<DimSize_t, 4> &iOffsets,
const I *input,
const std::array<DimSize_t, 3> &oDims,
const std::array<DimSize_t, 4> &oStrides,
std::array<DimSize_t, 4> &oOffsets,
O *output) {
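// Walk the output spatial coordinates; whenever an entire X-slab or Y-row
// falls outside the input, fill it with the border value in a single
// std::fill and skip the inner loops.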
for (DimSize_t oX = 0; oX < oDims[0]; ++oX) {
oOffsets[2] = oX * oStrides[2] + oOffsets[1];
const SignedDimSize_t iX = oX - beginEndBorders[0];
if (iX >= 0 && iX < static_cast<SignedDimSize_t>(iDims[2])) {
iOffsets[2] = iX * iStrides[2] + iOffsets[1];
} else {
std::fill(output + oOffsets[2],
output + oOffsets[2] + oStrides[2],
borderValue);
continue;
}
for (DimSize_t oY = 0; oY < oDims[1]; ++oY) {
oOffsets[3] = oY * oStrides[3] + oOffsets[2];
const SignedDimSize_t iY = oY - beginEndBorders[1];
if (iY >= 0 && iY < static_cast<SignedDimSize_t>(iDims[3])) {
iOffsets[3] = iY * iStrides[3] + iOffsets[2];
} else {
std::fill(output + oOffsets[3],
output + oOffsets[3] + oStrides[3],
borderValue);
continue;
}
for (DimSize_t oZ = 0; oZ < oDims[2]; ++oZ) {
const SignedDimSize_t iZ = oZ - beginEndBorders[2];
// If within input bounds, take the corresponding input value,
// otherwise use the border value.
output[oOffsets[3] + oZ] =
(iZ >= 0 && iZ < static_cast<SignedDimSize_t>(iDims[4]))
? input[iOffsets[3] + iZ]
: borderValue;
}
}
}
}
/**
 * @brief Small inline helper computing the input coordinate corresponding
 * to an output coordinate for edge padding along a given dimension
 * (clamps to the first/last input element).
 * @param[in] beginBorder Padding at the beginning of the given dimension.
 * @param[in] iDim Size of the given dimension.
 * @param[in] oCoord Output coordinate along the given dimension.
 */
static inline DimSize_t padEdgeComputeInputCoord(const DimSize_t beginBorder,
const DimSize_t iDim,
const DimSize_t oCoord) {
return static_cast<DimSize_t>(std::max(
static_cast<SignedDimSize_t>(0),
std::min(static_cast<SignedDimSize_t>(iDim - 1),
static_cast<SignedDimSize_t>(oCoord - beginBorder))));
}
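// Worked example: with beginBorder = 2 and iDim = 4, output coordinates
// 0..7 map to input coordinates 0, 0, 0, 1, 2, 3, 3, 3 (clamped to the
// first/last input element on each side).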
template <typename I, typename O>
static inline void
pad3DForwardEdge(const std::array<DimSize_t, 6> &beginEndBorders,
const std::array<DimSize_t, 5> &iDims,
const std::array<DimSize_t, 4> &iStrides,
std::array<DimSize_t, 4> &iOffsets,
const I *input,
const std::array<DimSize_t, 3> &oDims,
const std::array<DimSize_t, 4> &oStrides,
std::array<DimSize_t, 4> &oOffsets,
O *output) {
for (DimSize_t oX = 0; oX < oDims[0]; ++oX) {
oOffsets[2] = oX * oStrides[2] + oOffsets[1];
const DimSize_t iX =
padEdgeComputeInputCoord(beginEndBorders[0], iDims[2], oX);
iOffsets[2] = iX * iStrides[2] + iOffsets[1];
for (DimSize_t oY = 0; oY < oDims[1]; ++oY) {
oOffsets[3] = oY * oStrides[3] + oOffsets[2];
const DimSize_t iY =
padEdgeComputeInputCoord(beginEndBorders[1], iDims[3], oY);
iOffsets[3] = iY * iStrides[3] + iOffsets[2];
for (DimSize_t oZ = 0; oZ < oDims[2]; ++oZ) {
const DimSize_t iZ =
padEdgeComputeInputCoord(beginEndBorders[2], iDims[4], oZ);
output[oOffsets[3] + oZ] = input[iOffsets[3] + iZ];
}
}
}
}
/**
 * @brief Small inline helper computing the input coordinate corresponding
 * to an output coordinate for reflect padding along a given dimension
 * (mirrors around the border elements without repeating them).
 * @param[in] beginBorder Padding at the beginning of the given dimension.
 * @param[in] iDim Size of the given dimension.
 * @param[in] oCoord Output coordinate along the given dimension.
 */
static inline DimSize_t
padReflectComputeInputCoord(const DimSize_t beginBorder,
const DimSize_t iDim,
const DimSize_t oCoord) {
SignedDimSize_t iCoord =
std::abs(static_cast<SignedDimSize_t>(oCoord - beginBorder));
// Handle the case where iCoord >= iDim.
// If so, iCoord must be changed to (iDim - 1) - delta,
// with delta = |(iDim - 1) - iCoord|.
//
// Since iCoord > iDim - 1, |(iDim - 1) - iCoord| = iCoord - (iDim - 1)
//                                               = iCoord + 1 - iDim.
// Hence (iDim - 1) - delta = (iDim - 1) - (iCoord + 1 - iDim)
//                          = 2 * (iDim - 1) - iCoord.
iCoord = (iCoord >= static_cast<SignedDimSize_t>(iDim))
? static_cast<SignedDimSize_t>(iDim + iDim - 2) - iCoord
: iCoord;
return iCoord;
}
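// Worked example: with beginBorder = 2 and iDim = 4, output coordinates
// 0..7 map to input coordinates 2, 1, 0, 1, 2, 3, 2, 1 (mirrored around
// the first and last input elements, which are not duplicated).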
template <typename I, typename O>
static inline void
pad3DForwardReflect(const std::array<DimSize_t, 6> &beginEndBorders,
const std::array<DimSize_t, 5> &iDims,
const std::array<DimSize_t, 4> &iStrides,
std::array<DimSize_t, 4> &iOffsets,
const I *input,
const std::array<DimSize_t, 3> &oDims,
const std::array<DimSize_t, 4> &oStrides,
std::array<DimSize_t, 4> &oOffsets,
O *output) {
for (DimSize_t oX = 0; oX < oDims[0]; ++oX) {
oOffsets[2] = oX * oStrides[2] + oOffsets[1];
DimSize_t iX =
padReflectComputeInputCoord(beginEndBorders[0], iDims[2], oX);
iOffsets[2] = iX * iStrides[2] + iOffsets[1];
for (DimSize_t oY = 0; oY < oDims[1]; ++oY) {
oOffsets[3] = oY * oStrides[3] + oOffsets[2];
DimSize_t iY =
padReflectComputeInputCoord(beginEndBorders[1], iDims[3], oY);
iOffsets[3] = iY * iStrides[3] + iOffsets[2];
for (DimSize_t oZ = 0; oZ < oDims[2]; ++oZ) {
DimSize_t iZ = padReflectComputeInputCoord(beginEndBorders[2],
iDims[4],
oZ);
output[oOffsets[3] + oZ] = input[iOffsets[3] + iZ];
}
}
}
}
/**
 * @brief Small inline helper computing the input coordinate corresponding
 * to an output coordinate for wrap padding along a given dimension
 * (tiles the input periodically).
 * @param[in] beginBorder Padding at the beginning of the given dimension.
 * @param[in] iDim Size of the given dimension.
 * @param[in] oCoord Output coordinate along the given dimension.
 */
static inline DimSize_t padWrapComputeInputCoord(const DimSize_t beginBorder,
const DimSize_t iDim,
const DimSize_t oCoord) {
return (iDim + oCoord - beginBorder) % iDim;
}
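// Worked example: with beginBorder = 2 and iDim = 4, output coordinates
// 0..7 map to input coordinates 2, 3, 0, 1, 2, 3, 0, 1 (periodic tiling).
// The leading "iDim +" keeps the unsigned subtraction non-negative for
// begin paddings up to iDim.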
template <typename I, typename O>
static inline void
pad3DForwardWrap(const std::array<DimSize_t, 6> &beginEndBorders,
const std::array<DimSize_t, 5> &iDims,
const std::array<DimSize_t, 4> &iStrides,
std::array<DimSize_t, 4> &iOffsets,
const I *input,
const std::array<DimSize_t, 3> &oDims,
const std::array<DimSize_t, 4> &oStrides,
std::array<DimSize_t, 4> &oOffsets,
O *output) {
for (DimSize_t oX = 0; oX < oDims[0]; ++oX) {
oOffsets[2] = oX * oStrides[2] + oOffsets[1];
const DimSize_t iX =
padWrapComputeInputCoord(beginEndBorders[0], iDims[2], oX);
iOffsets[2] = iX * iStrides[2] + iOffsets[1];
for (DimSize_t oY = 0; oY < oDims[1]; ++oY) {
oOffsets[3] = oY * oStrides[3] + oOffsets[2];
const DimSize_t iY =
padWrapComputeInputCoord(beginEndBorders[1], iDims[3], oY);
iOffsets[3] = iY * iStrides[3] + iOffsets[2];
for (DimSize_t oZ = 0; oZ < oDims[2]; ++oZ) {
const DimSize_t iZ =
padWrapComputeInputCoord(beginEndBorders[2], iDims[4], oZ);
output[oOffsets[3] + oZ] = input[iOffsets[3] + iZ];
}
}
}
}
/**
 * @brief Forward kernel for 3D Padding on CPU backend.
 * @tparam I Input data type.
 * @tparam O Output data type.
 * @param beginEndBorders Padding sizes at the beginning/end of each spatial dimension.
 * @param borderType Type of padding to apply.
 * @param borderValue Value used for constant padding.
 * @param iDims Array of input dimensions (NCDHW).
 * @param input_ const input Tensor.
 * @param output_ Output Tensor.
 */
template <class I, class O>
void PadImpl3D_cpu_forward_kernel(
const std::array<DimSize_t, 6> &beginEndBorders,
const PadBorderType borderType,
const double borderValue,
const std::array<DimSize_t, 5> &iDims,
const void *input_,
void *output_) {
const I *input = static_cast<const I *>(input_);
O *output = static_cast<O *>(output_);
// Only spatial dims are computed: batch & channel are identical to iDims.
const std::array<DimSize_t, 3> oDims = {
iDims[2] + beginEndBorders[0] + beginEndBorders[3],
iDims[3] + beginEndBorders[1] + beginEndBorders[4],
iDims[4] + beginEndBorders[2] + beginEndBorders[5]};
const std::array<DimSize_t, 4> oStrides = {
iDims[1] * oDims[0] * oDims[1] * oDims[2],
oDims[0] * oDims[1] * oDims[2],
oDims[1] * oDims[2],
oDims[2],
};
const std::array<DimSize_t, 4> iStrides = {
iDims[1] * iDims[2] * iDims[3] * iDims[4],
iDims[2] * iDims[3] * iDims[4],
iDims[3] * iDims[4],
iDims[4],
};
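// e.g. for an {N, C, 2, 2, 2} input padded by 1 on every side:
// oDims = {4, 4, 4}, iStrides = {8*C, 8, 4, 2} and
// oStrides = {64*C, 64, 16, 4} (contiguous NCDHW layout).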
std::array<DimSize_t, 4> oOffsets = {0, 0, 0, 0};
std::array<DimSize_t, 4> iOffsets = {0, 0, 0, 0};
for (std::size_t batch = 0; batch < iDims[0]; ++batch) {
oOffsets[0] = batch * oStrides[0];
iOffsets[0] = batch * iStrides[0];
for (std::size_t ch = 0; ch < iDims[1]; ++ch) {
iOffsets[1] = ch * iStrides[1] + iOffsets[0];
oOffsets[1] = ch * oStrides[1] + oOffsets[0];
switch (borderType) {
case PadBorderType::Constant: {
pad3DForwardConstant(beginEndBorders,
static_cast<O>(borderValue),
iDims,
iStrides,
iOffsets,
input,
oDims,
oStrides,
oOffsets,
output);
break;
}
case PadBorderType::Zero: {
pad3DForwardConstant(beginEndBorders,
static_cast<O>(0),
iDims,
iStrides,
iOffsets,
input,
oDims,
oStrides,
oOffsets,
output);
break;
}
case PadBorderType::Edge: {
pad3DForwardEdge(beginEndBorders,
iDims,
iStrides,
iOffsets,
input,
oDims,
oStrides,
oOffsets,
output);
break;
}
case PadBorderType::Reflect: {
pad3DForwardReflect(beginEndBorders,
iDims,
iStrides,
iOffsets,
input,
oDims,
oStrides,
oOffsets,
output);
break;
}
case PadBorderType::Wrap: {
pad3DForwardWrap(beginEndBorders,
iDims,
iStrides,
iOffsets,
input,
oDims,
oStrides,
oOffsets,
output);
break;
}
default: {
AIDGE_THROW_OR_ABORT(
std::runtime_error,
"Pad3D : unsupported padding method : {}.",
borderType);
}
}
}
}
}
// Kernels registration to implementation entry point
REGISTRAR(PadImpl3D_cpu,
{{DataType::Float32, DataFormat::NCHW},
{DataType::Float32, DataFormat::NCHW}},
{Pad_ProdConso_cpu::defaultModel,
Aidge::PadImpl3D_cpu_forward_kernel<cpptype_t<DataType::Float32>,
cpptype_t<DataType::Float32>>,
nullptr});
REGISTRAR(PadImpl3D_cpu,
{{DataType::Float64, DataFormat::NCHW},
{DataType::Float64, DataFormat::NCHW}},
{Pad_ProdConso_cpu::defaultModel,
Aidge::PadImpl3D_cpu_forward_kernel<cpptype_t<DataType::Float64>,
cpptype_t<DataType::Float64>>,
nullptr});
REGISTRAR(PadImpl3D_cpu,
{{DataType::Int32, DataFormat::NCHW},
{DataType::Int32, DataFormat::NCHW}},
{Pad_ProdConso_cpu::defaultModel,
Aidge::PadImpl3D_cpu_forward_kernel<cpptype_t<DataType::Int32>,
cpptype_t<DataType::Int32>>,
nullptr});
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_PADIMPL_KERNELS_H_ */
@@ -74,3 +74,26 @@ template <>
void Aidge::PadImpl2D_cpu::backward() {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for Pad_Op<2> on backend cpu");
}
template <>
void Aidge::PadImpl3D_cpu::forward() {
const auto& op_ = dynamic_cast<const Pad_Op<3>&>(mOp);
AIDGE_ASSERT(op_.getInput(0), "missing input #0 in Pad Operator.");
// Find the correct kernel type
const auto impl = Registrar<PadImpl3D_cpu>::create(getBestMatch(getRequiredSpec()));
// Call kernel
impl.forward(op_.beginEndBorders(),
op_.borderType(),
op_.borderValue(),
op_.getInput(0)->template dims<5>(),
getCPUPtr(mOp.getRawInput(0)),
getCPUPtr(mOp.getRawOutput(0)));
}
template <>
void Aidge::PadImpl3D_cpu::backward() {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for Pad_Op<3> on backend cpu");
}
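For reference, a minimal usage sketch of this forward path, mirroring the calls made by the unit tests below (tensor contents and names are arbitrary):

// Sketch: run a 3D constant pad on the cpu backend.
auto pad = Pad<3>({1, 1, 1, 1, 1, 1}, "pad3d", PadBorderType::Zero);
auto op = std::static_pointer_cast<OperatorTensor>(pad->getOperator());
op->associateInput(0, input); // input: a 5-D NCDHW Tensor with backend "cpu"
op->setDataType(DataType::Float32);
op->setBackend("cpu");
op->forwardDims(true);
pad->forward(); // dispatches to PadImpl3D_cpu_forward_kernel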
@@ -9,6 +9,9 @@
*
********************************************************************************/
#include <aidge/utils/ArrayHelpers.hpp>
#include <aidge/utils/TensorUtils.hpp>
#include <aidge/utils/Types.h>
#include <memory>
#include <catch2/catch_test_macros.hpp>
@@ -22,550 +25,694 @@
using namespace Aidge;
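// Test helper: builds a Pad<DIM> node on the cpu backend, wires the given
// input tensor, checks that forwardDims() succeeds, and returns the operator.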
template <DimSize_t DIM>
static std::shared_ptr<OperatorTensor>
setupTestPad(std::array<DimSize_t, 2 * DIM> beginEndBorder,
const std::shared_ptr<Tensor> input,
PadBorderType padType,
double borderValue) {
input->setBackend("cpu");
std::shared_ptr<Node> padNode =
Pad<DIM>(beginEndBorder, "pad_op", padType, borderValue);
auto op = std::static_pointer_cast<OperatorTensor>(padNode->getOperator());
SECTION("Asymmetric Pad") {
const int pv = 0; // pad value
std::shared_ptr<Node> myPad = Pad<2>({1, 0, 0, 1}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
{
{
{{ 0, 1, 2, 3, 4},
{ 5, 6, 7, 8, 9},
{ 10, 11, 12, 13, 14},
{ 15, 16, 17, 18, 19},
{ 20, 21, 22, 23, 24}},
{{ 25, 26, 27, 28, 29},
{ 30, 31, 32, 33, 34},
{ 35, 36, 37, 38, 39},
{ 40, 41, 42, 43, 44},
{ 45, 46, 47, 48, 49}},
{{ 50, 51, 52, 53, 54},
{ 55, 56, 57, 58, 59},
{ 60, 61, 62, 63, 64},
{ 65, 66, 67, 68, 69},
{ 70, 71, 72, 73, 74}}
},
{
{{ 75, 76, 77, 78, 79},
{ 80, 81, 82, 83, 84},
{ 85, 86, 87, 88, 89},
{ 90, 91, 92, 93, 94},
{ 95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}
}
}
});
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,6,6> { //NCHW
{
{
{{ pv, pv, pv, pv, pv, pv},
{ 0, 1, 2, 3, 4, pv},
{ 5, 6, 7, 8, 9, pv},
{ 10, 11, 12, 13, 14, pv},
{ 15, 16, 17, 18, 19, pv},
{ 20, 21, 22, 23, 24, pv}},
{{ pv, pv, pv, pv, pv, pv},
{ 25, 26, 27, 28, 29, pv},
{ 30, 31, 32, 33, 34, pv},
{ 35, 36, 37, 38, 39, pv},
{ 40, 41, 42, 43, 44, pv},
{ 45, 46, 47, 48, 49, pv}},
{{ pv, pv, pv, pv, pv, pv},
{ 50, 51, 52, 53, 54, pv},
{ 55, 56, 57, 58, 59, pv},
{ 60, 61, 62, 63, 64, pv},
{ 65, 66, 67, 68, 69, pv},
{ 70, 71, 72, 73, 74, pv}}
},
{
{{ pv, pv, pv, pv, pv, pv},
{ 75, 76, 77, 78, 79, pv},
{ 80, 81, 82, 83, 84, pv},
{ 85, 86, 87, 88, 89, pv},
{ 90, 91, 92, 93, 94, pv},
{ 95, 96, 97, 98, 99, pv}},
{{ pv, pv, pv, pv, pv, pv},
{ 100, 101, 102, 103, 104, pv},
{ 105, 106, 107, 108, 109, pv},
{ 110, 111, 112, 113, 114, pv},
{ 115, 116, 117, 118, 119, pv},
{ 120, 121, 122, 123, 124, pv}},
{{ pv, pv, pv, pv, pv, pv},
{ 125, 126, 127, 128, 129, pv},
{ 130, 131, 132, 133, 134, pv},
{ 135, 136, 137, 138, 139, pv},
{ 140, 141, 142, 143, 144, pv},
{ 145, 146, 147, 148, 149, pv}}
}
}
});
myPad->getOperator()->associateInput(0,myInput);
myPad->getOperator()->setDataType(DataType::Int32);
myPad->getOperator()->setBackend("cpu");
myPad->forward();
// myPad->getOperator()->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *myOutput);
}
op->setDataType(DataType::Float32);
op->setBackend("cpu");
SECTION("Pad Edge") {
std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Edge);
auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
{
{
{{ 0, 1, 2, 3, 4},
{ 5, 6, 7, 8, 9},
{ 10, 11, 12, 13, 14},
{ 15, 16, 17, 18, 19},
{ 20, 21, 22, 23, 24}},
{{ 25, 26, 27, 28, 29},
{ 30, 31, 32, 33, 34},
{ 35, 36, 37, 38, 39},
{ 40, 41, 42, 43, 44},
{ 45, 46, 47, 48, 49}},
{{ 50, 51, 52, 53, 54},
{ 55, 56, 57, 58, 59},
{ 60, 61, 62, 63, 64},
{ 65, 66, 67, 68, 69},
{ 70, 71, 72, 73, 74}}
},
{
{{ 75, 76, 77, 78, 79},
{ 80, 81, 82, 83, 84},
{ 85, 86, 87, 88, 89},
{ 90, 91, 92, 93, 94},
{ 95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}
}
}
});
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
{
{
{{ 0, 0, 1, 2, 3, 4, 4},
{ 0, 0, 1, 2, 3, 4, 4},
{ 5, 5, 6, 7, 8, 9, 9},
{ 10, 10, 11, 12, 13, 14, 14},
{ 15, 15, 16, 17, 18, 19, 19},
{ 20, 20, 21, 22, 23, 24, 24},
{ 20, 20, 21, 22, 23, 24, 24}},
{{ 25, 25, 26, 27, 28, 29, 29},
{ 25, 25, 26, 27, 28, 29, 29},
{ 30, 30, 31, 32, 33, 34, 34},
{ 35, 35, 36, 37, 38, 39, 39},
{ 40, 40, 41, 42, 43, 44, 44},
{ 45, 45, 46, 47, 48, 49, 49},
{ 45, 45, 46, 47, 48, 49, 49}},
{{ 50, 50, 51, 52, 53, 54, 54},
{ 50, 50, 51, 52, 53, 54, 54},
{ 55, 55, 56, 57, 58, 59, 59},
{ 60, 60, 61, 62, 63, 64, 64},
{ 65, 65, 66, 67, 68, 69, 69},
{ 70, 70, 71, 72, 73, 74, 74},
{ 70, 70, 71, 72, 73, 74, 74}}
},
{
{{ 75, 75, 76, 77, 78, 79, 79},
{ 75, 75, 76, 77, 78, 79, 79},
{ 80, 80, 81, 82, 83, 84, 84},
{ 85, 85, 86, 87, 88, 89, 89},
{ 90, 90, 91, 92, 93, 94, 94},
{ 95, 95, 96, 97, 98, 99, 99},
{ 95, 95, 96, 97, 98, 99, 99}},
{{100, 100, 101, 102, 103, 104, 104},
{100, 100, 101, 102, 103, 104, 104},
{105, 105, 106, 107, 108, 109, 109},
{110, 110, 111, 112, 113, 114, 114},
{115, 115, 116, 117, 118, 119, 119},
{120, 120, 121, 122, 123, 124, 124},
{120, 120, 121, 122, 123, 124, 124}},
{{125, 125, 126, 127, 128, 129, 129},
{125, 125, 126, 127, 128, 129, 129},
{130, 130, 131, 132, 133, 134, 134},
{135, 135, 136, 137, 138, 139, 139},
{140, 140, 141, 142, 143, 144, 144},
{145, 145, 146, 147, 148, 149, 149},
{145, 145, 146, 147, 148, 149, 149}}
}
}
});
myPad->getOperator()->associateInput(0,myInput);
myPad->getOperator()->setDataType(DataType::Int32);
myPad->getOperator()->setBackend("cpu");
myPad->forward();
// myPad->getOperator()->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *myOutput);
}
op->associateInput(0, input);
SECTION("Pad Reflect") {
std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Reflect);
auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
{
{
{{ 0, 1, 2, 3, 4},
{ 5, 6, 7, 8, 9},
{ 10, 11, 12, 13, 14},
{ 15, 16, 17, 18, 19},
{ 20, 21, 22, 23, 24}},
{{ 25, 26, 27, 28, 29},
{ 30, 31, 32, 33, 34},
{ 35, 36, 37, 38, 39},
{ 40, 41, 42, 43, 44},
{ 45, 46, 47, 48, 49}},
{{ 50, 51, 52, 53, 54},
{ 55, 56, 57, 58, 59},
{ 60, 61, 62, 63, 64},
{ 65, 66, 67, 68, 69},
{ 70, 71, 72, 73, 74}}
},
{
{{ 75, 76, 77, 78, 79},
{ 80, 81, 82, 83, 84},
{ 85, 86, 87, 88, 89},
{ 90, 91, 92, 93, 94},
{ 95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}
}
}
});
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
{
{
{
{ 6, 5, 6, 7, 8, 9, 5},
{ 1, 0, 1, 2, 3, 4, 0},
{ 6, 5, 6, 7, 8, 9, 5},
{ 11, 10, 11, 12, 13, 14, 10},
{ 16, 15, 16, 17, 18, 19, 15},
{ 21, 20, 21, 22, 23, 24, 20},
{ 1, 0, 1, 2, 3, 4, 0}
},
{
{ 31, 30, 31, 32, 33, 34, 30},
{ 26, 25, 26, 27, 28, 29, 25},
{ 31, 30, 31, 32, 33, 34, 30},
{ 36, 35, 36, 37, 38, 39, 35},
{ 41, 40, 41, 42, 43, 44, 40},
{ 46, 45, 46, 47, 48, 49, 45},
{ 26, 25, 26, 27, 28, 29, 25}
},
{
{ 56, 55, 56, 57, 58, 59, 55},
{ 51, 50, 51, 52, 53, 54, 50},
{ 56, 55, 56, 57, 58, 59, 55},
{ 61, 60, 61, 62, 63, 64, 60},
{ 66, 65, 66, 67, 68, 69, 65},
{ 71, 70, 71, 72, 73, 74, 70},
{ 51, 50, 51, 52, 53, 54, 50}
}
},
{
{
{ 81, 80, 81, 82, 83, 84, 80},
{ 76, 75, 76, 77, 78, 79, 75},
{ 81, 80, 81, 82, 83, 84, 80},
{ 86, 85, 86, 87, 88, 89, 85},
{ 91, 90, 91, 92, 93, 94, 90},
{ 96, 95, 96, 97, 98, 99, 95},
{ 76, 75, 76, 77, 78, 79, 75}
},
{
{ 106, 105, 106, 107, 108, 109, 105},
{ 101, 100, 101, 102, 103, 104, 100},
{ 106, 105, 106, 107, 108, 109, 105},
{ 111, 110, 111, 112, 113, 114, 110},
{ 116, 115, 116, 117, 118, 119, 115},
{ 121, 120, 121, 122, 123, 124, 120},
{ 101, 100, 101, 102, 103, 104, 100}
},
{
{ 131, 130, 131, 132, 133, 134, 130},
{ 126, 125, 126, 127, 128, 129, 125},
{ 131, 130, 131, 132, 133, 134, 130},
{ 136, 135, 136, 137, 138, 139, 135},
{ 141, 140, 141, 142, 143, 144, 140},
{ 146, 145, 146, 147, 148, 149, 145},
{ 126, 125, 126, 127, 128, 129, 125}
}
}
}
});
myPad->getOperator()->associateInput(0,myInput);
myPad->getOperator()->setDataType(DataType::Int32);
myPad->getOperator()->setBackend("cpu");
myPad->forward();
op->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *myOutput);
}
REQUIRE_NOTHROW(op->forwardDims(true));
SECTION("Pad Wrap") {
std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Wrap);
auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
{
{
{{ 0, 1, 2, 3, 4},
{ 5, 6, 7, 8, 9},
{ 10, 11, 12, 13, 14},
{ 15, 16, 17, 18, 19},
{ 20, 21, 22, 23, 24}},
{{ 25, 26, 27, 28, 29},
{ 30, 31, 32, 33, 34},
{ 35, 36, 37, 38, 39},
{ 40, 41, 42, 43, 44},
{ 45, 46, 47, 48, 49}},
{{ 50, 51, 52, 53, 54},
{ 55, 56, 57, 58, 59},
{ 60, 61, 62, 63, 64},
{ 65, 66, 67, 68, 69},
{ 70, 71, 72, 73, 74}}
},
{
{{ 75, 76, 77, 78, 79},
{ 80, 81, 82, 83, 84},
{ 85, 86, 87, 88, 89},
{ 90, 91, 92, 93, 94},
{ 95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}
}
}
});
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,3,7,7> { //NCHW
{
{
{{ 24, 20, 21, 22, 23, 24, 20},
{ 4, 0, 1, 2, 3, 4, 0},
{ 9, 5, 6, 7, 8, 9, 5},
{ 14, 10, 11, 12, 13, 14, 10},
{ 19, 15, 16, 17, 18, 19, 15},
{ 24, 20, 21, 22, 23, 24, 20},
{ 4, 0, 1, 2, 3, 4, 0}},
{{ 49, 45, 46, 47, 48, 49, 45},
{ 29, 25, 26, 27, 28, 29, 25},
{ 34, 30, 31, 32, 33, 34, 30},
{ 39, 35, 36, 37, 38, 39, 35},
{ 44, 40, 41, 42, 43, 44, 40},
{ 49, 45, 46, 47, 48, 49, 45},
{ 29, 25, 26, 27, 28, 29, 25}},
{{ 74, 70, 71, 72, 73, 74, 70},
{ 54, 50, 51, 52, 53, 54, 50},
{ 59, 55, 56, 57, 58, 59, 55},
{ 64, 60, 61, 62, 63, 64, 60},
{ 69, 65, 66, 67, 68, 69, 65},
{ 74, 70, 71, 72, 73, 74, 70},
{ 54, 50, 51, 52, 53, 54, 50}}
},
{
{{ 99, 95, 96, 97, 98, 99, 95},
{ 79, 75, 76, 77, 78, 79, 75},
{ 84, 80, 81, 82, 83, 84, 80},
{ 89, 85, 86, 87, 88, 89, 85},
{ 94, 90, 91, 92, 93, 94, 90},
{ 99, 95, 96, 97, 98, 99, 95},
{ 79, 75, 76, 77, 78, 79, 75}},
{{124, 120, 121, 122, 123, 124, 120},
{104, 100, 101, 102, 103, 104, 100},
{109, 105, 106, 107, 108, 109, 105},
{114, 110, 111, 112, 113, 114, 110},
{119, 115, 116, 117, 118, 119, 115},
{124, 120, 121, 122, 123, 124, 120},
{104, 100, 101, 102, 103, 104, 100}},
{{149, 145, 146, 147, 148, 149, 145},
{129, 125, 126, 127, 128, 129, 125},
{134, 130, 131, 132, 133, 134, 130},
{139, 135, 136, 137, 138, 139, 135},
{144, 140, 141, 142, 143, 144, 140},
{149, 145, 146, 147, 148, 149, 145},
{129, 125, 126, 127, 128, 129, 125}}
}
return op;
}
TEST_CASE("[cpu/operator] Pad(forward)", "[Pad][CPU]") {
SECTION("2D") {
SECTION("Symmetric Pad") {
const int pv = 0; // pad value
std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1},
"mypad",
PadBorderType::Constant,
static_cast<double>(pv));
auto op =
std::static_pointer_cast<OperatorTensor>(myPad->getOperator());
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
Array4D<int, 2, 3, 5, 5>{// NCHW
{{{{0, 1, 2, 3, 4},
{5, 6, 7, 8, 9},
{10, 11, 12, 13, 14},
{15, 16, 17, 18, 19},
{20, 21, 22, 23, 24}},
{{25, 26, 27, 28, 29},
{30, 31, 32, 33, 34},
{35, 36, 37, 38, 39},
{40, 41, 42, 43, 44},
{45, 46, 47, 48, 49}},
{{50, 51, 52, 53, 54},
{55, 56, 57, 58, 59},
{60, 61, 62, 63, 64},
{65, 66, 67, 68, 69},
{70, 71, 72, 73, 74}}},
{{{75, 76, 77, 78, 79},
{80, 81, 82, 83, 84},
{85, 86, 87, 88, 89},
{90, 91, 92, 93, 94},
{95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}}}});
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(
Array4D<int, 2, 3, 7, 7>{// NCHW
{{{{pv, pv, pv, pv, pv, pv, pv},
{pv, 0, 1, 2, 3, 4, pv},
{pv, 5, 6, 7, 8, 9, pv},
{pv, 10, 11, 12, 13, 14, pv},
{pv, 15, 16, 17, 18, 19, pv},
{pv, 20, 21, 22, 23, 24, pv},
{pv, pv, pv, pv, pv, pv, pv}},
{{pv, pv, pv, pv, pv, pv, pv},
{pv, 25, 26, 27, 28, 29, pv},
{pv, 30, 31, 32, 33, 34, pv},
{pv, 35, 36, 37, 38, 39, pv},
{pv, 40, 41, 42, 43, 44, pv},
{pv, 45, 46, 47, 48, 49, pv},
{pv, pv, pv, pv, pv, pv, pv}},
{{pv, pv, pv, pv, pv, pv, pv},
{pv, 50, 51, 52, 53, 54, pv},
{pv, 55, 56, 57, 58, 59, pv},
{pv, 60, 61, 62, 63, 64, pv},
{pv, 65, 66, 67, 68, 69, pv},
{pv, 70, 71, 72, 73, 74, pv},
{pv, pv, pv, pv, pv, pv, pv}}},
{{{pv, pv, pv, pv, pv, pv, pv},
{pv, 75, 76, 77, 78, 79, pv},
{pv, 80, 81, 82, 83, 84, pv},
{pv, 85, 86, 87, 88, 89, pv},
{pv, 90, 91, 92, 93, 94, pv},
{pv, 95, 96, 97, 98, 99, pv},
{pv, pv, pv, pv, pv, pv, pv}},
{{pv, pv, pv, pv, pv, pv, pv},
{pv, 100, 101, 102, 103, 104, pv},
{pv, 105, 106, 107, 108, 109, pv},
{pv, 110, 111, 112, 113, 114, pv},
{pv, 115, 116, 117, 118, 119, pv},
{pv, 120, 121, 122, 123, 124, pv},
{pv, pv, pv, pv, pv, pv, pv}},
{{pv, pv, pv, pv, pv, pv, pv},
{pv, 125, 126, 127, 128, 129, pv},
{pv, 130, 131, 132, 133, 134, pv},
{pv, 135, 136, 137, 138, 139, pv},
{pv, 140, 141, 142, 143, 144, pv},
{pv, 145, 146, 147, 148, 149, pv},
{pv, pv, pv, pv, pv, pv, pv}}}}});
myPad->getOperator()->associateInput(0, myInput);
myPad->getOperator()->setDataType(DataType::Int32);
myPad->getOperator()->setBackend("cpu");
myPad->forward();
// myPad->getOperator()->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *myOutput);
}
SECTION("Asymmetric Pad") {
const int pv = 0; // pad value
std::shared_ptr<Node> myPad = Pad<2>({1, 0, 0, 1},
"mypad",
PadBorderType::Constant,
static_cast<double>(pv));
auto op =
std::static_pointer_cast<OperatorTensor>(myPad->getOperator());
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
Array4D<int, 2, 3, 5, 5>{// NCHW
{{{{0, 1, 2, 3, 4},
{5, 6, 7, 8, 9},
{10, 11, 12, 13, 14},
{15, 16, 17, 18, 19},
{20, 21, 22, 23, 24}},
{{25, 26, 27, 28, 29},
{30, 31, 32, 33, 34},
{35, 36, 37, 38, 39},
{40, 41, 42, 43, 44},
{45, 46, 47, 48, 49}},
{{50, 51, 52, 53, 54},
{55, 56, 57, 58, 59},
{60, 61, 62, 63, 64},
{65, 66, 67, 68, 69},
{70, 71, 72, 73, 74}}},
{{{75, 76, 77, 78, 79},
{80, 81, 82, 83, 84},
{85, 86, 87, 88, 89},
{90, 91, 92, 93, 94},
{95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}}}});
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(
Array4D<int, 2, 3, 6, 6>{// NCHW
{{{{pv, pv, pv, pv, pv, pv},
{0, 1, 2, 3, 4, pv},
{5, 6, 7, 8, 9, pv},
{10, 11, 12, 13, 14, pv},
{15, 16, 17, 18, 19, pv},
{20, 21, 22, 23, 24, pv}},
{{pv, pv, pv, pv, pv, pv},
{25, 26, 27, 28, 29, pv},
{30, 31, 32, 33, 34, pv},
{35, 36, 37, 38, 39, pv},
{40, 41, 42, 43, 44, pv},
{45, 46, 47, 48, 49, pv}},
{{pv, pv, pv, pv, pv, pv},
{50, 51, 52, 53, 54, pv},
{55, 56, 57, 58, 59, pv},
{60, 61, 62, 63, 64, pv},
{65, 66, 67, 68, 69, pv},
{70, 71, 72, 73, 74, pv}}},
{{{pv, pv, pv, pv, pv, pv},
{75, 76, 77, 78, 79, pv},
{80, 81, 82, 83, 84, pv},
{85, 86, 87, 88, 89, pv},
{90, 91, 92, 93, 94, pv},
{95, 96, 97, 98, 99, pv}},
{{pv, pv, pv, pv, pv, pv},
{100, 101, 102, 103, 104, pv},
{105, 106, 107, 108, 109, pv},
{110, 111, 112, 113, 114, pv},
{115, 116, 117, 118, 119, pv},
{120, 121, 122, 123, 124, pv}},
{{pv, pv, pv, pv, pv, pv},
{125, 126, 127, 128, 129, pv},
{130, 131, 132, 133, 134, pv},
{135, 136, 137, 138, 139, pv},
{140, 141, 142, 143, 144, pv},
{145, 146, 147, 148, 149, pv}}}}});
myPad->getOperator()->associateInput(0, myInput);
myPad->getOperator()->setDataType(DataType::Int32);
myPad->getOperator()->setBackend("cpu");
myPad->forward();
// myPad->getOperator()->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *myOutput);
}
SECTION("Pad Edge") {
std::shared_ptr<Node> myPad =
Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Edge);
auto op =
std::static_pointer_cast<OperatorTensor>(myPad->getOperator());
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
Array4D<int, 2, 3, 5, 5>{// NCHW
{{{{0, 1, 2, 3, 4},
{5, 6, 7, 8, 9},
{10, 11, 12, 13, 14},
{15, 16, 17, 18, 19},
{20, 21, 22, 23, 24}},
{{25, 26, 27, 28, 29},
{30, 31, 32, 33, 34},
{35, 36, 37, 38, 39},
{40, 41, 42, 43, 44},
{45, 46, 47, 48, 49}},
{{50, 51, 52, 53, 54},
{55, 56, 57, 58, 59},
{60, 61, 62, 63, 64},
{65, 66, 67, 68, 69},
{70, 71, 72, 73, 74}}},
{{{75, 76, 77, 78, 79},
{80, 81, 82, 83, 84},
{85, 86, 87, 88, 89},
{90, 91, 92, 93, 94},
{95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}}}});
std::shared_ptr<Tensor> myOutput =
std::make_shared<Tensor>(Array4D<int, 2, 3, 7, 7>{
// NCHW
{{{{0, 0, 1, 2, 3, 4, 4},
{0, 0, 1, 2, 3, 4, 4},
{5, 5, 6, 7, 8, 9, 9},
{10, 10, 11, 12, 13, 14, 14},
{15, 15, 16, 17, 18, 19, 19},
{20, 20, 21, 22, 23, 24, 24},
{20, 20, 21, 22, 23, 24, 24}},
{{25, 25, 26, 27, 28, 29, 29},
{25, 25, 26, 27, 28, 29, 29},
{30, 30, 31, 32, 33, 34, 34},
{35, 35, 36, 37, 38, 39, 39},
{40, 40, 41, 42, 43, 44, 44},
{45, 45, 46, 47, 48, 49, 49},
{45, 45, 46, 47, 48, 49, 49}},
{{50, 50, 51, 52, 53, 54, 54},
{50, 50, 51, 52, 53, 54, 54},
{55, 55, 56, 57, 58, 59, 59},
{60, 60, 61, 62, 63, 64, 64},
{65, 65, 66, 67, 68, 69, 69},
{70, 70, 71, 72, 73, 74, 74},
{70, 70, 71, 72, 73, 74, 74}}},
{{{75, 75, 76, 77, 78, 79, 79},
{75, 75, 76, 77, 78, 79, 79},
{80, 80, 81, 82, 83, 84, 84},
{85, 85, 86, 87, 88, 89, 89},
{90, 90, 91, 92, 93, 94, 94},
{95, 95, 96, 97, 98, 99, 99},
{95, 95, 96, 97, 98, 99, 99}},
{{100, 100, 101, 102, 103, 104, 104},
{100, 100, 101, 102, 103, 104, 104},
{105, 105, 106, 107, 108, 109, 109},
{110, 110, 111, 112, 113, 114, 114},
{115, 115, 116, 117, 118, 119, 119},
{120, 120, 121, 122, 123, 124, 124},
{120, 120, 121, 122, 123, 124, 124}},
{{125, 125, 126, 127, 128, 129, 129},
{125, 125, 126, 127, 128, 129, 129},
{130, 130, 131, 132, 133, 134, 134},
{135, 135, 136, 137, 138, 139, 139},
{140, 140, 141, 142, 143, 144, 144},
{145, 145, 146, 147, 148, 149, 149},
{145, 145, 146, 147, 148, 149, 149}}}}});
myPad->getOperator()->associateInput(0, myInput);
myPad->getOperator()->setDataType(DataType::Int32);
myPad->getOperator()->setBackend("cpu");
myPad->forward();
// myPad->getOperator()->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *myOutput);
}
SECTION("Pad Reflect") {
std::shared_ptr<Node> myPad =
Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Reflect);
auto op =
std::static_pointer_cast<OperatorTensor>(myPad->getOperator());
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
Array4D<int, 2, 3, 5, 5>{// NCHW
{{{{0, 1, 2, 3, 4},
{5, 6, 7, 8, 9},
{10, 11, 12, 13, 14},
{15, 16, 17, 18, 19},
{20, 21, 22, 23, 24}},
{{25, 26, 27, 28, 29},
{30, 31, 32, 33, 34},
{35, 36, 37, 38, 39},
{40, 41, 42, 43, 44},
{45, 46, 47, 48, 49}},
{{50, 51, 52, 53, 54},
{55, 56, 57, 58, 59},
{60, 61, 62, 63, 64},
{65, 66, 67, 68, 69},
{70, 71, 72, 73, 74}}},
{{{75, 76, 77, 78, 79},
{80, 81, 82, 83, 84},
{85, 86, 87, 88, 89},
{90, 91, 92, 93, 94},
{95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}}}});
std::shared_ptr<Tensor> myOutput =
std::make_shared<Tensor>(Array4D<int, 2, 3, 7, 7>{
// NCHW
{{{{6, 5, 6, 7, 8, 9, 5},
{1, 0, 1, 2, 3, 4, 0},
{6, 5, 6, 7, 8, 9, 5},
{11, 10, 11, 12, 13, 14, 10},
{16, 15, 16, 17, 18, 19, 15},
{21, 20, 21, 22, 23, 24, 20},
{1, 0, 1, 2, 3, 4, 0}},
{{31, 30, 31, 32, 33, 34, 30},
{26, 25, 26, 27, 28, 29, 25},
{31, 30, 31, 32, 33, 34, 30},
{36, 35, 36, 37, 38, 39, 35},
{41, 40, 41, 42, 43, 44, 40},
{46, 45, 46, 47, 48, 49, 45},
{26, 25, 26, 27, 28, 29, 25}},
{{56, 55, 56, 57, 58, 59, 55},
{51, 50, 51, 52, 53, 54, 50},
{56, 55, 56, 57, 58, 59, 55},
{61, 60, 61, 62, 63, 64, 60},
{66, 65, 66, 67, 68, 69, 65},
{71, 70, 71, 72, 73, 74, 70},
{51, 50, 51, 52, 53, 54, 50}}},
{{{81, 80, 81, 82, 83, 84, 80},
{76, 75, 76, 77, 78, 79, 75},
{81, 80, 81, 82, 83, 84, 80},
{86, 85, 86, 87, 88, 89, 85},
{91, 90, 91, 92, 93, 94, 90},
{96, 95, 96, 97, 98, 99, 95},
{76, 75, 76, 77, 78, 79, 75}},
{{106, 105, 106, 107, 108, 109, 105},
{101, 100, 101, 102, 103, 104, 100},
{106, 105, 106, 107, 108, 109, 105},
{111, 110, 111, 112, 113, 114, 110},
{116, 115, 116, 117, 118, 119, 115},
{121, 120, 121, 122, 123, 124, 120},
{101, 100, 101, 102, 103, 104, 100}},
{{131, 130, 131, 132, 133, 134, 130},
{126, 125, 126, 127, 128, 129, 125},
{131, 130, 131, 132, 133, 134, 130},
{136, 135, 136, 137, 138, 139, 135},
{141, 140, 141, 142, 143, 144, 140},
{146, 145, 146, 147, 148, 149, 145},
{126, 125, 126, 127, 128, 129, 125}}}}});
myPad->getOperator()->associateInput(0, myInput);
myPad->getOperator()->setDataType(DataType::Int32);
myPad->getOperator()->setBackend("cpu");
myPad->forward();
// op->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *myOutput);
}
SECTION("Pad Wrap") {
std::shared_ptr<Node> myPad =
Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Wrap);
auto op =
std::static_pointer_cast<OperatorTensor>(myPad->getOperator());
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
Array4D<int, 2, 3, 5, 5>{// NCHW
{{{{0, 1, 2, 3, 4},
{5, 6, 7, 8, 9},
{10, 11, 12, 13, 14},
{15, 16, 17, 18, 19},
{20, 21, 22, 23, 24}},
{{25, 26, 27, 28, 29},
{30, 31, 32, 33, 34},
{35, 36, 37, 38, 39},
{40, 41, 42, 43, 44},
{45, 46, 47, 48, 49}},
{{50, 51, 52, 53, 54},
{55, 56, 57, 58, 59},
{60, 61, 62, 63, 64},
{65, 66, 67, 68, 69},
{70, 71, 72, 73, 74}}},
{{{75, 76, 77, 78, 79},
{80, 81, 82, 83, 84},
{85, 86, 87, 88, 89},
{90, 91, 92, 93, 94},
{95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}}}});
std::shared_ptr<Tensor> myOutput =
std::make_shared<Tensor>(Array4D<int, 2, 3, 7, 7>{
// NCHW
{{{{24, 20, 21, 22, 23, 24, 20},
{4, 0, 1, 2, 3, 4, 0},
{9, 5, 6, 7, 8, 9, 5},
{14, 10, 11, 12, 13, 14, 10},
{19, 15, 16, 17, 18, 19, 15},
{24, 20, 21, 22, 23, 24, 20},
{4, 0, 1, 2, 3, 4, 0}},
{{49, 45, 46, 47, 48, 49, 45},
{29, 25, 26, 27, 28, 29, 25},
{34, 30, 31, 32, 33, 34, 30},
{39, 35, 36, 37, 38, 39, 35},
{44, 40, 41, 42, 43, 44, 40},
{49, 45, 46, 47, 48, 49, 45},
{29, 25, 26, 27, 28, 29, 25}},
{{74, 70, 71, 72, 73, 74, 70},
{54, 50, 51, 52, 53, 54, 50},
{59, 55, 56, 57, 58, 59, 55},
{64, 60, 61, 62, 63, 64, 60},
{69, 65, 66, 67, 68, 69, 65},
{74, 70, 71, 72, 73, 74, 70},
{54, 50, 51, 52, 53, 54, 50}}},
{{{99, 95, 96, 97, 98, 99, 95},
{79, 75, 76, 77, 78, 79, 75},
{84, 80, 81, 82, 83, 84, 80},
{89, 85, 86, 87, 88, 89, 85},
{94, 90, 91, 92, 93, 94, 90},
{99, 95, 96, 97, 98, 99, 95},
{79, 75, 76, 77, 78, 79, 75}},
{{124, 120, 121, 122, 123, 124, 120},
{104, 100, 101, 102, 103, 104, 100},
{109, 105, 106, 107, 108, 109, 105},
{114, 110, 111, 112, 113, 114, 110},
{119, 115, 116, 117, 118, 119, 115},
{124, 120, 121, 122, 123, 124, 120},
{104, 100, 101, 102, 103, 104, 100}},
{{149, 145, 146, 147, 148, 149, 145},
{129, 125, 126, 127, 128, 129, 125},
{134, 130, 131, 132, 133, 134, 130},
{139, 135, 136, 137, 138, 139, 135},
{144, 140, 141, 142, 143, 144, 140},
{149, 145, 146, 147, 148, 149, 145},
{129, 125, 126, 127, 128, 129, 125}}}}});
myPad->getOperator()->associateInput(0, myInput);
myPad->getOperator()->setDataType(DataType::Int32);
myPad->getOperator()->setBackend("cpu");
myPad->forward();
// myPad->getOperator()->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *myOutput);
}
}
SECTION("3D") {
constexpr DimSize_t DIM = 3;
SECTION("PadBorderType::Constant") {
constexpr DimSize_t batch = 1;
constexpr DimSize_t channel = 1;
constexpr std::array<DimSize_t, DIM> inDataSize = {2, 2, 2};
constexpr std::array<DimSize_t, 2 * DIM> beginEndBorder =
{1, 1, 1, 1, 1, 1};
constexpr std::array<DimSize_t, DIM> outDataSize = {
inDataSize[0] + beginEndBorder[0] + beginEndBorder[3],
inDataSize[1] + beginEndBorder[1] + beginEndBorder[4],
inDataSize[2] + beginEndBorder[2] + beginEndBorder[5]};
constexpr double borderValue = 10;
auto input = std::make_shared<Tensor>(
Array5D<float,
batch,
channel,
inDataSize[0],
inDataSize[1],
inDataSize[2]>({{{{{{-1, 4}, {-2, -5}},
{{-2, 4}, {2, -2}}}}}}));
auto padOp = setupTestPad<DIM>(beginEndBorder,
input,
PadBorderType::Constant,
borderValue);
REQUIRE_NOTHROW(padOp->forward());
Tensor expectedOutput(
Array5D<float,
batch,
channel,
outDataSize[0],
outDataSize[1],
outDataSize[2]>({{{{{{10, 10, 10, 10},
{10, 10, 10, 10},
{10, 10, 10, 10},
{10, 10, 10, 10}},
{{10, 10, 10, 10},
{10, -1, 4, 10},
{10, -2, -5, 10},
{10, 10, 10, 10}},
{{10, 10, 10, 10},
{10, -2, 4, 10},
{10, 2, -2, 10},
{10, 10, 10, 10}},
{{10, 10, 10, 10},
{10, 10, 10, 10},
{10, 10, 10, 10},
{10, 10, 10, 10}}}}}}));
CHECK(approxEq<float>(*padOp->getOutput(0), expectedOutput));
}
SECTION("PadBorderType::Edge") {
SECTION("small test") {
constexpr DimSize_t batch = 1;
constexpr DimSize_t channel = 1;
constexpr std::array<DimSize_t, DIM> inDataSize = {1, 2, 2};
constexpr std::array<DimSize_t, 2 * DIM> beginEndBorder =
{1, 1, 1, 1, 1, 1};
constexpr std::array<DimSize_t, DIM> outDataSize = {
inDataSize[0] + beginEndBorder[0] + beginEndBorder[3],
inDataSize[1] + beginEndBorder[1] + beginEndBorder[4],
inDataSize[2] + beginEndBorder[2] + beginEndBorder[5]};
auto input = std::make_shared<Tensor>(
Array5D<float,
batch,
channel,
inDataSize[0],
inDataSize[1],
inDataSize[2]>({{{{{{-1, 4}, {-2, -5}}}}}}));
auto padOp = setupTestPad<DIM>(beginEndBorder,
input,
PadBorderType::Edge,
0);
REQUIRE_NOTHROW(padOp->forward());
Tensor expectedOutput(
Array5D<float,
batch,
channel,
outDataSize[0],
outDataSize[1],
outDataSize[2]>({{{{{{-1, -1, 4, 4},
{-1, -1, 4, 4},
{-2, -2, -5, -5},
{-2, -2, -5, -5}},
{{-1, -1, 4, 4},
{-1, -1, 4, 4},
{-2, -2, -5, -5},
{-2, -2, -5, -5}},
{{-1, -1, 4, 4},
{-1, -1, 4, 4},
{-2, -2, -5, -5},
{-2, -2, -5, -5}}}}}}));
CHECK(approxEq<float>(*padOp->getOutput(0), expectedOutput));
}
}
SECTION("PadBorderType::Reflect") {
constexpr DimSize_t batch = 1;
constexpr DimSize_t channel = 1;
constexpr std::array<DimSize_t, DIM> inDataSize = {1, 3, 3};
constexpr std::array<DimSize_t, 2 * DIM> beginEndBorder =
{0, 0, 2, 0, 0, 2};
constexpr std::array<DimSize_t, DIM> outDataSize = {
inDataSize[0] + beginEndBorder[0] + beginEndBorder[3],
inDataSize[1] + beginEndBorder[1] + beginEndBorder[4],
inDataSize[2] + beginEndBorder[2] + beginEndBorder[5]};
auto input = std::make_shared<Tensor>(Array5D<float,
batch,
channel,
inDataSize[0],
inDataSize[1],
inDataSize[2]>(
{{{{{{-1, 4, -2}, {-5, -2, 4}, {2, -2, 2}}}}}}));
auto padOp = setupTestPad<DIM>(beginEndBorder,
input,
PadBorderType::Reflect,
0);
REQUIRE_NOTHROW(padOp->forward());
Tensor expectedOutput(
Array5D<float,
batch,
channel,
outDataSize[0],
outDataSize[1],
outDataSize[2]>({{{{{{-2, 4, -1, 4, -2, 4, -1},
{4, -2, -5, -2, 4, -2, -5},
{2, -2, 2, -2, 2, -2, 2}}}}}}));
CHECK(approxEq<float>(*padOp->getOutput(0), expectedOutput));
}
SECTION("PadBorderType::Wrap") {
constexpr DimSize_t batch = 1;
constexpr DimSize_t channel = 1;
constexpr std::array<DimSize_t, DIM> inDataSize = {1, 3, 3};
constexpr std::array<DimSize_t, 2 * DIM> beginEndBorder =
{0, 0, 2, 0, 0, 2};
constexpr std::array<DimSize_t, DIM> outDataSize = {
inDataSize[0] + beginEndBorder[0] + beginEndBorder[3],
inDataSize[1] + beginEndBorder[1] + beginEndBorder[4],
inDataSize[2] + beginEndBorder[2] + beginEndBorder[5]};
auto input = std::make_shared<Tensor>(Array5D<float,
batch,
channel,
inDataSize[0],
inDataSize[1],
inDataSize[2]>(
{{{{{{-1, 4, -2}, {-5, -2, 4}, {2, -2, 2}}}}}}));
auto padOp = setupTestPad<DIM>(beginEndBorder,
input,
PadBorderType::Wrap,
0);
REQUIRE_NOTHROW(padOp->forward());
Tensor expectedOutput(
Array5D<float,
batch,
channel,
outDataSize[0],
outDataSize[1],
outDataSize[2]>({{{{{{4, -2, -1, 4, -2, -1, 4},
{-2, 4, -5, -2, 4, -5, -2},
{-2, 2, 2, -2, 2, 2, -2}}}}}}));
CHECK(approxEq<float>(*padOp->getOutput(0), expectedOutput));
}
}
}