Commit 68064166 authored by Grégoire Kubler, committed by Maxence Naud

feat : new resize operator

parent 1d2d8d00
2 merge requests: !279 v0.4.0, !242 Extends the functionalities of Resize Operator
@@ -9,60 +9,214 @@
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_Resize_H_
#define AIDGE_CORE_OPERATOR_Resize_H_
#ifndef AIDGE_CORE_OPERATOR_RESIZE_H_
#define AIDGE_CORE_OPERATOR_RESIZE_H_
#include <memory>
#include <string>
#include <vector>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Interpolation.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
class Resize_Op : public OperatorTensor,
public Registrable<Resize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Resize_Op&)>>{
/** @brief Attributes of the Resize aidge operator. */
enum class ResizeAttr {
// antialias,
// axes,
CoordinateTransformationMode,
CubicCoeffA,
// excludeOutside,
// extrapolation_value,
// keep_aspect_ratio_policy,
InterpolationMode,
};
public:
/**
* @brief Resize operator: up/downscales an input tensor, given scales or target sizes.
* @verbatim
* The output size can be computed in 2 ways:
* 1. The image is rescaled proportionally to the input size:
*    output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)
* 2. The output dimensions are given directly via the Sizes input (#4).
*
* Hence, either the Scales or the Sizes input can be defined; if both are
* connected, the operator will throw an error.
*
* Resize takes (up to) 4 different inputs :
* #1 Input to resize :
* N-D tensor.
*
* #2 ROI (optional) :
* 1-D tensor of coordinates given as [start1, …, startN, end1, …, endN]
* where N is the rank of X or the length of axes, if provided. The RoIs’
* coordinates are normalized in the coordinate system of the input image.
* If not set, the default ROI is the entire image.
* #3 scales (optional) - tensor(float):
* The scale array along each dimension.
* The number of elements of ‘scales’ should be the same as the rank of
* input ‘X’ or the length of ‘axes’, if provided. Accepted values: (0,inf)
* - (0,1) : downsampling
* - 1 : identity
* - (1,inf) : upsampling
* #4. Sizes - tensor(int64):
* Target size of the output tensor.
* Its interpretation depends on the ‘keep_aspect_ratio_policy’ value.
* The number of elements of ‘sizes’ should be the same as either :
* - The rank of input ‘X’
* - The length of ‘axes’ attribute, if provided.
* @endverbatim
* @warning Only one of ‘scales’ and ‘sizes’ can be specified.
* @param coordinate_transformation_mode coordinate transformation mode used
* to map coordinates of the output tensor onto the input tensor.
* @param cubic_coeff_a the "a" coefficient of cubic interpolation. Most often
* it is set to -0.75.
* @param InterpolationMode type of interpolation (currently only cubic
* interpolation is supported)
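*
* Worked example (illustrative): with an input of dims (1, 1, 10, 10), the
* default ROI and scales = (1.0, 1.0, 2.0, 3.0), each output dimension is
* floor(input_dimension * scale), giving output dims (1, 1, 20, 30). With
* sizes = (1, 1, 75, 75) instead, the output dims are exactly (1, 1, 75, 75),
* regardless of the input dims.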
*/
class Resize_Op
: public OperatorTensor,
public Registrable<
Resize_Op,
std::string,
std::function<std::shared_ptr<OperatorImpl>(const Resize_Op &)>> {
private:
using Attributes_ =
StaticAttributes<ResizeAttr,
Interpolation::CoordinateTransformation,
float,
Interpolation::Mode>;
template <ResizeAttr e>
using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
public:
static const std::string Type;
Resize_Op();
/**
* @brief Creates a Resize operator.
* This node can take 4 different inputs, detailed further in the class
* doxygen.
* 1. Input to resize
* 2. ROI NOT SUPPORTED (optional)
* 3. scales (optional) - tensor(float)
* 4. sizes - tensor(int64)
* @param[in] coordTransfoMode coordinate transformation mode used to map
* coordinates of the output tensor onto the input tensor.
* @param[in] interpol_mode type of interpolation used for up/downsampling.
* @param[in] cubic_coef_a the "a" coefficient of cubic interpolation. Only
* used if interpol_mode = Interpolation::Mode::Cubic.
* @warning The Scales and Sizes inputs cannot be set simultaneously. If both
* are set, forward will fail.
*/
explicit Resize_Op(Interpolation::CoordinateTransformation coordTransfoMode,
Interpolation::Mode interpol_mode =
Interpolation::Mode::NearestRoundPreferFloor,
float cubic_coef_a = -.75f)
: OperatorTensor(Type,
{InputCategory::Data,
InputCategory::OptionalData,
InputCategory::OptionalData,
InputCategory::OptionalData},
1),
mAttributes(std::make_shared<Attributes_>(
attr<ResizeAttr::CubicCoeffA>(cubic_coef_a),
attr<ResizeAttr::CoordinateTransformationMode>(coordTransfoMode),
attr<ResizeAttr::InterpolationMode>(interpol_mode))) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
* but not its input tensors (the new operator has no input associated).
* @brief Copy-constructor. Copy the operator attributes and its output
* tensor(s), but not its input tensors: the new operator has no input
* associated.
* @param op Operator to copy.
*/
Resize_Op(const Resize_Op& op);
Resize_Op(const Resize_Op &op)
: OperatorTensor(op), mAttributes(op.mAttributes) {
if (!op.backend().empty()) {
SET_IMPL_MACRO(Resize_Op, *this, op.backend());
} else {
mImpl = nullptr;
}
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::Resize_Op
*/
std::shared_ptr<Operator> clone() const override;
std::shared_ptr<Operator> clone() const override final {
return std::make_shared<Resize_Op>(*this);
}
bool dimsForwarded() const override final;
bool forwardDims(bool allowDataDependency = false) override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
std::set<std::string> getAvailableBackends() const override;
void setBackend(const std::string &name,
DeviceIdx_t device = 0) override final;
std::set<std::string> getAvailableBackends() const override {
return Registrar<Resize_Op>::getKeys();
}
Interpolation::CoordinateTransformation &coordinateTransformationMode() {
return mAttributes
->template getAttr<ResizeAttr::CoordinateTransformationMode>();
}
float &cubicCoefA() {
return mAttributes->template getAttr<ResizeAttr::CubicCoeffA>();
}
Interpolation::Mode &interpolationmode() {
return mAttributes->template getAttr<ResizeAttr::InterpolationMode>();
}
// bool &excludeOutside() {
// return mAttributes->template getAttr<ResizeAttr::excludeOutside>();
// }
static const std::vector<std::string> getInputsName(){
static const std::vector<std::string> getInputsName() {
// roi, scales and sizes are inputs, even if they are usually treated as constant parameters
return {"data_input", "roi", "scales", "sizes"};
}
static const std::vector<std::string> getOutputsName(){
static const std::vector<std::string> getOutputsName() {
return {"data_output"};
}
};
std::shared_ptr<Node> Resize(const std::string &name = "");
} // namespace Aidge
#endif /* AIDGE_CORE_OPERATOR_Resize_H_ */
\ No newline at end of file
/**
* @brief Creates a node that contains a Resize operator.
* This node can take 4 different inputs, detailed further in the class doxygen.
* #0 Input to resize
* #1 ROI NOT SUPPORTED (optional) - Tensor(double|float|float16)
* #2 scales (optional) - tensor(float)
* #3 sizes - tensor(int64)
* @param[in] coordTransfoMode coordinate transformation mode used to map
* coordinates of the output tensor onto the input tensor.
* @param[in] interpolMode type of interpolation used for up/downsampling.
* @param[in] cubicCoefA the "a" coefficient of cubic interpolation. Only
* used if interpolMode = Interpolation::Mode::Cubic.
* @warning The Scales and Sizes inputs cannot be set simultaneously. If both
* are set, forward will fail.
* @return NodePtr
*/
std::shared_ptr<Node>
Resize(Interpolation::CoordinateTransformation coordTransfoMode =
Interpolation::CoordinateTransformation::HalfPixel,
Interpolation::Mode interpolMode =
Interpolation::Mode::NearestRoundPreferFloor,
float cubicCoefA = -.75f,
const std::string &name = "");
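
// A minimal usage sketch (illustrative, mirroring the unit tests rather than
// a prescribed API): create a Resize node with default attributes and drive
// the output size through the Scales input (#2). The Sizes input (#3) can be
// used instead, with an Int64 tensor.
//
//     std::shared_ptr<Node> resize = Resize();
//     auto op = std::static_pointer_cast<OperatorTensor>(resize->getOperator());
//
//     std::vector<float> scales = {1.0f, 1.0f, 2.0f, 2.0f};
//     auto scalesTensor = std::make_shared<Tensor>();
//     scalesTensor->setBackend("cpu");
//     scalesTensor->setDataType(DataType::Float32);
//     scalesTensor->resize(std::vector<std::size_t>({scales.size()}));
//     scalesTensor->getImpl()->copyFromHost(scales.data(), scales.size());
//     op->associateInput(2, scalesTensor); // input #0 receives the tensor to resize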
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
"coordinateTransformationMode",
"cubicCoeffA",
"InterpolationMode",
};
}
#endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
@@ -11,20 +11,55 @@
#include <pybind11/pybind11.h>
#include "aidge/operator/Resize.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Resize.hpp"
#include "aidge/utils/Registrar.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Resize(py::module& m) {
py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
.def_static("get_inputs_name", &Resize_Op::getInputsName)
.def_static("get_outputs_name", &Resize_Op::getOutputsName)
.def_readonly_static("Type", &Resize_Op::Type);
void init_Resize(py::module &m) {
auto pyResizeOp =
py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(
m, "ResizeOp", py::multiple_inheritance())
.def_static("get_inputs_name", &Resize_Op::getInputsName)
.def_static("get_outputs_name", &Resize_Op::getOutputsName)
.def_readonly_static("Type", &Resize_Op::Type);
declare_registrable<Resize_Op>(m, "ResizeOp");
declare_registrable<Resize_Op>(m, "ResizeOp");
// Enum binding under the ResizeOp class
py::enum_<Resize_Op::CoordinateTransformation>(pyResizeOp,
"coordinate_transformation")
.value("half_pixel", Resize_Op::CoordinateTransformation::HalfPixel)
.value("half_pixel_symmetric",
Resize_Op::CoordinateTransformation::HalfPixelSymmetric)
.value("half_pixel_pytorch",
Resize_Op::CoordinateTransformation::PytorchHalfPixel)
.value("align_corners", Resize_Op::CoordinateTransformation::AlignCorners)
.value("asymetric", Resize_Op::CoordinateTransformation::Asymetric)
.export_values();
m.def("Resize", &Resize, py::arg("name") = "");
m.def("Resize", &Resize,
py::arg("coord_transfo_mode") =
Resize_Op::CoordinateTransformation::HalfPixel,
py::arg("interpolation_mode") =
Interpolation::Mode::NearestRoundPreferFloor,
py::arg("cubic_interpolation_coefficient_a") = -.75f,
py::arg("name") = "", R"mydelimiter(
Initialize a node containing a Resize operator.
This node can take 4 different inputs.
#0 Input to resize
#1 ROI NOT SUPPORTED (optional) - Tensor(double|float|float16)
#2 scales (optional) - tensor(float)
#3 sizes - tensor(int64)
:param coord_transfo_mode : Coordinate transformation mode used to map coordinates of the output tensor onto the input tensor.
:type coord_transfo_mode : CoordinateTransformation
:param interpolation_mode : Type of interpolation used in case of up/downsampling.
:type interpolation_mode : Interpolation::Mode
:param cubic_interpolation_coefficient_a : "a" coefficient of cubic interpolation. Only used if interpolation_mode = Interpolation::Mode::Cubic.
:type cubic_interpolation_coefficient_a : float
:param name : Name of the node.
:type name : str
)mydelimiter");
}
} // namespace Aidge
} // namespace Aidge
@@ -11,56 +11,29 @@
#include "aidge/operator/Resize.hpp"
#include <cstddef> // std::size_t
#include <cstdint> // std::int64_t
#include <stdexcept> // std::runtime_error
#include <algorithm>
#include <cstddef> // std::size_t
#include <cstdint> // std::int64_t
#include <fmt/core.h>
#include <stdexcept> // std::runtime_error
#include <string>
#include <vector>
#include <fmt/core.h>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Interpolation.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
const std::string Aidge::Resize_Op::Type = "Resize";
Aidge::Resize_Op::Resize_Op()
: OperatorTensor(Type,
{InputCategory::Data,
InputCategory::OptionalData,
InputCategory::OptionalData,
InputCategory::OptionalData},
1) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
* but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Aidge::Resize_Op::Resize_Op(const Aidge::Resize_Op& op)
: OperatorTensor(op)
{
if (!op.backend().empty()) {
SET_IMPL_MACRO(Resize_Op, *this, op.backend());
}
else {
mImpl = nullptr;
}
}
namespace Aidge {
std::shared_ptr<Aidge::Operator> Aidge::Resize_Op::clone() const {
return std::make_shared<Resize_Op>(*this);
}
const std::string Resize_Op::Type = "Resize";
bool Aidge::Resize_Op::dimsForwarded() const {
bool Resize_Op::dimsForwarded() const {
// in case of ROI add getInput(1) condition
if ((getInput(1) && !getInput(1)->undefined())
|| (getInput(2) && !getInput(2)->undefined())
|| (getInput(3) && !getInput(3)->undefined())
)
{
if ((getInput(1) && !getInput(1)->undefined()) ||
(getInput(2) && !getInput(2)->undefined()) ||
(getInput(3) && !getInput(3)->undefined())) {
// output dims are data dependent
return false;
}
@@ -68,93 +41,167 @@ bool Aidge::Resize_Op::dimsForwarded() const {
return OperatorTensor::dimsForwarded();
}
bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
if (inputsAssociated()) {
AIDGE_ASSERT(getInput(0)->nbDims() == 4,
"input tensor must have dimensions = 4 (batch, channel, height, width).");
const bool input1ROIPresent = getInput(1) && !getInput(1)->undefined();
const bool input2ScalesPresent = getInput(2) && !getInput(2)->undefined();
const bool input3SizesPresent = getInput(3) && !getInput(3)->undefined();
AIDGE_ASSERT(input2ScalesPresent != input3SizesPresent, "Only one of scales and sizes can be specified.")
bool Resize_Op::forwardDims(bool allowDataDependency) {
if (!inputsAssociated()) {
return false;
}
if (input1ROIPresent) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Input #1 (ROI) is given and it is not supported.");
/** @brief input #0 */
int16_t inDataIdx = 0;
/** @brief input #1 */
int16_t inROIIdx = 1;
/** @brief input #2 */
int16_t inScalesIdx = 2;
/** @brief input #3 */
int16_t inSizesIdx = 3;
std::vector<DimSize_t> outDims = getInput(inDataIdx)->dims();
/////////////////////////////////////////////////////
// Ensuring operator is connected properly
const bool inputROIPresent =
getInput(inROIIdx) && !getInput(inROIIdx)->undefined();
const bool inputScalesPresent =
getInput(inScalesIdx) && !getInput(inScalesIdx)->undefined();
const bool inputSizesPresent =
getInput(inSizesIdx) && !getInput(inSizesIdx)->undefined();
AIDGE_ASSERT(getInput(inDataIdx)->nbDims() == 4,
"{}: Input tensor must have dimensions = 4 (batch, channel, "
"height, width).",
type());
AIDGE_ASSERT(
inputScalesPresent || inputSizesPresent,
"{}: Either input Scales(#2) or input Sizes(#3) must be defined.",
type());
AIDGE_ASSERT(inputScalesPresent != inputSizesPresent,
"{}: Only one of the two inputs can be defined between input "
"Scales(#2) "
"and Sizes(#3). They cannot be specified at the same time.",
type())
////////////////////////////////////////////
// Case resize is done using Scales formula
if (inputScalesPresent) {
if (!allowDataDependency) {
Log::warn("{}: cannot execute forwardDims() as the output "
"dimensions depends on the input #2",
type());
return false;
}
else if (input2ScalesPresent) {
if (!allowDataDependency) {
Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #2");
return false;
}
AIDGE_ASSERT(getInput(0)->nbDims() == getInput(2)->size(),
"input #0 and input #2 (Scales) must have the same dimensions.");
std::vector<DimSize_t> outDims = getInput(0)->dims();
const std::vector<DimSize_t> inDims = getInput(0)->dims();
std::shared_ptr<Tensor> fallback;
const auto& scales = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
for (std::size_t dim=0; dim < getInput(2)->size(); ++dim) {
outDims[dim] = inDims[dim]*static_cast<int64_t*>(scales.getImpl()->hostPtr())[dim];
}
mOutputs[0]->resize(outDims);
return true;
std::vector<int> ROI;
if (inputROIPresent) {
AIDGE_THROW_OR_ABORT(
std::runtime_error,
"{}: input ROI(#{}) is present but it is not supported.",
type(),
inROIIdx);
        // ROI = std::vector<int>(0, getInput(inDataIdx)->size() - 1);
        // // magic numbers explained above
        // size_t ROIExpectedSize = (getInput(inDataIdx)->nbDims() - 1) * 2;
        // ROI.resize(ROIExpectedSize);
        // AIDGE_ASSERT(getInput(inROIIdx)->size() == ROIExpectedSize,
        //              "{}: Input #{} (ROI) should be ordered as follows: "
        //              "1-D tensor given as [start1, …, startN, end1, …, endN], "
        //              "where N is the rank of the input tensor. "
        //              "Hence, its size should be input_tensor.nbDims() * 2 = {}. "
        //              "Received the following size: {}",
        //              type(), inROIIdx, ROIExpectedSize,
        //              getInput(inROIIdx)->size());
}
else if (input3SizesPresent) {
if (!allowDataDependency) {
Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #3");
return false;
}
AIDGE_ASSERT(getInput(0)->nbDims() == getInput(3)->size(),
"input #0 and input #3 (Sizes) must have the same dimensions.");
std::vector<DimSize_t> outDims = getInput(0)->dims();
std::shared_ptr<Tensor> fallback;
const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
}
AIDGE_ASSERT(
getInput(inDataIdx)->nbDims() == getInput(inScalesIdx)->size(),
"{}: input #0 and input #2 (Scales) must have the "
"same dimensions.",
type());
AIDGE_ASSERT(
getInput(inScalesIdx)->dataType() == DataType::Float32,
"{}: Wrong data type for input Scales(#{}), supported dtype: {}.",
type(),
inScalesIdx,
DataType::Float32);
std::shared_ptr<Tensor> fallback;
const auto &scales =
getInput(inScalesIdx)
->refCastFrom(fallback,
DataType::Float32,
getInput(inScalesIdx)->backend());
const std::vector<DimSize_t> inDims = getInput(inDataIdx)->dims();
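        // Worked example (illustrative): inDims = {4, 4, 10, 10} with
        // scales = {1, 1, 2, 3} yields outDims = {4, 4, 20, 30}.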
for (std::size_t dim = 0; dim < getInput(inScalesIdx)->size(); ++dim) {
auto scaleAlongDim = scales.get<float>(dim);
AIDGE_ASSERT(scaleAlongDim > 0,
"{}: all scales values must be sctricly positive, "
"received {}.",
type(),
scaleAlongDim);
outDims[dim] =
static_cast<DimSize_t>(inDims[dim] * scaleAlongDim);
}
mOutputs[0]->resize(outDims);
return true;
///////////////////////////////////////////////////////////////
// case where resize output dims are given via the Size input
} else {
if (!allowDataDependency) {
Log::warn("{}: cannot execute forwardDims() as the output "
"dimensions depend on the input sizes(#{})",
type(),
inSizesIdx);
return false;
}
else {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Error: Either Input #2 or Input #3 must be present.");
AIDGE_ASSERT(
getInput(inDataIdx)->nbDims() == getInput(inSizesIdx)->size(),
"input #0 and input #3 (Sizes) must have the "
"same dimensions.");
AIDGE_ASSERT(
getInput(inSizesIdx)->dataType() == DataType::Int64,
"{}: Wrong data type for input Sizes(#{}), supported dtype: {}.",
type(),
inSizesIdx,
DataType::Int64);
std::shared_ptr<Tensor> fallback;
const auto &sizes = getInput(inSizesIdx)
->refCastFrom(fallback,
DataType::Int64,
getInput(inSizesIdx)->backend());
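        // Worked example (illustrative): sizes = {1, 1, 75, 75} yields
        // outDims = {1, 1, 75, 75}, regardless of the input dims.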
for (std::size_t dim = 0; dim < getInput(inSizesIdx)->size(); ++dim) {
outDims[dim] = sizes.get<int64_t>(dim);
}
}
return false;
mOutputs[0]->resize(outDims);
return true;
}
void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
void Resize_Op::setBackend(const std::string &name, DeviceIdx_t device) {
SET_IMPL_MACRO(Resize_Op, *this, name);
mOutputs[0]->setBackend(name, device);
// By default, automatically set backend for all inputs: roi, scales and sizes
if(getInput(1)) {
// By default, automatically set backend for all inputs: roi, scales and
// sizes
if (getInput(1)) {
getInput(1)->setBackend(name, device);
}
if(getInput(2)) {
if (getInput(2)) {
getInput(2)->setBackend(name, device);
}
if(getInput(3)) {
if (getInput(3)) {
getInput(3)->setBackend(name, device);
}
}
std::set<std::string> Aidge::Resize_Op::getAvailableBackends() const {
return Registrar<Resize_Op>::getKeys();
std::shared_ptr<Node>
Resize(Interpolation::CoordinateTransformation coordTransfoMode,
Interpolation::Mode interpolMode,
float cubicCoefA,
const std::string &name) {
return std::make_shared<Node>(std::make_shared<Resize_Op>(coordTransfoMode,
interpolMode,
cubicCoefA),
name);
}
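
// Example (illustrative sketch): create a named Resize node that uses cubic
// interpolation. Interpolation::Mode::Cubic is assumed to be available, as
// referenced by the attribute documentation.
//
//     auto n = Resize(Interpolation::CoordinateTransformation::HalfPixel,
//                     Interpolation::Mode::Cubic,
//                     -0.75f,
//                     "resize_cubic");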
/////////////////////////////////////////////
std::shared_ptr<Aidge::Node> Aidge::Resize(const std::string &name) {
return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
}
\ No newline at end of file
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/Resize.hpp"
#include <catch2/catch_test_macros.hpp>
#include <cstddef> // std::size_t
#include <cstdint>
#include <memory>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Log.hpp"
namespace Aidge {
/**
* Test the resize operation of the given operator with the specified input
* dimensions, scales or sizes, and expected output dimensions.
*
* @param op The operator to test.
* @param input_dims The input dimensions to use for the test.
* @param scales The scales to use for the test; leave empty to use sizes.
* @param sizes The sizes to use for the test; leave empty to use scales.
* @param expected_dims The expected output dimensions for the test.
*/
void setupTestResize(const std::shared_ptr<OperatorTensor> &op,
const std::vector<Aidge::DimSize_t> &input_dims,
const std::vector<float> &scales,
const std::vector<int64_t> &sizes,
const std::vector<Aidge::DimSize_t> &expected_dims) {
Log::setConsoleLevel(Log::Level::Info);
Log::info("\n\n\nResize test:");
Log::info("\tInput_dims: {}", input_dims);
Log::info("\tScales: {}", scales);
Log::info("\tSizes: {}", sizes);
Log::info("\tExpected output dims: {}", expected_dims);
std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>();
input_data->setBackend("cpu");
input_data->resize(input_dims);
input_data->zeros();
op->associateInput(0, input_data);
const std::shared_ptr<Tensor> tensor_values = std::make_shared<Tensor>();
tensor_values->setBackend("cpu");
if (!scales.empty()) {
tensor_values->setDataType(DataType::Float32);
tensor_values->resize(std::vector<std::size_t>({scales.size()}));
tensor_values->getImpl()->copyFromHost(scales.data(), scales.size());
op->associateInput(2, tensor_values);
}
if (!sizes.empty()) {
tensor_values->setDataType(DataType::Int64);
tensor_values->resize(std::vector<std::size_t>({sizes.size()}));
tensor_values->getImpl()->copyFromHost(sizes.data(), sizes.size());
op->associateInput(3, tensor_values);
}
}
TEST_CASE("[core/operator] Resize_Op(forwardDims)",
"[Resize][forwardDimsScales]") {
std::vector<Aidge::DimSize_t> input_dims;
std::vector<float> scales;
std::vector<int64_t> sizes;
std::vector<Aidge::DimSize_t> expected_dims;
std::shared_ptr<Node> myResize = Resize();
auto op = std::static_pointer_cast<OperatorTensor>(myResize->getOperator());
SECTION("Un-connected input leads to failure.") {
REQUIRE_THROWS(op->forwardDims());
}
SECTION("Connecting both Scales & Sizes leads to failure") {
input_dims = std::vector<Aidge::DimSize_t>({4, 1, 2, 2});
scales = std::vector<float>({.5, 3, 2, 2});
sizes = std::vector<int64_t>({});
expected_dims = std::vector<Aidge::DimSize_t>({2, 3, 4, 4});
setupTestResize(op, input_dims, scales, sizes, expected_dims);
REQUIRE_NOTHROW(op->forwardDims(true));
REQUIRE(op->getOutput(0)->dims() == expected_dims);
}
SECTION("Input Scales") {
SECTION("TEST 1") {
input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
scales = std::vector<float>({1, 1, 2, 2});
sizes = std::vector<int64_t>({});
expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 4, 4});
setupTestResize(op, input_dims, scales, sizes, expected_dims);
REQUIRE_NOTHROW(op->forwardDims(true));
REQUIRE(op->getOutput(0)->dims() == expected_dims);
}
SECTION("TEST 2") {
input_dims = std::vector<Aidge::DimSize_t>({4, 4, 10, 10});
scales = std::vector<float>({1, 1, 2, 3});
sizes = std::vector<int64_t>({});
expected_dims = std::vector<Aidge::DimSize_t>({4, 4, 20, 30});
setupTestResize(op, input_dims, scales, sizes, expected_dims);
REQUIRE_NOTHROW(op->forwardDims(true));
REQUIRE(op->getOutput(0)->dims() == expected_dims);
}
SECTION("TEST 3") {
input_dims = std::vector<Aidge::DimSize_t>({4, 2, 10, 10});
scales = std::vector<float>({1, 1, 0.5, 0.5});
sizes = std::vector<int64_t>({});
expected_dims = std::vector<Aidge::DimSize_t>({4, 2, 5, 5});
setupTestResize(op, input_dims, scales, sizes, expected_dims);
REQUIRE_NOTHROW(op->forwardDims(true));
REQUIRE(op->getOutput(0)->dims() == expected_dims);
}
SECTION("TEST 4") {
input_dims = std::vector<Aidge::DimSize_t>({11, 11, 4, 4});
scales = std::vector<float>({1, 1, 0.3, 0.3});
sizes = std::vector<int64_t>({});
expected_dims = std::vector<Aidge::DimSize_t>({11, 11, 1, 1});
setupTestResize(op, input_dims, scales, sizes, expected_dims);
REQUIRE_NOTHROW(op->forwardDims(true));
REQUIRE(op->getOutput(0)->dims() == expected_dims);
}
}
SECTION("Input Sizes") {
SECTION("TEST 1") {
input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
scales = std::vector<float>({});
sizes = std::vector<int64_t>({4, 5, 8, 8});
expected_dims = std::vector<Aidge::DimSize_t>({4, 5, 8, 8});
setupTestResize(op, input_dims, scales, sizes, expected_dims);
REQUIRE_NOTHROW(op->forwardDims(true));
REQUIRE(op->getOutput(0)->dims() == expected_dims);
}
SECTION("TEST 2") {
input_dims = std::vector<Aidge::DimSize_t>({60, 60, 30, 30});
scales = std::vector<float>({});
sizes = std::vector<int64_t>({1, 1, 75, 75});
expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 75, 75});
setupTestResize(op, input_dims, scales, sizes, expected_dims);
REQUIRE_NOTHROW(op->forwardDims(true));
REQUIRE(op->getOutput(0)->dims() == expected_dims);
}
SECTION("TEST 3") {
input_dims = std::vector<Aidge::DimSize_t>({11, 11, 20, 20});
scales = std::vector<float>({});
sizes = std::vector<int64_t>({19, 6, 8, 8});
expected_dims = std::vector<Aidge::DimSize_t>({19, 6, 8, 8});
setupTestResize(op, input_dims, scales, sizes, expected_dims);
REQUIRE_NOTHROW(op->forwardDims(true));
REQUIRE(op->getOutput(0)->dims() == expected_dims);
}
SECTION("TEST 4") {
input_dims = std::vector<Aidge::DimSize_t>({43, 211, 22, 22});
scales = std::vector<float>({});
sizes = std::vector<int64_t>({1, 1, 10, 10});
expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 10, 10});
setupTestResize(op, input_dims, scales, sizes, expected_dims);
REQUIRE_NOTHROW(op->forwardDims(true));
REQUIRE(op->getOutput(0)->dims() == expected_dims);
}
}
}
} // namespace Aidge