diff --git a/include/aidge/data/Interpolation.hpp b/include/aidge/data/Interpolation.hpp index 897bffe08b4e00000fb9f1888f93090a936cb86d..2d53ebdd0dd5141acc9a3bce8e906f42f7a557a2 100644 --- a/include/aidge/data/Interpolation.hpp +++ b/include/aidge/data/Interpolation.hpp @@ -92,10 +92,10 @@ class Interpolation { enum Mode { Cubic, Linear, - NearestRoundPreferFloor, - NearestRoundPreferCeil, - NearestFloor, - NearestCeil + RoundPreferFloor, + RoundPreferCeil, + Floor, + Ceil }; /* diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp index d588a5332138df953afd6cc7f5e49c4ef16d1a1e..5c84f52e052e67ca27bfc851f510e522d485e4b7 100644 --- a/include/aidge/data/Tensor.hpp +++ b/include/aidge/data/Tensor.hpp @@ -25,11 +25,10 @@ #include "aidge/backend/TensorImpl.hpp" #include "aidge/data/Data.hpp" - +#include "aidge/utils/ArrayHelpers.hpp" #include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" -#include "aidge/utils/ArrayHelpers.hpp" namespace Aidge { /** @@ -623,12 +622,12 @@ public: * @brief From the the 1D contiguous index, return the coordinate of an element in the tensor. * Beware: do not use this function with the storage index! * - * @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor. + * @param index 1D contiguous index of the value considering a flatten, contiguous, tensor. * @return std::vector<DimSize_t> */ - std::vector<std::size_t> getCoord(std::size_t flatIdx) const { - return Tensor::getCoord(mDims, flatIdx); - } + static std::vector<std::size_t> + toCoord(const std::vector<Aidge::DimSize_t> &dimensions, std::size_t index); + /** * @brief From the the 1D contiguous index, return the coordinate of an element in the tensor. @@ -637,8 +636,13 @@ public: * @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor. * @return std::vector<DimSize_t> */ - static std::vector<std::size_t> - getCoord(const std::vector<Aidge::DimSize_t> &dimensions, std::size_t flattenedIdx); + std::vector<std::size_t> getCoord(std::size_t index) const { + if (isInBounds(mDims, index)) { + return toCoord(mDims, index); + } else { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Out of bound coordinates."); + } + } /** * @brief From the coordinate returns the 1D contiguous index of an element in the tensor. @@ -648,12 +652,10 @@ public: * if the tensor is contiguous! * Note that the coordIdx may be an empty vector. * - * @param coordIdx Coordinate to an element in the tensor + * @param coords Coordinate to an element in the tensor * @return DimSize_t Contiguous index */ - std::size_t getIdx(const std::vector<std::size_t>& coordIdx) const { - return Tensor::getIdx(mDims,coordIdx); - } + static std::size_t toIndex(const std::vector<DimSize_t>& dimensions, const std::vector<std::size_t>& coords); /** * @brief From the coordinate returns the 1D contiguous index of an element in the tensor. 
@@ -666,18 +668,26 @@ public: * @param coordIdx Coordinate to an element in the tensor * @return DimSize_t Contiguous index */ - static std::size_t getIdx(const std::vector<DimSize_t>& tensorDims, const std::vector<std::size_t>& coords); - + std::size_t getIdx(const std::vector<std::size_t>& coords) const { + if (isInBounds<std::size_t>(mDims, coords)) { + return toIndex(mDims, coords); + } else { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Out of bound coordinates."); + } + } + /** * @brief check if index is in bound of given tensor dimensions * @warning this function is templated in order to welcome cases like interpolation where indexes are not integers. * However, the only types accepted are floating, integer & size_t * @param tensorDims : tensor dimensions - * @param coords : coords of the tensor you want to flattened index of + * @param coords : coords of the tensor you want to flattened index of * @return true if all coords are in bound. False otherwise */ template<typename T> - static bool isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<T>& coords); + static bool isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<T>& coords); + + static bool isInBounds(const std::vector<DimSize_t>& dimensions, const std::size_t index); /** * @brief From the coordinate returns the 1D storage index of an element in the tensor. diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp index 20a27073c4cad68186d1629df9f6b7ddbbaa217e..a6346671b3e32b97c915f3bbbb40da57c76756f4 100644 --- a/include/aidge/operator/Resize.hpp +++ b/include/aidge/operator/Resize.hpp @@ -118,12 +118,11 @@ class Resize_Op * set, forward will fail. * @return NodePtr */ - explicit Resize_Op( + Resize_Op( Interpolation::CoordinateTransformation coordTransfoMode, - Interpolation::Mode interpol_mode = - Interpolation::Mode::NearestRoundPreferFloor, + Interpolation::Mode interpol_mode = Interpolation::Mode::RoundPreferFloor, float cubic_coef_a = -.75f, - PadBorderType paddingMode = PadBorderType::Constant) + PadBorderType paddingMode = PadBorderType::Edge) : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, @@ -223,10 +222,12 @@ class Resize_Op * @return NodePtr */ std::shared_ptr<Node> -Resize(Interpolation::CoordinateTransformation coordTransfoMode = +Resize(std::vector<float> scale = std::vector<float>(), + std::vector<std::size_t> size = std::vector<std::size_t>(), + Interpolation::CoordinateTransformation coordTransfoMode = Interpolation::CoordinateTransformation::HalfPixel, Interpolation::Mode interpolMode = - Interpolation::Mode::NearestRoundPreferFloor, + Interpolation::Mode::RoundPreferFloor, float cubicCoefA = -.75f, const std::string &name = ""); diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp index 1bfe0929bf67bb0c6d3b893f3dbaf6993dcfd6ff..88312280d572302ecce4157c34db0ba1efd52da9 100644 --- a/include/aidge/utils/TensorUtils.hpp +++ b/include/aidge/utils/TensorUtils.hpp @@ -49,6 +49,7 @@ bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float } return true; } -} + +} // namespace Aidge #endif /* AIDGE_CORE_UTILS_TENSOR_UTILS_H_s */ diff --git a/python_binding/data/pybind_Interpolation.cpp b/python_binding/data/pybind_Interpolation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0839d1c04925f46595630191da9291217d40f10f --- /dev/null +++ b/python_binding/data/pybind_Interpolation.cpp @@ -0,0 +1,42 @@ 
+/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <pybind11/pybind11.h> + +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/data/Interpolation.hpp" +#include "aidge/utils/Registrar.hpp" + +namespace py = pybind11; +namespace Aidge { + +void init_Interpolation(py::module &m) { + auto pyInterpolation = py::class_<Aidge::Interpolation>(m, "Interpolation"); + + py::enum_<Interpolation::Mode>(pyInterpolation, "Mode") + .value("CUBIC", Interpolation::Mode::Cubic) + .value("LINEAR", Interpolation::Mode::Linear) + .value("ROUND_PREFER_FLOOR", Interpolation::Mode::RoundPreferFloor) + .value("ROUND_PREFER_CEIL", Interpolation::Mode::RoundPreferCeil) + .value("FLOOR", Interpolation::Mode::Floor) + .value("CEIL", Interpolation::Mode::Ceil) + .export_values(); + + py::enum_<Interpolation::CoordinateTransformation>(pyInterpolation, "CoordinateTransformation") + .value("HALF_PIXEL", Interpolation::CoordinateTransformation::HalfPixel) + .value("HALF_PIXEL_SYMETRIC", Interpolation::CoordinateTransformation::HalfPixelSymmetric) + .value("PYTORCH_HALF_PIXEL", Interpolation::CoordinateTransformation::PytorchHalfPixel) + .value("ALIGN_CORNERS", Interpolation::CoordinateTransformation::AlignCorners) + .value("ASYMMETRIC", Interpolation::CoordinateTransformation::Asymmetric) + .export_values(); +} + +} // namespace Aidge diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp index a6dcf8aa5d3d9440735cf41bde49abf34a3410b1..0ac42f507b722d5006a36ea59816766d54164c8d 100644 --- a/python_binding/data/pybind_Tensor.cpp +++ b/python_binding/data/pybind_Tensor.cpp @@ -329,8 +329,9 @@ void init_Tensor(py::module& m){ .def("capacity", &Tensor::capacity) .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize, py::arg("dims"), py::arg("strides") = std::vector<DimSize_t>()) .def("has_impl", &Tensor::hasImpl) - .def("get_coord", (std::vector<std::size_t> (Tensor::*)(const std::size_t) &Tensor::getCoord, py::arg("flatIdx")) - .def("get_idx",(std::size_t (Tensor::*)(const std::vector<std::size_t> &) &Tensor::getIdx, py::arg("coords")) + .def("get_coord", (std::vector<std::size_t> (Tensor::*)(const std::size_t)) &Tensor::getCoord, py::arg("flatIdx")) + .def("get_idx",(std::size_t (Tensor::*)(const std::vector<std::size_t> &)) &Tensor::getIdx, py::arg("coords")) + .def_static("get_available_backends", &Tensor::getAvailableBackends) .def("undefined", &Tensor::undefined) .def("cpy_transpose", (void (Tensor::*)(const Tensor& src, const std::vector<DimSize_t>& transpose)) &Tensor::copyTranspose, py::arg("src"), py::arg("transpose")) diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp index 4674c90493e80a23a37e008ceffba24c52e3d0b1..2aa62609835a7042dd0df54f28b453b7e33a3b5b 100644 --- a/python_binding/operator/pybind_Resize.cpp +++ b/python_binding/operator/pybind_Resize.cpp @@ -9,9 +9,13 @@ * ********************************************************************************/ +#include <cstddef> // std::size_t + #include <pybind11/pybind11.h> +#include "aidge/data/Interpolation.hpp" #include 
"aidge/operator/OperatorTensor.hpp" +#include "aidge/operator/Pad.hpp" #include "aidge/operator/Resize.hpp" #include "aidge/utils/Registrar.hpp" @@ -19,47 +23,37 @@ namespace py = pybind11; namespace Aidge { void init_Resize(py::module &m) { - auto pyResizeOp = - py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>( + py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>( m, "ResizeOp", py::multiple_inheritance()) + .def(py::init<Interpolation::CoordinateTransformation, Interpolation::Mode, float, PadBorderType>(), py::arg("coordinate_transformation_mode"), py::arg("interpolation_mode"), py::arg("cubic_coeff_a") = -0.75f, py::arg("padding_mode") = PadBorderType::Edge) .def_static("get_inputs_name", &Resize_Op::getInputsName) .def_static("get_outputs_name", &Resize_Op::getOutputsName) .def_readonly_static("Type", &Resize_Op::Type); declare_registrable<Resize_Op>(m, "ResizeOp"); - // Enum binding under BitShiftOp class - py::enum_<Resize_Op::CoordinateTransformation>(pyResizeOp, - "coordinate_transformation") - .value("half_pixel", Resize_Op::CoordinateTransformation::HalfPixel) - .value("half_pixel_symmetric", - Resize_Op::CoordinateTransformation::HalfPixelSymmetric) - .value("half_pixel_pytorch", - Resize_Op::CoordinateTransformation::PytorchHalfPixel) - .value("align_corners", Resize_Op::CoordinateTransformation::AlignCorners) - .value("asymetric", Resize_Op::CoordinateTransformation::Asymetric) - .export_values(); - m.def("Resize", &Resize, + py::arg("scale") = std::vector<float>({}), + py::arg("size") = std::vector<std::size_t>({}), py::arg("coord_transfo_mode") = - Resize_Op::CoordinateTransformation::HalfPixel, + Interpolation::CoordinateTransformation::HalfPixel, py::arg("interpolation_mode") = - Interpolation::Mode::NearestRoundPreferFloor, + Interpolation::Mode::RoundPreferFloor, py::arg("cubic_interpolation_coefficient_a") = -.75f, py::arg("name") = "", R"mydelimiter( Initialize a node containing a Resize operator. This node can take 4 different inputs. - #0 Input to resize + #0 Input to resize #1 ROI NOT SUPPORTED (optional) - Tensor(double|float|float16) - #2 scales (optional) - tensor(float): #3 sizes - tensor(int64) + #2 scales (optional) - tensor(float): #3 sizes - tensor(int64) #3 sizes - tensor(int64) :type coordinate_transformation_mode : :py:class: List[Int] - :param interpolationMode : Type of interpolation used in case of upsampling + :param interpolationMode : Type of interpolation used in case of upsampling :type interpolationMode : Interpolation::Mode :param cubic_coeff_a : "A" coefficient of cubic interpolation. Only used if interpolation_mode = Interpolation::Mode::Cubic :type cubic_coeff_a : float :param name : name of the node. 
:type name : str -)mydelimiter"); + )mydelimiter"); } } // namespace Aidge diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp index 4f7ffea5fefe299a2670fd7bcb816c86070bf315..006eeb289f25570ddf337f048b05816102624028 100644 --- a/python_binding/pybind_core.cpp +++ b/python_binding/pybind_core.cpp @@ -20,6 +20,7 @@ void init_Random(py::module&); void init_Data(py::module&); void init_Database(py::module&); void init_DataProvider(py::module&); +void init_Interpolation(py::module&); void init_Tensor(py::module&); void init_TensorImpl(py::module&); void init_Attributes(py::module&); @@ -107,6 +108,7 @@ void init_Aidge(py::module& m) { init_Data(m); init_Database(m); init_DataProvider(m); + init_Interpolation(m); init_Tensor(m); init_TensorImpl(m); init_Attributes(m); diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp index 6cb113a3c6d06076006b9755b96185044cd8aafa..928046dc07866273a43567facecb3adfe3c0d3f4 100644 --- a/src/data/Tensor.cpp +++ b/src/data/Tensor.cpp @@ -673,65 +673,64 @@ const Tensor& Tensor::ref(std::shared_ptr<Tensor>& fallback, } } -std::set<std::string> Tensor::getAvailableBackends() { - std::set<std::string> backendsList; - for (const auto& tupleKey : Registrar<Tensor>::getKeys()) { - backendsList.insert(std::get<0>(tupleKey)); - } - return backendsList; -} -/////////////////////////////////////////////////////////////////////////////////////////////////////////// -// COORDINATES MANIPULATION std::vector<std::size_t> -Tensor::getCoord(const std::vector<DimSize_t> &tensorDims, - std::size_t flatIdx) { - std::vector<std::size_t> coordIdx(tensorDims.size()); - std::size_t i = tensorDims.size(); - - while (i-- > 0) { - coordIdx[i] = (flatIdx % tensorDims[i]); - flatIdx/=tensorDims[i]; - } - return coordIdx; +Tensor::toCoord(const std::vector<DimSize_t>& dimensions, std::size_t index) { + std::vector<std::size_t> coord(dimensions.size()); + std::size_t i = dimensions.size(); + + while (i-- > 0) { + coord[i] = (index % dimensions[i]); + index /= dimensions[i]; + } + return coord; } -std::size_t Tensor::getIdx(const std::vector<DimSize_t> &tensorDims, const std::vector<std::size_t>& coordIdx) { - AIDGE_ASSERT(coordIdx.size() <= tensorDims.size(), "Tensor::getIdx(): Coordinates does not match number of dimensions.\n\tCoords : {}\n\tDimensions: {}",coordIdx, tensorDims); - std::size_t flatIdx = 0; - for(std::size_t i = 0; i < tensorDims.size(); ++i) { - auto coord = i < coordIdx.size() ? coordIdx[i]: 0; - AIDGE_ASSERT(coord < tensorDims[i], - "Tensor::getIdx(): Coordinates dimensions ({})" - " does not fit the dimensions of the tensor ({})", - coordIdx, - tensorDims); - auto nextDimSize = i + 1 < tensorDims.size() ? 
tensorDims[i + 1]: 1; - flatIdx = (flatIdx + coord) * nextDimSize; +std::size_t Tensor::toIndex(const std::vector<DimSize_t> &dimensions, const std::vector<std::size_t>& coords) { + AIDGE_ASSERT(coords.size() == dimensions.size(), "Tensor::getIdx(): Coordinates does not match number of dimensions.\n\tCoords : {}\n\tDimensions: {}",coords, dimensions); + std::size_t index = 0; + std::size_t dimensions_s = 1; // stride + std::size_t i = dimensions.size(); + while (i-- > 0) { + index += coords[i] * dimensions_s; + dimensions_s *= dimensions[i]; } - return flatIdx; + return index; } template<typename T> -bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<T>& coords){ - AIDGE_ASSERT(coords.size() == tensorDims.size(), +bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<T>& coords){ + AIDGE_ASSERT(coords.size() == dimensions.size(), "Coordinates({}) to compare have not " "the same number of dimension as tensor dimensions({}), aborting.", coords, - tensorDims); + dimensions); bool isInBound {true}; for(std::size_t i = 0 ; i < coords.size() && isInBound; ++i ){ - isInBound = coords[i] >= 0 && coords[i] < static_cast<T>(tensorDims[i]) ; + isInBound = coords[i] >= 0 && coords[i] < static_cast<T>(dimensions[i]) ; } return isInBound; } -template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<int16_t>& coords); -template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<int32_t>& coords); -template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<int64_t>& coords); -template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<DimSize_t>& coords); -template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<float>& coords); -template bool Tensor::isInBounds(const std::vector<DimSize_t>& tensorDims, const std::vector<double>& coords); +bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::size_t index){ + return index < std::accumulate(dimensions.cbegin(), dimensions.cend(), std::size_t(1), std::multiplies<std::size_t>()); +} + +template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int16_t>& coords); +template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int32_t>& coords); +template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int64_t>& coords); +template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::size_t>& coords); +template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<float>& coords); +template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<double>& coords); + + +std::set<std::string> Tensor::getAvailableBackends() { + std::set<std::string> backendsList; + for (const auto& tupleKey : Registrar<Tensor>::getKeys()) { + backendsList.insert(std::get<0>(tupleKey)); + } + return backendsList; +} } // namespace Aidge diff --git a/src/data/interpolation.cpp b/src/data/interpolation.cpp index 59d40cbb8b8194c277417ca715a427a752cbc5cb..ce5431a2b3bd742f98a169666811bd5d373a5c24 100644 --- a/src/data/interpolation.cpp +++ b/src/data/interpolation.cpp @@ -165,7 +165,7 @@ Interpolation::retrieveNeighbours(const T *tensorValues, if (Tensor::isInBounds(tensorDims, neighbourCoords)) { // cast from unsigned to signed 
won't create problem as we ensured // that all neighboursCoords values are > 0 with isInBounds - value = tensorValues[Tensor::getIdx( + value = tensorValues[Tensor::toIndex( tensorDims, std::vector<DimSize_t>(neighbourCoords.begin(), neighbourCoords.end()))]; @@ -177,7 +177,7 @@ Interpolation::retrieveNeighbours(const T *tensorValues, ((neighbourCoords[j] >= static_cast<std::int64_t>(tensorDims[j])) ? (tensorDims[j] - 1) : neighbourCoords[j]); } - value = tensorValues[Tensor::getIdx( + value = tensorValues[Tensor::toIndex( tensorDims, std::vector<DimSize_t>(neighbourCoords.begin(), neighbourCoords.end()))]; diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp index 2bde27f143c7a1ca245650118afc2f7e6670274e..a56370ac7edb74443992210ac0a39576a803298d 100644 --- a/src/operator/Resize.cpp +++ b/src/operator/Resize.cpp @@ -22,6 +22,8 @@ #include "aidge/data/Data.hpp" #include "aidge/data/Interpolation.hpp" #include "aidge/data/Tensor.hpp" +#include "aidge/operator/Producer.hpp" +#include "aidge/utils/ArrayHelpers.hpp" #include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/Types.h" @@ -42,100 +44,74 @@ bool Resize_Op::dimsForwarded() const { } bool Resize_Op::forwardDims(bool allowDataDependency) { + if (!allowDataDependency) { + Log::warn("{}: cannot execute forwardDims() as the output " + "dimensions are computed from some input data.", + type()); + return false; + } + if (!inputsAssociated()) { return false; } /** @brief input #0 */ - int16_t inDataIdx = 0; + constexpr IOIndex_t inDataIdx = 0; /** @brief input #1 */ - int16_t inROIIdx = 1; + constexpr IOIndex_t inROIIdx = 1; /** @brief input #2 */ - int16_t inScalesIdx = 2; + constexpr IOIndex_t inScalesIdx = 2; /** @brief input #3 */ - int16_t inSizesIdx = 3; + constexpr IOIndex_t inSizesIdx = 3; std::vector<DimSize_t> outDims = getInput(inDataIdx)->dims(); ///////////////////////////////////////////////////// // Ensuring operator is connected properly const bool inputROIPresent = getInput(inROIIdx) && !getInput(inROIIdx)->undefined(); + if (inputROIPresent) { + AIDGE_THROW_OR_ABORT( + std::runtime_error, + "{}: input ROI(#{}) is present but it is not supported.", + type(), + inROIIdx); + } + const bool inputScalesPresent = getInput(inScalesIdx) && !getInput(inScalesIdx)->undefined(); const bool inputSizesPresent = getInput(inSizesIdx) && !getInput(inSizesIdx)->undefined(); - AIDGE_ASSERT(getInput(inDataIdx)->nbDims() == 4, - "{}: Input tensor must have dimensions = 4 (batch, channel, " - "height, width).", - type()); - AIDGE_ASSERT( - inputScalesPresent || inputSizesPresent, - "{}: Either input Scales(#2) or input Sizes(#3) must be defined.", - type()); - AIDGE_ASSERT(inputScalesPresent != inputSizesPresent, - "{}: Only one of the two inputs can be defined between input " + AIDGE_ASSERT(inputScalesPresent ^ inputSizesPresent, + "{}: Only one of the two inputs must be defined between input " "Scales(#2) " "and Sizes(#3). They cannot be specified at the same time.", type()) + std::shared_ptr<Tensor> resizeParam = inputScalesPresent ? getInput(inScalesIdx) : getInput(inSizesIdx); + AIDGE_ASSERT(getInput(inDataIdx)->nbDims() == resizeParam->size(), + "{}: data input #0 and resizing parameter input #{} must have the " + "same dimensions.", + type(), inputScalesPresent ? 
inScalesIdx :inSizesIdx); + + //////////////////////////////////////////// // Case resize is done using Scales formula if (inputScalesPresent) { - if (!allowDataDependency) { - Log::warn("{}: cannot execute forwardDims() as the output " - "dimensions depends on the input #2", - type()); - return false; - } - std::vector<int> ROI; - - if (inputROIPresent) { - AIDGE_THROW_OR_ABORT( - std::runtime_error, - "{}: input ROI(#{}) is present but it is not supported.", - type(), - inROIIdx); - // ROI = std::vector<int>(0, getInput(inDataIdx)->size() - 1); - // // magic numbers explaiend above - // size_t ROIExpectedSize = (getInput(inDataIdx)->nbDims() - 1) * - // 2; ROI.resize(ROIExpectedSize); - // AIDGE_ASSERT(getInput(inROIIdx)->size() == ROIExpectedSize, - // "{}: Input #{} (ROI) should be ordered as following : - // " "1-D tensor given as [start1, …, startN, end1, …, - // endN]," "where N is the rank of input tensor." - // "Hence, its dims should be " - // "input_tensor.nbDims() * 2 = {}" - // "Received following size: {}", - // type(), inROIIdx, ROIExpectedSize, - // getInput(inROIIdx)->size()); - } - - AIDGE_ASSERT( - getInput(inDataIdx)->nbDims() == getInput(inScalesIdx)->size(), - "{}: input #0 and input #2 (Scales) must have the " - "same dimensions.", - type()); - AIDGE_ASSERT( - getInput(inScalesIdx)->dataType() == DataType::Float32, - "{}: Wrong data type for input Scales(#{}), supported dtype: {}.", - type(), - inScalesIdx, - DataType::Float32); std::shared_ptr<Tensor> fallback; const auto &scales = - getInput(inScalesIdx) + resizeParam ->refCastFrom(fallback, DataType::Float32, - getInput(inScalesIdx)->backend()); + resizeParam->backend()); const std::vector<DimSize_t> inDims = getInput(inDataIdx)->dims(); for (std::size_t dim = 0; dim < getInput(inScalesIdx)->size(); ++dim) { - auto scaleAlongDim = scales.get<float>(dim); + const auto scaleAlongDim = scales.get<cpptype_t<DataType::Float32>>(dim); AIDGE_ASSERT(scaleAlongDim > 0, "{}: all scales values must be sctricly positive, " - "received {}.", + "got {}.", type(), scaleAlongDim); outDims[dim] = @@ -145,32 +121,14 @@ bool Resize_Op::forwardDims(bool allowDataDependency) { /////////////////////////////////////////////////////////////// // case where resize output dims are given via the Size input } else { - if (!allowDataDependency) { - Log::warn("{}: cannot execute forwardDims() as the output " - "dimensions depend on the input sizes(#{})", - type(), - inSizesIdx); - return false; - } - AIDGE_ASSERT( - getInput(inDataIdx)->nbDims() == getInput(inSizesIdx)->size(), - "input #0 and input #3 (Sizes) must have the " - "same dimensions."); - AIDGE_ASSERT( - getInput(inSizesIdx)->dataType() == DataType::Int64, - "{}: Wrong data type for input Sizes(#{}), supported dtype: {}.", - type(), - inSizesIdx, - DataType::Int64); - std::shared_ptr<Tensor> fallback; - const auto &sizes = getInput(inSizesIdx) + const auto &sizes = resizeParam ->refCastFrom(fallback, - DataType::Int64, - getInput(inSizesIdx)->backend()); + NativeType<DimSize_t>::type, + resizeParam->backend()); for (std::size_t dim = 0; dim < getInput(inSizesIdx)->size(); ++dim) { - outDims[dim] = sizes.get<int64_t>(dim); + outDims[dim] = sizes.get<DimSize_t>(dim); } } mOutputs[0]->resize(outDims); @@ -181,7 +139,7 @@ void Resize_Op::setBackend(const std::string &name, DeviceIdx_t device) { SET_IMPL_MACRO(Resize_Op, *this, name); mOutputs[0]->setBackend(name, device); - // By default, automatically set backend for all inputs: roi, scales and + // By default, automatically set 
backend for all optional inputs: roi, scales and // sizes if (getInput(1)) { getInput(1)->setBackend(name, device); @@ -195,13 +153,26 @@ void Resize_Op::setBackend(const std::string &name, DeviceIdx_t device) { } std::shared_ptr<Node> -Resize(Interpolation::CoordinateTransformation coordTransfoMode, +Resize(std::vector<float> scale, + std::vector<std::size_t> size, + Interpolation::CoordinateTransformation coordTransfoMode, Interpolation::Mode interpolMode, float cubicCoefA, const std::string &name) { - return std::make_shared<Node>(std::make_shared<Resize_Op>(coordTransfoMode, + std::shared_ptr<Node> node_resize = std::make_shared<Node>(std::make_shared<Resize_Op>(coordTransfoMode, interpolMode, cubicCoefA), name); + if (scale.size()) { + std::shared_ptr<Node> prod_scale = Producer(std::make_shared<Tensor>(Vector<float>(scale))); + prod_scale->addChild(node_resize, 0, 2); + } + if (size.size()) + { + std::shared_ptr<Node> prod_size = Producer(std::make_shared<Tensor>(Vector<std::size_t>(size))); + prod_size->addChild(node_resize, 0, 3); + } + return node_resize; + } } // namespace Aidge diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp index cebc995d5bd2e0120937b8a81acd773b08cb5861..58003bb4009a484ca63acffdb50fbda156a48787 100644 --- a/unit_tests/data/Test_Tensor.cpp +++ b/unit_tests/data/Test_Tensor.cpp @@ -340,38 +340,36 @@ TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") { } // Test get() and set() by coords - // We create coords of rank 0 to the number of dimensions - for (std::size_t coord_size = 0; coord_size < dims.size(); ++coord_size) { - std::vector<std::size_t> coords(coord_size); - for (std::size_t coord_idx = 0; coord_idx < coord_size; ++coord_idx) { - std::size_t dim_idx = (dimsDist(gen)-1) % dims[coord_idx]; - coords[coord_idx] = dim_idx; - } - std::size_t flat_idx, flat_storage_idx; - // As it is continuous we have getIdx() == getStorageIdx() - REQUIRE_NOTHROW(flat_idx = x.getIdx(coords)); - REQUIRE_NOTHROW(flat_storage_idx = x.getStorageIdx(coords)); - REQUIRE(flat_storage_idx == flat_idx); - float val, val_flat; - // Test get() by index and by coords - REQUIRE_NOTHROW(val_flat = x.get<float>(flat_idx)); - REQUIRE_NOTHROW(val = x.get<float>(coords)); - REQUIRE(val == val_flat); - REQUIRE(val == values[flat_idx]); - // Test set() by coords, also update the reference array - REQUIRE_NOTHROW(x.set(coords, val + 1)); - values[flat_idx] += 1; + // We create coords of the number of dimensions + std::vector<std::size_t> coords(nb_dims); + for (std::size_t coord_idx = 0; coord_idx < nb_dims; ++coord_idx) { + std::size_t dim_idx = (dimsDist(gen)-1) % dims[coord_idx]; + coords[coord_idx] = dim_idx; } + std::size_t flat_idx, flat_storage_idx; + // As it is continuous we have getIdx() == getStorageIdx() + REQUIRE_NOTHROW(flat_idx = x.getIdx(coords)); + REQUIRE_NOTHROW(flat_storage_idx = x.getStorageIdx(coords)); + REQUIRE(flat_storage_idx == flat_idx); + float val, val_flat; + // Test get() by index and by coords + REQUIRE_NOTHROW(val_flat = x.get<float>(flat_idx)); + REQUIRE_NOTHROW(val = x.get<float>(coords)); + REQUIRE(val == val_flat); + REQUIRE(val == values[flat_idx]); + // Test set() by coords, also update the reference array + REQUIRE_NOTHROW(x.set(coords, val + 1)); + values[flat_idx] += 1; } } } SECTION("Index & coord manipulation"){ Tensor tensor; std::vector<DimSize_t> dims {2,2}; - int nbVal = std::accumulate(dims.begin(), - dims.end(), + int nbVal = std::accumulate(dims.begin(), + dims.end(), 1, - 
std::multiplies<DimSize_t>()); + std::multiplies<DimSize_t>()); float* values = static_cast<float*>(malloc(nbVal * sizeof(float))); values[0] = 0; values[1] = 1; @@ -383,14 +381,14 @@ TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") { tensor.getImpl()->setRawPtr(values, 4); std::vector<std::size_t> coords; SECTION("getIdx"){ - CHECK(Tensor::getIdx(tensor.dims(), std::vector<std::size_t>({1,1}) ) == 3); - CHECK(Tensor::getIdx(tensor.dims(), std::vector<std::size_t>({1,0}) ) == 2); + CHECK(Tensor::toIndex(tensor.dims(), std::vector<std::size_t>({1,1}) ) == 3); + CHECK(Tensor::toIndex(tensor.dims(), std::vector<std::size_t>({1,0}) ) == 2); // No check to ensure if value is in bounds - CHECK_THROWS(Tensor::getIdx(tensor.dims(), std::vector<std::size_t>({0,2}) )); + CHECK_THROWS(tensor.getIdx(std::vector<std::size_t>({0,2}))); } SECTION("getCoord"){ - CHECK(Tensor::getCoord(tensor.dims(), 3 ) ==std::vector<std::size_t>({1,1})); - CHECK(Tensor::getCoord(tensor.dims(), 2 ) ==std::vector<std::size_t>({1,0})); + CHECK(Tensor::toCoord(tensor.dims(), 3 ) ==std::vector<std::size_t>({1,1})); + CHECK(Tensor::toCoord(tensor.dims(), 2 ) ==std::vector<std::size_t>({1,0})); } SECTION("isInBound"){ CHECK_THROWS(Tensor::isInBounds(dims, std::vector<DimSize_t>({1,2,4,5})) == true); diff --git a/unit_tests/operator/Test_Resize_Op.cpp b/unit_tests/operator/Test_Resize_Op.cpp index 2e84d40e6e64bed0d6be96cf6d3b16e5343a12e8..111e8fb4f62040127f8b5da8125ba9d91c546f23 100644 --- a/unit_tests/operator/Test_Resize_Op.cpp +++ b/unit_tests/operator/Test_Resize_Op.cpp @@ -21,110 +21,97 @@ #include "aidge/utils/Log.hpp" namespace Aidge { -/** - * Test the resize operation of the given operator with the specified input - * dimensions, scales or sizes, and expected output dimensions. - * - * @param op The operator to test. - * @param input_dims The input dimensions to use for the test. - * @param scales_or_sizes The scales or sizes to use for the test. - * @param expected_dims The expected output dimensions for the test. - * @param use_scales A boolean flag indicating whether to use scales or sizes - * for the test. 
- */ - -void setupTestResize(const std::shared_ptr<OperatorTensor> &op, - const std::vector<Aidge::DimSize_t> &input_dims, - const std::vector<float> &scales, - const std::vector<int64_t> &sizes, - const std::vector<Aidge::DimSize_t> &expected_dims) { - Log::setConsoleLevel(Log::Level::Info); - Log::info("\n\n\nResize test:"); - Log::info("\tInput_dims: {}", input_dims); - Log::info("\tScales: {}", scales); - Log::info("\tSizes: {}", sizes); - Log::info("\tExpected output dims: {}", expected_dims); - std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(); - input_data->setBackend("cpu"); - input_data->resize(input_dims); - input_data->zeros(); - - op->associateInput(0, input_data); - - const std::shared_ptr<Tensor> tensor_values = std::make_shared<Tensor>(); - tensor_values->setBackend("cpu"); - if (!scales.empty()) { - tensor_values->setDataType(DataType::Float32); - tensor_values->resize(std::vector<std::size_t>({scales.size()})); - tensor_values->getImpl()->copyFromHost(scales.data(), scales.size()); - op->associateInput(2, tensor_values); - } - if (!sizes.empty()) { - tensor_values->setDataType(DataType::Int64); - tensor_values->resize(std::vector<std::size_t>({sizes.size()})); - tensor_values->getImpl()->copyFromHost(sizes.data(), sizes.size()); - op->associateInput(3, tensor_values); - } -} TEST_CASE("[core/operator] Resize_Op(forwardDims)", "[Resize][forwardDimsScales]") { std::vector<Aidge::DimSize_t> input_dims; std::vector<float> scales; - std::vector<int64_t> sizes; + std::vector<std::size_t> sizes; std::vector<Aidge::DimSize_t> expected_dims; - std::shared_ptr<Node> myResize = Resize(); - auto op = std::static_pointer_cast<OperatorTensor>(myResize->getOperator()); - SECTION("Un-connected input leads to failure.") { - REQUIRE_THROWS(op->forwardDims()); + input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2}); + std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims); + + auto resize_node = Resize(); + auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator()); + op->associateInput(0, input_data); + + REQUIRE_THROWS(op->forwardDims(true)); } + SECTION("Connecting both Scales & Sizes leads to failure") { - input_dims = std::vector<Aidge::DimSize_t>({4, 1, 2, 2}); - scales = std::vector<float>({.5, 3, 2, 2}); - sizes = std::vector<int64_t>({}); + input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2}); + std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims); + + scales = std::vector<float>({.5, 3.0f, 2.0f, 2.0f}); + sizes = std::vector<std::size_t>({1, 3, 4, 4}); expected_dims = std::vector<Aidge::DimSize_t>({2, 3, 4, 4}); - setupTestResize(op, input_dims, scales, sizes, expected_dims); - REQUIRE_NOTHROW(op->forwardDims(true)); - REQUIRE(op->getOutput(0)->dims() == expected_dims); + + auto resize_node = Resize(scales, sizes); + auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator()); + op->associateInput(0, input_data); + + REQUIRE_THROWS(op->forwardDims(true)); } SECTION("Input Scales") { SECTION("TEST 1") { input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2}); + std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims); + scales = std::vector<float>({1, 1, 2, 2}); - sizes = std::vector<int64_t>({}); + sizes = std::vector<std::size_t>({}); expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 4, 4}); - setupTestResize(op, input_dims, scales, sizes, expected_dims); + auto resize_node = Resize(scales, sizes); + auto op = 
std::static_pointer_cast<Resize_Op>(resize_node->getOperator()); + op->associateInput(0, input_data); + REQUIRE_NOTHROW(op->forwardDims(true)); REQUIRE(op->getOutput(0)->dims() == expected_dims); } + SECTION("TEST 2") { input_dims = std::vector<Aidge::DimSize_t>({4, 4, 10, 10}); + std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims); + scales = std::vector<float>({1, 1, 2, 3}); - sizes = std::vector<int64_t>({}); + sizes = std::vector<std::size_t>({}); expected_dims = std::vector<Aidge::DimSize_t>({4, 4, 20, 30}); - setupTestResize(op, input_dims, scales, sizes, expected_dims); + auto resize_node = Resize(scales, sizes); + auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator()); + op->associateInput(0, input_data); + REQUIRE_NOTHROW(op->forwardDims(true)); REQUIRE(op->getOutput(0)->dims() == expected_dims); } SECTION("TEST 3") { input_dims = std::vector<Aidge::DimSize_t>({4, 2, 10, 10}); + std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims); + scales = std::vector<float>({1, 1, 0.5, 0.5}); - sizes = std::vector<int64_t>({}); + sizes = std::vector<std::size_t>({}); expected_dims = std::vector<Aidge::DimSize_t>({4, 2, 5, 5}); - setupTestResize(op, input_dims, scales, sizes, expected_dims); + auto resize_node = Resize(scales, sizes); + auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator()); + op->associateInput(0, input_data); + REQUIRE_NOTHROW(op->forwardDims(true)); REQUIRE(op->getOutput(0)->dims() == expected_dims); } SECTION("TEST 4") { input_dims = std::vector<Aidge::DimSize_t>({11, 11, 4, 4}); + std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims); + + scales = std::vector<float>({1, 1, 0.3, 0.3}); - sizes = std::vector<int64_t>({}); + sizes = std::vector<std::size_t>({}); expected_dims = std::vector<Aidge::DimSize_t>({11, 11, 1, 1}); - setupTestResize(op, input_dims, scales, sizes, expected_dims); + auto resize_node = Resize(scales, sizes); + auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator()); + op->associateInput(0, input_data); + REQUIRE_NOTHROW(op->forwardDims(true)); REQUIRE(op->getOutput(0)->dims() == expected_dims); } @@ -133,37 +120,57 @@ TEST_CASE("[core/operator] Resize_Op(forwardDims)", SECTION("Input Sizes") { SECTION("TEST 1") { input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2}); + std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims); + scales = std::vector<float>({}); - sizes = std::vector<int64_t>({4, 5, 8, 8}); + sizes = std::vector<std::size_t>({4, 5, 8, 8}); expected_dims = std::vector<Aidge::DimSize_t>({4, 5, 8, 8}); - setupTestResize(op, input_dims, scales, sizes, expected_dims); + auto resize_node = Resize(scales, sizes); + auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator()); + op->associateInput(0, input_data); + REQUIRE_NOTHROW(op->forwardDims(true)); REQUIRE(op->getOutput(0)->dims() == expected_dims); } SECTION("TEST 2") { input_dims = std::vector<Aidge::DimSize_t>({60, 60, 30, 30}); + std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims); + scales = std::vector<float>({}); - sizes = std::vector<int64_t>({1, 1, 75, 75}); + sizes = std::vector<std::size_t>({1, 1, 75, 75}); expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 75, 75}); - setupTestResize(op, input_dims, scales, sizes, expected_dims); + auto resize_node = Resize(scales, sizes); + auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator()); + op->associateInput(0, input_data); + 
REQUIRE_NOTHROW(op->forwardDims(true)); REQUIRE(op->getOutput(0)->dims() == expected_dims); } SECTION("TEST 3") { input_dims = std::vector<Aidge::DimSize_t>({11, 11, 20, 20}); + std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims); + scales = std::vector<float>({}); - sizes = std::vector<int64_t>({19, 6, 8, 8}); + sizes = std::vector<std::size_t>({19, 6, 8, 8}); expected_dims = std::vector<Aidge::DimSize_t>({19, 6, 8, 8}); - setupTestResize(op, input_dims, scales, sizes, expected_dims); + auto resize_node = Resize(scales, sizes); + auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator()); + op->associateInput(0, input_data); + REQUIRE_NOTHROW(op->forwardDims(true)); REQUIRE(op->getOutput(0)->dims() == expected_dims); } SECTION("TEST 4") { input_dims = std::vector<Aidge::DimSize_t>({43, 211, 22, 22}); + std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims); + scales = std::vector<float>({}); - sizes = std::vector<int64_t>({1, 1, 10, 10}); + sizes = std::vector<std::size_t>({1, 1, 10, 10}); expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 10, 10}); - setupTestResize(op, input_dims, scales, sizes, expected_dims); + auto resize_node = Resize(scales, sizes); + auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator()); + op->associateInput(0, input_data); + REQUIRE_NOTHROW(op->forwardDims(true)); REQUIRE(op->getOutput(0)->dims() == expected_dims); }
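Usage note (not part of the patch): a minimal, untested sketch of how the renamed coordinate helpers (Tensor::toCoord / Tensor::toIndex, with the bound-checked getCoord / getIdx members) and the extended Resize factory introduced above are expected to be used. It assumes the Aidge headers and the exact signatures shown in this diff; the values in the comments follow the row-major index arithmetic implemented in src/data/Tensor.cpp.

    #include <cstddef>
    #include <vector>

    #include "aidge/data/Interpolation.hpp"
    #include "aidge/data/Tensor.hpp"
    #include "aidge/operator/Resize.hpp"

    int main() {
        using namespace Aidge;

        // The conversions are now static: no Tensor instance is required.
        const std::vector<DimSize_t> dims{2, 3, 4};
        const std::size_t idx = Tensor::toIndex(dims, {1, 2, 3});            // 1*(3*4) + 2*4 + 3 == 23
        const std::vector<std::size_t> coords = Tensor::toCoord(dims, idx);  // {1, 2, 3}

        // The member versions keep the old names but now bounds-check and throw.
        Tensor t(dims);
        // t.getIdx({1, 2, 4});   // would throw std::runtime_error: coordinate 4 exceeds the last dim (size 4)
        // t.getCoord(24);        // would throw: 24 is past the 2*3*4 == 24 elements

        // The Resize factory can now build the scale/size Producer inputs itself
        // (here only a target size is given, so input #3 gets a Producer node).
        std::shared_ptr<Node> resize =
            Resize(/*scale=*/{}, /*size=*/{1, 3, 8, 8},
                   Interpolation::CoordinateTransformation::HalfPixel,
                   Interpolation::Mode::RoundPreferFloor);
        (void)resize;
        (void)coords;
        return 0;
    }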