Skip to content
Snippets Groups Projects
Commit cd840ec6 authored by Maxence Naud's avatar Maxence Naud
Browse files

Merge branch 'feat_enhance_operator_resize_support' into 'dev'

Extends the functionalities of Resize Operator

See merge request !242
parents 1e291f69 609e974b
No related branches found
No related tags found
2 merge requests!279v0.4.0,!242Extends the functionalities of Resize Operator
Pipeline #60206 passed
Showing
with 1325 additions and 284 deletions
...@@ -14,6 +14,9 @@ add_definitions(-DPROJECT_VERSION="${version}") ...@@ -14,6 +14,9 @@ add_definitions(-DPROJECT_VERSION="${version}")
message(STATUS "Project name: ${CMAKE_PROJECT_NAME}") message(STATUS "Project name: ${CMAKE_PROJECT_NAME}")
message(STATUS "Project version: ${version}") message(STATUS "Project version: ${version}")
# helper for LSP users
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
# Note : project name is {project} and python module name is also {project} # Note : project name is {project} and python module name is also {project}
set(module_name _${CMAKE_PROJECT_NAME}) # target name set(module_name _${CMAKE_PROJECT_NAME}) # target name
set(pybind_module_name ${CMAKE_PROJECT_NAME}) # name of submodule for python bindings set(pybind_module_name ${CMAKE_PROJECT_NAME}) # name of submodule for python bindings
...@@ -26,6 +29,7 @@ option(TEST "Enable tests" ON) ...@@ -26,6 +29,7 @@ option(TEST "Enable tests" ON)
option(COVERAGE "Enable coverage" OFF) option(COVERAGE "Enable coverage" OFF)
option(ENABLE_ASAN "Enable ASan (AddressSanitizer) for runtime analysis of memory use (over/underflow, memory leak, ...)" OFF) option(ENABLE_ASAN "Enable ASan (AddressSanitizer) for runtime analysis of memory use (over/underflow, memory leak, ...)" OFF)
############################################## ##############################################
# Import utils CMakeLists # Import utils CMakeLists
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake") set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
......
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_UTILS_INTERPOLATION_H_
#define AIDGE_CORE_UTILS_INTERPOLATION_H_

#include <cstdint>  // std::int64_t
#include <set>      // std::set (return type of retrieveNeighbours)
#include <utility>  // std::pair
#include <vector>

#include "aidge/operator/Pad.hpp"
#include "aidge/utils/Types.h"

namespace Aidge {

/** @brief Generic class to hold interpolation utilities. */
class Interpolation {
  public:
    /**
     * @brief simple type alias to describe a coordinates
     * @note the indexes are deliberately chosen to be signed values as some
     * points retrieved by interpolation are out of bound, hence their coords
     * can be < 0
     */
    using Coords = std::vector<std::int64_t>;

    /**
     * @brief type alias to designate a point of any type : hence coordinates &
     * associated value
     */
    template <class T> using Point = std::pair<Coords, T>;

    /**
     * @brief details how coordinates are transformed from interpolated tensor
     * to original tensor
     */
    enum CoordinateTransformation {
        HalfPixel,
        HalfPixelSymmetric,
        PytorchHalfPixel,
        AlignCorners,
        Asymmetric,
    };

    /**
     * @brief apply transformation to coords in interpolated Tensor to find
     * equivalent coordinates in original tensor reference frame.
     * @warning it is assumed that all parameters have the same
     * number of dimensions.
     * @param[in] transformedCoords : coords in interpolated tensor
     * @param[in] inputDims : input dimensions of tensor
     * @param[in] outputDims : output dimensions of tensor
     * @param[in] coordTransfoMode : the transformation to apply
     * @return std::vector containing coords in original tensor reference frame
     */
    static std::vector<float> untransformCoordinates(
        const std::vector<DimSize_t> &transformedCoords,
        const std::vector<DimSize_t> &inputDims,
        const std::vector<DimSize_t> &outputDims,
        const Interpolation::CoordinateTransformation coordTransfoMode);

    /**
     * @brief retrieves neighbouring values of a given index
     * @param[in] tensorValues raw pointer of the tensor values
     * retrieved with
     * @code
     * tensor->getImpl()->rawPtr()
     * @endcode
     * @param[in] tensorDims dimensions of given tensor
     * retrieved with
     * @code
     * tensor->dims()
     * @endcode
     * @param[in] coords coordinates in the tensor of the values we want to
     * find the neighbours of.
     * @param[in] paddingMode how out-of-bound neighbours are handled
     * (default: treated as zero).
     * @return std::set<Point<T>> containing both indexes of neighbours &
     * their values
     */
    template <typename T>
    static std::set<Point<T>>
    retrieveNeighbours(const T *tensorValues,
                       const std::vector<DimSize_t> &tensorDims,
                       const std::vector<float> &coords,
                       const PadBorderType paddingMode = PadBorderType::Zero);

    /** @brief interpolation type */
    enum Mode {
        Cubic,
        Linear,
        RoundPreferFloor,
        RoundPreferCeil,
        Floor,
        Ceil
    };

    /*
     * @brief Interpolates values given via input in given mode.
     *
     * @warning This function is empty and is meant to be overridden in derived
     * class in backend libraries. Hence it is marked [[noreturn]]: calling the
     * core implementation directly aborts.
     *
     * Values are contiguously arranged in a "square" shape around the point to
     * interpolate. Depending on interpolation mode.
     * The point that will be interpolated is located right in the
     * middle of all points.
     * Immediate neighbours :
     * 1D interp :     2D interp :
     *                  . . . . . .
     * . . 1 2 . .      . . . . . .
     *                  . . 1 2 . .
     *                  . . 3 4 . .
     *                  . . . . . .
     *                  . . . . . .
     *
     * 2 neighbours :
     * 1D interp :         2D interp :
     *                      . . . . . . . .
     *                      . . . . . . . .
     * . . 1 2 3 4 . .      . . 1 2 3 4 . .
     *                      . . 5 6 7 8 . .
     *                      . . 9 10 11 12 . .
     *                      . . 13 14 15 16 . .
     *                      . . . . . . . .
     *                      . . . . . . . .
     *
     * @param[in] originalIndex: index of the point in the original picture.
     * Since the coords are being transformed from the interpolatedTensor frame
     * to originalTensor frame, the result might be in float.
     * @param[in] points : points to interpolate, arranged in a vector of
     * pairs ((point_coord), value) :
     * [[[X1, X2, ..., XN], Xval], ...., [[A1, A2, ..., AN],Aval]].
     * With :
     * - N: the number of dimensions.
     * - A: the number of points of the grid to interpolate.
     * - All coordinates expressed in originalTensor frame.
     * @param[in] interpMode: interpolation mode
     * @return interpolated value
     */
    template <typename T>
    [[noreturn]] static T interpolate(const std::vector<float> &originalIndex,
                                      const std::vector<Point<T>> &points,
                                      const Mode interpMode);
};

} // namespace Aidge

#endif
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#ifndef AIDGE_CORE_DATA_TENSOR_H_ #ifndef AIDGE_CORE_DATA_TENSOR_H_
#define AIDGE_CORE_DATA_TENSOR_H_ #define AIDGE_CORE_DATA_TENSOR_H_
#include <algorithm>
#include <cstddef> // std::size_t #include <cstddef> // std::size_t
#include <cstring> #include <cstring>
#include <functional> // std::multiplies #include <functional> // std::multiplies
...@@ -24,10 +25,10 @@ ...@@ -24,10 +25,10 @@
#include "aidge/backend/TensorImpl.hpp" #include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/Data.hpp" #include "aidge/data/Data.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
#include "aidge/utils/ArrayHelpers.hpp"
namespace Aidge { namespace Aidge {
/** /**
...@@ -562,9 +563,9 @@ public: ...@@ -562,9 +563,9 @@ public:
template <typename expectedType> template <typename expectedType>
const expectedType& get(std::size_t idx) const { const expectedType& get(std::size_t idx) const {
AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type"); AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "Tensor::get<>({}): wrong data type, expected {}, got {}", idx, mDataType, NativeType<expectedType>::type);
AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "get() can only be used for backends providing a valid host pointer"); AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "Tensor::get<>({}): can only be used for backends providing a valid host pointer.", idx);
AIDGE_ASSERT(idx < mSize, "idx out of range"); AIDGE_ASSERT(idx < mSize, "Tensor::get<>({}): idx {} out of range, tensor size {}", idx, mSize);
return *reinterpret_cast<expectedType *>(mImpl->hostPtr(mImplOffset + idx)); return *reinterpret_cast<expectedType *>(mImpl->hostPtr(mImplOffset + idx));
} }
...@@ -621,20 +622,41 @@ public: ...@@ -621,20 +622,41 @@ public:
* @brief From the the 1D contiguous index, return the coordinate of an element in the tensor. * @brief From the the 1D contiguous index, return the coordinate of an element in the tensor.
* Beware: do not use this function with the storage index! * Beware: do not use this function with the storage index!
* *
* @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor. * @param index 1D contiguous index of the value considering a flatten, contiguous, tensor.
* @return std::vector<DimSize_t> * @return std::vector<DimSize_t>
*/ */
std::vector<std::size_t> getCoord(std::size_t flatIdx) const { static std::vector<std::size_t>
std::vector<std::size_t> coordIdx(mDims.size()); toCoord(const std::vector<Aidge::DimSize_t> &dimensions, std::size_t index);
std::size_t i = mDims.size();
while (i-- > 0) { /**
coordIdx[i] = (flatIdx % mDims[i]); * @brief From the the 1D contiguous index, return the coordinate of an element in the tensor.
flatIdx/=mDims[i]; * Beware: do not use this function with the storage index!
*
* @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor.
* @return std::vector<DimSize_t>
*/
std::vector<std::size_t> getCoord(std::size_t index) const {
if (isInBounds(mDims, index)) {
return toCoord(mDims, index);
} else {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Out of bound coordinates.");
} }
return coordIdx;
} }
/**
* @brief From the coordinate returns the 1D contiguous index of an element in the tensor.
* If the number of coordinates is inferior to the number of dimensions,
* the remaining coordinates are assumed to be 0.
* Beware: the contiguous index will only correspond to the storage index
* if the tensor is contiguous!
* Note that the coordIdx may be an empty vector.
*
* @param coords Coordinate to an element in the tensor
* @return DimSize_t Contiguous index
*/
static std::size_t toIndex(const std::vector<DimSize_t>& dimensions, const std::vector<std::size_t>& coords);
/** /**
* @brief From the coordinate returns the 1D contiguous index of an element in the tensor. * @brief From the coordinate returns the 1D contiguous index of an element in the tensor.
* If the number of coordinates is inferior to the number of dimensions, * If the number of coordinates is inferior to the number of dimensions,
...@@ -646,18 +668,27 @@ public: ...@@ -646,18 +668,27 @@ public:
* @param coordIdx Coordinate to an element in the tensor * @param coordIdx Coordinate to an element in the tensor
* @return DimSize_t Contiguous index * @return DimSize_t Contiguous index
*/ */
std::size_t getIdx(const std::vector<std::size_t>& coordIdx) const { std::size_t getIdx(const std::vector<std::size_t>& coords) const {
AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions"); if (isInBounds<std::size_t>(mDims, coords)) {
std::size_t flatIdx = 0; return toIndex(mDims, coords);
for(std::size_t i = 0; i < mDims.size(); ++i) { } else {
auto coord = i < coordIdx.size() ? coordIdx[i]: 0; AIDGE_THROW_OR_ABORT(std::runtime_error, "Out of bound coordinates.");
AIDGE_ASSERT(coord < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
auto nextDimSize = i + 1 < mDims.size() ? mDims[i + 1]: 1;
flatIdx = (flatIdx + coord) * nextDimSize;
} }
return flatIdx;
} }
/**
* @brief check if index is in bound of given tensor dimensions
* @warning this function is templated in order to welcome cases like interpolation where indexes are not integers.
* However, the only types accepted are floating, integer & size_t
* @param tensorDims : tensor dimensions
* @param coords : coords of the tensor you want to flattened index of
* @return true if all coords are in bound. False otherwise
*/
template<typename T>
static bool isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<T>& coords);
static bool isInBounds(const std::vector<DimSize_t>& dimensions, const std::size_t index);
/** /**
* @brief From the coordinate returns the 1D storage index of an element in the tensor. * @brief From the coordinate returns the 1D storage index of an element in the tensor.
* If the number of coordinates is inferior to the number of dimensions, * If the number of coordinates is inferior to the number of dimensions,
......
#include "aidge/data/half.hpp"
#include <fmt/core.h>
// Specialize fmt::formatter for half_float::half
// Teach fmt how to print half_float::half by delegating everything to the
// existing float formatter: specs are parsed as float specs, and the value is
// widened to float before formatting.
template <>
struct fmt::formatter<half_float::half> : fmt::formatter<float> {
    // Reuse the float format-spec grammar (width, precision, fill, ...).
    template <typename ParseContext>
    constexpr auto parse(ParseContext& parseCtx) {
        return fmt::formatter<float>::parse(parseCtx);
    }

    // Widen the half-precision value, then let the float formatter render it.
    template <typename FormatContext>
    auto format(const half_float::half& halfValue, FormatContext& formatCtx) const {
        return fmt::formatter<float>::format(static_cast<float>(halfValue),
                                             formatCtx);
    }
};
...@@ -26,7 +26,15 @@ ...@@ -26,7 +26,15 @@
namespace Aidge { namespace Aidge {
enum class PadAttr { BeginEndBorders, BorderType, BorderValue }; enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
enum class PadBorderType { Constant, Edge, Reflect, Wrap }; enum class PadBorderType {
/** @brief all out of bound values will be set to a given value.*/
Constant,
Edge,
Reflect,
Wrap,
/** @brief all out of bound values will be set to 0.*/
Zero,
};
template <DimIdx_t DIM> template <DimIdx_t DIM>
class Pad_Op : public OperatorTensor, class Pad_Op : public OperatorTensor,
......
...@@ -9,60 +9,226 @@ ...@@ -9,60 +9,226 @@
* *
********************************************************************************/ ********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_Resize_H_ #ifndef AIDGE_CORE_OPERATOR_RESIZE_H_
#define AIDGE_CORE_OPERATOR_Resize_H_ #define AIDGE_CORE_OPERATOR_RESIZE_H_
#include <memory> #include <memory>
#include <string> #include <string>
#include <vector> #include <vector>
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Interpolation.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Pad.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
namespace Aidge { namespace Aidge {
class Resize_Op : public OperatorTensor, /* @brief attributes for the aidge operator */
public Registrable<Resize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Resize_Op&)>>{ enum class ResizeAttr {
// antialias,
// axes,
CoordinateTransformationMode,
CubicCoeffA,
// excludeOutside,
// extrapolation_value,
// keep_aspect_ratio_policy,
InterpolationMode,
PaddingMode
};
public: /**
* @brief Resize operator, will up/downscale a given tensor given the input.
* @verbatim
* Output size can be computed in 2 ways :
* 1. Image can be rescaled proportionally to the input size :
* output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)
* 2. Output dimensions are directly given via the size input(#4)
*
* Hence, either input Scale or Input Sizes can be defined, if both are
* connected, the operator will throw an error.
*
* Resize takes (up to) 4 different inputs :
* #1 Input to resize :
* N-D tensor.
*
* #2 ROI (optional) :
* 1-D tensor of coordinates given as [start1, …, startN, end1, …, endN]
* where N is the rank of X or the length of axes, if provided. The RoIs’
* coordinates are normalized in the coordinate system of the input image.
 * If not set, the default ROI is the entire image.
* #3 scales (optional) - tensor(float):
* The scale array along each dimension.
* The number of elements of ‘scales’ should be the same as the rank of
* input ‘X’ or the length of ‘axes’, if provided. Accepted values: (0,inf)
* - (0,1) : downsampling
* - 1 : identity
* - (1,inf) : upsampling
* #4. Sizes - tensor(int64):
* Target size of the output tensor.
* Its interpretation depends on the ‘keep_aspect_ratio_policy’ value.
* The number of elements of ‘sizes’ should be the same as either :
* - The rank of input ‘X’
* - The length of ‘axes’ attribute, if provided.
* @endverbatim
* @warning : Only one of ‘scales’ and ‘sizes’ can be specified.
* @param coordinate_transformation_mode
 * @param cubic_coeff_a the "a" coefficient of cubic interpolation. Most often
* it is set to -0.75
* @param InterpolationMode type of interpolation (currently only support cubic
* interpolation)
*/
class Resize_Op
: public OperatorTensor,
public Registrable<
Resize_Op,
std::string,
std::function<std::shared_ptr<OperatorImpl>(const Resize_Op &)>> {
private:
using Attributes_ =
StaticAttributes<ResizeAttr,
Interpolation::CoordinateTransformation,
float,
Interpolation::Mode,
PadBorderType>;
template <ResizeAttr e>
using attr = typename Attributes_::template attr<e>;
const std::shared_ptr<Attributes_> mAttributes;
public:
static const std::string Type; static const std::string Type;
/**
Resize_Op(); * @brief creates a resize operator
* This node can take 4 different inputs, more details in the class
* doxygen.
* 1. Input to resize :
* 2. ROI NOT SUPPORTED (optional) :
* 3. scales (optional) - tensor(float):
* 4. sizes - tensor(int64):
* @param[in] coordinate_transformation_mode
* @param[in] cubic_coeff_a the a coefficient of cubic interpolation. Only
* used if interpolation_mode = Interpolation::Mode::Cubic
* @param[in] interpolationMode : Type of interpolation used for
* up/downsampling
 * @warning Scales & ROI input cannot be set simultaneously. If both are
* set, forward will fail.
* @return NodePtr
*/
Resize_Op(
Interpolation::CoordinateTransformation coordTransfoMode,
Interpolation::Mode interpol_mode = Interpolation::Mode::RoundPreferFloor,
float cubic_coef_a = -.75f,
PadBorderType paddingMode = PadBorderType::Edge)
: OperatorTensor(Type,
{InputCategory::Data,
InputCategory::OptionalData,
InputCategory::OptionalData,
InputCategory::OptionalData},
1),
mAttributes(std::make_shared<Attributes_>(
attr<ResizeAttr::CubicCoeffA>(cubic_coef_a),
attr<ResizeAttr::CoordinateTransformationMode>(coordTransfoMode),
attr<ResizeAttr::InterpolationMode>(interpol_mode),
attr<ResizeAttr::PaddingMode>(paddingMode))) {}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), * @brief Copy-constructor. Copy the operator attributes and its output
* but not its input tensors (the new operator has no input associated). * tensor(s), but not its input tensors : The new operator has no input
* associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
Resize_Op(const Resize_Op& op); Resize_Op(const Resize_Op &op)
: OperatorTensor(op), mAttributes(op.mAttributes) {
if (!op.backend().empty()) {
SET_IMPL_MACRO(Resize_Op, *this, op.backend());
} else {
mImpl = nullptr;
}
}
/** /**
* @brief Clone the operator using its copy-constructor. * @brief Clone the operator using its copy-constructor.
* @see Operator::Resize_Op * @see Operator::Resize_Op
*/ */
std::shared_ptr<Operator> clone() const override; std::shared_ptr<Operator> clone() const override final {
return std::make_shared<Resize_Op>(*this);
}
bool dimsForwarded() const override final; bool dimsForwarded() const override final;
bool forwardDims(bool allowDataDependency = false) override final; bool forwardDims(bool allowDataDependency = false) override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; void setBackend(const std::string &name,
std::set<std::string> getAvailableBackends() const override; DeviceIdx_t device = 0) override final;
std::set<std::string> getAvailableBackends() const override {
return Registrar<Resize_Op>::getKeys();
}
inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
inline Interpolation::CoordinateTransformation
coordinateTransformationMode() const {
return mAttributes
->template getAttr<ResizeAttr::CoordinateTransformationMode>();
}
inline float cubicCoefA() const {
return mAttributes->template getAttr<ResizeAttr::CubicCoeffA>();
}
inline Interpolation::Mode interpolationMode() const {
return mAttributes->template getAttr<ResizeAttr::InterpolationMode>();
}
inline PadBorderType paddingMode() const {
return mAttributes->template getAttr<ResizeAttr::PaddingMode>();
}
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName() {
// roi, scales, sizes, even if considered as const parameters/input // roi, scales, sizes, even if considered as const parameters/input
return {"data_input", "roi ", "scales", "sizes"}; return {"data_input", "roi ", "scales", "sizes"};
} }
static const std::vector<std::string> getOutputsName(){ static const std::vector<std::string> getOutputsName() {
return {"data_output"}; return {"data_output"};
} }
}; };
std::shared_ptr<Node> Resize(const std::string &name = ""); /**
* @brief creates a node that contains a resize operator
} // namespace Aidge * This node can take 4 different inputs, more details in the class doxygen.
* #0 Input to resize
* #1 ROI NOT SUPPORTED (optional) - Tensor(double|float|float16)
#endif /* AIDGE_CORE_OPERATOR_Resize_H_ */ * #2 scales (optional) - tensor(float)
\ No newline at end of file * #3 sizes - tensor(int64)
* @param[in] coordinate_transformation_mode
* @param[in] interpolationMode type of interpolation used in case of
* upsampling
* @param[in] cubic_coeff_a the "a" coefficient of cubic interpolation. Only
* used if interpolation_mode = Interpolation::Mode::Cubic
 * @warning Scales & ROI input cannot be set simultaneously. If both are set,
* forward will fail.
* @warning Padding mode will tell how values out of bound are treated.
* @return NodePtr
*/
std::shared_ptr<Node>
Resize(std::vector<float> scale = std::vector<float>(),
std::vector<std::size_t> size = std::vector<std::size_t>(),
Interpolation::CoordinateTransformation coordTransfoMode =
Interpolation::CoordinateTransformation::HalfPixel,
Interpolation::Mode interpolMode =
Interpolation::Mode::RoundPreferFloor,
float cubicCoefA = -.75f,
const std::string &name = "");
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
"coordinateTransformationMode",
"cubicCoeffA",
"InterpolationMode",
"PaddingMode"
};
}
#endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
* *
********************************************************************************/ ********************************************************************************/
#ifndef AIDGE_LOG_H_ #ifndef AIDGE_LOG_H_
#define AIDGE_LOG_H_ #define AIDGE_LOG_H_
...@@ -19,44 +18,36 @@ ...@@ -19,44 +18,36 @@
#include <fmt/format.h> #include <fmt/format.h>
#include <fmt/ranges.h> #include <fmt/ranges.h>
#include "aidge/data/half_fmt.hpp"
#include "aidge/utils/Attributes.hpp" #include "aidge/utils/Attributes.hpp"
namespace Aidge { namespace Aidge {
/** /**
* Helper to define a context anywhere, hidding the scoped variable name * Helper to define a context anywhere, hidding the scoped variable name
* which has no relevance. * which has no relevance.
*/ */
#define AIDGE_LOG_CONTEXT(...) const Log::Context logContext_##__LINE__(__VA_ARGS__) #define AIDGE_LOG_CONTEXT(...) \
const Log::Context logContext_##__LINE__(__VA_ARGS__)
template <class U> static void discard_args(U parg) {
template<class U>
static void discard_args(U parg) {
(void)parg; (void)parg;
} }
template<class U, class... Us> template <class U, class... Us> static void discard_args(U parg, Us... pargs) {
static void discard_args(U parg, Us... pargs) {
(void)parg; (void)parg;
discard_args(pargs...); discard_args(pargs...);
} }
/** /**
* Aidge logging class, for displaying and file logging of events. * Aidge logging class, for displaying and file logging of events.
*/ */
class Log { class Log {
public: public:
enum Level { enum Level { Debug = 0, Info, Notice, Warn, Error, Fatal };
Debug = 0,
Info,
Notice,
Warn,
Error,
Fatal
};
class Context { class Context {
public: public:
template <typename... Args> template <typename... Args> Context(Args &&...args) {
Context(Args&&... args) {
Log::mContext.push_back(fmt::format(std::forward<Args>(args)...)); Log::mContext.push_back(fmt::format(std::forward<Args>(args)...));
} }
...@@ -68,13 +59,12 @@ public: ...@@ -68,13 +59,12 @@ public:
/** /**
* Detailed messages for debugging purposes, providing information helpful * Detailed messages for debugging purposes, providing information helpful
* for developers to trace and identify issues. * for developers to trace and identify issues.
* Detailed insights of what is appening in an operation, not useful for the * Detailed insights of what is appening in an operation, not useful for
* end-user. The operation is performed nominally. * the end-user. The operation is performed nominally.
* @note This level is disabled at compile time for Release, therefore * @note This level is disabled at compile time for Release, therefore
* inducing no runtime overhead for Release. * inducing no runtime overhead for Release.
*/ */
template <typename... Args> template <typename... Args> static void debug(Args &&...args) {
static void debug(Args&&... args) {
#ifndef NDEBUG #ifndef NDEBUG
// only when compiled in Debug // only when compiled in Debug
log(Debug, fmt::format(std::forward<Args>(args)...)); log(Debug, fmt::format(std::forward<Args>(args)...));
...@@ -86,22 +76,19 @@ public: ...@@ -86,22 +76,19 @@ public:
/** /**
* Messages that provide a record of the normal operation, about * Messages that provide a record of the normal operation, about
* the application's state, progress, or important events. * the application's state, progress, or important events.
* Reports normal start, end and key steps in an operation. The operation is * Reports normal start, end and key steps in an operation. The operation
* performed nominally. * is performed nominally.
*/ */
template <typename... Args> template <typename... Args> static void info(Args &&...args) {
static void info(Args&&... args) {
log(Info, fmt::format(std::forward<Args>(args)...)); log(Info, fmt::format(std::forward<Args>(args)...));
} }
/** /**
* Applies to normal but significant conditions that may require monitoring, * Applies to normal but significant conditions that may require
* like unusual or normal fallback events. * monitoring, like unusual or normal fallback events. Reports specific
* Reports specific paths in an operation. The operation can still be * paths in an operation. The operation can still be performed normally.
* performed normally. */
*/ template <typename... Args> static void notice(Args &&...args) {
template <typename... Args>
static void notice(Args&&... args) {
log(Notice, fmt::format(std::forward<Args>(args)...)); log(Notice, fmt::format(std::forward<Args>(args)...));
} }
...@@ -110,9 +97,8 @@ public: ...@@ -110,9 +97,8 @@ public:
* not necessarily cause immediate problems. * not necessarily cause immediate problems.
* Some specific steps of the operation could not be performed, but it can * Some specific steps of the operation could not be performed, but it can
* still provide an exploitable result. * still provide an exploitable result.
*/ */
template <typename... Args> template <typename... Args> static void warn(Args &&...args) {
static void warn(Args&&... args) {
log(Warn, fmt::format(std::forward<Args>(args)...)); log(Warn, fmt::format(std::forward<Args>(args)...));
} }
...@@ -121,26 +107,24 @@ public: ...@@ -121,26 +107,24 @@ public:
* recover from, but attention is needed to prevent further issues. * recover from, but attention is needed to prevent further issues.
* The operation could not be performed, but it does not prevent potential * The operation could not be performed, but it does not prevent potential
* further operations. * further operations.
*/ */
template <typename... Args> template <typename... Args> static void error(Args &&...args) {
static void error(Args&&... args) {
log(Error, fmt::format(std::forward<Args>(args)...)); log(Error, fmt::format(std::forward<Args>(args)...));
} }
/** /**
* Represents a critical error or condition that leads to the termination of * Represents a critical error or condition that leads to the termination
* the application, indicating a severe and unrecoverable problem. * of the application, indicating a severe and unrecoverable problem. The
* The operation could not be performed and any further operation is * operation could not be performed and any further operation is
* impossible. * impossible.
*/ */
template <typename... Args> template <typename... Args> static void fatal(Args &&...args) {
static void fatal(Args&&... args) {
log(Fatal, fmt::format(std::forward<Args>(args)...)); log(Fatal, fmt::format(std::forward<Args>(args)...));
} }
/** /**
* Set the minimum log level displayed in the console. * Set the minimum log level displayed in the console.
*/ */
static void setConsoleLevel(Level level) { static void setConsoleLevel(Level level) {
mConsoleLevel = level; mConsoleLevel = level;
} }
...@@ -148,14 +132,14 @@ public: ...@@ -148,14 +132,14 @@ public:
/** /**
* Set or disable colors on console. * Set or disable colors on console.
* Initial value should be assumed true. * Initial value should be assumed true.
*/ */
static void setConsoleColor(bool enabled) { static void setConsoleColor(bool enabled) {
mConsoleColor = enabled; mConsoleColor = enabled;
} }
/** /**
* Set the minimum log level saved in the log file. * Set the minimum log level saved in the log file.
*/ */
constexpr static void setFileLevel(Level level) { constexpr static void setFileLevel(Level level) {
mFileLevel = level; mFileLevel = level;
} }
...@@ -164,8 +148,8 @@ public: ...@@ -164,8 +148,8 @@ public:
* Set the log file name. * Set the log file name.
* Close the current log file and open the one with the new file name. * Close the current log file and open the one with the new file name.
* If empty, stop logging into a file. * If empty, stop logging into a file.
*/ */
static void setFileName(const std::string& fileName) { static void setFileName(const std::string &fileName) {
if (fileName != mFileName) { if (fileName != mFileName) {
mFileName = fileName; mFileName = fileName;
mFile.release(); mFile.release();
...@@ -187,8 +171,8 @@ public: ...@@ -187,8 +171,8 @@ public:
* warnings. * warnings.
*/ */
struct fcloseDeleter { struct fcloseDeleter {
void operator()(FILE *f) const noexcept { void operator()(FILE *f) const noexcept {
std::fclose(f); std::fclose(f);
} }
}; };
...@@ -203,11 +187,12 @@ private: ...@@ -203,11 +187,12 @@ private:
static std::unique_ptr<FILE, fcloseDeleter> mFile; static std::unique_ptr<FILE, fcloseDeleter> mFile;
static std::vector<std::string> mContext; static std::vector<std::string> mContext;
}; };
} } // namespace Aidge
namespace { namespace {
template <> template <>
const char *const EnumStrings<Aidge::Log::Level>::data[] = {"Debug", "Info", "Notice", "Warn", "Error", "Fatal"}; const char *const EnumStrings<Aidge::Log::Level>::data[] =
{"Debug", "Info", "Notice", "Warn", "Error", "Fatal"};
} }
#endif //AIDGE_LOG_H_ #endif // AIDGE_LOG_H_
...@@ -49,6 +49,7 @@ bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float ...@@ -49,6 +49,7 @@ bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float
} }
return true; return true;
} }
}
} // namespace Aidge
#endif /* AIDGE_CORE_UTILS_TENSOR_UTILS_H_s */ #endif /* AIDGE_CORE_UTILS_TENSOR_UTILS_H_s */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/data/Interpolation.hpp"
#include "aidge/utils/Registrar.hpp"
namespace py = pybind11;
namespace Aidge {
/// Bind the Interpolation helper class and its two enums so Python code can
/// select interpolation behaviour (e.g. for the Resize operator).
void init_Interpolation(py::module &m) {
    auto pyInterpolation = py::class_<Aidge::Interpolation>(m, "Interpolation");

    py::enum_<Interpolation::Mode>(pyInterpolation, "Mode")
        .value("CUBIC", Interpolation::Mode::Cubic)
        .value("LINEAR", Interpolation::Mode::Linear)
        .value("ROUND_PREFER_FLOOR", Interpolation::Mode::RoundPreferFloor)
        .value("ROUND_PREFER_CEIL", Interpolation::Mode::RoundPreferCeil)
        .value("FLOOR", Interpolation::Mode::Floor)
        .value("CEIL", Interpolation::Mode::Ceil)
        .export_values();

    py::enum_<Interpolation::CoordinateTransformation>(pyInterpolation, "CoordinateTransformation")
        .value("HALF_PIXEL", Interpolation::CoordinateTransformation::HalfPixel)
        // "HALF_PIXEL_SYMETRIC" is misspelled but kept for backward
        // compatibility; the correctly spelled alias below maps to the same
        // C++ enumerator (HalfPixelSymmetric).
        .value("HALF_PIXEL_SYMETRIC", Interpolation::CoordinateTransformation::HalfPixelSymmetric)
        .value("HALF_PIXEL_SYMMETRIC", Interpolation::CoordinateTransformation::HalfPixelSymmetric)
        .value("PYTORCH_HALF_PIXEL", Interpolation::CoordinateTransformation::PytorchHalfPixel)
        .value("ALIGN_CORNERS", Interpolation::CoordinateTransformation::AlignCorners)
        .value("ASYMMETRIC", Interpolation::CoordinateTransformation::Asymmetric)
        .export_values();
}
} // namespace Aidge
...@@ -329,8 +329,8 @@ void init_Tensor(py::module& m){ ...@@ -329,8 +329,8 @@ void init_Tensor(py::module& m){
.def("capacity", &Tensor::capacity) .def("capacity", &Tensor::capacity)
.def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize, py::arg("dims"), py::arg("strides") = std::vector<DimSize_t>()) .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize, py::arg("dims"), py::arg("strides") = std::vector<DimSize_t>())
.def("has_impl", &Tensor::hasImpl) .def("has_impl", &Tensor::hasImpl)
.def("get_coord", &Tensor::getCoord) .def("get_coord", (std::vector<std::size_t> (Tensor::*)(const std::size_t)) &Tensor::getCoord, py::arg("flatIdx"))
.def("get_idx", &Tensor::getIdx) .def("get_idx",(std::size_t (Tensor::*)(const std::vector<std::size_t> &)) &Tensor::getIdx, py::arg("coords"))
.def_static("get_available_backends", &Tensor::getAvailableBackends) .def_static("get_available_backends", &Tensor::getAvailableBackends)
.def("undefined", &Tensor::undefined) .def("undefined", &Tensor::undefined)
.def("cpy_transpose", (void (Tensor::*)(const Tensor& src, const std::vector<DimSize_t>& transpose)) &Tensor::copyTranspose, py::arg("src"), py::arg("transpose")) .def("cpy_transpose", (void (Tensor::*)(const Tensor& src, const std::vector<DimSize_t>& transpose)) &Tensor::copyTranspose, py::arg("src"), py::arg("transpose"))
......
...@@ -9,22 +9,51 @@ ...@@ -9,22 +9,51 @@
* *
********************************************************************************/ ********************************************************************************/
#include <cstddef> // std::size_t
#include <pybind11/pybind11.h> #include <pybind11/pybind11.h>
#include "aidge/operator/Resize.hpp" #include "aidge/data/Interpolation.hpp"
#include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Pad.hpp"
#include "aidge/operator/Resize.hpp"
#include "aidge/utils/Registrar.hpp"
namespace py = pybind11; namespace py = pybind11;
namespace Aidge { namespace Aidge {
void init_Resize(py::module& m) { void init_Resize(py::module &m) {
py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance()) py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(
.def_static("get_inputs_name", &Resize_Op::getInputsName) m, "ResizeOp", py::multiple_inheritance())
.def_static("get_outputs_name", &Resize_Op::getOutputsName) .def(py::init<Interpolation::CoordinateTransformation, Interpolation::Mode, float, PadBorderType>(), py::arg("coordinate_transformation_mode"), py::arg("interpolation_mode"), py::arg("cubic_coeff_a") = -0.75f, py::arg("padding_mode") = PadBorderType::Edge)
.def_readonly_static("Type", &Resize_Op::Type); .def_static("get_inputs_name", &Resize_Op::getInputsName)
.def_static("get_outputs_name", &Resize_Op::getOutputsName)
.def_readonly_static("Type", &Resize_Op::Type);
declare_registrable<Resize_Op>(m, "ResizeOp"); declare_registrable<Resize_Op>(m, "ResizeOp");
m.def("Resize", &Resize, py::arg("name") = ""); m.def("Resize", &Resize,
py::arg("scale") = std::vector<float>({}),
py::arg("size") = std::vector<std::size_t>({}),
py::arg("coord_transfo_mode") =
Interpolation::CoordinateTransformation::HalfPixel,
py::arg("interpolation_mode") =
Interpolation::Mode::RoundPreferFloor,
py::arg("cubic_interpolation_coefficient_a") = -.75f,
py::arg("name") = "", R"mydelimiter(
Initialize a node containing a Resize operator.
This node can take 4 different inputs.
#0 Input to resize
#1 ROI NOT SUPPORTED (optional) - Tensor(double|float|float16)
#2 scales (optional) - tensor(float): #3 sizes - tensor(int64)
#3 sizes - tensor(int64)
:type coordinate_transformation_mode : :py:class: List[Int]
:param interpolationMode : Type of interpolation used in case of upsampling
:type interpolationMode : Interpolation::Mode
:param cubic_coeff_a : "A" coefficient of cubic interpolation. Only used if interpolation_mode = Interpolation::Mode::Cubic
:type cubic_coeff_a : float
:param name : name of the node.
:type name : str
)mydelimiter");
} }
} // namespace Aidge } // namespace Aidge
...@@ -20,6 +20,7 @@ void init_Random(py::module&); ...@@ -20,6 +20,7 @@ void init_Random(py::module&);
void init_Data(py::module&); void init_Data(py::module&);
void init_Database(py::module&); void init_Database(py::module&);
void init_DataProvider(py::module&); void init_DataProvider(py::module&);
void init_Interpolation(py::module&);
void init_Tensor(py::module&); void init_Tensor(py::module&);
void init_TensorImpl(py::module&); void init_TensorImpl(py::module&);
void init_Attributes(py::module&); void init_Attributes(py::module&);
...@@ -107,6 +108,7 @@ void init_Aidge(py::module& m) { ...@@ -107,6 +108,7 @@ void init_Aidge(py::module& m) {
init_Data(m); init_Data(m);
init_Database(m); init_Database(m);
init_DataProvider(m); init_DataProvider(m);
init_Interpolation(m);
init_Tensor(m); init_Tensor(m);
init_TensorImpl(m); init_TensorImpl(m);
init_Attributes(m); init_Attributes(m);
......
...@@ -95,6 +95,10 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs) ...@@ -95,6 +95,10 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
Log::debug("getBestMatch() for requirements: {}", requiredSpecs); Log::debug("getBestMatch() for requirements: {}", requiredSpecs);
const auto availableSpecsSet = getAvailableImplSpecs(); const auto availableSpecsSet = getAvailableImplSpecs();
AIDGE_ASSERT(availableSpecsSet.size() > 0 ,
"OperatorImpl::getBestMatch(): No available specs found by"
"getAvailableSpecs(). "
"Cannot find best implementation for required specs, aborting.");
const std::vector<ImplSpec> availableSpecs(availableSpecsSet.begin(), availableSpecsSet.end()); const std::vector<ImplSpec> availableSpecs(availableSpecsSet.begin(), availableSpecsSet.end());
std::vector<int> matchingSpecs(availableSpecs.size(), -1); std::vector<int> matchingSpecs(availableSpecs.size(), -1);
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <cstddef> #include <cstddef>
#include <vector> #include <vector>
#include "aidge/data/half.hpp"
#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Abs.hpp" #include "aidge/operator/Abs.hpp"
...@@ -23,14 +24,14 @@ ...@@ -23,14 +24,14 @@
#include "aidge/operator/ReduceMean.hpp" #include "aidge/operator/ReduceMean.hpp"
#include "aidge/operator/Sub.hpp" #include "aidge/operator/Sub.hpp"
#include "aidge/operator/Sqrt.hpp" #include "aidge/operator/Sqrt.hpp"
#include "aidge/operator/Transpose.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
namespace Aidge {
Aidge::Tensor::~Tensor() noexcept = default; Tensor::~Tensor() noexcept = default;
Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const { Tensor Tensor::operator+(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type"); AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
...@@ -47,7 +48,7 @@ Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const { ...@@ -47,7 +48,7 @@ Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
} }
Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const { Tensor Tensor::operator-(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type"); AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
...@@ -64,7 +65,7 @@ Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const { ...@@ -64,7 +65,7 @@ Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
} }
Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const { Tensor Tensor::operator*(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type"); AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
...@@ -81,7 +82,7 @@ Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const { ...@@ -81,7 +82,7 @@ Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
} }
Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const { Tensor Tensor::operator/(const Tensor& other) const {
AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type"); AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
...@@ -97,7 +98,7 @@ Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const { ...@@ -97,7 +98,7 @@ Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
return div_.getOutput(0)->clone(); return div_.getOutput(0)->clone();
} }
Aidge::Tensor Aidge::Tensor::sqrt() const { Tensor Tensor::sqrt() const {
AIDGE_ASSERT(hasImpl(), "Tensor has no implementation."); AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
auto sqrt_ = Sqrt_Op(); auto sqrt_ = Sqrt_Op();
sqrt_.associateInput(0, std::make_shared<Tensor>(*this)); sqrt_.associateInput(0, std::make_shared<Tensor>(*this));
...@@ -108,7 +109,7 @@ Aidge::Tensor Aidge::Tensor::sqrt() const { ...@@ -108,7 +109,7 @@ Aidge::Tensor Aidge::Tensor::sqrt() const {
return sqrt_.getOutput(0)->clone(); return sqrt_.getOutput(0)->clone();
} }
Aidge::Tensor Aidge::Tensor::abs() const { Tensor Tensor::abs() const {
AIDGE_ASSERT(hasImpl(), "Tensor has no implementation."); AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
auto abs_ = Abs_Op(); auto abs_ = Abs_Op();
abs_.associateInput(0, std::make_shared<Tensor>(*this)); abs_.associateInput(0, std::make_shared<Tensor>(*this));
...@@ -119,7 +120,7 @@ Aidge::Tensor Aidge::Tensor::abs() const { ...@@ -119,7 +120,7 @@ Aidge::Tensor Aidge::Tensor::abs() const {
return abs_.getOutput(0)->clone(); return abs_.getOutput(0)->clone();
} }
Aidge::Tensor Aidge::Tensor::mean() const { Tensor Tensor::mean() const {
AIDGE_ASSERT(hasImpl(), "Tensor has no implementation."); AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
// TODO: should be the default behavior of ReduceMean_Op // TODO: should be the default behavior of ReduceMean_Op
// No need to specify the list of all axes! // No need to specify the list of all axes!
...@@ -134,7 +135,7 @@ Aidge::Tensor Aidge::Tensor::mean() const { ...@@ -134,7 +135,7 @@ Aidge::Tensor Aidge::Tensor::mean() const {
return mean_.getOutput(0)->clone(); return mean_.getOutput(0)->clone();
} }
Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) { Tensor& Tensor::operator=(const Tensor& other) {
if (this == &other) { if (this == &other) {
return *this; return *this;
} }
...@@ -154,7 +155,7 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) { ...@@ -154,7 +155,7 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
} }
void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t device, bool copyFrom) { void Tensor::setBackend(const std::string &name, DeviceIdx_t device, bool copyFrom) {
if (mImpl) { if (mImpl) {
if (mImpl->device() != std::make_pair(name, device)) { if (mImpl->device() != std::make_pair(name, device)) {
// Backend change: create new impl, copy from old to new and replace // Backend change: create new impl, copy from old to new and replace
...@@ -171,8 +172,8 @@ void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t devic ...@@ -171,8 +172,8 @@ void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t devic
} }
} }
void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims, void Tensor::resize(const std::vector<DimSize_t>& dims,
std::vector<Aidge::DimSize_t> strides) { std::vector<DimSize_t> strides) {
if (dims.empty()) { // scalar if (dims.empty()) { // scalar
mDims = std::vector<DimSize_t>(0); mDims = std::vector<DimSize_t>(0);
mStrides = std::vector<DimSize_t>({1}); mStrides = std::vector<DimSize_t>({1});
...@@ -234,7 +235,7 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims, ...@@ -234,7 +235,7 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
} }
} }
std::string Aidge::Tensor::toString() const { std::string Tensor::toString() const {
if (!hasImpl() || undefined()) { if (!hasImpl() || undefined()) {
// Return no value on no implementation or undefined size // Return no value on no implementation or undefined size
...@@ -343,7 +344,7 @@ std::string Aidge::Tensor::toString() const { ...@@ -343,7 +344,7 @@ std::string Aidge::Tensor::toString() const {
return res; return res;
} }
Aidge::Tensor Aidge::Tensor::extract( Tensor Tensor::extract(
const std::vector<std::size_t>& fixedCoord) const { const std::vector<std::size_t>& fixedCoord) const {
AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous"); AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
AIDGE_ASSERT(fixedCoord.size() <= mDims.size(), AIDGE_ASSERT(fixedCoord.size() <= mDims.size(),
...@@ -359,7 +360,7 @@ Aidge::Tensor Aidge::Tensor::extract( ...@@ -359,7 +360,7 @@ Aidge::Tensor Aidge::Tensor::extract(
return subTensor; return subTensor;
} }
Aidge::Tensor Aidge::Tensor::extract( Tensor Tensor::extract(
const std::vector<std::size_t>& startCoord, const std::vector<std::size_t>& startCoord,
const std::vector<std::size_t>& dims) const { const std::vector<std::size_t>& dims) const {
AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous"); AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
...@@ -373,7 +374,7 @@ Aidge::Tensor Aidge::Tensor::extract( ...@@ -373,7 +374,7 @@ Aidge::Tensor Aidge::Tensor::extract(
return subTensor; return subTensor;
} }
void Aidge::Tensor::makeContiguous() { void Tensor::makeContiguous() {
if (!mImpl || isContiguous()) { if (!mImpl || isContiguous()) {
return; return;
} }
...@@ -411,7 +412,7 @@ void Aidge::Tensor::makeContiguous() { ...@@ -411,7 +412,7 @@ void Aidge::Tensor::makeContiguous() {
resize(mDims); resize(mDims);
} }
void Aidge::Tensor::copyCast(const Tensor& src) { void Tensor::copyCast(const Tensor& src) {
if (&src == this) { if (&src == this) {
return; return;
} }
...@@ -432,7 +433,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) { ...@@ -432,7 +433,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) {
src.size(), mImplOffset); src.size(), mImplOffset);
} }
void Aidge::Tensor::copyFrom(const Tensor& src) { void Tensor::copyFrom(const Tensor& src) {
if (&src == this) { if (&src == this) {
return; return;
} }
...@@ -453,7 +454,7 @@ void Aidge::Tensor::copyFrom(const Tensor& src) { ...@@ -453,7 +454,7 @@ void Aidge::Tensor::copyFrom(const Tensor& src) {
mImplOffset); mImplOffset);
} }
void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) { void Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) {
std::vector<DimSize_t> newDims; std::vector<DimSize_t> newDims;
for (std::size_t i = 0; i < src.dims().size(); ++i) { for (std::size_t i = 0; i < src.dims().size(); ++i) {
newDims.push_back(src.dims()[transpose[i]]); newDims.push_back(src.dims()[transpose[i]]);
...@@ -495,11 +496,11 @@ void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t ...@@ -495,11 +496,11 @@ void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t
setImpl(newImpl); setImpl(newImpl);
} }
void Aidge::Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) { void Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) {
copyTranspose(src, std::vector<DimSize_t>(transpose.begin(), transpose.end())); copyTranspose(src, std::vector<DimSize_t>(transpose.begin(), transpose.end()));
} }
void Aidge::Tensor::copyCastFrom(const Tensor& src, void Tensor::copyCastFrom(const Tensor& src,
std::shared_ptr<Tensor>& movedSrcPtr) { std::shared_ptr<Tensor>& movedSrcPtr) {
if (&src == this) { if (&src == this) {
return; return;
...@@ -532,13 +533,13 @@ void Aidge::Tensor::copyCastFrom(const Tensor& src, ...@@ -532,13 +533,13 @@ void Aidge::Tensor::copyCastFrom(const Tensor& src,
} }
} }
Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) { Tensor& Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) {
// Scott Meyers' solution to avoid code duplication // Scott Meyers' solution to avoid code duplication
return const_cast<Tensor&>( return const_cast<Tensor&>(
static_cast<const Tensor&>(*this).refContiguous(fallback)); static_cast<const Tensor&>(*this).refContiguous(fallback));
} }
const Aidge::Tensor& Aidge::Tensor::refContiguous( const Tensor& Tensor::refContiguous(
std::shared_ptr<Tensor>& fallback) const { std::shared_ptr<Tensor>& fallback) const {
AIDGE_ASSERT(getImpl(), AIDGE_ASSERT(getImpl(),
"no backend was set for tensor, cannot refCast() it"); "no backend was set for tensor, cannot refCast() it");
...@@ -557,15 +558,15 @@ const Aidge::Tensor& Aidge::Tensor::refContiguous( ...@@ -557,15 +558,15 @@ const Aidge::Tensor& Aidge::Tensor::refContiguous(
} }
} }
Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, Tensor& Tensor::refCast(std::shared_ptr<Tensor>& fallback,
const Aidge::DataType& dt) { const DataType& dt) {
// Scott Meyers' solution to avoid code duplication // Scott Meyers' solution to avoid code duplication
return const_cast<Tensor&>( return const_cast<Tensor&>(
static_cast<const Tensor&>(*this).refCast(fallback, dt)); static_cast<const Tensor&>(*this).refCast(fallback, dt));
} }
const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, const Tensor& Tensor::refCast(std::shared_ptr<Tensor>& fallback,
const Aidge::DataType& dt) const { const DataType& dt) const {
AIDGE_ASSERT(getImpl(), AIDGE_ASSERT(getImpl(),
"no backend was set for tensor, cannot refCast() it"); "no backend was set for tensor, cannot refCast() it");
...@@ -598,7 +599,7 @@ const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, ...@@ -598,7 +599,7 @@ const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
} }
} }
Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, Tensor& Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
const std::string& backend, const std::string& backend,
DeviceIdx_t device) { DeviceIdx_t device) {
// Scott Meyers' solution to avoid code duplication // Scott Meyers' solution to avoid code duplication
...@@ -606,7 +607,7 @@ Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, ...@@ -606,7 +607,7 @@ Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
static_cast<const Tensor&>(*this).refFrom(fallback, backend, device)); static_cast<const Tensor&>(*this).refFrom(fallback, backend, device));
} }
const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, const Tensor& Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
const std::string& backend, const std::string& backend,
DeviceIdx_t device) const { DeviceIdx_t device) const {
AIDGE_ASSERT(getImpl(), AIDGE_ASSERT(getImpl(),
...@@ -639,8 +640,8 @@ const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, ...@@ -639,8 +640,8 @@ const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
} }
} }
Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, Tensor& Tensor::ref(std::shared_ptr<Tensor>& fallback,
const Aidge::DataType& dt, const DataType& dt,
const std::string& backend, const std::string& backend,
DeviceIdx_t device) { DeviceIdx_t device) {
// Scott Meyers' solution to avoid code duplication // Scott Meyers' solution to avoid code duplication
...@@ -648,8 +649,8 @@ Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, ...@@ -648,8 +649,8 @@ Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device)); static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device));
} }
const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const Tensor& Tensor::ref(std::shared_ptr<Tensor>& fallback,
const Aidge::DataType& dt, const DataType& dt,
const std::string& backend, const std::string& backend,
DeviceIdx_t device) const { DeviceIdx_t device) const {
AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot ref() it"); AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot ref() it");
...@@ -673,9 +674,64 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, ...@@ -673,9 +674,64 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
} }
} }
std::set<std::string> Aidge::Tensor::getAvailableBackends() {
std::vector<std::size_t>
Tensor::toCoord(const std::vector<DimSize_t>& dimensions, std::size_t index) {
std::vector<std::size_t> coord(dimensions.size());
std::size_t i = dimensions.size();
while (i-- > 0) {
coord[i] = (index % dimensions[i]);
index /= dimensions[i];
}
return coord;
}
/// Compute the flat (row-major) index matching `coords` inside a tensor of
/// shape `dimensions`. Asserts that the coordinate count matches the rank.
std::size_t Tensor::toIndex(const std::vector<DimSize_t> &dimensions, const std::vector<std::size_t>& coords) {
    // Fixed: the message previously named Tensor::getIdx() instead of this
    // function, which made the failure hard to trace.
    AIDGE_ASSERT(coords.size() == dimensions.size(), "Tensor::toIndex(): Coordinates does not match number of dimensions.\n\tCoords : {}\n\tDimensions: {}",coords, dimensions);
    std::size_t index = 0;
    std::size_t stride = 1; // cumulated size of the dimensions already processed
    std::size_t i = dimensions.size();
    while (i-- > 0) {
        index += coords[i] * stride;
        stride *= dimensions[i];
    }
    return index;
}
/// Check whether every coordinate lies inside the tensor of shape
/// `dimensions` (i.e. 0 <= coords[i] < dimensions[i] for all i).
/// Asserts that the coordinate count matches the rank.
template<typename T>
bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<T>& coords){
    AIDGE_ASSERT(coords.size() == dimensions.size(),
                 "Coordinates({}) to compare have not "
                 "the same number of dimension as tensor dimensions({}), aborting.",
                 coords,
                 dimensions);
    for (std::size_t dim = 0; dim < coords.size(); ++dim) {
        // Out of bounds as soon as one axis is negative or past its extent.
        if (coords[dim] < 0 || coords[dim] >= static_cast<T>(dimensions[dim])) {
            return false;
        }
    }
    return true;
}
bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::size_t index){
return index < std::accumulate(dimensions.cbegin(), dimensions.cend(), std::size_t(1), std::multiplies<std::size_t>());
}
template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int16_t>& coords);
template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int32_t>& coords);
template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int64_t>& coords);
template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::size_t>& coords);
template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<float>& coords);
template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<double>& coords);
std::set<std::string> Tensor::getAvailableBackends() {
std::set<std::string> backendsList; std::set<std::string> backendsList;
for (const auto& tupleKey : Registrar<Tensor>::getKeys()) for (const auto& tupleKey : Registrar<Tensor>::getKeys()) {
backendsList.insert(std::get<0>(tupleKey)); backendsList.insert(std::get<0>(tupleKey));
}
return backendsList; return backendsList;
} }
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/data/Interpolation.hpp"
#include <algorithm> // std::clamp
#include <bitset>
#include <cmath> // std::ceil, std::floor
#include <cstddef> // std::size_t
#include <cstdint> // std::int64_t
#include <stdexcept> // std::runtime_error
#include <utility> // std::make_pair, std::set
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/data/half.hpp"
#include "aidge/operator/Pad.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Log.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
/**
 * @brief Backend-agnostic stub: interpolation is implemented by
 * backend-specific subclasses, so calling this generic version always
 * aborts with a runtime error.
 */
template <typename T>
[[noreturn]] T
Interpolation::interpolate(const std::vector<float> & /*originalIndex*/,
                           const std::vector<Point<T>> & /*points*/,
                           const Mode /*interpMode*/) {
    // Fixed message: "dependendant" typo and missing spaces at the string
    // literal joins ("should be""called" rendered as "should becalled").
    AIDGE_THROW_OR_ABORT(
        std::runtime_error,
        "interpolate() is backend dependent and should be "
        "called from derived classes: Interpolation<Backend>::interpolate(...). "
        "Meaning that for CPU backend, InterpolationCPU::interpolate() should "
        "be called.");
}
/**
 * @brief Map coordinates expressed in the interpolated (output) tensor back
 * to floating-point coordinates in the original (input) tensor, according to
 * the requested coordinate-transformation mode.
 * Only Asymmetric and HalfPixel are supported; the other modes abort.
 */
std::vector<float> Interpolation::untransformCoordinates(
    const std::vector<DimSize_t> &transformedCoords,
    const std::vector<DimSize_t> &inputDims,
    const std::vector<DimSize_t> &outputDims,
    const Interpolation::CoordinateTransformation coordTransfoMode) {
    AIDGE_ASSERT(
        inputDims.size() == outputDims.size(),
        "Interpolate::untransformCoordinates: input and output coordinates "
        "dimension number mismatch, they should be equal."
        "Got inputDims({}) and outputDims ({}).",
        inputDims,
        outputDims);
    AIDGE_ASSERT(
        transformedCoords.size() == outputDims.size(),
        "Interpolate::untransformCoordinates: coordinates dimension mismatch, "
        "transformed coords number should be equal to output dimension number."
        "Got coords to transform ({}) and outputDims ({})",
        transformedCoords,
        outputDims);

    std::vector<float> originalCoords(transformedCoords.size());
    for (DimIdx_t dim = 0; dim < transformedCoords.size(); ++dim) {
        // Per-axis resize ratio between interpolated and original tensors.
        const float scale = static_cast<float>(outputDims[dim]) /
                            static_cast<float>(inputDims[dim]);

        if (coordTransfoMode == CoordinateTransformation::Asymmetric) {
            originalCoords[dim] = transformedCoords[dim] / scale;
        } else if (coordTransfoMode == CoordinateTransformation::HalfPixel) {
            originalCoords[dim] = (transformedCoords[dim] + 0.5) / scale - 0.5;
        } else if (coordTransfoMode ==
                   CoordinateTransformation::AlignCorners) {
            AIDGE_THROW_OR_ABORT(
                std::runtime_error,
                "Interpolation::untransformCoords: Unsupported Coordinate "
                "transform : AlignCorners");
        } else if (coordTransfoMode ==
                   CoordinateTransformation::HalfPixelSymmetric) {
            AIDGE_THROW_OR_ABORT(
                std::runtime_error,
                "Interpolation::untransformCoords: Unsupported Coordinate "
                "transform : HalfPixelSymmetric");
        } else if (coordTransfoMode ==
                   Interpolation::CoordinateTransformation::PytorchHalfPixel) {
            AIDGE_THROW_OR_ABORT(
                std::runtime_error,
                "Interpolation::untransformCoords: Unsupported Coordinate "
                "transform : PytorchHalfPixel");
        }
    }
    return originalCoords;
}
/**
 * @details Generates a list of all neighbours of a given coordinate.
 * Since the coordinates are floating points as they are the result of
 * Interpolation::untransformCoords, they are approximation of coordinates in
 * originalTensor frame from coordinates in interpolatedTensor frame.
 *
 * So to retrieve the neighbouring values, we must apply either floor() or
 * ceil() to each coordinate.
 *
 * In order to generate the list of all combinations available, we simply
 * iterate through the bits of each value from 0 to 2^tensorDims.size() - 1;
 * bit j set means floor() on dimension j, bit j clear means ceil().
 * @example : in 2 dimensions, we have the point (1.3, 3.4)
 * we iterate up to 2^2 - 1 and
 * 0 = 0b00 -> (ceil(x) , ceil(y))  = (2,4)
 * 1 = 0b01 -> (floor(x), ceil(y))  = (1,4)
 * 2 = 0b10 -> (ceil(x) , floor(y)) = (2,3)
 * 3 = 0b11 -> (floor(x), floor(y)) = (1,3)
 * (the enumeration order is irrelevant: results are gathered in a std::set)
 */
template <typename T>
std::set<Interpolation::Point<T>>
Interpolation::retrieveNeighbours(const T *tensorValues,
                                  const std::vector<DimSize_t> &tensorDims,
                                  const std::vector<float> &coords,
                                  const PadBorderType paddingMode) {

    Log::debug("retrieveNeighbours: TensorDims : {}", tensorDims);
    Log::debug("retrieveNeighbours: coords to interpolate : {}", coords);

    std::set<Point<T>> neighbours;
    const std::size_t nbNeighbours = std::size_t(1) << tensorDims.size();
    Coords neighbourCoords(tensorDims.size());

    for (std::size_t i = 0; i < nbNeighbours; ++i) {
        // Bit j of i selects floor (bit set) or ceil (bit clear) on dim j.
        const std::bitset<MaxDim> bits = std::bitset<MaxDim>{i};
        for (size_t j = 0; j < tensorDims.size(); ++j) {
            neighbourCoords[j] =
                bits[j] == 0 ? std::ceil(coords[j]) : std::floor(coords[j]);
        }

        T value;
        if (Tensor::isInBounds(tensorDims, neighbourCoords)) {
            // cast from unsigned to signed won't create problem as we ensured
            // that all neighboursCoords values are > 0 with isInBounds
            value = tensorValues[Tensor::toIndex(
                tensorDims,
                std::vector<DimSize_t>(neighbourCoords.begin(),
                                       neighbourCoords.end()))];
        } else {
            // Out-of-bound neighbour: resolve its value according to the
            // requested padding policy.
            switch (paddingMode) {
            case PadBorderType::Edge:
                // Clamp each coordinate to [0, tensorDims[j] - 1].
                for (DimSize_t j = 0; j < tensorDims.size(); ++j) {
                    neighbourCoords[j] = (neighbourCoords[j] < 0) ? 0 :
                        ((neighbourCoords[j] >= static_cast<std::int64_t>(tensorDims[j])) ? (tensorDims[j] - 1) :
                        neighbourCoords[j]);
                }
                value = tensorValues[Tensor::toIndex(
                    tensorDims,
                    std::vector<DimSize_t>(neighbourCoords.begin(),
                                           neighbourCoords.end()))];
                break;
            case PadBorderType::Zero:
                value = static_cast<T>(0);
                break;
            default:
                AIDGE_THROW_OR_ABORT(
                    std::runtime_error,
                    "Unsupported padding mode as of now for interpolation.");
            }
        }
        neighbours.insert(std::make_pair(neighbourCoords, value));
    }
    Log::debug("Interpolation::retrieveNeighbours(): neighbourCoords: {}",
               neighbours);
    return neighbours;
}
// Explicit template instantiations of retrieveNeighbours for every data
// type the interpolation kernels support (integer, half, float, double).
// Keeping them here lets the template definition live in this .cpp file.
template std::set<Interpolation::Point<int16_t>>
Interpolation::retrieveNeighbours(const int16_t *tensorValues,
                                  const std::vector<DimSize_t> &tensorDims,
                                  const std::vector<float> &coords,
                                  const PadBorderType paddingMode);
template std::set<Interpolation::Point<int32_t>>
Interpolation::retrieveNeighbours(const int32_t *tensorValues,
                                  const std::vector<DimSize_t> &tensorDims,
                                  const std::vector<float> &coords,
                                  const PadBorderType paddingMode);
template std::set<Interpolation::Point<int64_t>>
Interpolation::retrieveNeighbours(const int64_t *tensorValues,
                                  const std::vector<DimSize_t> &tensorDims,
                                  const std::vector<float> &coords,
                                  const PadBorderType paddingMode);
template std::set<Interpolation::Point<half_float::half>>
Interpolation::retrieveNeighbours(const half_float::half *tensorValues,
                                  const std::vector<DimSize_t> &tensorDims,
                                  const std::vector<float> &coords,
                                  const PadBorderType paddingMode);
template std::set<Interpolation::Point<float>>
Interpolation::retrieveNeighbours(const float *tensorValues,
                                  const std::vector<DimSize_t> &tensorDims,
                                  const std::vector<float> &coords,
                                  const PadBorderType paddingMode);
template std::set<Interpolation::Point<double>>
Interpolation::retrieveNeighbours(const double *tensorValues,
                                  const std::vector<DimSize_t> &tensorDims,
                                  const std::vector<float> &coords,
                                  const PadBorderType paddingMode);
} // namespace Aidge
...@@ -45,7 +45,8 @@ Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other) ...@@ -45,7 +45,8 @@ Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other)
void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) { void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs()); AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs());
AIDGE_ASSERT(data->type() == Tensor::Type, "Input data must be of Tensor type"); AIDGE_ASSERT(data != nullptr, "Undefined data argument, make sure that the associated tensor holds data before associating the input.")
AIDGE_ASSERT(data->type() == Tensor::Type, "OperatorTensor::associateInput(): Input data must be of Tensor type, got {}", data->type());
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data); mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
} }
......
...@@ -11,56 +11,31 @@ ...@@ -11,56 +11,31 @@
#include "aidge/operator/Resize.hpp" #include "aidge/operator/Resize.hpp"
#include <cstddef> // std::size_t #include <algorithm>
#include <cstdint> // std::int64_t #include <cstddef> // std::size_t
#include <stdexcept> // std::runtime_error #include <cstdint> // std::int64_t
#include <fmt/core.h>
#include <stdexcept> // std::runtime_error
#include <string> #include <string>
#include <vector> #include <vector>
#include <fmt/core.h>
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/data/Data.hpp"
#include "aidge/data/Interpolation.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
const std::string Aidge::Resize_Op::Type = "Resize"; namespace Aidge {
Aidge::Resize_Op::Resize_Op()
: OperatorTensor(Type,
{InputCategory::Data,
InputCategory::OptionalData,
InputCategory::OptionalData,
InputCategory::OptionalData},
1) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
* but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Aidge::Resize_Op::Resize_Op(const Aidge::Resize_Op& op)
: OperatorTensor(op)
{
if (!op.backend().empty()) {
SET_IMPL_MACRO(Resize_Op, *this, op.backend());
}
else {
mImpl = nullptr;
}
}
std::shared_ptr<Aidge::Operator> Aidge::Resize_Op::clone() const { const std::string Resize_Op::Type = "Resize";
return std::make_shared<Resize_Op>(*this);
}
bool Aidge::Resize_Op::dimsForwarded() const { bool Resize_Op::dimsForwarded() const {
// in case of ROI add getInput(1) condition // in case of ROI add getInput(1) condition
if ((getInput(1) && !getInput(1)->undefined()) if ((getInput(1) && !getInput(1)->undefined()) ||
|| (getInput(2) && !getInput(2)->undefined()) (getInput(2) && !getInput(2)->undefined()) ||
|| (getInput(3) && !getInput(3)->undefined()) (getInput(3) && !getInput(3)->undefined())) {
)
{
// output dims are data dependent // output dims are data dependent
return false; return false;
} }
...@@ -68,93 +43,137 @@ bool Aidge::Resize_Op::dimsForwarded() const { ...@@ -68,93 +43,137 @@ bool Aidge::Resize_Op::dimsForwarded() const {
return OperatorTensor::dimsForwarded(); return OperatorTensor::dimsForwarded();
} }
bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) { bool Resize_Op::forwardDims(bool allowDataDependency) {
if (inputsAssociated()) { if (!allowDataDependency) {
AIDGE_ASSERT(getInput(0)->nbDims() == 4, Log::warn("{}: cannot execute forwardDims() as the output "
"input tensor must have dimensions = 4 (batch, channel, height, width)."); "dimensions are computed from some input data.",
type());
const bool input1ROIPresent = getInput(1) && !getInput(1)->undefined(); return false;
const bool input2ScalesPresent = getInput(2) && !getInput(2)->undefined(); }
const bool input3SizesPresent = getInput(3) && !getInput(3)->undefined();
AIDGE_ASSERT(input2ScalesPresent != input3SizesPresent, "Only one of scales and sizes can be specified.") // Some optional input may be linked but undefined because of ONNX import
if (!inputsAssociated(false)) {
return false;
}
if (input1ROIPresent) { /** @brief input #0 */
AIDGE_THROW_OR_ABORT(std::runtime_error, "Input #1 (ROI) is given and it is not supported."); constexpr IOIndex_t inDataIdx = 0;
/** @brief input #1 */
constexpr IOIndex_t inROIIdx = 1;
/** @brief input #2 */
constexpr IOIndex_t inScalesIdx = 2;
/** @brief input #3 */
constexpr IOIndex_t inSizesIdx = 3;
std::vector<DimSize_t> outDims = getInput(inDataIdx)->dims();
/////////////////////////////////////////////////////
// Ensuring operator is connected properly
const bool inputROIPresent =
getInput(inROIIdx) && !getInput(inROIIdx)->undefined();
if (inputROIPresent) {
AIDGE_THROW_OR_ABORT(
std::runtime_error,
"{}: input ROI(#{}) is present but it is not supported.",
type(),
inROIIdx);
} }
else if (input2ScalesPresent) {
if (!allowDataDependency) {
Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #2");
return false;
}
AIDGE_ASSERT(getInput(0)->nbDims() == getInput(2)->size(),
"input #0 and input #2 (Scales) must have the same dimensions.");
std::vector<DimSize_t> outDims = getInput(0)->dims();
const std::vector<DimSize_t> inDims = getInput(0)->dims();
std::shared_ptr<Tensor> fallback; const bool inputScalesPresent =
const auto& scales = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu"); getInput(inScalesIdx) && !getInput(inScalesIdx)->undefined();
const bool inputSizesPresent =
for (std::size_t dim=0; dim < getInput(2)->size(); ++dim) { getInput(inSizesIdx) && !getInput(inSizesIdx)->undefined();
outDims[dim] = inDims[dim]*static_cast<int64_t*>(scales.getImpl()->hostPtr())[dim];
} AIDGE_ASSERT(inputScalesPresent ^ inputSizesPresent,
"{}: Only one of the two inputs must be defined between input "
mOutputs[0]->resize(outDims); "Scales(#2) "
return true; "and Sizes(#3). They cannot be specified at the same time.",
type())
std::shared_ptr<Tensor> resizeParam = inputScalesPresent ? getInput(inScalesIdx) : getInput(inSizesIdx);
AIDGE_ASSERT(getInput(inDataIdx)->nbDims() == resizeParam->size(),
"{}: data input #0 and resizing parameter input #{} must have the "
"same dimensions.",
type(), inputScalesPresent ? inScalesIdx :inSizesIdx);
////////////////////////////////////////////
// Case resize is done using Scales formula
if (inputScalesPresent) {
std::shared_ptr<Tensor> fallback;
const auto &scales =
resizeParam
->refCastFrom(fallback,
DataType::Float32,
resizeParam->backend());
const std::vector<DimSize_t> inDims = getInput(inDataIdx)->dims();
for (std::size_t dim = 0; dim < getInput(inScalesIdx)->size(); ++dim) {
const auto scaleAlongDim = scales.get<cpptype_t<DataType::Float32>>(dim);
AIDGE_ASSERT(scaleAlongDim > 0,
"{}: all scales values must be sctricly positive, "
"got {}.",
type(),
scaleAlongDim);
outDims[dim] =
static_cast<DimSize_t>(inDims[dim] * scaleAlongDim);
} }
else if (input3SizesPresent) {
if (!allowDataDependency) {
Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #3");
return false;
}
AIDGE_ASSERT(getInput(0)->nbDims() == getInput(3)->size(),
"input #0 and input #3 (Sizes) must have the same dimensions.");
std::vector<DimSize_t> outDims = getInput(0)->dims();
std::shared_ptr<Tensor> fallback; ///////////////////////////////////////////////////////////////
const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu"); // case where resize output dims are given via the Size input
} else {
for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) { std::shared_ptr<Tensor> fallback;
outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim]; const auto &sizes = resizeParam
} ->refCastFrom(fallback,
NativeType<DimSize_t>::type,
mOutputs[0]->resize(outDims); resizeParam->backend());
return true;
} for (std::size_t dim = 0; dim < getInput(inSizesIdx)->size(); ++dim) {
else { outDims[dim] = sizes.get<DimSize_t>(dim);
AIDGE_THROW_OR_ABORT(std::runtime_error, "Error: Either Input #2 or Input #3 must be present.");
} }
} }
mOutputs[0]->resize(outDims);
return false; return true;
} }
void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { void Resize_Op::setBackend(const std::string &name, DeviceIdx_t device) {
SET_IMPL_MACRO(Resize_Op, *this, name); SET_IMPL_MACRO(Resize_Op, *this, name);
mOutputs[0]->setBackend(name, device); mOutputs[0]->setBackend(name, device);
// By default, automatically set backend for all inputs: roi, scales and sizes // By default, automatically set backend for all optional inputs: roi, scales and
if(getInput(1)) { // sizes
if (getInput(1)) {
getInput(1)->setBackend(name, device); getInput(1)->setBackend(name, device);
} }
if(getInput(2)) { if (getInput(2)) {
getInput(2)->setBackend(name, device); getInput(2)->setBackend(name, device);
} }
if(getInput(3)) { if (getInput(3)) {
getInput(3)->setBackend(name, device); getInput(3)->setBackend(name, device);
} }
} }
std::set<std::string> Aidge::Resize_Op::getAvailableBackends() const { std::shared_ptr<Node>
return Registrar<Resize_Op>::getKeys(); Resize(std::vector<float> scale,
} std::vector<std::size_t> size,
Interpolation::CoordinateTransformation coordTransfoMode,
///////////////////////////////////////////// Interpolation::Mode interpolMode,
float cubicCoefA,
const std::string &name) {
std::shared_ptr<Node> node_resize = std::make_shared<Node>(std::make_shared<Resize_Op>(coordTransfoMode,
interpolMode,
cubicCoefA),
name);
if (scale.size()) {
std::shared_ptr<Node> prod_scale = Producer(std::make_shared<Tensor>(Vector<float>(scale)));
prod_scale->addChild(node_resize, 0, 2);
}
if (size.size())
{
std::shared_ptr<Node> prod_size = Producer(std::make_shared<Tensor>(Vector<std::size_t>(size)));
prod_size->addChild(node_resize, 0, 3);
}
return node_resize;
std::shared_ptr<Aidge::Node> Aidge::Resize(const std::string &name) { }
return std::make_shared<Node>(std::make_shared<Resize_Op>(), name); } // namespace Aidge
}
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cstdlib>
#include <memory>
#include <string>
#include <vector>
#include <catch2/catch_test_macros.hpp>
#include <catch2/generators/catch_generators_random.hpp>
#include "aidge/data/Data.hpp"
#include "aidge/data/Interpolation.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/filler/Filler.hpp"
#include "aidge/operator/Pad.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
TEST_CASE("[core/data] Interpolation", "[Interpolation][Data]") {
    // NOTE(review): removed Log::setConsoleLevel(Log::Debug) — it mutated
    // the global console log level for every test executed afterwards.

    // 10x10 tensor filled with a single constant so that any in-bounds
    // neighbour lookup must return 1337.F.
    auto tensor = std::make_shared<Tensor>(std::vector<DimSize_t>({10, 10}));
    tensor->setDataType(DataType::Float32);
    tensor->setBackend("cpu");
    Aidge::constantFiller(tensor, 1337.F);

    SECTION("retrieveNeighbours") {
        std::set<Interpolation::Point<float>> neighbours;
        std::set<Interpolation::Point<float>> expectedResult;
        std::vector<float> coords;
        SECTION("Out of bounds") {
            // (-0.5, -0.5) has 3 out-of-bounds neighbours; with Zero padding
            // they must be reported with value 0.
            coords = {-0.5, -0.5};
            expectedResult = {{{-1, -1}, 0.F},
                              {{0, -1}, 0.F},
                              {{-1, 0}, 0.F},
                              {{0, 0}, 1337.F}};

            neighbours = Interpolation::retrieveNeighbours<float>(
                reinterpret_cast<float *>(tensor->getImpl()->rawPtr()),
                tensor->dims(),
                coords,
                PadBorderType::Zero);

            CHECK(neighbours == expectedResult);
        }
        SECTION("Some coords are rounds hence duplicates are filtered out") {
            tensor = std::make_shared<Tensor>(
                std::vector<DimSize_t>({5, 10, 10, 10}));
            tensor->setDataType(DataType::Float32);
            tensor->setBackend("cpu");
            Aidge::constantFiller(tensor, 1337.F);

            // Integral coordinates give floor(c) == ceil(c): the duplicated
            // neighbour points are merged away by the std::set.
            expectedResult = {{{0, 0, -1, -1}, 0.F},
                              {{0, 0, 0, -1}, 0.F},
                              {{0, 0, -1, 0}, 0.F},
                              {{0, 0, 0, 0}, 1337.F}};

            // NOTE(review): no paddingMode argument here — relies on the
            // default declared in Interpolation.hpp (presumably Zero, given
            // the expected 0.F values). TODO confirm against the header.
            neighbours = Interpolation::retrieveNeighbours(
                reinterpret_cast<float *>(tensor->getImpl()->rawPtr()),
                tensor->dims(),
                std::vector<float>({0, 0, -0.25, -0.25}));

            CHECK(expectedResult == neighbours);
        }
    }
}
} // namespace Aidge
...@@ -9,9 +9,9 @@ ...@@ -9,9 +9,9 @@
* *
********************************************************************************/ ********************************************************************************/
#include <array>
#include <cstddef> // std::size_t #include <cstddef> // std::size_t
#include <cstdint> // std::uint8_t, std::uint16_t, std::int32_t #include <cstdint> // std::uint8_t, std::uint16_t, std::int32_t
#include <cstdlib>
#include <numeric> // std::accumulate, std::inner_product #include <numeric> // std::accumulate, std::inner_product
#include <functional> // std::multiplies #include <functional> // std::multiplies
#include <random> // std::mt19937, #include <random> // std::mt19937,
...@@ -340,32 +340,63 @@ TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") { ...@@ -340,32 +340,63 @@ TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") {
} }
// Test get() and set() by coords // Test get() and set() by coords
// We create coords of rank 0 to the number of dimensions // We create coords of the number of dimensions
for (std::size_t coord_size = 0; coord_size < dims.size(); ++coord_size) { std::vector<std::size_t> coords(nb_dims);
std::vector<std::size_t> coords(coord_size); for (std::size_t coord_idx = 0; coord_idx < nb_dims; ++coord_idx) {
for (std::size_t coord_idx = 0; coord_idx < coord_size; ++coord_idx) { std::size_t dim_idx = (dimsDist(gen)-1) % dims[coord_idx];
std::size_t dim_idx = (dimsDist(gen)-1) % dims[coord_idx]; coords[coord_idx] = dim_idx;
coords[coord_idx] = dim_idx;
}
std::size_t flat_idx, flat_storage_idx;
// As it is continuous we have getIdx() == getStorageIdx()
REQUIRE_NOTHROW(flat_idx = x.getIdx(coords));
REQUIRE_NOTHROW(flat_storage_idx = x.getStorageIdx(coords));
REQUIRE(flat_storage_idx == flat_idx);
float val, val_flat;
// Test get() by index and by coords
REQUIRE_NOTHROW(val_flat = x.get<float>(flat_idx));
REQUIRE_NOTHROW(val = x.get<float>(coords));
REQUIRE(val == val_flat);
REQUIRE(val == values[flat_idx]);
// Test set() by coords, also update the reference array
REQUIRE_NOTHROW(x.set(coords, val + 1));
values[flat_idx] += 1;
} }
std::size_t flat_idx, flat_storage_idx;
// As it is continuous we have getIdx() == getStorageIdx()
REQUIRE_NOTHROW(flat_idx = x.getIdx(coords));
REQUIRE_NOTHROW(flat_storage_idx = x.getStorageIdx(coords));
REQUIRE(flat_storage_idx == flat_idx);
float val, val_flat;
// Test get() by index and by coords
REQUIRE_NOTHROW(val_flat = x.get<float>(flat_idx));
REQUIRE_NOTHROW(val = x.get<float>(coords));
REQUIRE(val == val_flat);
REQUIRE(val == values[flat_idx]);
// Test set() by coords, also update the reference array
REQUIRE_NOTHROW(x.set(coords, val + 1));
values[flat_idx] += 1;
} }
} }
} }
    SECTION("Index & coord manipulation"){
        // Small 2x2 tensor used to exercise the static index/coordinate
        // conversion helpers (toIndex / toCoord / isInBounds).
        Tensor tensor;
        std::vector<DimSize_t> dims {2,2};
        int nbVal = std::accumulate(dims.begin(),
                                    dims.end(),
                                    1,
                                    std::multiplies<DimSize_t>());
        // NOTE(review): this buffer is malloc'ed and handed to the impl via
        // setRawPtr but never freed in this test — leaks unless setRawPtr
        // takes ownership; TODO confirm the ownership semantics.
        float* values = static_cast<float*>(malloc(nbVal * sizeof(float)));
        values[0] = 0;
        values[1] = 1;
        values[2] = 2;
        values[3] = 3;
        // NOTE(review): the buffer holds floats but the tensor is declared
        // Int32 — harmless here since the checks below only use dims, but
        // the dtype/buffer mismatch looks unintended.
        tensor.setDataType(DataType::Int32);
        tensor.setBackend("cpu");
        tensor.resize(dims);
        tensor.getImpl()->setRawPtr(values, 4);

        std::vector<std::size_t> coords;
        SECTION("getIdx"){
            // Row-major flattening on a 2x2 tensor: (1,1) -> 3, (1,0) -> 2.
            CHECK(Tensor::toIndex(tensor.dims(), std::vector<std::size_t>({1,1}) ) == 3);
            CHECK(Tensor::toIndex(tensor.dims(), std::vector<std::size_t>({1,0}) ) == 2);
            // No check to ensure if value is in bounds
            CHECK_THROWS(tensor.getIdx(std::vector<std::size_t>({0,2})));
        }
        SECTION("getCoord"){
            // Inverse mapping of toIndex.
            CHECK(Tensor::toCoord(tensor.dims(), 3 ) ==std::vector<std::size_t>({1,1}));
            CHECK(Tensor::toCoord(tensor.dims(), 2 ) ==std::vector<std::size_t>({1,0}));
        }
        SECTION("isInBound"){
            // Rank mismatch throws; out-of-range or negative coordinates are
            // reported as out of bounds.
            CHECK_THROWS(Tensor::isInBounds(dims, std::vector<DimSize_t>({1,2,4,5})) == true);
            CHECK(Tensor::isInBounds(dims, std::vector<DimSize_t>({1,2})) == false);
            CHECK(Tensor::isInBounds(dims, std::vector<int>({-1,1})) == false);
            CHECK(Tensor::isInBounds(dims, std::vector<DimSize_t>({1,1})) == true);
        }
    }
SECTION("Tensor extract") { SECTION("Tensor extract") {
bool equal; bool equal;
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/Resize.hpp"
#include <catch2/catch_test_macros.hpp>
#include <cstddef> // std::size_t
#include <cstdint>
#include <memory>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Log.hpp"
namespace Aidge {
TEST_CASE("[core/operator] Resize_Op(forwardDims)",
          "[Resize][forwardDimsScales]") {
    // Helper factored out of the 8 near-identical sections below: builds a
    // Resize node (with optional Scales/Sizes producers), feeds it an input
    // tensor of shape `inputDims`, runs forwardDims(true) and checks the
    // computed output shape. Exactly one of `scales`/`sizes` must be
    // non-empty.
    const auto checkForwardDims =
        [](const std::vector<DimSize_t> &inputDims,
           const std::vector<float> &scales,
           const std::vector<std::size_t> &sizes,
           const std::vector<DimSize_t> &expectedDims) {
            auto inputData = std::make_shared<Tensor>(inputDims);
            auto resizeNode = Resize(scales, sizes);
            auto op =
                std::static_pointer_cast<Resize_Op>(resizeNode->getOperator());
            op->associateInput(0, inputData);
            REQUIRE_NOTHROW(op->forwardDims(true));
            REQUIRE(op->getOutput(0)->dims() == expectedDims);
        };

    SECTION("Un-connected input leads to failure.") {
        // Neither Scales nor Sizes is connected: forwardDims must throw.
        auto inputData =
            std::make_shared<Tensor>(std::vector<DimSize_t>({1, 1, 2, 2}));
        auto resizeNode = Resize();
        auto op =
            std::static_pointer_cast<Resize_Op>(resizeNode->getOperator());
        op->associateInput(0, inputData);
        REQUIRE_THROWS(op->forwardDims(true));
    }

    SECTION("Connecting both Scales & Sizes leads to failure") {
        // Scales and Sizes are mutually exclusive inputs of Resize.
        auto inputData =
            std::make_shared<Tensor>(std::vector<DimSize_t>({1, 1, 2, 2}));
        auto resizeNode = Resize(std::vector<float>({.5, 3.0f, 2.0f, 2.0f}),
                                 std::vector<std::size_t>({1, 3, 4, 4}));
        auto op =
            std::static_pointer_cast<Resize_Op>(resizeNode->getOperator());
        op->associateInput(0, inputData);
        REQUIRE_THROWS(op->forwardDims(true));
    }

    SECTION("Input Scales") {
        // Output dims = input dims * scale (truncated toward zero).
        SECTION("TEST 1") {
            checkForwardDims({1, 1, 2, 2}, {1, 1, 2, 2}, {}, {1, 1, 4, 4});
        }
        SECTION("TEST 2") {
            checkForwardDims({4, 4, 10, 10}, {1, 1, 2, 3}, {}, {4, 4, 20, 30});
        }
        SECTION("TEST 3") {
            checkForwardDims({4, 2, 10, 10}, {1, 1, 0.5, 0.5}, {}, {4, 2, 5, 5});
        }
        SECTION("TEST 4") {
            checkForwardDims({11, 11, 4, 4}, {1, 1, 0.3, 0.3}, {}, {11, 11, 1, 1});
        }
    }

    SECTION("Input Sizes") {
        // Output dims are taken verbatim from the Sizes input.
        SECTION("TEST 1") {
            checkForwardDims({1, 1, 2, 2}, {}, {4, 5, 8, 8}, {4, 5, 8, 8});
        }
        SECTION("TEST 2") {
            checkForwardDims({60, 60, 30, 30}, {}, {1, 1, 75, 75}, {1, 1, 75, 75});
        }
        SECTION("TEST 3") {
            checkForwardDims({11, 11, 20, 20}, {}, {19, 6, 8, 8}, {19, 6, 8, 8});
        }
        SECTION("TEST 4") {
            checkForwardDims({43, 211, 22, 22}, {}, {1, 1, 10, 10}, {1, 1, 10, 10});
        }
    }
}
} // namespace Aidge
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment