Commit f3ab1b2e authored by Laurent Soulier

[IMPR][MAJ] removing reference to Tensor object inside TensorImpl and improving creation interface

parent e2eda56e
2 merge requests: !41 Support for any backend storage, !13 Refactoring Tensor
Pipeline #32940 failed
@@ -26,11 +26,29 @@ class TensorImpl
{
public:
TensorImpl() = delete;
-/// @brief Construction for a given Tensor, with a given backend.
-/// @param backend Name of the chosen backend
-/// @param i_Tensor Reference to the i_Tensor describing the data.
-TensorImpl(const char *backend, Tensor const &i_Tensor) :
-mBackend(backend), mTensor(i_Tensor){};
/// @brief Construction for a given data type, with a given backend.
/// @param backend Name of the chosen backend.
/// @param i_DataType Data type to be stored.
/// @param i_FirstDataCoordinates Logical coordinates of the data at null natural
/// coordinates
/// @param i_Dimensions Tensor dimensions
/// @sa Coord_t
TensorImpl(
const char *backend,
DataType const i_DataType,
std::vector<Coord_t> const &i_FirstDataCoordinates,
std::vector<DimSize_t> const &i_Dimensions) :
mBackend(backend),
mDataType(i_DataType),
mvFirstDataCoordinates(i_FirstDataCoordinates),
mvDimensions(i_Dimensions)
{
assert(
mvDimensions.size() == mvFirstDataCoordinates.size()
&& "Tensors origin coordinates and dimensions must have the same size");
mScalarSize = detail::sizeOf(mDataType);
computeLayout();
};
/// @brief Creates a new TensorImpl with same properties as self
/// @details Copy all characteristics of calling TensorImpl and its data (deep copy).
/// @return Pointer to a copy of the TensorImpl object
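To make the new creation interface concrete, here is a minimal standalone sketch; MiniImpl, MyDataType, Coord, DimSize and sizeOfType are hypothetical stand-ins, not Aidge types. It mirrors the parameter set and the origin/dimensions consistency check of the constructor above, without any back-reference to a Tensor object.

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Hypothetical stand-ins for Aidge's DataType, Coord_t and DimSize_t.
enum class MyDataType { Float32, Int32 };
using Coord = long long;
using DimSize = std::size_t;

constexpr std::size_t sizeOfType(MyDataType t)
{
    return t == MyDataType::Float32 ? sizeof(float) : sizeof(int);
}

// Sketch of an implementation object built from a description of the data
// instead of a reference to a Tensor object.
class MiniImpl
{
public:
    MiniImpl(const char *backend,
             MyDataType dataType,
             std::vector<Coord> const &origin,
             std::vector<DimSize> const &dims) :
        mBackend(backend), mDataType(dataType), mOrigin(origin), mDims(dims)
    {
        // Same invariant as the new TensorImpl constructor.
        assert(mOrigin.size() == mDims.size()
               && "origin coordinates and dimensions must have the same size");
        mScalarSize = sizeOfType(mDataType);
    }

private:
    std::string mBackend;
    MyDataType mDataType;
    std::vector<Coord> mOrigin;
    std::vector<DimSize> mDims;
    std::size_t mScalarSize = 0;
};

int main()
{
    // A 2x3 Float32 block whose origin is at logical coordinates (0, 0).
    MiniImpl impl("cpu", MyDataType::Float32, {0, 0}, {2, 3});
    (void)impl;
    return 0;
}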
@@ -134,10 +152,10 @@ public:
return mNbElts;
};
-/// @brief Returns the reference to the Tensor
-inline Tensor const &getTensor() const noexcept
/// @brief Returns stored data type
inline DataType getDataType() const noexcept
{
-return mTensor;
return mDataType;
};
/// @brief Get the logical coordinates of the data at given index
@@ -186,7 +204,7 @@ public:
}
/// @brief Change TensorImpl dimensions
-/// @note Current API does not change first data coordinates
/// @note Current API resets origin coordinates to null values
/// @note Preexisting data are lost whatever dims value
/// @note If new dimensions are not large enough to hold the Tensors that
/// reference this implementation, behavior is undefined
@@ -195,31 +213,29 @@ public:
virtual void resize(const std::vector<DimSize_t> &dims) = 0;
private:
/// @brief Desired backend identified as a string
const char *mBackend = nullptr;
/// @brief Copy of the Tensor data type.
/// @details The purpose of this copy is to remove the need to access the Tensor
/// object
/// @todo Is there a way to avoid this copy?
DataType mDataType = DataType::Undefined;
-/// @brief Desired backend identified as a string
-const char *mBackend = nullptr;
-/// @brief Tensor whose the TensorImpl manage the memory
-/// @deprecated Future version may be referenced by several Tensors
-/// @todo edesign must be
-/// considered so that a TensorImpl either does not know of its user or it has a
-/// list of all of them
-Tensor const &mTensor;
std::size_t mScalarSize = 0;
/// @brief Logical coordinates of the data stored at null natural coordinates.
/// @sa Coord_t
std::vector<Coord_t> mvFirstDataCoordinates;
/// @brief Actual in-memory tensor dimensions expressed as number of elements
/// along each dimension
std::vector<DimSize_t> mvDimensions;
/// @brief Number of stored data
-/// @details mNbElts == prod(mDimensions)/mScalarSize
/// @details mNbElts == prod(mDimensions)<br>
/// A null value indicates that no data has been actually allocated
/// @todo Implement overflow protection as mNbElts*mScalarSize must fit into a
/// std::size_t
NbElts_t mNbElts = 0;
/// @brief Size, in bytes, of a single data
/// @todo Implement overflow protection as mNbElts*mScalarSize must fit into a
/// std::size_t
-std::size_t mScalarSize = 0;
-/// @brief Actual in-memory tensor dimensions expressed as number of elements
-/// along each dimension
-std::vector<DimSize_t> mvDimensions;
/// @brief memory tensor layout
/// @details Let ptr be the memory address of a data of logical coordinates
/// (x;y;z) in a 3D tensor, casted to Byte_t *.<br> Let vLayout be the
@@ -238,8 +254,6 @@ private:
/// @todo If needed, replace by a _any storing a backend-dependant handle on
/// storage.
Byte_t *mStorage = nullptr;
-/// @brief Logical coordinates of the data stored at mStorage.
-std::vector<Coord_t> mvFirstDataCoordinates;
protected:
/// @brief Number of element setter for inherited classes
@@ -301,6 +315,21 @@ protected:
{
mvFirstDataCoordinates = iCoords;
}
/// @brief Computes the number of elements the Tensor is supposed to store
NbElts_t size()
{
return std::accumulate(
std::begin(getDimensions()),
std::end(getDimensions()),
1,
std::multiplies<NbElts_t>());
}
/// @brief Computes and sets the number of elements the Tensor is supposed to store
/// @note To be called only after the data has effectively been allocated.
void computeNbElts()
{
mNbElts = size();
}
/// @brief Computes layout from Tensor total dimensions and size of a single data.
/// @note Assumes that the Tensor dimensions and data type are valid.
/// @note Should be used only during (re)initialization of storage.
@@ -308,6 +337,10 @@ protected:
{
std::size_t NbDims = mvDimensions.size();
mvLayout.resize(NbDims);
if (NbDims == 0)
{
return;
}
mvLayout[NbDims - 1] = mScalarSize;
for (std::size_t i = 0; i < NbDims - 1; ++i)
@@ -326,7 +359,7 @@ protected:
// *pLayout = (*pPrevLayout) * (*pDimension);
// }
assert(
-mvLayout[0] * mvDimensions[0] == mNbElts * mvLayout[NbDims - 1]
mvLayout[0] * mvDimensions[0] == size() * mvLayout[NbDims - 1]
&& "Error while computing layout");
}
/// @brief Copy all the characteristics of a given TensorImpl but not its data
......
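The loop body of computeLayout is collapsed in the diff above. The following standalone sketch (not Aidge code) computes row-major byte strides in the way that is consistent with the shown assertion mvLayout[0] * mvDimensions[0] == size() * mvLayout[NbDims - 1]: the innermost stride equals the scalar size and each outer stride is the next stride times the next dimension. For a Float32 tensor of dimensions {2, 3, 4} it yields {48, 16, 4}.

#include <cstddef>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Standalone sketch: row-major byte strides where the last dimension is
// contiguous, matching the invariant asserted in computeLayout().
std::vector<std::size_t> rowMajorByteStrides(std::vector<std::size_t> const &dims,
                                             std::size_t scalarSize)
{
    std::vector<std::size_t> layout(dims.size());
    if (dims.empty())
    {
        return layout;
    }
    layout.back() = scalarSize; // innermost dimension: one scalar per step
    for (std::size_t i = dims.size() - 1; i > 0; --i)
    {
        layout[i - 1] = layout[i] * dims[i];
    }
    return layout;
}

int main()
{
    const std::vector<std::size_t> dims{2, 3, 4};
    const std::size_t scalarSize = sizeof(float); // Float32
    const auto layout = rowMajorByteStrides(dims, scalarSize);
    for (auto s : layout)
    {
        std::cout << s << ' '; // prints: 48 16 4
    }
    std::cout << '\n';

    // Check the same invariant as computeLayout's assert.
    const std::size_t nbElts = std::accumulate(
        dims.begin(), dims.end(), std::size_t{1}, std::multiplies<std::size_t>());
    return (layout[0] * dims[0] == nbElts * layout.back()) ? 0 : 1;
}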
@@ -12,6 +12,8 @@
#ifndef AIDGE_DATA_H_
#define AIDGE_DATA_H_
#include <type_traits>
#include "aidge/utils/Attributes.hpp"
namespace Aidge
@@ -73,20 +75,163 @@ private:
// required though pure virtual
// inline to avoid redefinition
inline Data::~Data() noexcept = default;
-} // namespace Aidge
-namespace
namespace detail
{
template<typename T> struct NativeType
{
-static const Aidge::DataType type;
// generic case not implemented;
static_assert(!std::is_same<T, T>::value, "NativeType not supported on given type");
};
-template<> const Aidge::DataType NativeType<double>::type = Aidge::DataType::Float64;
-template<> const Aidge::DataType NativeType<float>::type = Aidge::DataType::Float32;
-template<> const Aidge::DataType NativeType<long>::type = Aidge::DataType::Int64;
-template<> const Aidge::DataType NativeType<int>::type = Aidge::DataType::Int32;
-template<>
-const Aidge::DataType NativeType<std::uint16_t>::type = Aidge::DataType::UInt16;
template<> struct NativeType<double>
{
static constexpr DataType aidgeType = DataType::Float64;
};
template<> struct NativeType<float>
{
static constexpr DataType aidgeType = DataType::Float32;
};
template<> struct NativeType<std::int8_t>
{
static constexpr DataType aidgeType = DataType::Int8;
};
template<> struct NativeType<std::uint8_t>
{
static constexpr DataType aidgeType = DataType::UInt8;
};
template<> struct NativeType<std::int16_t>
{
static constexpr DataType aidgeType = DataType::Int16;
};
template<> struct NativeType<std::uint16_t>
{
static constexpr DataType aidgeType = DataType::UInt16;
};
template<> struct NativeType<std::int32_t>
{
static constexpr DataType aidgeType = DataType::Int32;
};
template<> struct NativeType<std::uint32_t>
{
static constexpr DataType aidgeType = DataType::UInt32;
};
template<> struct NativeType<std::int64_t>
{
static constexpr DataType aidgeType = DataType::Int64;
};
template<> struct NativeType<std::uint64_t>
{
static constexpr DataType aidgeType = DataType::UInt64;
};
template<typename T> constexpr DataType NativeType_v = NativeType<T>::aidgeType;
template<DataType E> struct CppType
{
// generic case not implemented;
static_assert(!(E == E), "CppType not supported on given aidge type");
};
template<> struct CppType<DataType::Float64>
{
using type = double;
};
template<> struct CppType<DataType::Float32>
{
using type = float;
};
template<> struct CppType<DataType::Int8>
{
using type = std::int8_t;
};
template<> struct CppType<DataType::UInt8>
{
using type = std::uint8_t;
};
template<> struct CppType<DataType::Int16>
{
using type = std::int16_t;
};
template<> struct CppType<DataType::UInt16>
{
using type = std::uint16_t;
};
template<> struct CppType<DataType::Int32>
{
using type = std::int32_t;
};
template<> struct CppType<DataType::UInt32>
{
using type = std::uint32_t;
};
template<> struct CppType<DataType::Int64>
{
using type = std::int64_t;
};
template<> struct CppType<DataType::UInt64>
{
using type = std::uint64_t;
};
template<DataType E> using CppType_t = typename CppType<E>::type;
constexpr std::size_t sizeOf(DataType const i_dataType)
{
switch (i_dataType)
{
case DataType::Float64:
{
return sizeof(CppType_t<DataType::Float64>);
break;
}
case DataType::Float32:
{
return sizeof(CppType_t<DataType::Float32>);
break;
}
case DataType::Int8:
{
return sizeof(CppType_t<DataType::Int8>);
break;
}
case DataType::UInt8:
{
return sizeof(CppType_t<DataType::UInt8>);
break;
}
case DataType::Int16:
{
return sizeof(CppType_t<DataType::Int16>);
break;
}
case DataType::UInt16:
{
return sizeof(CppType_t<DataType::UInt16>);
break;
}
case DataType::Int32:
{
return sizeof(CppType_t<DataType::Int32>);
break;
}
case DataType::UInt32:
{
return sizeof(CppType_t<DataType::UInt32>);
break;
}
case DataType::Int64:
{
return sizeof(CppType_t<DataType::Int64>);
break;
}
case DataType::UInt64:
{
return sizeof(CppType_t<DataType::UInt64>);
break;
}
default:
{
assert(false && "sizeOf called on unrecognized data type");
return 0;
}
}
}
template<>
const char* const EnumStrings<Aidge::DataType>::data[]
@@ -94,6 +239,7 @@ const char* const EnumStrings<Aidge::DataType>::data[]
"Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16",
"Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6",
"UInt7", "UInt8", "UInt16", "UInt32", "UInt64"};
-} // namespace
} // namespace detail
} // namespace Aidge
#endif /* AIDGE_DATA_H_ */
\ No newline at end of file
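Assuming a build where the updated aidge/data/Data.hpp above is available, the three utilities compose as follows: NativeType_v maps a C++ type to its DataType tag, CppType_t maps a tag back to a C++ type, and sizeOf gives the byte size of one scalar, all at compile time.

#include <cstdint>
#include <type_traits>

#include "aidge/data/Data.hpp"

int main()
{
    using namespace Aidge;

    // C++ type -> DataType tag.
    static_assert(detail::NativeType_v<float> == DataType::Float32,
                  "float maps to Float32");
    static_assert(detail::NativeType_v<std::uint16_t> == DataType::UInt16,
                  "uint16_t maps to UInt16");

    // DataType tag -> C++ type (round trip).
    static_assert(std::is_same<detail::CppType_t<DataType::Int32>, std::int32_t>::value,
                  "Int32 maps back to std::int32_t");

    // Byte size of a single scalar, usable in constant expressions.
    static_assert(detail::sizeOf(DataType::Float64) == sizeof(double),
                  "Float64 scalars are sizeof(double) bytes");

    return 0;
}

Note that the new table is keyed on fixed-width integer types; `int` is `std::int32_t` on most platforms, whereas `long` (mapped to Int64 in the previous table) differs between LP64 and LLP64 systems.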
@@ -68,7 +68,10 @@ class Tensor : public Data,
public Registrable<
Tensor,
std::tuple<std::string, DataType>,
-detail::pimpl::ImplPtr_t(const Tensor &)>
detail::pimpl::ImplPtr_t(
DataType const i_DataType,
std::vector<Coord_t> const &i_FirstDataCoordinates,
std::vector<DimSize_t> const &i_Dimensions)>
{
private:
///@brief Copy data from a raw buffer inside the Tensor data implementation
@@ -77,15 +80,16 @@ private:
void copyData(Byte_t const *const srcPtr, NbElts_t const Length);
/// @brief enum to specify data type.
DataType mDataType = DataType::Undefined;
/// @brief Logical coordinates of the first data (lexicographic order) of the tensor
/// active area
/// @sa Coord_t
std::vector<Coord_t> mvActiveAreaOrigin = {};
/// @brief Dimensions of the active area.
-std::vector<DimSize_t> mDims;
std::vector<DimSize_t> mDims = {};
/// @brief Pointer to the actual data implementation.
detail::pimpl::ImplPtr_t mImpl = nullptr;
/// @brief Pointer to the associated gradient Tensor instance.
std::shared_ptr<Tensor> mGrad = nullptr;
-/// @brief Logical coordinates of the first data (lexicographic order) of the tensor
-/// active area
-std::vector<Coord_t> mActiveAreaOrigin;
/// @brief Valid neighborhood around active data
Context mContext;
@@ -104,7 +108,7 @@ public:
* @param dataType Sets the type of inserted data.
*/
explicit Tensor(DataType dataType = DataType::Float32) :
-Data(Type), mDataType(dataType), mDims({}), mContext{}
Data(Type), mDataType(dataType), mvActiveAreaOrigin({}), mDims({}), mContext{}
{
// ctor
}
@@ -125,15 +129,20 @@ public:
template<typename T, std::size_t SIZE_0>
constexpr Tensor(Array1D<T, SIZE_0> &&arr) :
Data(Type),
-mDataType(NativeType<T>::type),
mDataType(detail::NativeType_v<T>),
mvActiveAreaOrigin(1, 0),
// for aidge, on msvc, wrong constructor called, initializes a vector of size
-// SIZE_0 instead of a single value vector holding SIZE_0 mDims({SIZE_0}),
-mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
// SIZE_0 instead of a single value vector holding SIZE_0
// mDims({SIZE_0}),
// mImpl(Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
// mDataType, mvActiveAreaOrigin, mDims)),
mSize(SIZE_0),
mSizeM1(SIZE_0)
{
// work-around: calling assignement instead
mDims = {SIZE_0};
mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
mDataType, mvActiveAreaOrigin, mDims);
copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0);
}
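The "wrong constructor" comment above refers to the usual std::vector initialization pitfall that the assignment work-around sidesteps; this small standalone snippet (not Aidge code) shows the two constructors that can be confused here.

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    constexpr std::size_t SIZE_0 = 5;

    // Brace initialization: a vector holding the single value SIZE_0.
    std::vector<std::size_t> braced{SIZE_0};
    assert(braced.size() == 1 && braced[0] == SIZE_0);

    // Parenthesis initialization: a vector of SIZE_0 value-initialized elements.
    std::vector<std::size_t> sized(SIZE_0);
    assert(sized.size() == SIZE_0);

    return 0;
}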
@@ -143,7 +152,8 @@ public:
resize({SIZE_0});
if (!mImpl)
{
-mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
mDataType, mvActiveAreaOrigin, mDims);
}
copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0);
return *this;
@@ -158,9 +168,11 @@ public:
template<typename T, std::size_t SIZE_0, std::size_t SIZE_1>
constexpr Tensor(Array2D<T, SIZE_0, SIZE_1> &&arr) :
Data(Type),
-mDataType(NativeType<T>::type),
mDataType(detail::NativeType_v<T>),
mvActiveAreaOrigin(2, 0),
mDims({SIZE_0, SIZE_1}),
-mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
mImpl(Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
mDataType, mvActiveAreaOrigin, mDims)),
mSize(SIZE_0 * SIZE_1),
mSizeM1(SIZE_1)
{
@@ -173,7 +185,8 @@ public:
resize({SIZE_0, SIZE_1});
if (!mImpl)
{
-mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
mDataType, mvActiveAreaOrigin, mDims);
}
copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0 * SIZE_1);
return *this;
@@ -189,9 +202,11 @@ public:
template<typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> const &arr) :
Data(Type),
-mDataType(NativeType<T>::type),
mDataType(detail::NativeType_v<T>),
mvActiveAreaOrigin(3, 0),
mDims({SIZE_0, SIZE_1, SIZE_2}),
-mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
mImpl(Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
mDataType, mvActiveAreaOrigin, mDims)),
mSize(SIZE_0 * SIZE_1 * SIZE_2),
mSizeM1(SIZE_1 * SIZE_2)
{
@@ -204,7 +219,8 @@ public:
resize({SIZE_0, SIZE_1, SIZE_2});
if (!mImpl)
{
-mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
mDataType, mvActiveAreaOrigin, mDims);
}
copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0 * SIZE_1 * SIZE_2);
return *this;
@@ -226,9 +242,11 @@ public:
std::size_t SIZE_3>
constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> const &arr) :
Data(Type),
-mDataType(NativeType<T>::type),
mDataType(detail::NativeType_v<T>),
mvActiveAreaOrigin(4, 0),
mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
-mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
mImpl(Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
mDataType, mvActiveAreaOrigin, mDims)),
mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3),
mSizeM1(SIZE_1 * SIZE_2 * SIZE_3)
{
@@ -248,7 +266,8 @@ public:
resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
if (!mImpl)
{
-mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
mDataType, mvActiveAreaOrigin, mDims);
}
copyData(
reinterpret_cast<Byte_t const *>(&(arr.data)),
@@ -396,6 +415,7 @@ public:
* @brief Change the shape of the Tensor object according to the given argument.
* @tparam DIM new dimensions.
* @param dims
* @note Current API resets origin coordinates to null values
*/
template<std::array<DimSize_t, 1>::size_type
DIM> // deducing std::array size_type and declaring DIM accordingly
@@ -408,6 +428,11 @@ public:
resize(mDims);
}
/**
* @brief Change the shape of the Tensor object according to the given argument.
* @param dims
* @note Current API resets origin coordinates to null values
*/
void resize(const std::vector<DimSize_t> &dims);
/**
......
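The change to the Registrable base above amounts to a narrower factory signature: the creator no longer needs a fully built Tensor, only a description of the data. A minimal standalone sketch of that idea (DType, Impl, Creator and the registry map are hypothetical stand-ins, not the Aidge registrar):

#include <cstddef>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-ins for the types used by the real registrar.
enum class DType { Float32, Int32 };
using Coord = long long;
using DimSize = std::size_t;

struct Impl // placeholder for a backend TensorImpl
{
    DType dtype;
    std::vector<Coord> origin;
    std::vector<DimSize> dims;
};
using ImplPtr = std::unique_ptr<Impl>;

// New-style creator: built from a description of the data only.
using Creator = std::function<ImplPtr(
    DType, std::vector<Coord> const &, std::vector<DimSize> const &)>;

int main()
{
    // Tiny registry keyed by (backend name, data type), mirroring
    // std::tuple<std::string, DataType> in the real code.
    std::map<std::pair<std::string, DType>, Creator> registry;
    registry[{"cpu", DType::Float32}] =
        [](DType dt, std::vector<Coord> const &origin, std::vector<DimSize> const &dims) {
            return ImplPtr(new Impl{dt, origin, dims});
        };

    // A Tensor-like object can now create its implementation from its own
    // members instead of passing itself by reference.
    const auto impl =
        registry.at({"cpu", DType::Float32})(DType::Float32, {0, 0}, {2, 3});
    return impl->dims.size() == 2 ? 0 : 1;
}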
@@ -11,8 +11,8 @@
#ifndef AIDGE_CORE_UTILS_TENSOR_UTILS_H_
#define AIDGE_CORE_UTILS_TENSOR_UTILS_H_
-#include <cmath> // std::abs
#include "aidge/data/Tensor.hpp"
#include <cmath> // std::abs
/**
* @brief Compare two :cpp:class:`Aidge::Tensor` value wise. The comparison function is:
@@ -22,27 +22,35 @@
* If a tensor value is different from the other tensor return False
* If the tensor does not have the same size, return False
* If the datatype is not the same between each tensor return False
* If the templated type does not correspond to the datatype of each tensor, raise an
* assertion error
*
* @tparam T should correspond to the type of the tensor, define the type of the absolute
* and relative error
* @param t1 first :cpp:class:`Aidge::Tensor` to test
* @param t2 second :cpp:class:`Aidge::Tensor` to test
* @param relative relative difference allowed (should be between 0 and 1)
* @param absolute absolute error allowed (should be positive)
* @return true if both tensor are approximately equal and have the datatype, shape. Else
* return false
*/
template<typename T>
bool approxEq(Aidge::Tensor t1, Aidge::Tensor t2, float relative, float absolute)
{
assert(t1.dataType() == t2.dataType());
-assert(t1.dataType() == NativeType<T>::type);
assert(t1.dataType() == Aidge::detail::NativeType_v<T>);
assert(relative >= 0);
assert(absolute >= 0 && absolute <= 1);
if (t1.size() != t2.size())
{
return false;
}
for (size_t i = 0; i < t1.size(); ++i)
{
if (static_cast<float>(std::abs(t1.get<T>(i) - t2.get<T>(i)))
> (absolute + (relative * static_cast<float>(std::abs(t2.get<T>(i))))))
{
return false;
}
}
......
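As a standalone illustration of the comparison rule implemented by approxEq (mirroring the formula only, not Aidge's Tensor API): two values match when |a - b| <= absolute + relative * |b|, and the whole comparison fails on the first mismatch or on a size difference.

#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

// Standalone illustration of the tolerance rule used by approxEq.
bool approxEqVec(std::vector<float> const &a, std::vector<float> const &b,
                 float relative, float absolute)
{
    assert(relative >= 0.0f);
    assert(absolute >= 0.0f);
    if (a.size() != b.size())
    {
        return false;
    }
    for (std::size_t i = 0; i < a.size(); ++i)
    {
        if (std::abs(a[i] - b[i]) > absolute + relative * std::abs(b[i]))
        {
            return false;
        }
    }
    return true;
}

int main()
{
    const std::vector<float> ref{1.0f, 2.0f, 3.0f};
    const std::vector<float> close{1.0005f, 2.001f, 2.999f};
    // 0.1% relative tolerance plus a small absolute floor.
    assert(approxEqVec(ref, close, 0.001f, 0.0001f));
    assert(!approxEqVec(ref, {1.0f, 2.5f, 3.0f}, 0.001f, 0.0001f));
    return 0;
}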
@@ -31,37 +31,38 @@ void addCtor(py::class_<
Registrable<
Tensor,
std::tuple<std::string, DataType>,
-detail::pimpl::ImplPtr_t(const Tensor&)>>& mTensor)
detail::pimpl::ImplPtr_t(
DataType const i_DataType,
std::vector<Coord_t> const& i_FirstDataCoordinates,
std::vector<DimSize_t> const& i_Dimensions)>>& mTensor)
{
mTensor
.def(py::init([](py::array_t<T, py::array::c_style | py::array::forcecast> b) {
/* Request a buffer descriptor from Python */
py::buffer_info info = b.request();
Tensor* newTensor = new Tensor();
-newTensor->setDatatype(NativeType<T>::type);
newTensor->setDatatype(detail::NativeType_v<T>);
const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
newTensor->resize(dims);
// TODO : Find a better way to choose backend
std::set<std::string> availableBackends = Tensor::getAvailableBackends();
if (availableBackends.find("cpu") != availableBackends.end())
{
newTensor->setBackend("cpu");
newTensor->getImpl().copy(
reinterpret_cast<Byte_t*>(info.ptr), newTensor->size());
}
else
{
printf("Warning : Could not use aidge_cpu backend, verify you have "
"`import aidge_cpu`\n");
}
return newTensor;
}))
.def("__setitem__", (void (Tensor::*)(NbElts_t, T)) & Tensor::set)
.def("__setitem__", (void (Tensor::*)(std::vector<Coord_t>, T)) & Tensor::set);
}
void init_Tensor(py::module& m)
@@ -70,11 +71,17 @@ void init_Tensor(py::module& m)
Registrable<
Tensor,
std::tuple<std::string, DataType>,
-detail::pimpl::ImplPtr_t(const Tensor&)>,
detail::pimpl::ImplPtr_t(
DataType const i_DataType,
std::vector<Coord_t> const& i_FirstDataCoordinates,
std::vector<DimSize_t> const& i_Dimensions)>,
std::shared_ptr<Registrable<
Tensor,
std::tuple<std::string, DataType>,
-detail::pimpl::ImplPtr_t(const Tensor&)>>>(m, "TensorRegistrable");
detail::pimpl::ImplPtr_t(
DataType const i_DataType,
std::vector<Coord_t> const& i_FirstDataCoordinates,
std::vector<DimSize_t> const& i_Dimensions)>>>(m, "TensorRegistrable");
py::class_<
Tensor,
@@ -83,7 +90,10 @@ void init_Tensor(py::module& m)
Registrable<
Tensor,
std::tuple<std::string, DataType>,
-detail::pimpl::ImplPtr_t(const Tensor&)>>
detail::pimpl::ImplPtr_t(
DataType const i_DataType,
std::vector<Coord_t> const& i_FirstDataCoordinates,
std::vector<DimSize_t> const& i_Dimensions)>>
pyClassTensor(m, "Tensor", py::multiple_inheritance(), py::buffer_protocol());
pyClassTensor.def(py::init<>())
@@ -91,12 +101,11 @@ void init_Tensor(py::module& m)
.def("dims", (const std::vector<DimSize_t>& (Tensor::*)() const) & Tensor::dims)
.def("dtype", &Tensor::dataType)
.def("size", &Tensor::size)
.def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&)) & Tensor::resize)
.def("has_impl", &Tensor::hasImpl)
.def(
"get_coord",
[](Tensor& b, NbElts_t flatIdx) -> std::vector<Coord_t> {
std::vector<Coord_t> Coords(b.dims().size());
/// @todo could be removed soon
assert(
@@ -118,8 +127,7 @@ void init_Tensor(py::module& m)
.def("__len__", [](Tensor& b) -> NbElts_t { return b.size(); })
.def(
"__getitem__",
[](Tensor& b, NbElts_t idx) -> py::object {
if (idx >= b.size())
throw py::index_error();
switch (b.dataType())
@@ -136,8 +144,7 @@ void init_Tensor(py::module& m)
})
.def(
"__getitem__",
[](Tensor& b, std::vector<Coord_t> coordIdx) -> py::object {
if (b.getIdx(coordIdx) >= b.size())
throw py::index_error();
switch (b.dataType())
@@ -152,52 +159,50 @@ void init_Tensor(py::module& m)
return py::none();
}
})
.def_buffer([](Tensor& b) -> py::buffer_info {
/// @todo const TensorImpl, yet a pointer to writable data is requested
/// later: unconsistent?
const TensorImpl& tensorImpl = b.getImpl();
std::vector<DimSize_t> dims;
std::vector<NbElts_t> strides;
NbElts_t stride = tensorImpl.scalarSize();
for (unsigned int dim = b.nbDims(); dim > 0; dim--)
{
dims.push_back(b.dims()[dim - 1]);
strides.push_back(stride);
stride *= b.dims()[dim - 1];
}
std::reverse(dims.begin(), dims.end());
std::reverse(strides.begin(), strides.end());
std::string dataFormatDescriptor;
switch (b.dataType())
{
case DataType::Float64:
dataFormatDescriptor = py::format_descriptor<double>::format();
break;
case DataType::Float32:
dataFormatDescriptor = py::format_descriptor<float>::format();
break;
case DataType::Int32:
dataFormatDescriptor = py::format_descriptor<int>::format();
break;
default:
throw py::value_error("Unsupported data format");
}
return py::buffer_info(
const_cast<void*>(reinterpret_cast<void const*>(
tensorImpl.getDataAddress())), /* Pointer to buffer */
tensorImpl.scalarSize(), /* Size of one scalar */
dataFormatDescriptor, /* Python struct-style format descriptor */
b.nbDims(), /* Number of dimensions */
dims, /* Buffer dimensions */
strides /* Strides (in bytes) for each index */
);
});
// TODO : If the ctor with the right data type does not exist, pybind will always
// convert the data to INT ! Need to find a way to avoid this !
......
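The def_buffer definition above follows the standard pybind11 buffer-protocol pattern. For reference, a minimal self-contained module in the same style (a generic pybind11 example, not Aidge code) exposes a 2D float container with byte strides computed the same way, innermost dimension last.

#include <cstddef>
#include <vector>

#include <pybind11/pybind11.h>

namespace py = pybind11;

// Minimal 2D row-major float container.
struct Matrix
{
    Matrix(std::size_t rows, std::size_t cols) :
        mRows(rows), mCols(cols), mData(rows * cols, 0.0f)
    {
    }
    std::size_t mRows, mCols;
    std::vector<float> mData;
};

PYBIND11_MODULE(minibuffer, m)
{
    py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
        .def(py::init<std::size_t, std::size_t>())
        .def_buffer([](Matrix& mat) -> py::buffer_info {
            return py::buffer_info(
                mat.mData.data(),                       /* pointer to buffer */
                sizeof(float),                          /* size of one scalar */
                py::format_descriptor<float>::format(), /* format descriptor */
                2,                                      /* number of dimensions */
                {mat.mRows, mat.mCols},                 /* buffer dimensions */
                {sizeof(float) * mat.mCols,             /* strides (in bytes) */
                 sizeof(float)});
        });
}

Once built, `numpy.array(minibuffer.Matrix(2, 3), copy=False)` can view the buffer directly, which is the same mechanism that lets NumPy wrap a Tensor exposed with py::buffer_protocol().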
@@ -9,8 +9,8 @@
*
********************************************************************************/
-#include "aidge/data/Tensor.hpp"
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/Tensor.hpp"
namespace Aidge
{
@@ -42,8 +42,8 @@ void Tensor::setBackend(const std::string &name)
{
// Backend change: create new impl, copy from old to new and replace
// impl
-detail::pimpl::ImplPtr_t newImpl
-= Registrar<Tensor>::create({name, mDataType})(*this);
detail::pimpl::ImplPtr_t newImpl = Registrar<Tensor>::create(
{name, mDataType})(mDataType, mvActiveAreaOrigin, mDims);
if (hasData())
{
assert(false && "So far copy between different backend is not supported");
@@ -57,7 +57,8 @@ void Tensor::setBackend(const std::string &name)
}
else
{
-mImpl = Registrar<Tensor>::create({name, mDataType})(*this);
mImpl = Registrar<Tensor>::create({name, mDataType})(
mDataType, mvActiveAreaOrigin, mDims);
}
}
@@ -123,8 +124,8 @@ void Tensor::setDatatype(const DataType dt)
if (hasImpl() && (dataType() != dt))
{
mDataType = dt;
-detail::pimpl::ImplPtr_t newImpl
-= Registrar<Tensor>::create({mImpl->backend(), dt})(*this);
detail::pimpl::ImplPtr_t newImpl = Registrar<Tensor>::create(
{mImpl->backend(), dt})(mDataType, mvActiveAreaOrigin, mDims);
if (hasData())
{
assert(
@@ -171,6 +172,7 @@ Tensor::Tensor(const Tensor &otherTensor) :
void Tensor::resize(const std::vector<DimSize_t> &dims)
{
mDims = dims;
mvActiveAreaOrigin.assign(dims.size(), 0);
computeSize();
if (hasImpl())
{
......