Commit 84fae3c5 authored by Grégoire Kubler

Merge remote-tracking branch 'EclipseRepo/dev' into feat/operator_globalAveragePooling

parents 5eabd218 fbe319b6
2 merge requests: !105 version 0.2.0, !91 Feat/operator global average pooling
@@ -171,21 +171,29 @@ public:
     };
     /**
-     * Set the size, in number of elements, that must be stored.
+     * @brief Set the size, in number of elements, that must be stored.
      */
     virtual void resize(std::vector<DimSize_t> dims) {
         mNbElts = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
     }
     /**
-     * Return the number of elements stored.
+     * @brief Return the number of elements stored.
      */
     inline std::size_t size() const noexcept { return mNbElts; }
     /**
-     * Return the size (in bytes) of one element (scalar).
+     * @brief Return the size (in bytes) of one element (scalar).
      */
     virtual std::size_t scalarSize() const noexcept = 0;
+    /**
+     * @brief Set every element of the implementation to zero.
+     */
+    virtual void zeros() {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Function not implemented");
+    }
     constexpr const char *backend() const { return mBackend; }
     /**
...
@@ -53,6 +53,15 @@ public:
     inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
+    void zeros() override final {
+        if (mData.empty()) {
+            lazyInit();
+        }
+        for (std::size_t i = 0; i < mData.size(); ++i) {
+            *(mData.data() + i) = T(0);
+        }
+    }
     void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
         const T* srcT = static_cast<const T *>(src);
         T* dstT = static_cast<T *>(rawPtr(offset));
...
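For orientation, a minimal usage sketch of the new zero-fill path, based only on the API visible in this commit (not part of the diff):

    // Hedged sketch: Tensor::zeros() forwards to the backend implementation's zeros().
    Aidge::Tensor t(std::vector<Aidge::DimSize_t>{2, 3});
    t.setDataType(Aidge::DataType::Float32);
    t.setBackend("cpu");   // allocates a TensorImpl_cpu<float> for this tensor
    t.zeros();             // dispatches to TensorImpl_cpu<float>::zeros(), setting all 6 elements to 0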
@@ -12,10 +12,12 @@
 #ifndef AIDGE_CORE_DATA_TENSOR_H_
 #define AIDGE_CORE_DATA_TENSOR_H_
+#include <cstddef>      // std::size_t
 #include <cstring>
+#include <functional>   // std::multiplies
 #include <set>
 #include <memory>
 #include <numeric>      // std::accumulate
 #include <string>
 #include <type_traits>  // std::is_arithmetic
 #include <vector>
@@ -35,15 +37,17 @@ namespace Aidge {
 class Tensor : public Data,
                public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
    private:
-    DataType mDataType; /** enum to specify data type. */
+    DataType mDataType = DataType::Float32; /** enum to specify data type. */
     std::vector<DimSize_t> mDims;      /** Dimensions of the tensor. */
     std::vector<DimSize_t> mStrides;   /** Stride dimensions of the tensor. */
-    std::shared_ptr<TensorImpl> mImpl; /** Pointer to the actual data implementation. */
+    std::shared_ptr<TensorImpl> mImpl = nullptr; /** Pointer to the actual data implementation. */
     std::size_t mImplOffset = 0;
-    std::shared_ptr<Tensor> mGrad; /** Pointer to the associated gradient Tensor instance. */
+    std::shared_ptr<Tensor> mGrad = nullptr; /** Pointer to the associated gradient Tensor instance. */
     // Cached data
-    std::size_t mSize = 0; /** Number of elements in the Tensor. */
+    /// @brief Number of elements in the Tensor.
+    std::size_t mSize;
+    /// @brief Whether or not data are contiguous in memory.
     bool mContiguous = true;
    public:
@@ -51,64 +55,48 @@ class Tensor : public Data,
     /**
      * @brief Construct a new empty Tensor object.
-     * @param dataType Sets the type of inserted data.
+     * It has the features of an undefined scalar.
      */
-    Tensor(DataType dataType = DataType::Float32)
+    Tensor(DataType dtype = DataType::Float32)
         : Data(Type),
-          mDataType(dataType)
+          mDataType(dtype),
+          mDims(std::vector<DimSize_t>({})),
+          mStrides({1}),
+          mSize(1)
     {
         // ctor
     }
     /**
-     * @brief Construct a new Tensor object from dimensions.
+     * @brief Construct a new Tensor object from an arithmetic parameter.
      *
-     * @param dims dimensions of the tensor
-     * @param dataType datatype of the tensor (default = DataType::Float32)
+     * @tparam T Type of the input parameter.
+     * @tparam VT Decayed type of the input parameter.
+     * @param val Input value.
      */
-    Tensor(const std::vector<DimSize_t>& dims, DataType dataType = DataType::Float32)
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    Tensor(T val)
         : Data(Type),
-          mDataType(dataType),
-          mDims(dims)
+          mDataType(NativeType<VT>::type),
+          mDims({}),
+          mStrides({1}),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
+          mSize(1)
     {
-        computeSize();
+        *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
     }
     /**
-     * @brief Construct a new Tensor object from another one (shallow copy).
-     * Data memory is not copied, but shared between the new Tensor and the
-     * initial one.
+     * @brief Construct a new Tensor object from dimensions.
      *
-     * @param otherTensor
+     * @param dims dimensions of the tensor
      */
-    Tensor(const Tensor&) = default;
-    Tensor(Tensor&&) = default;
-
-    /**
-     * Perform a deep copy of the tensor.
-     */
-    Tensor clone() const {
-        Tensor newTensor(*this);
-        if (!newTensor.isContiguous()) {
-            newTensor.makeContiguous();
-        }
-        else {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
-            newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
-            newTensor.setImpl(newImpl);
-        }
-        return newTensor;
-    }
-
-    template<typename T,
-             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
-    Tensor(T val)
-        : Data(Type),
-          mDataType(NativeType<VT>::type),
-          mDims({}), mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
-          mSize(1) {
-        *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
+    Tensor(const std::vector<DimSize_t>& dims)
+        : Data(Type)
+    {
+        // set mDims, mStrides, mContiguous, mSize
+        resize(dims);
     }
     /**
@@ -123,20 +111,11 @@ class Tensor : public Data,
           mDims({SIZE_0}),
           mStrides({1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
-          mSize(SIZE_0) {
+          mSize(SIZE_0)
+    {
         mImpl->copyFromHost(&arr.data[0], SIZE_0);
     }
-    template <typename T, std::size_t SIZE_0>
-    constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
-        resize({SIZE_0});
-        if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0});
-        }
-        mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset);
-        return *this;
-    }
     /**
      * @brief Construct a new Tensor object from the 2-dimensions Array helper.
      * @tparam T datatype
@@ -154,16 +133,6 @@ class Tensor : public Data,
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
     }
-    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
-    constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
-        resize({SIZE_0, SIZE_1});
-        if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1});
-        }
-        mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset);
-        return *this;
-    }
     /**
      * @brief Construct a new Tensor object from the 3-dimensions Array helper.
      * @tparam T datatype
@@ -182,16 +151,6 @@ class Tensor : public Data,
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
     }
-    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
-    constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
-        resize({SIZE_0, SIZE_1, SIZE_2});
-        if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2});
-        }
-        mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset);
-        return *this;
-    }
     /**
      * @brief Construct a new Tensor object from the 4-dimensions Array helper.
      * @tparam T datatype
@@ -211,15 +170,19 @@ class Tensor : public Data,
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
     }
-    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
-    constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
-        resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
-        if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3});
-        }
-        mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset);
-        return *this;
-    }
+    /**
+     * @brief Copy constructor. Construct a new Tensor object from another one
+     * (shallow copy). Data memory is not copied, but shared between the new
+     * Tensor and the initial one.
+     * @param other
+     */
+    Tensor(const Tensor& other) = default;
+
+    /**
+     * @brief Move constructor.
+     * @param other
+     */
+    Tensor(Tensor&& other) = default;
     /**
      * @brief Copy dimensions, datatype and data from another Tensor.
@@ -227,24 +190,32 @@ class Tensor : public Data,
      * existing implementation. Tensor backend/device remain untouched.
      * If current Tensor does not have an implementation, only a shallow copy
      * is performed and the Tensor will share data with t.
-     * @param t other Tensor object.
+     * @param other other Tensor object.
      * @return Tensor&
      */
-    Tensor &operator=(const Tensor &t) {
-        resize(t.dims(), t.strides());
-        setDataType(t.dataType(), false); // do not convert existing data
-        if (t.hasImpl()) {
-            if (hasImpl()) {
-                copyFrom(t);
-            }
-            else {
-                // Perform a shallow copy only
-                setImpl(t.mImpl, t.mImplOffset);
-            }
-        }
-        else {
-            setImpl(nullptr);
-        }
+    Tensor &operator=(const Tensor& other);
+
+    template <typename T, std::size_t SIZE_0>
+    constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
+        *this = Tensor(std::move(arr));
+        return *this;
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
+    constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
+        *this = Tensor(std::move(arr));
+        return *this;
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
+    constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
+        *this = Tensor(std::move(arr));
+        return *this;
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
+    constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
+        *this = Tensor(std::move(arr));
         return *this;
     }
@@ -260,6 +231,23 @@ class Tensor : public Data,
         return *mImpl == *(otherTensor.mImpl);
     }
+public:
+    /**
+     * @brief Perform a deep copy of the tensor.
+     */
+    Tensor clone() const {
+        Tensor newTensor(*this);
+        if (!newTensor.isContiguous()) {
+            newTensor.makeContiguous();
+        }
+        else {
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
+            newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
+            newTensor.setImpl(newImpl);
+        }
+        return newTensor;
+    }
     /**
      * @brief Set the backend of the Tensor associated implementation. If there
      * was no previous implementation set, data will be allocated, but it will
@@ -292,12 +280,7 @@ class Tensor : public Data,
      * @brief Get a list of available backends.
      * @return std::set<std::string>
      */
-    static std::set<std::string> getAvailableBackends(){
-        std::set<std::string> backendsList;
-        for(std::tuple<std::string, DataType> tupleKey : Registrar<Tensor>::getKeys())
-            backendsList.insert(std::get<0>(tupleKey));
-        return backendsList;
-    }
+    static std::set<std::string> getAvailableBackends();
     /**
      * @brief Get the data type enum.
@@ -369,13 +352,13 @@ class Tensor : public Data,
      * @brief Get dimensions of the Tensor object.
      * @return constexpr const std::vector<DimSize_t>&
      */
-    constexpr const std::vector<DimSize_t> &dims() const { return mDims; }
+    constexpr inline const std::vector<DimSize_t>& dims() const noexcept { return mDims; }
     /**
      * @brief Get strides of the Tensor object.
      * @return constexpr const std::vector<DimSize_t>&
      */
-    constexpr const std::vector<DimSize_t> &strides() const { return mStrides; }
+    constexpr inline const std::vector<DimSize_t>& strides() const noexcept { return mStrides; }
     /**
      * @brief Return true if Tensor is contiguous in memory.
@@ -424,6 +407,18 @@ class Tensor : public Data,
      * @return false
      */
     bool empty() const { return mDims.empty(); }
+    // bool newempty() const noexcept {
+    //     return mSize == 0;
+    // }
+
+    /**
+     * @brief Set each element of the tensor to zero.
+     */
+    void zeros() const {
+        if (mImpl) {
+            mImpl->zeros();
+        }
+    }
     template <typename expectedType>
     const expectedType& get(std::size_t idx) const {
@@ -455,12 +450,13 @@ class Tensor : public Data,
     inline void print() const { fmt::print("{}\n", toString()); }
     std::shared_ptr<Tensor> grad() {
-        if (!mGrad) {
-            mGrad = std::make_shared<Tensor>(mDataType);
-            mGrad->resize(mDims);
-
-            if (mImpl) mGrad->setBackend(mImpl->backend());
-        }
+        // if (!mGrad && mImpl) {
+        //     mGrad = std::make_shared<Tensor>(mDims);
+        //     mGrad->setDataType(mDataType);
+        //     mGrad->setBackend(mImpl->backend());
+        //     // if (mImpl) mGrad->setBackend(mImpl->backend());
+        // }
         return mGrad;
     }
@@ -473,13 +469,13 @@ class Tensor : public Data,
      * @return std::vector<DimSize_t>
      */
     std::vector<std::size_t> getCoord(std::size_t flatIdx) const {
-        std::vector<std::size_t> coordIdx = std::vector<std::size_t>(mDims.size());
-        std::size_t idx = flatIdx;
-        for (std::size_t i = mDims.size() - 1; i > 0; --i){
-            coordIdx[i] = (idx % mDims[i]);
-            idx/=mDims[i];
-        }
-        coordIdx[0] = idx % mDims[0];
+        std::vector<std::size_t> coordIdx(mDims.size());
+        std::size_t i = mDims.size();
+        while (i-- > 0) {
+            coordIdx[i] = (flatIdx % mDims[i]);
+            flatIdx /= mDims[i];
+        }
         return coordIdx;
     }
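As a worked check of the row-major index math above (a hedged illustration, not part of the commit; assumes Catch2 as in the tests further down):

    Aidge::Tensor t(std::vector<Aidge::DimSize_t>{2, 3, 4});         // strides {12, 4, 1}
    REQUIRE(t.getCoord(17) == std::vector<std::size_t>({1, 1, 1}));  // 17 = 1*12 + 1*4 + 1*1
    REQUIRE(t.getIdx({1, 1, 1}) == 17);                              // inverse mapping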
@@ -497,7 +493,7 @@ class Tensor : public Data,
         AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
         std::size_t flatIdx = 0;
         std::size_t i = 0;
-        for(; i < coordIdx.size() - 1; ++i){
+        for(; i < coordIdx.size() - 1; ++i) {
             AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
             flatIdx = (flatIdx + coordIdx[i]) * mDims[i + 1];
         }
@@ -513,20 +509,24 @@ class Tensor : public Data,
      * @return DimSize_t Storage index
      */
     std::size_t getStorageIdx(const std::vector<std::size_t>& coordIdx) const {
+        for(std::size_t i = 0; i < coordIdx.size(); ++i) {
+            AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
+        }
         AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
-        return std::inner_product(coordIdx.begin(), coordIdx.end(), mStrides.begin(), DimSize_t(0));
+        return std::inner_product(coordIdx.cbegin(), coordIdx.cend(), mStrides.cbegin(), DimSize_t(0));
     }
     /**
-     * @brief Returns a sub-tensor with one or more dimension less.
-     * For instance, t.extract({1}) on a CHW tensor will return the HW tensor
+     * @brief Returns a sub-tensor with equal or lower number of dimensions.
+     *
+     * @note For instance, ``t.extract({1})`` on a CHW tensor will return the HW tensor
      * of channel #1.
-     * Likewise, t.extract({0, 1}) on a NCHW tensor will return the HW tensor
+     * Likewise, ``t.extract({0, 1})`` on a NCHW tensor will return the HW tensor
      * of batch #0 and channel #1.
-     * No memory copy is performed, the returned tensor does not own the memory.
-     * If the number of coordinates matches the number of dimensions, an empty
-     * tensor is returned.
-     * It current tensor was contiguous, the returned tensor is garanteed to be
+     * @note No memory copy is performed, the returned tensor does not own the memory.
+     * @note If the number of coordinates matches the number of dimensions, a scalar
+     * tensor is returned.
+     * @note If current tensor was contiguous, the returned tensor is guaranteed to be
      * contiguous as well.
      *
      * @param coordIdx Coordinates of the sub-tensor to extract
@@ -537,6 +537,8 @@ class Tensor : public Data,
     /**
      * @brief Returns a sub-tensor at some coordinate and with some dimension.
      *
+     * @note Data contiguity of the returned Tensor is not guaranteed.
+     *
      * @param coordIdx First coordinates of the sub-tensor to extract
      * @param dims Dimensions of the sub-tensor to extract
      * @return Tensor Sub-tensor.
...
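For orientation, a minimal sketch of how the reworked constructors and assignment operators above fit together, using only calls visible in this commit (not part of the diff):

    Aidge::Tensor scalar(3.14f);                                  // arithmetic ctor: dims {}, strides {1}, size 1
    Aidge::Tensor shaped(std::vector<Aidge::DimSize_t>{2, 3});    // dims ctor: sized via resize(), no impl yet
    Aidge::Tensor fromArray;
    fromArray = Aidge::Array2D<int, 2, 2>{{{1, 2}, {3, 4}}};      // Array operator= now delegates to Tensor(std::move(arr))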
@@ -9,14 +9,47 @@
  *
  ********************************************************************************/
-#include <vector>
+#include "aidge/data/Tensor.hpp"
+
 #include <cstddef>
+#include <vector>
-#include "aidge/data/Tensor.hpp"
-#include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
+    resize(other.dims(), other.strides());
+    setDataType(other.dataType(), false); // do not convert existing data
+    if (other.hasImpl()) {
+        if (hasImpl()) {
+            copyFrom(other);
+        }
+        else {
+            // Perform a shallow copy only
+            setImpl(other.mImpl, other.mImplOffset);
+        }
+    }
+    else {
+        setImpl(nullptr);
+    }
+    return *this;
+}
+
 void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
+    // TODO: scalar Tensor not handled
+    if (dims.empty()) { // scalar
+        mDims = std::vector<DimSize_t>(0);
+        mStrides = std::vector<DimSize_t>({1});
+        mContiguous = true;
+
+        computeSize();
+        if (mImpl) {
+            mImpl->resize(mDims);
+        }
+        return;
+    }
+
     bool checkContiguous = true;
     if (strides.empty()) {
         strides.resize(dims.size());
@@ -31,7 +64,7 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
         AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
     }
-    if (mImpl.use_count() > 1) {
+    if (mImpl && mImpl.use_count() > 1) {
         // Here we could also create a new storage for this tensor in this case
         // But, is it more likely that the user really wants this, or that he did a mistake?
         AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
@@ -43,6 +76,11 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
     mContiguous = true;
     if (checkContiguous) {
         std::size_t expectedStride = 1;
+        // std::size_t i = dims.size();
+        // while ((i-- > 0) && (strides[i] == expectedStride)) {
+        //     mContiguous &= (strides[i] == expectedStride);
+        //     expectedStride *= dims[i];
+        // }
         for (std::size_t i = dims.size()-1; i > 0; --i) {
             if (strides[i] != expectedStride) {
                 mContiguous = false;
@@ -148,26 +186,26 @@ std::string Aidge::Tensor::toString() const {
     return res;
 }
-Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const {
+Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& fixedCoord) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
-    AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
+    AIDGE_ASSERT(fixedCoord.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
     Tensor subTensor(mDataType);
-    subTensor.resize(std::vector<size_t>(mDims.begin() + coordIdx.size(), mDims.end()),
-                     std::vector<size_t>(mStrides.begin() + coordIdx.size(), mStrides.end()));
+    subTensor.resize(std::vector<size_t>(mDims.cbegin() + fixedCoord.size(), mDims.cend()),
+                     std::vector<size_t>(mStrides.cbegin() + fixedCoord.size(), mStrides.cend()));
     subTensor.setBackend(mImpl->backend(), mImpl->device().second);
-    subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx));
+    subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(fixedCoord));
     return subTensor;
 }
-Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const {
+Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& startCoord, const std::vector<std::size_t>& dims) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
-    AIDGE_ASSERT(coordIdx.size() == mDims.size(), "Coordinates does not match number of dimensions");
+    AIDGE_ASSERT(startCoord.size() == mDims.size(), "Coordinates does not match number of dimensions");
     Tensor subTensor(mDataType);
     subTensor.resize(dims, mStrides);
     subTensor.setBackend(mImpl->backend(), mImpl->device().second);
-    subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx));
+    subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(startCoord));
     return subTensor;
 }
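A hedged usage sketch of the two extract() overloads defined above (not part of the diff; values follow the 2x2x2 example used in the tests below):

    Aidge::Tensor x = Aidge::Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
    Aidge::Tensor row    = x.extract({0, 1});                // 1-D view holding {3, 4}, shares x's storage
    Aidge::Tensor slice  = x.extract({0, 0, 1}, {2, 1, 1});  // strided view, not contiguous
    Aidge::Tensor packed = slice.clone();                    // deep copy; clone() makes it contiguous again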
@@ -181,12 +219,12 @@ void Aidge::Tensor::makeContiguous() {
     // Create a new storage that will be contiguous
     std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
     // Copy elements from old to new storage
-    size_t idx = 0;
+    std::size_t idx = 0;
     while (idx < mSize) {
-        const size_t storageIdx = getStorageIdx(getCoord(idx));
+        const std::size_t storageIdx = getStorageIdx(getCoord(idx));
         // Determine the size of the contiguous chunk
-        size_t copySize = 1;
+        std::size_t copySize = 1;
         while (idx + copySize < mSize &&
             getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize)
         {
@@ -391,3 +429,10 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const
         return *fallback;
     }
 }
+
+std::set<std::string> Aidge::Tensor::getAvailableBackends() {
+    std::set<std::string> backendsList;
+    for(const auto& tupleKey : Registrar<Tensor>::getKeys())
+        backendsList.insert(std::get<0>(tupleKey));
+    return backendsList;
+}
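A small usage sketch for the relocated getAvailableBackends() (not part of the diff; with only the CPU backend registered this prints "cpu"):

    for (const std::string& backendName : Aidge::Tensor::getAvailableBackends()) {
        fmt::print("{}\n", backendName);
    }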
@@ -451,16 +451,15 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::ve
         this->generateScheduling(verbose);
     }
-    std::map<std::shared_ptr<Node>, std::string> namePtrTable;
-    if (verbose) namePtrTable = mGraphView->getRankedNodesName("{0} ({1}#{3})");
+    const auto namePtrTable = mGraphView->getRankedNodesName("{0} ({1}#{3})");
     size_t cpt = 0;
     for (const auto& runnable : mStaticSchedule.at(mStaticScheduleStep)) {
         if (verbose)
-            fmt::print("run: {}\n", namePtrTable[runnable]);
+            fmt::print("run: {}\n", namePtrTable.at(runnable));
         else
             drawProgressBar(static_cast<float>(cpt) / static_cast<float>(mStaticSchedule.size()), 50,
-                            (std::string("running ") + namePtrTable[runnable]));
+                            (std::string("running ") + namePtrTable.at(runnable)));
         const auto tStart = std::chrono::high_resolution_clock::now();
         runnable->forward();
         const auto tEnd = std::chrono::high_resolution_clock::now();
...
@@ -10,6 +10,10 @@
  ********************************************************************************/
 #include <array>
+#include <cstddef>
+#include <cstdint>  //std::uint16_t
+#include <random>
+#include <vector>
 #include <catch2/catch_test_macros.hpp>
@@ -19,47 +23,17 @@
 using namespace Aidge;
-TEST_CASE("[core/data] Tensor creation") {
-  SECTION("from const array") {
-    Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
-    Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
-    Tensor xFloat =
-        Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
-    SECTION("Tensor features") {
-      REQUIRE(x.nbDims() == 3);
-      REQUIRE(x.dims()[0] == 2);
-      REQUIRE(x.dims()[1] == 2);
-      REQUIRE(x.dims()[2] == 2);
-      REQUIRE(x.size() == 8);
-    }
-    SECTION("Access to array") {
-      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
-      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
-    }
-    SECTION("get function") {
-      REQUIRE(x.get<int>({0, 0, 0}) == 1);
-      REQUIRE(x.get<int>({0, 0, 1}) == 2);
-      REQUIRE(x.get<int>({0, 1, 1}) == 4);
-      REQUIRE(x.get<int>({1, 1, 0}) == 7);
-      x.set<int>({1, 1, 1}, 36);
-      REQUIRE(x.get<int>({1, 1, 1}) == 36);
-    }
-    SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); }
-    SECTION("Tensor (in)equality") {
-      REQUIRE(x == xCopy);
-      REQUIRE_FALSE(x == xFloat);
-    }
-  }
-}
+TEST_CASE("[backend/cpu/data] Tensor", "[TensorImpl]") {
+    Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    SECTION("Access to array") {
+        x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+        REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
+        REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
+    }
+}
-TEST_CASE("Tensor fill") {
+TEST_CASE("Tensor fill", "[TensorImpl][fill]") {
   SECTION("Instantiate batches independantly") {
     // initialization with 0s
     std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{});
@@ -85,43 +59,3 @@ TEST_CASE("Tensor fill") {
     REQUIRE(*concatenatedTensor == *expectedTensor);
   }
 }
-TEST_CASE("[core/data] Tensor methods","[Tensor]") {
-  Tensor x = Array3D<int, 2, 2, 2>{{
-    {{1, 2},
-     {3, 4}},
-    {{5, 6},
-     {7, 8}}
-  }};
-  Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
-  Tensor xFloat =
-      Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
-  SECTION("Tensor sharing") {
-    Tensor xCopyCtor(x);
-    REQUIRE(xCopyCtor.getImpl() == x.getImpl());
-    Tensor xEqOp = x;
-    REQUIRE(xEqOp.getImpl() == x.getImpl());
-    Tensor xCloned = x.clone();
-    REQUIRE(xCloned.getImpl() != x.getImpl());
-    REQUIRE(xCloned == x);
-  }
-  SECTION("Tensor extract") {
-    Tensor y = x.extract({0, 1});
-    REQUIRE(y.getImpl() == x.getImpl());
-    REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
-    REQUIRE(y.isContiguous());
-    Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
-    REQUIRE(y2.getImpl() == x.getImpl());
-    REQUIRE(!y2.isContiguous());
-    Tensor y3 = y2.clone();
-    REQUIRE(y3.isContiguous());
-    REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
-  }
-}
...
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <array>
#include <cstddef> // std::size_t
#include <cstdint> // std::uint8_t, std::uint16_t, std::int32_t
#include <numeric> // std::accumulate, std::inner_product
#include <functional> // std::multiplies
#include <random> // std::random_device, std::mt19937,
// std::uniform_int_distribution, std::uniform_real_distribution
#include <set>
#include <string>
#include <vector>
#include <catch2/catch_test_macros.hpp>
#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
SECTION("Default constructor") {
Tensor T_default{};
REQUIRE((
(T_default.dataType() == DataType::Float32) &&
(T_default.size() == 1) &&
(T_default.dims() == std::vector<DimSize_t>({})) &&
(T_default.strides() == std::vector<DimSize_t>({1})) &&
(T_default.getImpl() == nullptr) &&
(T_default.grad() == nullptr) &&
(T_default.isContiguous() == true)
));
}
SECTION("scalar constructor") {
Tensor T;
REQUIRE_NOTHROW(T = Tensor(std::int32_t(20)));
REQUIRE((
(T.dataType() == DataType::Int32) &&
(T.size() == 1) &&
(T.dims() == std::vector<DimSize_t>({})) &&
(T.strides() == std::vector<DimSize_t>({1})) &&
(T.getImpl() != nullptr) &&
(T.grad() == nullptr) &&
(T.isContiguous() == true)
));
}
SECTION("dim constructor") {
const std::vector<DimSize_t> Tdims = {1,2,3,4,5,6,7};
Tensor T;
REQUIRE_NOTHROW(T = Tensor(Tdims));
REQUIRE((
(T.dataType() == DataType::Float32) &&
(T.size() == std::accumulate(Tdims.cbegin(), Tdims.cend(), DimSize_t(1), std::multiplies<DimSize_t>())) &&
(T.dims() == Tdims) &&
(T.strides() == std::vector<DimSize_t>({5040,2520,840,210,42,7,1})) &&
(T.getImpl() == nullptr) &&
(T.grad() == nullptr) &&
(T.isContiguous() == true)
));
}
SECTION("TensorUtils, constructor from const arrays") {
Tensor T;
// Construction from different types and sizes
// Set an already constructed Tensor
REQUIRE_NOTHROW(T = Array1D<int, 2>{{1, 2}});
REQUIRE((
(T.dataType() == DataType::Int32) &&
(T.size() == 2) &&
(T.dims() == std::vector<DimSize_t>({2})) &&
(T.strides() == std::vector<DimSize_t>({1})) &&
(T.getImpl() != nullptr) &&
(T.grad() == nullptr) &&
(T.isContiguous() == true)
));
// Change dims
REQUIRE_NOTHROW(T = Array2D<int, 2, 2>{{{1, 2}, {3, 4}}});
// Change data types
REQUIRE_NOTHROW(T = Array3D<std::uint8_t, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
REQUIRE((
(T.dataType() == DataType::UInt8) &&
(T.size() == 8) &&
(T.dims() == std::vector<DimSize_t>({2,2,2})) &&
(T.strides() == std::vector<DimSize_t>({4,2,1})) &&
(T.getImpl() != nullptr) &&
(T.grad() == nullptr) &&
(T.isContiguous() == true)
));
REQUIRE_NOTHROW(T = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
REQUIRE_NOTHROW(T = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}});
REQUIRE_NOTHROW(T = Array3D<double, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}});
// Change dims
REQUIRE_NOTHROW(T = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},
{{{9,10}, {11,12}}, {{13,14},{15,16}}}}});
REQUIRE((
(T.dataType() == DataType::Int32) &&
(T.size() == 16) &&
(T.dims() == std::vector<DimSize_t>({2,2,2,2})) &&
(T.strides() == std::vector<DimSize_t>({8,4,2,1})) &&
(T.getImpl() != nullptr) &&
(T.grad() == nullptr) &&
(T.isContiguous() == true)
));
}
SECTION("copy constructor / copy assignment operator") {
}
SECTION("move constructor / move assignment operator") {
}
SECTION("prototype") {
constexpr std::uint16_t NBTRIALS = 10;
// Create random number generators
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
std::uniform_real_distribution<float> valueDist(0.001f, 1.0f);
for (std::size_t trial = 0; trial < NBTRIALS; ++trial) {
std::vector<std::size_t> Tdims;
const std::size_t Tsize = nbDimsDist(gen);
for (std::size_t i = 0; i < Tsize; ++i) {
Tdims.push_back(dimsDist(gen));
}
Tensor T(Tdims);
// fill the tensor
std::unique_ptr<float[]> array0(new float[T.size()]);
for (std::size_t i = 0; i < T.size(); ++i) {
array0[i] = valueDist(gen);
}
T.setBackend("cpu");
T.getImpl() -> setRawPtr(array0.get(), T.size());
Tensor Tclone;
REQUIRE_NOTHROW(Tclone = T.clone());
REQUIRE((
(T.dataType() == Tclone.dataType()) &&
(T.size() == Tclone.size()) &&
(T.dims() == Tclone.dims()) &&
(T.strides() == Tclone.strides()) &&
(T.getImpl() != Tclone.getImpl()) &&
(Tclone.grad() == nullptr) &&
(Tclone.isContiguous() == true)
));
REQUIRE(Tclone == T);
}
}
}
TEST_CASE("[core/data] Tensor(getter/setter)", "[Tensor][Getter][Setter]") {
constexpr std::uint16_t NBTRIALS = 10;
// Create random number generators
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
std::uniform_real_distribution<float> valueDist(0.001f, 1.0f);
for (std::size_t trial = 0; trial < NBTRIALS; ++trial) {
std::vector<std::size_t> Tdims;
const std::size_t Tsize = nbDimsDist(gen);
for (std::size_t i = 0; i < Tsize; ++i) {
Tdims.push_back(dimsDist(gen));
}
// create Tensor
Tensor T(Tdims);
// compute stride
std::vector<std::size_t> Tstrides(Tdims.size(), 1);
std::size_t i = Tdims.size() - 1;
while (i-- > 0) {
Tstrides[i] = Tstrides[i+1]*Tdims[i+1];
}
/////////////////
// dimensions
// nbDims(), dims(), size()
REQUIRE(T.nbDims() == Tdims.size());
REQUIRE(T.dims() == Tdims);
std::size_t trueSize = std::accumulate(Tdims.cbegin(), Tdims.cend(), 1, std::multiplies<std::size_t>());
REQUIRE(T.size() == trueSize);
/////////////////
// implementation
// getImpl(), setImpl(), hasImpl()
REQUIRE(T.hasImpl() == false);
std::shared_ptr<TensorImpl_cpu<float>> tensorImpl = std::make_shared<TensorImpl_cpu<float>>(0, Tdims);
T.setImpl(tensorImpl);
REQUIRE(T.getImpl() == tensorImpl);
REQUIRE(T.hasImpl() == true);
// isContiguous(), stride(),
REQUIRE(T.isContiguous());
REQUIRE(T.strides() == Tstrides);
// fill the tensor
std::unique_ptr<float[]> array0(new float[T.size()]);
for (std::size_t i = 0; i < T.size(); ++i) {
array0[i] = valueDist(gen);
}
tensorImpl -> setRawPtr(array0.get(), T.size());
// getCoord(), getIdx(), getStorageIdx()
std::vector<DimSize_t> Tdims_copy = Tdims;
for (auto& val : Tdims_copy) {
val = std::min(DimSize_t(2), std::max(DimSize_t(0), val - 1));
}
DimSize_t true_flatid = std::inner_product(Tdims_copy.cbegin(), Tdims_copy.cend(), Tstrides.cbegin(), DimSize_t(0));
REQUIRE(T.getCoord(true_flatid) == Tdims_copy);
REQUIRE(T.getIdx(Tdims_copy) == true_flatid);
REQUIRE(T.getStorageIdx(Tdims_copy) == true_flatid); // Tensor is not a view
// set(vector), set(size_t), get(vector), get(size_t), getImplOffset()
REQUIRE_NOTHROW(T.set<float>(Tdims_copy, 50.0f));
REQUIRE(T.get<float>(Tdims_copy) == 50.0f);
REQUIRE_NOTHROW(T.set<float>(true_flatid, 40.0f));
REQUIRE(T.get<float>(true_flatid) == 40.0f);
REQUIRE(T.getImplOffset() == 0);
//////////////
// backend
// getAvailableBackends()
REQUIRE(Tensor::getAvailableBackends() == std::set<std::string>({"cpu"}));
// setBackend()
REQUIRE_NOTHROW(T.setBackend("cpu", 0));
// setDataType(), dataType()
REQUIRE_NOTHROW(T.setDataType(DataType::Int16));
REQUIRE(T.dataType() == DataType::Int16);
}
}
TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") {
// extract, makeContiguous
// empty
constexpr std::uint16_t NBTRIALS = 10;
// Create random number generators
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
std::uniform_real_distribution<float> valueDist(0.001f, 1.0f);
// zeros, resize
SECTION("zeros") {
Tensor T;
for (std::size_t trial = 0; trial < NBTRIALS; ++trial) {
std::vector<std::size_t> Tdims;
const std::size_t Tsize = nbDimsDist(gen);
for (std::size_t i = 0; i < Tsize; ++i) {
Tdims.push_back(dimsDist(gen));
}
T.resize(Tdims);
// fill the tensor
std::unique_ptr<float[]> array0(new float[T.size()]);
for (std::size_t i = 0; i < T.size(); ++i) {
array0[i] = valueDist(gen);
}
T.setBackend("cpu");
T.getImpl() -> setRawPtr(array0.get(), T.size());
float* res = static_cast<float*>(T.getImpl()->hostPtr());
for (std::size_t i = 0; i < T.size(); ++i) {
REQUIRE(res[i] == array0[i]);
}
T.zeros();
res = static_cast<float*>(T.getImpl()->hostPtr());
for (std::size_t i = 0; i < T.size(); ++i) {
REQUIRE(res[i] == 0.0f);
}
}
}
SECTION("Tensor extract") {
bool equal;
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// create Tensor
const std::size_t nb_dims = 3;
const std::size_t dim0 = dimsDist(gen) + 1; // dim0 >= 2
const std::size_t dim1 = dimsDist(gen) + 1;
const std::size_t dim2 = dimsDist(gen) + 1;
std::vector<std::size_t> dims = {dim0, dim1, dim2};
std::unique_ptr<int[]> array0(new int[dim0*dim1*dim2]);
for (std::size_t i = 0; i < dim0; ++i) {
for (std::size_t j = 0; j < dim1; ++j) {
for (std::size_t k = 0; k < dim2; ++k) {
array0[((i * dim1) + j)*dim2 + k] = valueDist(gen);
}
}
}
Tensor x{dims};
x.setDataType(DataType::Int32);
x.setBackend("cpu");
Tensor y;
Tensor y0;
Tensor y1;
Tensor y2;
Tensor y3;
x.getImpl()->setRawPtr(array0.get(), dim0*dim1*dim2);
REQUIRE(x.isContiguous());
////////////////
// extract contiguous Tensor slice given start coordinates
// the whole Tensor
REQUIRE_NOTHROW(y0 = x.extract({}));
REQUIRE(y0 == x);
int* y0_res = static_cast<int*>(y0.getImpl()->hostPtr());
equal = true;
for (std::size_t i = 0; i < dim0*dim1*dim2; ++i) {
equal &= (y0_res[i] == array0[i]);
}
REQUIRE(equal);
REQUIRE(y0.getImpl() == x.getImpl());
REQUIRE(y0.isContiguous());
// Tensor - 1-D
REQUIRE_NOTHROW(y1 = x.extract({dim0 - 2}));
int* y1_res = static_cast<int*>(y1.getImpl()->hostPtr());
equal = true;
for (std::size_t i = 0; i < dim1*dim2; ++i) {
equal &= (y1_res[i] == array0[(dim0-2)*dim1*dim2 + i]);
}
REQUIRE(equal);
REQUIRE(y1.getImpl() == x.getImpl());
REQUIRE(y1.isContiguous());
// Tensor - 2-D
REQUIRE_NOTHROW(y2 = x.extract({dim0 - 2, dim1 - 2}));
int* y2_res = static_cast<int*>(y2.getImpl()->hostPtr());
equal = true;
for (std::size_t i = 0; i < dim2; ++i) {
equal &= (y2_res[i] == array0[(((dim0 - 2) * dim1) + (dim1 - 2))*dim2 + i]);
}
REQUIRE(equal);
REQUIRE(y2.getImpl() == x.getImpl());
REQUIRE(y2.isContiguous());
// Tensor - 3-D => scalar
REQUIRE_NOTHROW(y3 = x.extract({dim0 - 2, dim1 - 2, dim2 - 2}));
int* y3_res = static_cast<int*>(y3.getImpl()->hostPtr());
REQUIRE(y3_res[0] == array0[(((dim0 - 2) * dim1) + (dim1 - 2))*dim2 + dim2 - 2]);
REQUIRE(y3.getImpl() == x.getImpl());
REQUIRE(y3.isContiguous());
// throw an error
REQUIRE_THROWS(y = x.extract({0, dim1, 0}));
/////////////////
// extract Tensor slice given start coordinates and dimension
REQUIRE_NOTHROW(y = x.extract({0, 0, 1}, {dim0-1, 1, dim2-1}));
REQUIRE(y.getImpl() == x.getImpl()); // shared implem
REQUIRE(!y.isContiguous());
Tensor yClone = y.clone(); // when copying data, they are contiguous in memory
REQUIRE(yClone.isContiguous());
// int yTruth[2][1][1] =
REQUIRE(approxEq<int>(yClone, y, 0.0f, 0.0f));
}
}
// print, toString,
SECTION("Pretty printing for debug") {
Tensor x{};
// Empty Tensor
REQUIRE_THROWS(x.print());
// scalar
x = Tensor(42);
REQUIRE_NOTHROW(x.print());
// 1-D Tensors
x = Array1D<int, 1>{{1}};
REQUIRE_NOTHROW(x.print());
x = Array1D<int, 6>{{1,2,3,4,5,6}};
REQUIRE_NOTHROW(x.print());
// 2-D Tensors
x = Array2D<int, 3, 2>{{{1, 2}, {3, 4}, {5, 6}}};
REQUIRE_NOTHROW(x.print());
// +2-D Tensors
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE_NOTHROW(x.print());
x = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},{{{11, 12}, {13, 14}}, {{15, 16}, {17, 18}}}}};
REQUIRE_NOTHROW(x.print());
}
}
} // namespace Aidge