Commit d4ca927d authored by Maxence Naud

Update Tensor.hpp and Tensor.cpp

- [#include] Remove duplicated includes, add missing ones
- [attributes] Add default values for mDataType, mImpl and mGrad
- [constructors] Order constructors; change array copy assignment to remove code duplication
- Calling grad() no longer instantiates the gradient; a dedicated function should do it
- Move getAvailableBackends() and operator=(const Tensor&) to the cpp file
- Tensor::resize() now handles scalar Tensors
parent 0167d1dc

2 merge requests: !105 "version 0.2.0", !89 "Increase the number of unit-tests for Tensor"

Pipeline #40282 failed
Tensor.hpp:

@@ -12,10 +12,12 @@
 #ifndef AIDGE_CORE_DATA_TENSOR_H_
 #define AIDGE_CORE_DATA_TENSOR_H_
 
+#include <cstddef>      // std::size_t
 #include <cstring>
+#include <functional>   // std::multiplies
 #include <set>
 #include <memory>
 #include <numeric>      // std::accumulate
 #include <string>
 #include <type_traits>  // std::is_arithmetic
 #include <vector>
@@ -35,15 +37,17 @@ namespace Aidge {
 class Tensor : public Data,
                public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
    private:
-    DataType mDataType; /** enum to specify data type. */
+    DataType mDataType = DataType::Float32; /** enum to specify data type. */
     std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
     std::vector<DimSize_t> mStrides; /** Stride dimensions of the tensor. */
-    std::shared_ptr<TensorImpl> mImpl; /** Pointer to the actual data implementation. */
+    std::shared_ptr<TensorImpl> mImpl = nullptr; /** Pointer to the actual data implementation. */
     std::size_t mImplOffset = 0;
-    std::shared_ptr<Tensor> mGrad; /** Pointer to the associated gradient Tensor instance. */
+    std::shared_ptr<Tensor> mGrad = nullptr; /** Pointer to the associated gradient Tensor instance. */
 
     // Cached data
-    std::size_t mSize = 0; /** Number of elements in the Tensor. */
+    /// @brief Number of elements in the Tensor.
+    std::size_t mSize;
+    /// @brief Whether or not data are contiguous in memory.
     bool mContiguous = true;
 
    public:
@@ -51,64 +55,48 @@ class Tensor : public Data,
     /**
      * @brief Construct a new empty Tensor object.
-     * @param dataType Sets the type of inserted data.
+     * It has the features of an undefined scalar.
      */
-    Tensor(DataType dataType = DataType::Float32)
+    Tensor(DataType dtype = DataType::Float32)
         : Data(Type),
-          mDataType(dataType)
+          mDataType(dtype),
+          mDims(std::vector<DimSize_t>({})),
+          mStrides({1}),
+          mSize(1)
     {
         // ctor
     }
 
     /**
-     * @brief Construct a new Tensor object from dimensions.
+     * @brief Construct a new Tensor object from an arithmetic parameter.
      *
-     * @param dims dimensions of the tensor
-     * @param dataType datatype of the tensor (default = DataType::Float32)
+     * @tparam T Type of the input parameter.
+     * @tparam VT Decayed type of the input parameter.
+     * @param val Input value.
      */
-    Tensor(const std::vector<DimSize_t>& dims, DataType dataType = DataType::Float32)
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    Tensor(T val)
         : Data(Type),
-          mDataType(dataType),
-          mDims(dims)
+          mDataType(NativeType<VT>::type),
+          mDims({}),
+          mStrides({1}),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
+          mSize(1)
     {
-        computeSize();
+        *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
     }
 
     /**
-     * @brief Construct a new Tensor object from another one (shallow copy).
-     * Data memory is not copied, but shared between the new Tensor and the
-     * initial one.
+     * @brief Construct a new Tensor object from dimensions.
      *
-     * @param otherTensor
+     * @param dims dimensions of the tensor
      */
-    Tensor(const Tensor&) = default;
-    Tensor(Tensor&&) = default;
-
-    /**
-     * Perform a deep copy of the tensor.
-     */
-    Tensor clone() const {
-        Tensor newTensor(*this);
-        if (!newTensor.isContiguous()) {
-            newTensor.makeContiguous();
-        }
-        else {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
-            newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
-            newTensor.setImpl(newImpl);
-        }
-        return newTensor;
-    }
-
-    template<typename T,
-             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
-    Tensor(T val)
-        : Data(Type),
-          mDataType(NativeType<VT>::type),
-          mDims({}), mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
-          mSize(1) {
-        *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
-    }
+    Tensor(const std::vector<DimSize_t>& dims)
+        : Data(Type)
+    {
+        // set mDims, mStrides, mContiguous, mSize
+        resize(dims);
+    }
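The new arithmetic constructor turns any arithmetic value into a rank-0 (scalar) tensor allocated through the "cpu" registrar. A minimal sketch, assuming a registered "cpu" backend and the usual NativeType<T> mappings (e.g. float maps to DataType::Float32):

    #include "aidge/data/Tensor.hpp"

    Aidge::Tensor pi(3.14f);  // scalar: dims() == {}, strides() == {1}, one float element
    Aidge::Tensor n(42);      // scalar whose DataType follows NativeType<int>::type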
@@ -123,20 +111,11 @@ class Tensor : public Data,
           mDims({SIZE_0}),
           mStrides({1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
-          mSize(SIZE_0) {
+          mSize(SIZE_0)
+    {
         mImpl->copyFromHost(&arr.data[0], SIZE_0);
     }
 
-    template <typename T, std::size_t SIZE_0>
-    constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
-        resize({SIZE_0});
-        if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0});
-        }
-        mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset);
-        return *this;
-    }
-
     /**
      * @brief Construct a new Tensor object from the 2-dimensions Array helper.
      * @tparam T datatype
@@ -154,16 +133,6 @@ class Tensor : public Data,
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
     }
 
-    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
-    constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
-        resize({SIZE_0, SIZE_1});
-        if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1});
-        }
-        mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset);
-        return *this;
-    }
-
     /**
      * @brief Construct a new Tensor object from the 3-dimensions Array helper.
      * @tparam T datatype
@@ -182,16 +151,6 @@ class Tensor : public Data,
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
     }
 
-    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
-    constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
-        resize({SIZE_0, SIZE_1, SIZE_2});
-        if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2});
-        }
-        mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset);
-        return *this;
-    }
-
     /**
      * @brief Construct a new Tensor object from the 4-dimensions Array helper.
      * @tparam T datatype
@@ -211,15 +170,19 @@ class Tensor : public Data,
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
     }
 
-    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
-    constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
-        resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
-        if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3});
-        }
-        mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset);
-        return *this;
-    }
+    /**
+     * @brief Copy constructor. Construct a new Tensor object from another one
+     * (shallow copy). Data memory is not copied, but shared between the new
+     * Tensor and the initial one.
+     * @param other
+     */
+    Tensor(const Tensor& other) = default;
+
+    /**
+     * @brief Move constructor.
+     * @param other
+     */
+    Tensor(Tensor&& other) = default;
 
     /**
      * @brief Copy dimensions, datatype and data from another Tensor.
@@ -227,24 +190,32 @@ class Tensor : public Data,
      * existing implementation. Tensor backend/device remain untouched.
      * If current Tensor does not have an implementation, only a shallow copy
      * is performed and the Tensor will share data with t.
-     * @param t other Tensor object.
+     * @param other other Tensor object.
      * @return Tensor&
      */
-    Tensor &operator=(const Tensor &t) {
-        resize(t.dims(), t.strides());
-        setDataType(t.dataType(), false); // do not convert existing data
-        if (t.hasImpl()) {
-            if (hasImpl()) {
-                copyFrom(t);
-            }
-            else {
-                // Perform a shallow copy only
-                setImpl(t.mImpl, t.mImplOffset);
-            }
-        }
-        else {
-            setImpl(nullptr);
-        }
+    Tensor &operator=(const Tensor& other);
+
+    template <typename T, std::size_t SIZE_0>
+    constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
+        *this = Tensor(std::move(arr));
+        return *this;
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
+    constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
+        *this = Tensor(std::move(arr));
+        return *this;
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
+    constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
+        *this = Tensor(std::move(arr));
+        return *this;
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
+    constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
+        *this = Tensor(std::move(arr));
         return *this;
     }
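All four array copy-assignment operators now funnel through one path: build a temporary Tensor from the rvalue array, then hand it to operator=(const Tensor&). This is the deduplication mentioned in the commit message; the trade-off is that assignment always materializes a fresh "cpu" implementation for the temporary instead of filling the target's storage in place. A hedged sketch (the nested-brace Array syntax is assumed from the helpers' data layout):

    Aidge::Tensor t;
    t = Aidge::Array1D<int, 3>{{1, 2, 3}};            // Tensor(Array1D&&), then copy-assignment
    t = Aidge::Array2D<int, 2, 2>{{{1, 2}, {3, 4}}};  // same single code path for every rank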
@@ -260,6 +231,23 @@ class Tensor : public Data,
         return *mImpl == *(otherTensor.mImpl);
     }
 
+public:
+    /**
+     * @brief Perform a deep copy of the tensor.
+     */
+    Tensor clone() const {
+        Tensor newTensor(*this);
+        if (!newTensor.isContiguous()) {
+            newTensor.makeContiguous();
+        }
+        else {
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
+            newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
+            newTensor.setImpl(newImpl);
+        }
+        return newTensor;
+    }
+
     /**
      * @brief Set the backend of the Tensor associated implementation. If there
      * was no previous implementation set, data will be allocated, but it will
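Since the copy constructor stays shallow, clone() remains the one way to get an independent deep copy; it is only moved in this commit, not changed. A short sketch for a tensor that owns an implementation (assumes a registered "cpu" backend):

    Aidge::Tensor a(3.0f);
    Aidge::Tensor shallow(a);        // shares a's TensorImpl
    Aidge::Tensor deep = a.clone();  // fresh storage holding the same values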
@@ -292,12 +280,7 @@ class Tensor : public Data,
      * @brief Get a list of available backends.
      * @return std::set<std::string>
      */
-    static std::set<std::string> getAvailableBackends(){
-        std::set<std::string> backendsList;
-        for(std::tuple<std::string, DataType> tupleKey : Registrar<Tensor>::getKeys())
-            backendsList.insert(std::get<0>(tupleKey));
-        return backendsList;
-    }
+    static std::set<std::string> getAvailableBackends();
 
     /**
      * @brief Get the data type enum.
@@ -369,13 +352,13 @@ class Tensor : public Data,
      * @brief Get dimensions of the Tensor object.
      * @return constexpr const std::vector<DimSize_t>&
      */
-    constexpr const std::vector<DimSize_t> &dims() const { return mDims; }
+    constexpr inline const std::vector<DimSize_t>& dims() const noexcept { return mDims; }
 
     /**
      * @brief Get strides of the Tensor object.
      * @return constexpr const std::vector<DimSize_t>&
      */
-    constexpr const std::vector<DimSize_t> &strides() const { return mStrides; }
+    constexpr inline const std::vector<DimSize_t>& strides() const noexcept { return mStrides; }
 
     /**
      * @brief Return true if Tensor is contiguous in memory.
@@ -424,6 +407,9 @@ class Tensor : public Data,
      * @return false
      */
     bool empty() const { return mDims.empty(); }
+    // bool newempty() const noexcept {
+    //     return mSize == 0;
+    // }
 
     /**
      * @brief Set each element of the tensor to zero.
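The commented-out newempty() hints at an ambiguity introduced by scalar support: a rank-0 tensor has no dimensions yet stores one element, so empty() now means "has no dimensions" rather than "holds no data". An illustration (size() is assumed to be the accessor for the cached mSize):

    Aidge::Tensor s(1.0f);  // scalar
    s.empty();              // true: mDims is empty...
    // ...even though one element is stored (mSize == 1)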
@@ -464,12 +450,13 @@ class Tensor : public Data,
     inline void print() const { printf("%s\n", toString().c_str()); }
 
     std::shared_ptr<Tensor> grad() {
-        if (!mGrad) {
-            mGrad = std::make_shared<Tensor>(mDataType);
-            mGrad->resize(mDims);
-
-            if (mImpl) mGrad->setBackend(mImpl->backend());
-        }
+        // if (!mGrad && mImpl) {
+        //     mGrad = std::make_shared<Tensor>(mDims);
+        //     mGrad->setDataType(mDataType);
+        //     mGrad->setBackend(mImpl->backend());
+        //     // if (mImpl) mGrad->setBackend(mImpl->backend());
+        // }
         return mGrad;
     }
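As the commit message states, grad() is now a plain getter: the lazy construction is commented out, so callers receive a null pointer until the gradient has been created by a dedicated function (not part of this diff). A defensive caller-side sketch:

    std::shared_ptr<Aidge::Tensor> g = t.grad();
    if (!g) {
        // gradient not instantiated yet; it must now be created explicitly
    }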
@@ -481,14 +468,14 @@ class Tensor : public Data,
      * @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor.
      * @return std::vector<DimSize_t>
      */
-    std::vector<std::size_t> getCoord(const std::size_t flatIdx) const {
-        std::vector<std::size_t> coordIdx = std::vector<std::size_t>(mDims.size());
-        std::size_t idx = flatIdx;
-        for (std::size_t i = mDims.size() - 1; i > 0; --i){
-            coordIdx[i] = (idx % mDims[i]);
-            idx /= mDims[i];
-        }
-        coordIdx[0] = idx % mDims[0];
+    std::vector<std::size_t> getCoord(std::size_t flatIdx) const {
+        std::vector<std::size_t> coordIdx(mDims.size());
+        std::size_t i = mDims.size();
+        while (i-- > 0) {
+            coordIdx[i] = (flatIdx % mDims[i]);
+            flatIdx /= mDims[i];
+        }
         return coordIdx;
     }
@@ -506,7 +493,7 @@ class Tensor : public Data,
         AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
         std::size_t flatIdx = 0;
         std::size_t i = 0;
-        for(; i < coordIdx.size() - 1; ++i){
+        for(; i < coordIdx.size() - 1; ++i) {
             AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
             flatIdx = (flatIdx + coordIdx[i]) * mDims[i + 1];
         }
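The rewritten getCoord() walks dimensions from innermost to outermost with a single decrementing loop, which also handles rank-0 tensors gracefully (the loop never runs and an empty vector comes back). For a contiguous tensor it inverts the flat-index computation above; a worked example, assuming the flat-index helper is the getIdx() member this hunk belongs to:

    Aidge::Tensor t({2, 3, 4});
    // 17 = 1*(3*4) + 1*4 + 1
    auto coord = t.getCoord(17);   // {1, 1, 1}
    auto flat  = t.getIdx(coord);  // 17 again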
@@ -522,21 +509,24 @@ class Tensor : public Data,
     std::size_t getStorageIdx(const std::vector<std::size_t>& coordIdx) const {
+        for(std::size_t i = 0; i < coordIdx.size(); ++i) {
+            AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
+        }
         AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
-        return std::inner_product(coordIdx.begin(), coordIdx.end(), mStrides.begin(), DimSize_t(0));
+        return std::inner_product(coordIdx.cbegin(), coordIdx.cend(), mStrides.cbegin(), DimSize_t(0));
     }
 
     /**
      * @brief Returns a sub-tensor with equal or lower number of dimensions.
      *
-     * For instance, ``t.extract({1})`` on a CHW tensor will return the HW tensor
+     * @note For instance, ``t.extract({1})`` on a CHW tensor will return the HW tensor
      * of channel #1.
      * Likewise, ``t.extract({0, 1})`` on a NCHW tensor will return the HW tensor
      * of batch #0 and channel #1.
-     * No memory copy is performed, the returned tensor does not own the memory.
-     * If the number of coordinates matches the number of dimensions, an empty
+     * @note No memory copy is performed, the returned tensor does not own the memory.
+     * @note If the number of coordinates matches the number of dimensions, a scalar
      * tensor is returned.
-     * It current tensor was contiguous, the returned tensor is guaranteed to be
+     * @note If current tensor was contiguous, the returned tensor is guaranteed to be
      * contiguous as well.
      *
      * @param coordIdx Coordinates of the sub-tensor to extract
@@ -547,6 +537,8 @@ class Tensor : public Data,
     /**
      * @brief Returns a sub-tensor at some coordinate and with some dimension.
      *
+     * @note Data contiguity of the returned Tensor is not guaranteed.
+     *
      * @param coordIdx First coordinates of the sub-tensor to extract
      * @param dims Dimensions of the sub-tensor to extract
      * @return Tensor Sub-tensor.
Tensor.cpp:
@@ -9,9 +9,6 @@
  *
  ********************************************************************************/
 
-#include <vector>
-#include <cstddef>
-
 #include "aidge/data/Tensor.hpp"
 
 #include <cstddef>
@@ -21,7 +18,38 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
+Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
+    resize(other.dims(), other.strides());
+    setDataType(other.dataType(), false); // do not convert existing data
+    if (other.hasImpl()) {
+        if (hasImpl()) {
+            copyFrom(other);
+        }
+        else {
+            // Perform a shallow copy only
+            setImpl(other.mImpl, other.mImplOffset);
+        }
+    }
+    else {
+        setImpl(nullptr);
+    }
+    return *this;
+}
+
 void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
-    // TODO: scalar Tensor not handled
+    if (dims.empty()) { // scalar
+        mDims = std::vector<DimSize_t>(0);
+        mStrides = std::vector<DimSize_t>({1});
+        mContiguous = true;
+
+        computeSize();
+        if (mImpl) {
+            mImpl->resize(mDims);
+        }
+        return;
+    }
+
     bool checkContiguous = true;
     if (strides.empty()) {
         strides.resize(dims.size());
@@ -36,7 +64,7 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
         AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
     }
 
-    if (mImpl.use_count() > 1) {
+    if (mImpl && mImpl.use_count() > 1) {
         // Here we could also create a new storage for this tensor in this case
         // But, is it more likely that the user really wants this, or that he did a mistake?
         AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
@@ -48,6 +76,11 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
         mContiguous = true;
         if (checkContiguous) {
             std::size_t expectedStride = 1;
+            // std::size_t i = dims.size();
+            // while ((i-- > 0) && (strides[i] == expectedStride)) {
+            //     mContiguous &= (strides[i] == expectedStride);
+            //     expectedStride *= dims[i];
+            // }
             for (std::size_t i = dims.size()-1; i > 0; --i) {
                 if (strides[i] != expectedStride) {
                     mContiguous = false;
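With the early-return added above, resizing to an empty dims vector is now legal and produces a scalar: dims become {}, strides {1}, and computeSize() yields one element (an empty product accumulates to 1). A sketch, assuming the header gives the strides parameter a default:

    Aidge::Tensor t({2, 3});
    t.resize({});  // rank-0 tensor: dims() == {}, strides() == {1}, size one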
@@ -153,26 +186,26 @@ std::string Aidge::Tensor::toString() const {
     return res;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const {
+Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& fixedCoord) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
-    AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
+    AIDGE_ASSERT(fixedCoord.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
 
     Tensor subTensor(mDataType);
-    subTensor.resize(std::vector<size_t>(mDims.begin() + coordIdx.size(), mDims.end()),
-        std::vector<size_t>(mStrides.begin() + coordIdx.size(), mStrides.end()));
+    subTensor.resize(std::vector<size_t>(mDims.cbegin() + fixedCoord.size(), mDims.cend()),
+        std::vector<size_t>(mStrides.cbegin() + fixedCoord.size(), mStrides.cend()));
     subTensor.setBackend(mImpl->backend(), mImpl->device().second);
-    subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx));
+    subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(fixedCoord));
     return subTensor;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const {
+Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& startCoord, const std::vector<std::size_t>& dims) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
-    AIDGE_ASSERT(coordIdx.size() == mDims.size(), "Coordinates does not match number of dimensions");
+    AIDGE_ASSERT(startCoord.size() == mDims.size(), "Coordinates does not match number of dimensions");
 
     Tensor subTensor(mDataType);
     subTensor.resize(dims, mStrides);
     subTensor.setBackend(mImpl->backend(), mImpl->device().second);
-    subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx));
+    subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(startCoord));
     return subTensor;
 }
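The renamed parameters (fixedCoord / startCoord) make the two extract() overloads easier to tell apart: the first drops leading dimensions, the second cuts a block. A usage sketch on a contiguous tensor with a "cpu" backend:

    Aidge::Tensor t({2, 3, 4});
    t.setBackend("cpu");
    Aidge::Tensor row   = t.extract({0, 1});               // shape {4} view, shared storage
    Aidge::Tensor block = t.extract({0, 0, 0}, {1, 2, 2});  // {1,2,2} block, parent strides kept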
@@ -396,3 +429,10 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const
         return *fallback;
     }
 }
+
+std::set<std::string> Aidge::Tensor::getAvailableBackends() {
+    std::set<std::string> backendsList;
+    for(const auto& tupleKey : Registrar<Tensor>::getKeys())
+        backendsList.insert(std::get<0>(tupleKey));
+    return backendsList;
+}
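Moving the definition here leaves only a declaration in the header. Call sites are unchanged; for example:

    for (const auto& backend : Aidge::Tensor::getAvailableBackends()) {
        std::cout << backend << '\n';  // e.g. "cpu", plus any registered plugin backends
    }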