Commit d4ca927d authored by Maxence Naud

Update Tensor.hpp and Tensor.cpp

- [#include] Remove duplicated includes, add includes
- [attributes] add default values for mDataType, mImpl and mGrad
- [constructors] Order constructors, change array copy assignment to remove code duplication
- calling grad() no longer instantiates the gradient; a dedicated function should do it (see the sketch below)
- move getAvailableBackends() and operator=(const Tensor&) to cpp file
- Tensor::resize() now handles scalar Tensors
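
A minimal sketch of the resulting behavior (hypothetical usage, not part of this commit; assumes aidge_core with a registered "cpu" backend):

#include "aidge/data/Tensor.hpp"

int main() {
    Aidge::Tensor t(3.14f);   // 0-D (scalar) Tensor built from an arithmetic value
    t.resize({});             // resize() now accepts the empty-dims (scalar) case
    auto g = t.grad();        // only returns the gradient; per this commit, allocation
                              // is left to a dedicated initialization function
    return 0;
}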
parent 0167d1dc
2 merge requests: !105 version 0.2.0, !89 Increase the number of unit-tests for Tensor
Pipeline #40282 failed
@@ -12,10 +12,12 @@
#ifndef AIDGE_CORE_DATA_TENSOR_H_
#define AIDGE_CORE_DATA_TENSOR_H_
#include <cstddef> // std::size_t
#include <cstring>
#include <functional> // std::multiplies
#include <set>
#include <memory>
#include <numeric> // std::accumulate
#include <numeric> // std::accumulate
#include <string>
#include <type_traits> // std::is_arithmetic
#include <vector>
@@ -35,15 +37,17 @@ namespace Aidge {
class Tensor : public Data,
public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
private:
DataType mDataType; /** enum to specify data type. */
DataType mDataType = DataType::Float32; /** enum to specify data type. */
std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
std::vector<DimSize_t> mStrides; /** Stride dimensions of the tensor. */
std::shared_ptr<TensorImpl> mImpl; /** Pointer to the actual data implementation. */
std::shared_ptr<TensorImpl> mImpl = nullptr; /** Pointer to the actual data implementation. */
std::size_t mImplOffset = 0;
std::shared_ptr<Tensor> mGrad; /** Pointer to the associated gradient Tensor instance. */
std::shared_ptr<Tensor> mGrad = nullptr; /** Pointer to the associated gradient Tensor instance. */
// Cached data
std::size_t mSize = 0; /** Number of elements in the Tensor. */
/// @brief Number of elements in the Tensor.
std::size_t mSize;
/// @brief Whether or not data are contiguous in memory.
bool mContiguous = true;
public:
@@ -51,64 +55,48 @@ class Tensor : public Data,
/**
* @brief Construct a new empty Tensor object.
* @param dataType Sets the type of inserted data.
* It has the features of an undefined scalar.
*/
Tensor(DataType dataType = DataType::Float32)
Tensor(DataType dtype = DataType::Float32)
: Data(Type),
mDataType(dataType)
mDataType(dtype),
mDims(std::vector<DimSize_t>({})),
mStrides({1}),
mSize(1)
{
// ctor
}
/**
* @brief Construct a new Tensor object from dimensions.
* @brief Construct a new Tensor object from an arithmetic parameter.
*
* @param dims dimensions of the tensor
* @param dataType datatype of the tensor (default = DataType::Float32)
* @tparam T Type of the input parameter.
* @tparam VT Decayed type of the input parameter.
* @param val Input value.
*/
Tensor(const std::vector<DimSize_t>& dims, DataType dataType = DataType::Float32)
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
Tensor(T val)
: Data(Type),
mDataType(dataType),
mDims(dims)
mDataType(NativeType<VT>::type),
mDims({}),
mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
mSize(1)
{
computeSize();
*static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
}
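
// A hedged usage example for this constructor (assumes the "cpu" Tensor
// implementation is registered):
//   Aidge::Tensor scalar(42);  // VT = int: 0-D tensor, dims() == {}, strides() == {1}, size 1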
/**
* @brief Construct a new Tensor object from another one (shallow copy).
* Data memory is not copied, but shared between the new Tensor and the
* initial one.
* @brief Construct a new Tensor object from dimensions.
*
* @param otherTensor
* @param dims dimensions of the tensor
*/
Tensor(const Tensor&) = default;
Tensor(Tensor&&) = default;
/**
* Perform a deep copy of the tensor.
*/
Tensor clone() const {
Tensor newTensor(*this);
if (!newTensor.isContiguous()) {
newTensor.makeContiguous();
}
else {
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
newTensor.setImpl(newImpl);
}
return newTensor;
}
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
Tensor(T val)
: Data(Type),
mDataType(NativeType<VT>::type),
mDims({}), mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
mSize(1) {
*static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
Tensor(const std::vector<DimSize_t>& dims)
: Data(Type)
{
// set mDims, mStrides, mContiguous, mSize
resize(dims);
}
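
// For illustration, a hedged sketch of what resize() computes for this
// constructor (assuming default contiguous, row-major strides):
//   Aidge::Tensor t(std::vector<Aidge::DimSize_t>{2, 3, 4});
//   // mSize = 2 * 3 * 4 = 24, mStrides = {12, 4, 1}, mContiguous = true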
/**
@@ -123,20 +111,11 @@ class Tensor : public Data,
mDims({SIZE_0}),
mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
mSize(SIZE_0) {
mSize(SIZE_0)
{
mImpl->copyFromHost(&arr.data[0], SIZE_0);
}
template <typename T, std::size_t SIZE_0>
constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
resize({SIZE_0});
if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0});
}
mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset);
return *this;
}
/**
* @brief Construct a new Tensor object from the 2-dimensions Array helper.
* @tparam T datatype
@@ -154,16 +133,6 @@ class Tensor : public Data,
mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
}
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
resize({SIZE_0, SIZE_1});
if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1});
}
mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset);
return *this;
}
/**
* @brief Construct a new Tensor object from the 3-dimensions Array helper.
* @tparam T datatype
@@ -182,16 +151,6 @@ class Tensor : public Data,
mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
}
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
resize({SIZE_0, SIZE_1, SIZE_2});
if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2});
}
mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset);
return *this;
}
/**
* @brief Construct a new Tensor object from the 4-dimensions Array helper.
* @tparam T datatype
@@ -211,15 +170,19 @@ class Tensor : public Data,
mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
}
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3});
}
mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset);
return *this;
}
/**
* @brief Copy constructor. Construct a new Tensor object from another one
* (shallow copy). Data memory is not copied, but shared between the new
* Tensor and the initial one.
* @param other
*/
Tensor(const Tensor& other) = default;
/**
* @brief Move constructor.
* @param other
*/
Tensor(Tensor&& other) = default;
/**
* @brief Copy dimensions, datatype and data from another Tensor.
@@ -227,24 +190,32 @@ class Tensor : public Data,
* existing implementation. Tensor backend/device remain untouched.
* If current Tensor does not have an implementation, only a shallow copy
* is performed and the Tensor will share data with t.
* @param t other Tensor object.
* @param other other Tensor object.
* @return Tensor&
*/
Tensor &operator=(const Tensor &t) {
resize(t.dims(), t.strides());
setDataType(t.dataType(), false); // do not convert existing data
if (t.hasImpl()) {
if (hasImpl()) {
copyFrom(t);
}
else {
// Perform a shallow copy only
setImpl(t.mImpl, t.mImplOffset);
}
}
else {
setImpl(nullptr);
}
Tensor &operator=(const Tensor& other);
template <typename T, std::size_t SIZE_0>
constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
*this = Tensor(std::move(arr));
return *this;
}
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
*this = Tensor(std::move(arr));
return *this;
}
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
*this = Tensor(std::move(arr));
return *this;
}
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
*this = Tensor(std::move(arr));
return *this;
}
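
The four operators above now share a single code path: build a temporary Tensor from the array, then move-assign it. A standalone sketch of the same deduplication idiom (illustrative only, not Aidge code):

#include <utility>
#include <vector>

struct Buffer {
    std::vector<float> data;
    Buffer() = default;
    explicit Buffer(std::vector<float>&& v) : data(std::move(v)) {}
    // Assignment delegates to construction so the logic lives in one place.
    Buffer& operator=(std::vector<float>&& v) {
        *this = Buffer(std::move(v));
        return *this;
    }
};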
@@ -260,6 +231,23 @@ class Tensor : public Data,
return *mImpl == *(otherTensor.mImpl);
}
public:
/**
* @brief Perform a deep copy of the tensor.
*/
Tensor clone() const {
Tensor newTensor(*this);
if (!newTensor.isContiguous()) {
newTensor.makeContiguous();
}
else {
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
newTensor.setImpl(newImpl);
}
return newTensor;
}
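
// A hedged example contrasting shallow and deep copies (assumes a "cpu"
// backend has been set so clone() has data to duplicate):
//   Aidge::Tensor a(std::vector<Aidge::DimSize_t>{2, 2});
//   a.setBackend("cpu");
//   Aidge::Tensor shallow = a;        // copy constructor: shares a's TensorImpl
//   Aidge::Tensor deep = a.clone();   // fresh implementation, data copied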
/**
* @brief Set the backend of the Tensor associated implementation. If there
* was no previous implementation set, data will be allocated, but it will
@@ -292,12 +280,7 @@
* @brief Get a list of available backends.
* @return std::set<std::string>
*/
static std::set<std::string> getAvailableBackends(){
std::set<std::string> backendsList;
for(std::tuple<std::string, DataType> tupleKey : Registrar<Tensor>::getKeys())
backendsList.insert(std::get<0>(tupleKey));
return backendsList;
}
static std::set<std::string> getAvailableBackends();
/**
* @brief Get the data type enum.
@@ -369,13 +352,13 @@
* @brief Get dimensions of the Tensor object.
* @return constexpr const std::vector<DimSize_t>&
*/
constexpr const std::vector<DimSize_t> &dims() const { return mDims; }
constexpr inline const std::vector<DimSize_t>& dims() const noexcept { return mDims; }
/**
* @brief Get strides of the Tensor object.
* @return constexpr const std::vector<DimSize_t>&
*/
constexpr const std::vector<DimSize_t> &strides() const { return mStrides; }
constexpr inline const std::vector<DimSize_t>& strides() const noexcept { return mStrides; }
/**
* @brief Return true if Tensor is contiguous in memory.
@@ -424,6 +407,9 @@
* @return false
*/
bool empty() const { return mDims.empty(); }
// bool newempty() const noexcept {
// return mSize == 0;
// }
/**
* @brief Set each element of the tensor to zero.
@@ -464,12 +450,13 @@
inline void print() const { printf("%s\n", toString().c_str()); }
std::shared_ptr<Tensor> grad() {
if (!mGrad) {
mGrad = std::make_shared<Tensor>(mDataType);
mGrad->resize(mDims);
// if (!mGrad && mImpl) {
// mGrad = std::make_shared<Tensor>(mDims);
// mGrad->setDataType(mDataType);
// mGrad->setBackend(mImpl->backend());
if (mImpl) mGrad->setBackend(mImpl->backend());
}
// // if (mImpl) mGrad->setBackend(mImpl->backend());
// }
return mGrad;
}
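
// Judging from the commented-out lines, the dedicated initializer could look
// like this (hypothetical sketch; the name initGrad is not from this commit):
// void initGrad() {
//     if (!mGrad && mImpl) {
//         mGrad = std::make_shared<Tensor>(mDims);
//         mGrad->setDataType(mDataType);
//         mGrad->setBackend(mImpl->backend());
//     }
// }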
@@ -481,14 +468,14 @@
* @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor.
* @return std::vector<DimSize_t>
*/
std::vector<std::size_t> getCoord(const std::size_t flatIdx) const {
std::vector<std::size_t> coordIdx = std::vector<std::size_t>(mDims.size());
std::size_t idx = flatIdx;
for (std::size_t i = mDims.size() - 1; i > 0; --i){
coordIdx[i] = (idx % mDims[i]);
idx/=mDims[i];
std::vector<std::size_t> getCoord(std::size_t flatIdx) const {
std::vector<std::size_t> coordIdx(mDims.size());
std::size_t i = mDims.size();
while (i-- > 0) {
coordIdx[i] = (flatIdx % mDims[i]);
flatIdx/=mDims[i];
}
coordIdx[0] = idx % mDims[0];
return coordIdx;
}
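
// For example, with mDims = {2, 3, 4}, flatIdx = 17 yields {1, 1, 1}:
// 17 % 4 = 1 and 17 / 4 = 4; then 4 % 3 = 1 and 4 / 3 = 1; finally 1 % 2 = 1.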
@@ -506,7 +493,7 @@ class Tensor : public Data,
AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
std::size_t flatIdx = 0;
std::size_t i = 0;
for(; i < coordIdx.size() - 1; ++i){
for(; i < coordIdx.size() - 1; ++i) {
AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
flatIdx = (flatIdx + coordIdx[i]) * mDims[i + 1];
}
@@ -522,21 +509,24 @@
* @return DimSize_t Storage index
*/
std::size_t getStorageIdx(const std::vector<std::size_t>& coordIdx) const {
for(std::size_t i = 0; i < coordIdx.size(); ++i) {
AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
}
AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
return std::inner_product(coordIdx.begin(), coordIdx.end(), mStrides.begin(), DimSize_t(0));
return std::inner_product(coordIdx.cbegin(), coordIdx.cend(), mStrides.cbegin(), DimSize_t(0));
}
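
// For example, with mStrides = {12, 4, 1}, coordIdx = {1, 1, 1} maps to
// storage index 1*12 + 1*4 + 1*1 = 17; unlike the flat-index computation
// above, this honors custom (non-contiguous) strides.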
/**
* @brief Returns a sub-tensor with equal or lower number of dimensions.
*
* For instance, ``t.extract({1})`` on a CHW tensor will return the HW tensor
* @note For instance, ``t.extract({1})`` on a CHW tensor will return the HW tensor
* of channel #1.
* Likewise, ``t.extract({0, 1})`` on a NCHW tensor will return the HW tensor
* of batch #0 and channel #1.
* No memory copy is performed, the returned tensor does not own the memory.
* If the number of coordinates matches the number of dimensions, an empty
* @note No memory copy is performed, the returned tensor does not own the memory.
* @note If the number of coordinates matches the number of dimensions, a scalar
* tensor is returned.
* If current tensor was contiguous, the returned tensor is guaranteed to be
* @note If current tensor was contiguous, the returned tensor is guaranteed to be
* contiguous as well.
*
* @param coordIdx Coordinates of the sub-tensor to extract
@@ -547,6 +537,8 @@
/**
* @brief Returns a sub-tensor at some coordinate and with some dimension.
*
* @note Data contiguity of the returned Tensor is not guaranteed.
*
* @param coordIdx First coordinates of the sub-tensor to extract
* @param dims Dimensions of the sub-tensor to extract
* @return Tensor Sub-tensor.
@@ -9,9 +9,6 @@
*
********************************************************************************/
#include <vector>
#include <cstddef>
#include "aidge/data/Tensor.hpp"
#include <cstddef>
@@ -21,7 +18,38 @@
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
resize(other.dims(), other.strides());
setDataType(other.dataType(), false); // do not convert existing data
if (other.hasImpl()) {
if (hasImpl()) {
copyFrom(other);
}
else {
// Perform a shallow copy only
setImpl(other.mImpl, other.mImplOffset);
}
}
else {
setImpl(nullptr);
}
return *this;
}
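
// A hedged illustration of these assignment semantics (assumes a registered
// "cpu" backend):
//   Aidge::Tensor src(std::vector<Aidge::DimSize_t>{2, 2});
//   src.setBackend("cpu");
//
//   Aidge::Tensor dst1;   // no implementation yet
//   dst1 = src;           // shallow copy: dst1 shares src's storage
//
//   Aidge::Tensor dst2(std::vector<Aidge::DimSize_t>{2, 2});
//   dst2.setBackend("cpu");
//   dst2 = src;           // deep copy into dst2's existing storage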
void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
// TODO: scalar Tensor not handled
if (dims.empty()) { // scalar
mDims = std::vector<DimSize_t>(0);
mStrides = std::vector<DimSize_t>({1});
mContiguous = true;
computeSize();
if (mImpl) {
mImpl->resize(mDims);
}
return;
}
bool checkContiguous = true;
if (strides.empty()) {
strides.resize(dims.size());
@@ -36,7 +64,7 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto
AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
}
if (mImpl.use_count() > 1) {
if (mImpl && mImpl.use_count() > 1) {
// Here we could also create a new storage for this tensor in this case
// But, is it more likely that the user really wants this, or that they made a mistake?
AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
@@ -48,6 +76,11 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto
mContiguous = true;
if (checkContiguous) {
std::size_t expectedStride = 1;
// std::size_t i = dims.size();
// while ((i-- > 0) && (strides[i] == expectedStride)) {
// mContiguous&= (strides[i] == expectedStride);
// expectedStride*= dims[i];
// }
for (std::size_t i = dims.size()-1; i > 0; --i) {
if (strides[i] != expectedStride) {
mContiguous = false;
@@ -153,26 +186,26 @@ std::string Aidge::Tensor::toString() const {
return res;
}
Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const {
Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& fixedCoord) const {
AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
AIDGE_ASSERT(fixedCoord.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
Tensor subTensor(mDataType);
subTensor.resize(std::vector<size_t>(mDims.begin() + coordIdx.size(), mDims.end()),
std::vector<size_t>(mStrides.begin() + coordIdx.size(), mStrides.end()));
subTensor.resize(std::vector<size_t>(mDims.cbegin() + fixedCoord.size(), mDims.cend()),
std::vector<size_t>(mStrides.cbegin() + fixedCoord.size(), mStrides.cend()));
subTensor.setBackend(mImpl->backend(), mImpl->device().second);
subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx));
subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(fixedCoord));
return subTensor;
}
Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const {
Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& startCoord, const std::vector<std::size_t>& dims) const {
AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
AIDGE_ASSERT(coordIdx.size() == mDims.size(), "Coordinates does not match number of dimensions");
AIDGE_ASSERT(startCoord.size() == mDims.size(), "Coordinates does not match number of dimensions");
Tensor subTensor(mDataType);
subTensor.resize(dims, mStrides);
subTensor.setBackend(mImpl->backend(), mImpl->device().second);
subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx));
subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(startCoord));
return subTensor;
}
@@ -396,3 +429,10 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const
return *fallback;
}
}
std::set<std::string> Aidge::Tensor::getAvailableBackends() {
std::set<std::string> backendsList;
for(const auto& tupleKey : Registrar<Tensor>::getKeys())
backendsList.insert(std::get<0>(tupleKey));
return backendsList;
}
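
A hedged usage sketch (the returned names depend on which backend plugins are loaded; "cpu" is typical):

#include <iostream>
#include <string>

void printBackends() {
    for (const std::string& backend : Aidge::Tensor::getAvailableBackends()) {
        std::cout << backend << '\n';   // e.g. "cpu"
    }
}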