Commit 9743cb8f authored by Maxence Naud

Move two functions to Tensor src instead of header

parent 0a0c4904
Tensor.hpp

@@ -63,7 +63,7 @@ class Tensor : public Data,
      * @brief Construct a new Tensor object from another one (shallow copy).
      * Data memory is not copied, but shared between the new Tensor and the
      * initial one.
      *
      * @param otherTensor
      */
     Tensor(const Tensor&) = default;
@@ -306,7 +306,7 @@ class Tensor : public Data,
     /**
      * @brief Set the Impl object
      *
      * @param impl New impl shared pointer
      * @param implOffset Storage offset in this new impl for this Tensor
      */
@@ -375,7 +375,7 @@ class Tensor : public Data,
      * @param dims New dimensions
      */
     template <std::array<DimSize_t, 1>::size_type DIM> // deducing std::array size_type and declaring DIM accordingly
-    void resize(const std::array<DimSize_t, DIM> &dims) {
+    inline void resize(const std::array<DimSize_t, DIM> &dims) {
         resize(std::vector<DimSize_t>(dims.begin(), dims.end()));
     }
@@ -390,48 +390,7 @@ class Tensor : public Data,
      * @param dims New dimensions
      * @param strides Stride of the tensor (if not specified, "nested" stride is used)
      */
-    void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>()) {
-        bool checkContiguous = true;
-        if (strides.empty()) {
-            strides.resize(dims.size());
-            size_t expectedStride = 1;
-            for (int dim = dims.size() - 1; dim >= 0; --dim) {
-                strides[dim] = expectedStride;
-                expectedStride*= dims[dim];
-            }
-            checkContiguous = false;
-        }
-        else {
-            AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
-        }
-        if (mImpl.use_count() > 1) {
-            // Here we could also create a new storage for this tensor in this case
-            // But, is it more likely that the user really wants this, or that he did a mistake?
-            AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
-        }
-        else {
-            mDims = dims;
-            mStrides = strides;
-            mContiguous = true;
-            if (checkContiguous) {
-                size_t expectedStride = 1;
-                for (int dim = dims.size() - 1; dim >= 0; --dim) {
-                    if (strides[dim] != expectedStride) {
-                        mContiguous = false;
-                        break;
-                    }
-                    expectedStride*= dims[dim];
-                }
-            }
-            computeSize();
-            if (mImpl) {
-                mImpl->resize(mSize);
-            }
-        }
-    }
+    void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>());
     /**
      * @brief Return if the Tensor object has at least one element.
@@ -465,95 +424,7 @@ class Tensor : public Data,
         set<expectedType>(getStorageIdx(coordIdx), value);
     }
-    std::string toString() const {
-        AIDGE_ASSERT(mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) || (mImpl->hostPtr() != nullptr)), "tensor should have a valid host pointer");
-        // TODO: move lambda elsewhere?
-        auto ptrToString = [](DataType dt, void* ptr, size_t idx) {
-            switch (dt) {
-            case DataType::Float64:
-                return std::to_string(static_cast<double*>(ptr)[idx]);
-            case DataType::Float32:
-                return std::to_string(static_cast<float*>(ptr)[idx]);
-            case DataType::Float16:
-                return std::to_string(static_cast<half_float::half*>(ptr)[idx]);
-            case DataType::Int8:
-                return std::to_string(static_cast<int8_t*>(ptr)[idx]);
-            case DataType::Int16:
-                return std::to_string(static_cast<int16_t*>(ptr)[idx]);
-            case DataType::Int32:
-                return std::to_string(static_cast<int32_t*>(ptr)[idx]);
-            case DataType::Int64:
-                return std::to_string(static_cast<int64_t*>(ptr)[idx]);
-            case DataType::UInt8:
-                return std::to_string(static_cast<uint8_t*>(ptr)[idx]);
-            case DataType::UInt16:
-                return std::to_string(static_cast<uint16_t*>(ptr)[idx]);
-            case DataType::UInt32:
-                return std::to_string(static_cast<uint32_t*>(ptr)[idx]);
-            case DataType::UInt64:
-                return std::to_string(static_cast<uint64_t*>(ptr)[idx]);
-            default:
-                AIDGE_ASSERT(true, "unsupported type to convert to string");
-            }
-            return std::string("?"); // To make Clang happy
-        };
-        if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); }
-        std::string res;
-        std::size_t dim = 0;
-        std::size_t counter = 0;
-        if (nbDims()>=2) {
-            std::vector<std::size_t> dimVals(nbDims(), 0);
-            res += "{\n";
-            while (counter < mSize) {
-                std::string spaceString = std::string((dim+1)<<1,' ');
-                if (dim < nbDims()-2) {
-                    if (dimVals[dim] == 0) {
-                        res += spaceString + "{\n";
-                        ++dim;
-                    } else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
-                        res += spaceString + "},\n" + spaceString + "{\n";
-                        ++dim;
-                    } else {
-                        res += spaceString + "}\n";
-                        dimVals[dim--] = 0;
-                        dimVals[dim]++;
-                    }
-                } else {
-                    for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
-                        res += spaceString + "{";
-                        for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
-                            res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + ",";
-                        }
-                        res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + "}";
-                        if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
-                            res += ",";
-                        }
-                        res += "\n";
-                    }
-                    if (dim == 0) {
-                        break;
-                    }
-                    dimVals[dim--] = 0;
-                    dimVals[dim]++;
-                }
-            }
-            for(int i = static_cast<int>(dim); i > 0; --i) {
-                res += std::string((dim+1)<<1,' ') + "}\n";
-            }
-        } else {
-            res += "{";
-            for (DimSize_t j = 0; j < dims()[0]; ++j) {
-                res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + ((j < dims()[0]-1) ? "," : " ");
-            }
-        }
-        res += "}";
-        return res;
-    }
+    std::string toString() const;
     inline void print() const { printf("%s\n", toString().c_str()); }
@@ -621,7 +492,7 @@ class Tensor : public Data,
     }
     /**
-     * Returns a sub-tensor with one or more dimension less.
+     * @brief Returns a sub-tensor with one or more dimensions removed.
      * For instance, t.extract({1}) on a CHW tensor will return the HW tensor
      * of channel #1.
      * Likewise, t.extract({0, 1}) on a NCHW tensor will return the HW tensor
@@ -631,15 +502,15 @@ class Tensor : public Data,
      * tensor is returned.
      * If the current tensor was contiguous, the returned tensor is guaranteed to be
      * contiguous as well.
      *
      * @param coordIdx Coordinates of the sub-tensor to extract
      * @return Tensor Sub-tensor.
      */
     Tensor extract(const std::vector<std::size_t>& coordIdx) const;
     /**
-     * Returns a sub-tensor at some coordinate and with some dimension.
-     *
+     * @brief Returns a sub-tensor at a given coordinate, with given dimensions.
+     *
      * @param coordIdx First coordinates of the sub-tensor to extract
      * @param dims Dimensions of the sub-tensor to extract
      * @return Tensor Sub-tensor.
@@ -647,7 +518,7 @@ class Tensor : public Data,
     Tensor extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const;
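A hedged illustration of the two extract() overloads declared above; the tensor t and its dimensions are assumptions for the example, not part of this commit:

    // t: a contiguous NCHW tensor with dims {16, 3, 224, 224} (assumed setup)
    Aidge::Tensor hw = t.extract({0, 1});            // HW sub-tensor of batch #0, channel #1 -> dims {224, 224}
    Aidge::Tensor patch = t.extract({0, 0, 10, 10},  // sub-tensor starting at coordinates (0, 0, 10, 10)
                                    {1, 1, 64, 64}); // with dims {1, 1, 64, 64}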
     /**
-     * Make the tensor's storage contiguous, if it is not already the case.
+     * @brief Make the tensor's storage contiguous, if it is not already the case.
      * If not contiguous, a new memory space is allocated.
      */
     void makeContiguous();
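A minimal sketch of when makeContiguous() matters, assuming a tensor with a backend already set and resized with non-nested strides (illustrative values):

    t.resize({2, 3}, {1, 2});  // column-major-like strides: isContiguous() becomes false
    t.makeContiguous();        // allocates new storage and repacks elements; no-op if already contiguous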
@@ -704,7 +575,7 @@ class Tensor : public Data,
      * The data type, backend and device stay the same.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
      * will occur if fallback has already been allocated with the right
      * type/size/device.
      * @return Reference to either itself or to fallback.
      */
@@ -782,10 +653,10 @@ class Tensor : public Data,
     }
     /**
-     * Return a reference to a Tensor on desired data type and backend/device:
+     * @brief Return a reference to a Tensor on desired data type and backend/device:
      * - itself, if already with the right characteristics;
      * - the provided Tensor, overwritten with the right characteristics.
-     * NOTE: no data is copy-casted. If it was so in a previous refCastFrom() on
+     * @note no data is copy-casted. If it was so in a previous refCastFrom() on
      * the same fallback, it remains valid, otherwise, data is invalid.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
@@ -800,11 +671,11 @@ class Tensor : public Data,
     const Tensor& ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device = 0) const;
     /**
-     * Return a reference to a Tensor with same characteristics
+     * @brief Return a reference to a Tensor with the same characteristics
      * (data type, backend/device) as targetReqs Tensor:
      * - itself, if already with the right characteristics;
      * - the provided Tensor, overwritten with the right characteristics.
-     * NOTE: no data is copy-casted. If it was so in a previous refCastFrom() on
+     * @note no data is copy-casted. If it was so in a previous refCastFrom() on
      * the same fallback, it remains valid, otherwise, data is invalid.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
@@ -819,7 +690,11 @@ class Tensor : public Data,
     }
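A sketch of the fallback idiom shared by the ref() overloads above; the tensor t is assumed, and the call matches the signature declared in this hunk:

    std::shared_ptr<Aidge::Tensor> fallback;  // may remain empty
    const Aidge::Tensor& r = t.ref(fallback, Aidge::DataType::Float32, "cpu");
    // r is t itself if t is already Float32 on cpu; otherwise r refers to *fallback,
    // which is (re)allocated only if it did not already have the right type/size/device.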
 private:
-    ///\bug not protected against overflow
+    /**
+     * @brief Compute the number of elements in the Tensor.
+     * @note If dimensions are not empty, they are multiplied to get the total number
+     * of elements. Otherwise, the Tensor represents a scalar and contains a single element.
+     */
     void computeSize() {
         mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
     }
...
Tensor.cpp

@@ -9,10 +9,145 @@
  *
  ********************************************************************************/
+#include <vector>
+#include <cstddef>
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
+    bool checkContiguous = true;
+    if (strides.empty()) {
+        strides.resize(dims.size());
+        size_t expectedStride = 1;
+        for (int dim = dims.size() - 1; dim >= 0; --dim) {
+            strides[dim] = expectedStride;
+            expectedStride*= dims[dim];
+        }
+        checkContiguous = false;
+    }
+    else {
+        AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
+    }
+    if (mImpl.use_count() > 1) {
+        // Here we could also create a new storage for this tensor in this case
+        // But, is it more likely that the user really wants this, or that they made a mistake?
+        AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
+    }
+    else {
+        mDims = dims;
+        mStrides = strides;
+        mContiguous = true;
+        if (checkContiguous) {
+            std::size_t expectedStride = 1;
+            for (std::size_t i = dims.size()-1; i > 0; --i) {
+                if (strides[i] != expectedStride) {
+                    mContiguous = false;
+                    break;
+                }
+                expectedStride*= dims[i];
+            }
+            mContiguous &= (strides[0] == expectedStride);
+        }
+        computeSize();
+        if (mImpl) {
+            mImpl->resize(mSize);
+        }
+    }
+}
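A brief usage sketch of the relocated resize(); values are illustrative, and a default-constructed Tensor without a backend is assumed to be acceptable here:

    Aidge::Tensor t;
    t.resize({2, 3, 4});              // strides computed as {12, 4, 1}: contiguous
    t.resize({2, 3, 4}, {12, 4, 1});  // explicit strides matching the nested layout: still contiguous
    t.resize({2, 3, 4}, {1, 2, 6});   // valid stride count, but non-nested: isContiguous() becomes false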
+std::string Aidge::Tensor::toString() const {
+    AIDGE_ASSERT(mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) || (mImpl->hostPtr() != nullptr)), "tensor should have a valid host pointer");
+    // TODO: move lambda elsewhere?
+    auto ptrToString = [](DataType dt, void* ptr, std::size_t idx) {
+        switch (dt) {
+        case DataType::Float64:
+            return std::to_string(static_cast<double*>(ptr)[idx]);
+        case DataType::Float32:
+            return std::to_string(static_cast<float*>(ptr)[idx]);
+        case DataType::Float16:
+            return std::to_string(static_cast<half_float::half*>(ptr)[idx]);
+        case DataType::Int8:
+            return std::to_string(static_cast<int8_t*>(ptr)[idx]);
+        case DataType::Int16:
+            return std::to_string(static_cast<int16_t*>(ptr)[idx]);
+        case DataType::Int32:
+            return std::to_string(static_cast<int32_t*>(ptr)[idx]);
+        case DataType::Int64:
+            return std::to_string(static_cast<int64_t*>(ptr)[idx]);
+        case DataType::UInt8:
+            return std::to_string(static_cast<uint8_t*>(ptr)[idx]);
+        case DataType::UInt16:
+            return std::to_string(static_cast<uint16_t*>(ptr)[idx]);
+        case DataType::UInt32:
+            return std::to_string(static_cast<uint32_t*>(ptr)[idx]);
+        case DataType::UInt64:
+            return std::to_string(static_cast<uint64_t*>(ptr)[idx]);
+        default:
+            AIDGE_ASSERT(false, "unsupported type to convert to string");
+        }
+        return std::string("?"); // To make Clang happy
+    };
+    if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); }
+    std::string res;
+    std::size_t dim = 0;
+    std::size_t counter = 0;
+    if (nbDims()>=2) {
+        std::vector<std::size_t> dimVals(nbDims(), 0);
+        res += "{\n";
+        while (counter < mSize) {
+            std::string spaceString = std::string((dim+1)<<1,' ');
+            if (dim < nbDims()-2) {
+                if (dimVals[dim] == 0) {
+                    res += spaceString + "{\n";
+                    ++dim;
+                } else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
+                    res += spaceString + "},\n" + spaceString + "{\n";
+                    ++dim;
+                } else {
+                    res += spaceString + "}\n";
+                    dimVals[dim--] = 0;
+                    dimVals[dim]++;
+                }
+            } else {
+                for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
+                    res += spaceString + "{";
+                    for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
+                        res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + ",";
+                    }
+                    res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + "}";
+                    if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
+                        res += ",";
+                    }
+                    res += "\n";
+                }
+                if (dim == 0) {
+                    break;
+                }
+                dimVals[dim--] = 0;
+                dimVals[dim]++;
+            }
+        }
+        for(int i = static_cast<int>(dim); i > 0; --i) {
+            res += std::string((dim+1)<<1,' ') + "}\n";
+        }
+    } else {
+        res += "{";
+        for (DimSize_t j = 0; j < dims()[0]; ++j) {
+            res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + ((j < dims()[0]-1) ? "," : " ");
+        }
+    }
+    res += "}";
+    return res;
+}
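A hedged example of the output format produced by the nested-brace logic above; the Array2D initializer and setBackend("cpu") call follow common Aidge usage and are assumptions here, not part of this commit:

    Aidge::Tensor t = Aidge::Array2D<int, 2, 2>{{{1, 2}, {3, 4}}};
    t.setBackend("cpu");  // toString() asserts on a valid host pointer
    t.print();            // prints:
                          // {
                          //   { 1, 2},
                          //   { 3, 4}
                          // }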
 Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
     AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
@@ -52,7 +187,7 @@ void Aidge::Tensor::makeContiguous() {
         // Determine the size of the contiguous chunk
         size_t copySize = 1;
         while (idx + copySize < mSize &&
                getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize)
         {
             ++copySize;
...