Skip to content
Snippets Groups Projects
Commit 6c970d89 authored by Maxence Naud's avatar Maxence Naud
Browse files

[WIP][NF] Start Tensor changes

- [Add] ``zeros()`` member function to set implementation elements to 0
- [unit_tests][NF] Add many more cases in Test_TensorImpl.cpp
- [include] update includes in Tensor.hpp
parent 4fe5e82e
No related branches found
No related tags found
2 merge requests!105version 0.2.0,!89Increase the number of unit-tests for Tensor
Pipeline #39273 failed
...@@ -67,7 +67,7 @@ private: ...@@ -67,7 +67,7 @@ private:
class TensorImpl { class TensorImpl {
public: public:
TensorImpl() = delete; TensorImpl() = delete;
TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims) : mBackend(backend), mDevice(device) TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims) : mBackend(backend), mDevice(device)
{ {
resize(dims); resize(dims);
}; };
...@@ -148,7 +148,7 @@ public: ...@@ -148,7 +148,7 @@ public:
}; };
/** /**
* Set the size, in number of elements, that must be stored. * @brief Set the size, in number of elements, that must be stored.
*/ */
virtual void resize(std::vector<DimSize_t> dims) { virtual void resize(std::vector<DimSize_t> dims) {
size_t product = 1; size_t product = 1;
...@@ -159,14 +159,20 @@ public: ...@@ -159,14 +159,20 @@ public:
} }
/** /**
* Return the number of elements stored. * @brief Return the number of elements stored.
*/ */
inline std::size_t size() const noexcept { return mNbElts; } inline std::size_t size() const noexcept { return mNbElts; }
/** /**
* Return the size (in bytes) of one element (scalar). * @brief Return the size (in bytes) of one element (scalar).
*/ */
virtual std::size_t scalarSize() const noexcept = 0; virtual std::size_t scalarSize() const noexcept = 0;
/**
 * @brief Set every element of the implementation to zero.
 * @note Pure virtual: each backend supplies its own version. Concrete
 *       implementations may first allocate the storage lazily if it does
 *       not exist yet (the cpu backend's zeros() calls lazyInit() when
 *       its buffer is empty).
 */
virtual void zeros() = 0;
constexpr const char *backend() const { return mBackend; } constexpr const char *backend() const { return mBackend; }
virtual ~TensorImpl() = default; virtual ~TensorImpl() = default;
virtual bool operator==(const TensorImpl &othImpl) const = 0; virtual bool operator==(const TensorImpl &othImpl) const = 0;
......
...@@ -53,6 +53,15 @@ public: ...@@ -53,6 +53,15 @@ public:
inline std::size_t scalarSize() const noexcept override final { return sizeof(T); } inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
/**
 * @brief Overwrite every stored element with zero.
 */
void zeros() override final {
    // The storage is created on demand; make sure it exists before writing.
    if (mData.empty()) {
        lazyInit();
    }
    // Set each stored scalar to the zero value of T.
    const std::size_t nbElts = mData.size();
    for (std::size_t idx = 0; idx < nbElts; ++idx) {
        mData.data()[idx] = T(0);
    }
}
void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final { void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
const T* srcT = static_cast<const T *>(src); const T* srcT = static_cast<const T *>(src);
T* dstT = static_cast<T *>(rawPtr(offset)); T* dstT = static_cast<T *>(rawPtr(offset));
......
...@@ -92,7 +92,7 @@ class Tensor : public Data, ...@@ -92,7 +92,7 @@ class Tensor : public Data,
newTensor.makeContiguous(); newTensor.makeContiguous();
} }
else { else {
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims); std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
newImpl->copy(mImpl->rawPtr(mImplOffset), mSize); newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
newTensor.setImpl(newImpl); newTensor.setImpl(newImpl);
} }
...@@ -454,6 +454,15 @@ class Tensor : public Data, ...@@ -454,6 +454,15 @@ class Tensor : public Data,
*/ */
bool empty() const { return mDims.empty(); } bool empty() const { return mDims.empty(); }
/**
 * @brief Set each element of the tensor to zero.
 * No-op when the tensor has no implementation attached.
 * @note NOTE(review): declared const although it overwrites the underlying
 *       storage — const-ness here applies to the tensor's metadata, not
 *       its values; TODO confirm this is the intended contract.
 */
void zeros() const {
    if (!mImpl) {
        return;
    }
    mImpl->zeros();
}
template <typename expectedType> template <typename expectedType>
const expectedType& get(std::size_t idx) const { const expectedType& get(std::size_t idx) const {
AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type"); AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
...@@ -589,7 +598,7 @@ class Tensor : public Data, ...@@ -589,7 +598,7 @@ class Tensor : public Data,
* @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor. * @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor.
* @return std::vector<DimSize_t> * @return std::vector<DimSize_t>
*/ */
std::vector<std::size_t> getCoord(std::size_t flatIdx) const { std::vector<std::size_t> getCoord(const std::size_t flatIdx) const {
std::vector<std::size_t> coordIdx = std::vector<std::size_t>(mDims.size()); std::vector<std::size_t> coordIdx = std::vector<std::size_t>(mDims.size());
std::size_t idx = flatIdx; std::size_t idx = flatIdx;
for (std::size_t i = mDims.size() - 1; i > 0; --i){ for (std::size_t i = mDims.size() - 1; i > 0; --i){
...@@ -635,10 +644,11 @@ class Tensor : public Data, ...@@ -635,10 +644,11 @@ class Tensor : public Data,
} }
/** /**
* Returns a sub-tensor with one or more dimension less. * @brief Returns a sub-tensor with equal or lower number of dimensions.
* For instance, t.extract({1}) on a CHW tensor will return the HW tensor *
* For instance, ``t.extract({1})`` on a CHW tensor will return the HW tensor
* of channel #1. * of channel #1.
* Likewise, t.extract({0, 1}) on a NCHW tensor will return the HW tensor * Likewise, ``t.extract({0, 1})`` on a NCHW tensor will return the HW tensor
* of batch #0 and channel #1. * of batch #0 and channel #1.
* No memory copy is performed, the returned tensor does not own the memory. * No memory copy is performed, the returned tensor does not own the memory.
* If the number of coordinates matches the number of dimensions, an empty * If the number of coordinates matches the number of dimensions, an empty
...@@ -652,7 +662,7 @@ class Tensor : public Data, ...@@ -652,7 +662,7 @@ class Tensor : public Data,
Tensor extract(const std::vector<std::size_t>& coordIdx) const; Tensor extract(const std::vector<std::size_t>& coordIdx) const;
/** /**
* Returns a sub-tensor at some coordinate and with some dimension. * @brief Returns a sub-tensor at some coordinate and with some dimension.
* *
* @param coordIdx First coordinates of the sub-tensor to extract * @param coordIdx First coordinates of the sub-tensor to extract
* @param dims Dimensions of the sub-tensor to extract * @param dims Dimensions of the sub-tensor to extract
......
...@@ -103,11 +103,32 @@ constexpr std::array<T, N + 1> append(T t, std::array<T, N> a) { ...@@ -103,11 +103,32 @@ constexpr std::array<T, N + 1> append(T t, std::array<T, N> a) {
// Generic helper for initializing a Tensor from a 1D C array or a
// brace-enclosed initializer list.
template <typename T, std::size_t SIZE_0>
struct Array1D {
    /**
     * @brief Construct from a brace-enclosed list of values.
     * Copies at most SIZE_0 elements. Iteration also stops at the end of
     * the list, so a list shorter than SIZE_0 no longer dereferences past
     * its end (which was undefined behaviour); trailing elements are then
     * left uninitialized.
     */
    Array1D(std::initializer_list<T> list) {
        auto it = list.begin();
        for (std::size_t i = 0; i < SIZE_0 && it != list.end(); ++i, ++it) {
            data[i] = *it;
        }
    }

    /**
     * @brief Construct from a C array of exactly SIZE_0 elements.
     */
    Array1D(const T (&dataArray)[SIZE_0]) {
        std::copy_n(&dataArray[0], SIZE_0, &data[0]);
    }

    T data[SIZE_0];
};
// Generic helper for initializing a Tensor from a 2D C array or nested
// brace-enclosed initializer lists.
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
struct Array2D {
    /**
     * @brief Construct from nested brace-enclosed lists of values.
     * Copies at most SIZE_0 rows of at most SIZE_1 values each. Iteration
     * also stops at the end of each list, so lists shorter than the
     * declared sizes no longer dereference past their end (which was
     * undefined behaviour); missing elements are left uninitialized.
     */
    Array2D(std::initializer_list<std::initializer_list<T>> list) {
        auto rowIt = list.begin();
        for (std::size_t i = 0; i < SIZE_0 && rowIt != list.end(); ++i, ++rowIt) {
            auto colIt = rowIt->begin();
            for (std::size_t j = 0; j < SIZE_1 && colIt != rowIt->end(); ++j, ++colIt) {
                data[i][j] = *colIt;
            }
        }
    }

    /**
     * @brief Construct from a C array of exactly SIZE_0 x SIZE_1 elements.
     */
    Array2D(const T (&dataArray)[SIZE_0][SIZE_1]) {
        std::copy_n(&dataArray[0][0], SIZE_0 * SIZE_1, &data[0][0]);
    }

    T data[SIZE_0][SIZE_1];
};
......
...@@ -10,8 +10,13 @@ ...@@ -10,8 +10,13 @@
********************************************************************************/ ********************************************************************************/
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/utils/Types.h"
#include <cstddef>
#include <vector>
#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const { Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const {
AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous"); AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
...@@ -46,13 +51,13 @@ void Aidge::Tensor::makeContiguous() { ...@@ -46,13 +51,13 @@ void Aidge::Tensor::makeContiguous() {
// Create a new storage that will be contiguous // Create a new storage that will be contiguous
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims); std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
// Copy elements from old to new storage // Copy elements from old to new storage
size_t idx = 0; std::size_t idx = 0;
while (idx < mSize) { while (idx < mSize) {
const size_t storageIdx = getStorageIdx(getCoord(idx)); const std::size_t storageIdx = getStorageIdx(getCoord(idx));
// Determine the size of the contiguous chunk // Determine the size of the contiguous chunk
size_t copySize = 1; std::size_t copySize = 1;
while (idx + copySize < mSize && while (idx + copySize < mSize &&
getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize) getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize)
{ {
++copySize; ++copySize;
......
...@@ -10,6 +10,10 @@ ...@@ -10,6 +10,10 @@
********************************************************************************/ ********************************************************************************/
#include <array> #include <array>
#include <cstddef>
#include <cstdint> //std::uint16_t
#include <random>
#include <vector>
#include <catch2/catch_test_macros.hpp> #include <catch2/catch_test_macros.hpp>
...@@ -19,82 +23,180 @@ ...@@ -19,82 +23,180 @@
using namespace Aidge; using namespace Aidge;
TEST_CASE("Tensor creation") { TEST_CASE("[backend/cpu/data] Tensor", "[Tensor]") {
SECTION("from const array") { Tensor x;
Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
SECTION("TensorUtils, constructor from const arrays") {
Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; // construction from different types and sizes
REQUIRE_NOTHROW(x = Array1D<int, 2>{{1, 2}});
x.print();
REQUIRE_NOTHROW(x = Array2D<int, 2, 2>{{{1, 2}, {3, 4}}});
x.print();
REQUIRE_NOTHROW(x = Array3D<std::uint8_t, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
REQUIRE_NOTHROW(x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
REQUIRE_NOTHROW(x = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}});
REQUIRE_NOTHROW(x = Array3D<double, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}});
REQUIRE_NOTHROW(x = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},
{{{9,10}, {11,12}}, {{13,14},{15,16}}}}});
}
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
Tensor xFloat =
Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
SECTION("Tensor features") { SECTION("Tensor features") {
REQUIRE(x.nbDims() == 3); x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE(x.dims()[0] == 2); REQUIRE(x.nbDims() == 3);
REQUIRE(x.dims()[1] == 2); REQUIRE(x.dims()[0] == 2);
REQUIRE(x.dims()[2] == 2); REQUIRE(x.dims()[1] == 2);
REQUIRE(x.size() == 8); REQUIRE(x.dims()[2] == 2);
REQUIRE(x.size() == 8);
} }
SECTION("Access to array") { SECTION("Access to array") {
REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1); x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8); REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
} }
SECTION("get function") { SECTION("get function") {
REQUIRE(x.get<int>({0, 0, 0}) == 1); x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE(x.get<int>({0, 0, 1}) == 2); REQUIRE(x.get<int>({0, 0, 0}) == 1);
REQUIRE(x.get<int>({0, 1, 1}) == 4); REQUIRE(x.get<int>({0, 0, 1}) == 2);
REQUIRE(x.get<int>({1, 1, 0}) == 7); REQUIRE(x.get<int>({0, 1, 1}) == 4);
x.set<int>({1, 1, 1}, 36); REQUIRE(x.get<int>({1, 1, 0}) == 7);
REQUIRE(x.get<int>({1, 1, 1}) == 36); x.set<int>({1, 1, 1}, 36);
REQUIRE(x.get<int>(7) == 36);
x.set<int>(7, 40);
REQUIRE(x.get<int>({1, 1, 1}) == 40);
} }
SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); } SECTION("Pretty printing for debug") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE_NOTHROW(x.print());
}
SECTION("Tensor (in)equality") { SECTION("Tensor (in)equality") {
REQUIRE(x == xCopy); Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE_FALSE(x == xFloat); Tensor xFloat = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}};
REQUIRE(x == xCopy);
REQUIRE_FALSE(x == xFloat);
} }
}
}
TEST_CASE("Tensor methods") { constexpr std::uint16_t NBTRIALS = 10;
Tensor x = Array3D<int, 2, 2, 2>{{
{{1, 2}, // Create a random number generator
{3, 4}}, std::random_device rd;
{{5, 6}, std::mt19937 gen(rd());
{7, 8}} std::uniform_int_distribution<std::size_t> dist(1, 10);
}}; std::uniform_int_distribution<std::size_t> nbDims(1, 5);
Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; x.setDataType(DataType::Int32);
x.setBackend("cpu");
Tensor xFloat =
Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}}; SECTION("Tensor sharing") {
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
SECTION("Tensor sharing") { // create Tensor
Tensor xCopyCtor(x); const std::size_t nb_dims = nbDims(gen) + 1;
REQUIRE(xCopyCtor.getImpl() == x.getImpl()); std::vector<std::size_t> dims(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
Tensor xEqOp = x; dims[i] = dist(gen);
REQUIRE(xEqOp.getImpl() == x.getImpl()); }
x.resize(dims);
Tensor xCloned = x.clone();
REQUIRE(xCloned.getImpl() != x.getImpl()); // copy constructor
REQUIRE(xCloned == x); Tensor xCopyCtor(x);
} REQUIRE(xCopyCtor.getImpl() == x.getImpl());
SECTION("Tensor extract") { // copy assignment operator
Tensor y = x.extract({0, 1}); Tensor xCopyAssignmentOp = x;
REQUIRE(y.getImpl() == x.getImpl()); REQUIRE(xCopyAssignmentOp.getImpl() == x.getImpl());
REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
REQUIRE(y.isContiguous()); Tensor xCloned = x.clone();
REQUIRE(xCloned.getImpl() != x.getImpl());
Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1}); REQUIRE(xCloned == x);
REQUIRE(y2.getImpl() == x.getImpl()); }
REQUIRE(!y2.isContiguous()); }
Tensor y3 = y2.clone(); SECTION("zeros()") {
REQUIRE(y3.isContiguous()); for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}})); // create Tensor
} constexpr std::size_t nb_dims = 3;
// Draw a random shape for the tensor (each dim in [1, 5]).
const std::size_t dim0 = nbDims(gen);
const std::size_t dim1 = nbDims(gen);
const std::size_t dim2 = nbDims(gen);
const std::vector<std::size_t> dims = {dim0, dim1, dim2};
// NOTE(review): variable-length arrays are a compiler extension, not
// standard C++; array0 is also filled below but never read afterwards —
// dead code, consider removing it.
int array0[dim0][dim1][dim2];
for (std::size_t i = 0; i < dim0; ++i) {
for (std::size_t j = 0; j < dim1; ++j) {
for (std::size_t k = 0; k < dim2; ++k) {
array0[i][j][k] = dist(gen);
}
}
}
x.resize(dims);
x.zeros();
// Check every element through the host pointer, walking the tensor in
// flat row-major order (idx is the offset of row (i, j)).
for (std::size_t i = 0; i < dim0; ++i) {
for (std::size_t j = 0; j < dim1; ++j) {
const std::size_t idx = (i * dim1 + j) * dim2;
for (std::size_t k = 0; k < dim2; ++k) {
int val = *static_cast<int*>(x.getImpl()->hostPtr(idx + k));
if (val != 0) {
// Plain throw instead of REQUIRE — presumably to avoid the
// per-element overhead of the Catch2 macro (the commented-out
// REQUIRE below is the idiomatic but slower alternative) —
// TODO confirm; a throw here fails the test without context.
throw std::runtime_error("Value should be 0");
}
// REQUIRE(*static_cast<int*>(x.getImpl()->hostPtr(idx + k)) == 0);
}
}
}
}
}
SECTION("Tensor extract") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
Tensor y;
Tensor y0;
Tensor y1;
Tensor y2;
Tensor y3;
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// create Tensor
// NOTE(review): nb_dims is never used below — dead code.
const std::size_t nb_dims = 3;
const std::size_t dim0 = nbDims(gen) + 1; // dim0 >= 2
const std::size_t dim1 = nbDims(gen) + 1;
const std::size_t dim2 = nbDims(gen) + 1;
std::vector<std::size_t> dims = {dim0, dim1, dim2};
// NOTE(review): variable-length arrays are a compiler extension, not
// standard C++; array0 is filled but never read afterwards — the random
// values are never copied into x (see the note on the value REQUIREs
// further down).
int array0[dim0][dim1][dim2];
for (std::size_t i = 0; i < dim0; ++i) {
for (std::size_t j = 0; j < dim1; ++j) {
for (std::size_t k = 0; k < dim2; ++k) {
array0[i][j][k] = dist(gen);
}
}
}
x.resize(dims);
REQUIRE(x.isContiguous());
// extract Tensor slice from one set of coordinates
// NOTE(review): nbDims(gen) draws from [1, 5] while each dim is only
// guaranteed >= 2, so these coordinates can be out of range and the
// NOTHROW checks can spuriously fail — verify the intended ranges.
REQUIRE_NOTHROW(y0 = x.extract({}));
REQUIRE_NOTHROW(y1 = x.extract({nbDims(gen)}));
REQUIRE_NOTHROW(y2 = x.extract({nbDims(gen), nbDims(gen)}));
REQUIRE_NOTHROW(y3 = x.extract({nbDims(gen), nbDims(gen), nbDims(gen)}));
// NOTE(review): dim0 + 1 is not guaranteed to exceed dim1 (both are
// drawn from the same range), so this out-of-range check can spuriously
// fail — a coordinate derived from dim1 itself would be reliable.
REQUIRE_THROWS(y = x.extract({0, dim0 + 1, 0}));
REQUIRE_NOTHROW(y = x.extract({0, 1}));
REQUIRE(y.getImpl() == x.getImpl()); // shared implem
REQUIRE(!y.isContiguous());
Tensor yClone = y.clone(); // when copying data, they are contiguous in memory
REQUIRE(yClone.isContiguous());
// int yTruth[2][1][1] =
// NOTE(review): x now has random dims, but its values were only set for
// the original 2x2x2 shape — these fixed-value expectations look wrong.
// Also, !y.isContiguous() above contradicts y.isContiguous() below for
// the same extract({0, 1}); verify against the intended behaviour
// (the pipeline for this commit failed).
REQUIRE(approxEq<int>(yClone, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
y = x.extract({0, 1});
REQUIRE(y.getImpl() == x.getImpl());
REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
REQUIRE(y.isContiguous());
}
}
} }
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment