Commit 3cd6469a authored by Maxence Naud

[WIP][NF] Start Tensor changes

- [Add] ``zeros()`` member function to set implementation elements to 0
- [unit_tests][NF] Add many more cases in Test_TensorImpl.cpp
- [include] update includes in Tensor.hpp
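
A minimal usage sketch of the new ``zeros()`` entry point, mirroring the flow exercised by the updated unit tests (assumes a registered "cpu" backend):

    #include "aidge/data/Tensor.hpp"

    using namespace Aidge;

    Tensor t;
    t.setDataType(DataType::Int32);
    t.setBackend("cpu");
    t.resize({2, 3, 4});
    t.zeros(); // every element of the backing implementation is now 0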
parent 8f9ea42d
@@ -171,21 +171,27 @@ public:
 };
 
     /**
-     * Set the size, in number of elements, that must be stored.
+     * @brief Set the size, in number of elements, that must be stored.
      */
     virtual void resize(std::vector<DimSize_t> dims) {
         mNbElts = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
     }
 
     /**
-     * Return the number of elements stored.
+     * @brief Return the number of elements stored.
      */
     inline std::size_t size() const noexcept { return mNbElts; }
 
     /**
-     * Return the size (in bytes) of one element (scalar).
+     * @brief Return the size (in bytes) of one element (scalar).
      */
     virtual std::size_t scalarSize() const noexcept = 0;
 
+    /**
+     * @brief Set every element of the implementation to zero.
+     */
+    virtual void zeros() = 0;
+
     constexpr const char *backend() const { return mBackend; }
 
     /**
...
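For reference, ``resize()`` above defines the element count as the product of the dimensions, so an empty ``dims`` vector yields 1 (a scalar). A standalone sketch of the same ``std::accumulate`` idiom:

    #include <cstddef>
    #include <functional>
    #include <numeric>
    #include <vector>

    // Product of dimensions, as computed by TensorImpl::resize():
    // {} -> 1, {2, 3, 4} -> 24.
    std::size_t nbElements(const std::vector<std::size_t>& dims) {
        return std::accumulate(dims.cbegin(), dims.cend(),
                               std::size_t(1), std::multiplies<std::size_t>());
    }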
@@ -53,6 +53,15 @@ public:
     inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
 
+    void zeros() override final {
+        if (mData.empty()) {
+            lazyInit();
+        }
+        for (std::size_t i = 0; i < mData.size(); ++i) {
+            *(mData.data() + i) = T(0);
+        }
+    }
+
     void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
         const T* srcT = static_cast<const T *>(src);
         T* dstT = static_cast<T *>(rawPtr(offset));
...
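The index loop in ``zeros()`` above is equivalent to a single ``std::fill``; a possible alternative sketch, assuming only the ``mData.empty()``/``mData.size()``/``mData.data()`` interface used above (needs <algorithm>):

    void zeros() override final {
        if (mData.empty()) {
            lazyInit();
        }
        // Same effect as the element-wise loop: fill the raw buffer with T(0).
        std::fill(mData.data(), mData.data() + mData.size(), T(0));
    }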
@@ -425,6 +425,15 @@ class Tensor : public Data,
      */
     bool empty() const { return mDims.empty(); }
 
+    /**
+     * @brief Set each element of the tensor to zero.
+     */
+    void zeros() const {
+        if (mImpl) {
+            mImpl->zeros();
+        }
+    }
+
     template <typename expectedType>
     const expectedType& get(std::size_t idx) const {
         AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
@@ -472,7 +481,7 @@
      * @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor.
      * @return std::vector<DimSize_t>
      */
-    std::vector<std::size_t> getCoord(std::size_t flatIdx) const {
+    std::vector<std::size_t> getCoord(const std::size_t flatIdx) const {
         std::vector<std::size_t> coordIdx = std::vector<std::size_t>(mDims.size());
         std::size_t idx = flatIdx;
         for (std::size_t i = mDims.size() - 1; i > 0; --i){
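A worked example of ``getCoord()``: for ``mDims = {2, 3, 4}``, the flat index 17 = ((1 * 3 + 1) * 4) + 1, so the loop peels off 17 % 4 = 1, then 4 % 3 = 1, leaving 1 as the first coordinate, and returns {1, 1, 1}. A standalone sketch of the same arithmetic:

    #include <cstddef>
    #include <vector>

    // Modulo/division from the innermost dimension outwards,
    // mirroring Tensor::getCoord() above.
    std::vector<std::size_t> toCoord(const std::vector<std::size_t>& dims,
                                     std::size_t flatIdx) {
        std::vector<std::size_t> coord(dims.size());
        for (std::size_t i = dims.size(); i > 1; --i) {
            coord[i - 1] = flatIdx % dims[i - 1];
            flatIdx /= dims[i - 1];
        }
        if (!dims.empty()) {
            coord[0] = flatIdx;
        }
        return coord;
    }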
@@ -518,10 +527,11 @@
     }
 
     /**
-     * @brief Returns a sub-tensor with one or more dimension less.
-     * For instance, t.extract({1}) on a CHW tensor will return the HW tensor
+     * @brief Returns a sub-tensor with equal or lower number of dimensions.
+     *
+     * For instance, ``t.extract({1})`` on a CHW tensor will return the HW tensor
      * of channel #1.
-     * Likewise, t.extract({0, 1}) on a NCHW tensor will return the HW tensor
+     * Likewise, ``t.extract({0, 1})`` on a NCHW tensor will return the HW tensor
      * of batch #0 and channel #1.
      * No memory copy is performed, the returned tensor does not own the memory.
      * If the number of coordinates matches the number of dimensions, an empty
...
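A usage sketch of the documented ``extract()`` semantics (values taken from the 2x2x2 tensor used throughout the tests):

    Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
    Tensor hw  = x.extract({1});    // one dimension less: dims {2, 2}, values {{5, 6}, {7, 8}}
    Tensor row = x.extract({0, 1}); // dims {2}, values {3, 4}
    // No copy is made: row.getImpl() == x.getImpl()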
@@ -103,11 +103,32 @@ constexpr std::array<T, N + 1> append(T t, std::array<T, N> a) {
 
 // Generic helper for initializing a Tensor
 template <typename T, std::size_t SIZE_0>
 struct Array1D {
+    Array1D(std::initializer_list<T> list) {
+        auto it = list.begin();
+        for (std::size_t i = 0; i < SIZE_0; ++i, ++it) {
+            data[i] = *it;
+        }
+    }
+    Array1D(const T (&dataArray)[SIZE_0]) {
+        std::copy_n(&dataArray[0], SIZE_0, &data[0]);
+    }
     T data[SIZE_0];
 };
 
 template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
 struct Array2D {
+    Array2D(std::initializer_list<std::initializer_list<T>> list) {
+        auto it1 = list.begin();
+        for (std::size_t i = 0; i < SIZE_0; ++i, ++it1) {
+            auto it2 = it1->begin();
+            for (std::size_t j = 0; j < SIZE_1; ++j, ++it2) {
+                data[i][j] = *it2;
+            }
+        }
+    }
+    Array2D(const T (&dataArray)[SIZE_0][SIZE_1]) {
+        std::copy_n(&dataArray[0][0], SIZE_0 * SIZE_1, &data[0][0]);
+    }
     T data[SIZE_0][SIZE_1];
 };
...
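With the added constructors, both the nested ``std::initializer_list`` form and the const C-array form compile; a usage sketch matching what the updated tests exercise:

    Array1D<int, 2> a1{{1, 2}};
    Array2D<int, 2, 2> a2{{{1, 2}, {3, 4}}};
    Tensor t = Array2D<int, 2, 2>{{{1, 2}, {3, 4}}};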
@@ -13,8 +13,13 @@
-#include <cstddef>
-#include "aidge/data/Tensor.hpp"
-#include "aidge/utils/Types.h"
+#include <cstddef>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
 
 void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
     bool checkContiguous = true;
@@ -181,12 +186,12 @@ void Aidge::Tensor::makeContiguous() {
         // Create a new storage that will be contiguous
         std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
         // Copy elements from old to new storage
-        size_t idx = 0;
+        std::size_t idx = 0;
         while (idx < mSize) {
-            const size_t storageIdx = getStorageIdx(getCoord(idx));
+            const std::size_t storageIdx = getStorageIdx(getCoord(idx));
 
             // Determine the size of the contiguous chunk
-            size_t copySize = 1;
+            std::size_t copySize = 1;
             while (idx + copySize < mSize &&
                    getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize)
             {
...
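The rewritten loop above copies runs of elements whose storage indices are consecutive instead of one element at a time. A standalone sketch of the chunk-length computation, with ``storageIdx`` standing in for the ``getStorageIdx(getCoord(...))`` mapping:

    #include <cstddef>
    #include <functional>

    // Length of the contiguous run starting at logical index idx:
    // grow the chunk while storage indices stay consecutive.
    std::size_t chunkLength(std::size_t idx, std::size_t size,
                            const std::function<std::size_t(std::size_t)>& storageIdx) {
        const std::size_t base = storageIdx(idx);
        std::size_t copySize = 1;
        while (idx + copySize < size &&
               storageIdx(idx + copySize) == base + copySize) {
            ++copySize;
        }
        return copySize;
    }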
@@ -10,6 +10,10 @@
  ********************************************************************************/
 
 #include <array>
+#include <cstddef>
+#include <cstdint> //std::uint16_t
+#include <random>
+#include <vector>
 
 #include <catch2/catch_test_macros.hpp>
@@ -19,42 +19,56 @@
 using namespace Aidge;
 
-TEST_CASE("[core/data] Tensor creation") {
-  SECTION("from const array") {
-    Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+TEST_CASE("[backend/cpu/data] Tensor", "[Tensor]") {
+  Tensor x;
 
-    Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+  SECTION("TensorUtils, constructor from const arrays") {
+    // construction from different types and sizes
+    REQUIRE_NOTHROW(x = Array1D<int, 2>{{1, 2}});
+    x.print();
+    REQUIRE_NOTHROW(x = Array2D<int, 2, 2>{{{1, 2}, {3, 4}}});
+    x.print();
+    REQUIRE_NOTHROW(x = Array3D<std::uint8_t, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
+    REQUIRE_NOTHROW(x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
+    REQUIRE_NOTHROW(x = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}});
+    REQUIRE_NOTHROW(x = Array3D<double, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}});
+    REQUIRE_NOTHROW(x = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},
+                                                  {{{9,10}, {11,12}}, {{13,14},{15,16}}}}});
+  }
+  x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
 
-    Tensor xFloat =
-        Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
 
   SECTION("Tensor features") {
-    REQUIRE(x.nbDims() == 3);
-    REQUIRE(x.dims()[0] == 2);
-    REQUIRE(x.dims()[1] == 2);
-    REQUIRE(x.dims()[2] == 2);
-    REQUIRE(x.size() == 8);
+    x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    REQUIRE(x.nbDims() == 3);
+    REQUIRE(x.dims()[0] == 2);
+    REQUIRE(x.dims()[1] == 2);
+    REQUIRE(x.dims()[2] == 2);
+    REQUIRE(x.size() == 8);
   }
 
   SECTION("Access to array") {
-    REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
-    REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
+    x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
+    REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
   }
 
   SECTION("get function") {
-    REQUIRE(x.get<int>({0, 0, 0}) == 1);
-    REQUIRE(x.get<int>({0, 0, 1}) == 2);
-    REQUIRE(x.get<int>({0, 1, 1}) == 4);
-    REQUIRE(x.get<int>({1, 1, 0}) == 7);
-    x.set<int>({1, 1, 1}, 36);
-    REQUIRE(x.get<int>({1, 1, 1}) == 36);
+    x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    REQUIRE(x.get<int>({0, 0, 0}) == 1);
+    REQUIRE(x.get<int>({0, 0, 1}) == 2);
+    REQUIRE(x.get<int>({0, 1, 1}) == 4);
+    REQUIRE(x.get<int>({1, 1, 0}) == 7);
+    x.set<int>({1, 1, 1}, 36);
+    REQUIRE(x.get<int>(7) == 36);
+    x.set<int>(7, 40);
+    REQUIRE(x.get<int>({1, 1, 1}) == 40);
  }
 
-  SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); }
-
-  SECTION("Tensor (in)equality") {
-    REQUIRE(x == xCopy);
-    REQUIRE_FALSE(x == xFloat);
+  SECTION("Pretty printing for debug") {
+    x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    REQUIRE_NOTHROW(x.print());
   }
-  }
 }
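The flat accessors ``x.get<int>(7)`` / ``x.set<int>(7, 40)`` exercised above rely on row-major flattening: for coordinates {i, j, k} in a tensor of dims {dim0, dim1, dim2}, idx = (i * dim1 + j) * dim2 + k, so {1, 1, 1} in the 2x2x2 tensor is flat index 7. A compile-time check of that arithmetic:

    #include <cstddef>

    constexpr std::size_t flatIdx(std::size_t i, std::size_t j, std::size_t k,
                                  std::size_t dim1, std::size_t dim2) {
        return (i * dim1 + j) * dim2 + k;
    }
    static_assert(flatIdx(1, 1, 1, 2, 2) == 7, "{1,1,1} maps to flat index 7");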
@@ -96,32 +114,155 @@ TEST_CASE("[core/data] Tensor methods","[Tensor]") {
-  Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
-  Tensor xFloat =
-      Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
+  SECTION("Tensor (in)equality") {
+    Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    Tensor xFloat = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}};
+    REQUIRE(x == xCopy);
+    REQUIRE_FALSE(x == xFloat);
+  }
 
-  SECTION("Tensor sharing") {
-    Tensor xCopyCtor(x);
-    REQUIRE(xCopyCtor.getImpl() == x.getImpl());
+  constexpr std::uint16_t NBTRIALS = 10;
 
-    Tensor xEqOp = x;
-    REQUIRE(xEqOp.getImpl() == x.getImpl());
+  // Create a random number generator
+  std::random_device rd;
+  std::mt19937 gen(rd());
+  std::uniform_int_distribution<std::size_t> dist(1, 10);
+  std::uniform_int_distribution<std::size_t> nbDims(1, 5);
 
-    Tensor xCloned = x.clone();
-    REQUIRE(xCloned.getImpl() != x.getImpl());
-    REQUIRE(xCloned == x);
-  }
+  x.setDataType(DataType::Int32);
+  x.setBackend("cpu");
 
+  SECTION("Tensor sharing") {
+    for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+      // create Tensor
+      const std::size_t nb_dims = nbDims(gen) + 1;
+      std::vector<std::size_t> dims(nb_dims);
+      for (std::size_t i = 0; i < nb_dims; ++i) {
+        dims[i] = dist(gen);
+      }
+      x.resize(dims);
+
+      // copy constructor
+      Tensor xCopyCtor(x);
+      REQUIRE(xCopyCtor.getImpl() == x.getImpl());
+
+      // copy assignment operator
+      Tensor xCopyAssignmentOp = x;
+      REQUIRE(xCopyAssignmentOp.getImpl() == x.getImpl());
+
+      Tensor xCloned = x.clone();
+      REQUIRE(xCloned.getImpl() != x.getImpl());
+      REQUIRE(xCloned == x);
+    }
+  }
+
+  SECTION("zeros()") {
+    for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+      // create Tensor
+      constexpr std::size_t nb_dims = 3;
+      const std::size_t dim0 = nbDims(gen);
+      const std::size_t dim1 = nbDims(gen);
+      const std::size_t dim2 = nbDims(gen);
+      const std::vector<std::size_t> dims = {dim0, dim1, dim2};
+      int array0[dim0][dim1][dim2];
+      for (std::size_t i = 0; i < dim0; ++i) {
+        for (std::size_t j = 0; j < dim1; ++j) {
+          for (std::size_t k = 0; k < dim2; ++k) {
+            array0[i][j][k] = dist(gen);
+          }
+        }
+      }
+      x.resize(dims);
+      x.zeros();
+      for (std::size_t i = 0; i < dim0; ++i) {
+        for (std::size_t j = 0; j < dim1; ++j) {
+          const std::size_t idx = (i * dim1 + j) * dim2;
+          for (std::size_t k = 0; k < dim2; ++k) {
+            int val = *static_cast<int*>(x.getImpl()->hostPtr(idx + k));
+            if (val != 0) {
+              throw std::runtime_error("Value should be 0");
+            }
+            // REQUIRE(*static_cast<int*>(x.getImpl()->hostPtr(idx + k)) == 0);
+          }
+        }
+      }
+    }
+  }
 
+  SECTION("Tensor extract") {
+    x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    Tensor y;
+    Tensor y0;
+    Tensor y1;
+    Tensor y2;
+    Tensor y3;
+    for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+      // create Tensor
+      const std::size_t nb_dims = 3;
+      const std::size_t dim0 = nbDims(gen) + 1; // dim0 >= 2
+      const std::size_t dim1 = nbDims(gen) + 1;
+      const std::size_t dim2 = nbDims(gen) + 1;
+      std::vector<std::size_t> dims = {dim0, dim1, dim2};
+      int array0[dim0][dim1][dim2];
+      for (std::size_t i = 0; i < dim0; ++i) {
+        for (std::size_t j = 0; j < dim1; ++j) {
+          for (std::size_t k = 0; k < dim2; ++k) {
+            array0[i][j][k] = dist(gen);
+          }
+        }
+      }
+      x.resize(dims);
+      REQUIRE(x.isContiguous());
+
+      // extract Tensor slice from one set of coordinates
+      REQUIRE_NOTHROW(y0 = x.extract({}));
+      REQUIRE_NOTHROW(y1 = x.extract({nbDims(gen)}));
+      REQUIRE_NOTHROW(y2 = x.extract({nbDims(gen), nbDims(gen)}));
+      REQUIRE_NOTHROW(y3 = x.extract({nbDims(gen), nbDims(gen), nbDims(gen)}));
+      REQUIRE_THROWS(y = x.extract({0, dim0 + 1, 0}));
 
-  SECTION("Tensor extract") {
-    Tensor y = x.extract({0, 1});
-    REQUIRE(y.getImpl() == x.getImpl());
-    REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
-    REQUIRE(y.isContiguous());
-
-    Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
-    REQUIRE(y2.getImpl() == x.getImpl());
-    REQUIRE(!y2.isContiguous());
-    Tensor y3 = y2.clone();
-    REQUIRE(y3.isContiguous());
-    REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
-  }
 
+      REQUIRE_NOTHROW(y = x.extract({0, 1}));
+      REQUIRE(y.getImpl() == x.getImpl()); // shared implem
+      REQUIRE(!y.isContiguous());
+      Tensor yClone = y.clone(); // when copying data, they are contiguous in memory
+      REQUIRE(yClone.isContiguous());
+      // int yTruth[2][1][1] =
+      REQUIRE(approxEq<int>(yClone, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
+
+      y = x.extract({0, 1});
+      REQUIRE(y.getImpl() == x.getImpl());
+      REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
+      REQUIRE(y.isContiguous());
+    }
+  }
 }
TEST_CASE("Tensor fill") {
SECTION("Instantiate batches independantly") {
// initialization with 0s
std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{});
//concatenatedTensor->print();
std::shared_ptr<Tensor> myTensor1 = std::make_shared<Tensor>(Array1D<int, 5>{{1,2,3,4,5}});
std::shared_ptr<Tensor> myTensor2 = std::make_shared<Tensor>(Array1D<int, 5>{{6,7,8,9,10}});
std::shared_ptr<Tensor> myTensor3 = std::make_shared<Tensor>(Array1D<int, 5>{{11,12,13,14,15}});
// use copy function from implementation
concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0);
concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5);
concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10);
// concatenatedTensor->print();
std::shared_ptr<Tensor> expectedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{
{{1,2,3,4,5},
{6,7,8,9,10},
{11,12,13,14,15}}
});
// expectedTensor->print();
REQUIRE(*concatenatedTensor == *expectedTensor);
}
}
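
The offsets passed to ``copy()`` above place each 5-element row at flat offset row * 5 in the 3x5 destination; the three calls could be condensed into a loop (``rows`` is a hypothetical array holding the three tensors):

    // Hypothetical helper: rows holds the three 1x5 tensors shown above.
    std::array<std::shared_ptr<Tensor>, 3> rows = {myTensor1, myTensor2, myTensor3};
    for (std::size_t r = 0; r < rows.size(); ++r) {
        // Row r of the 3x5 destination starts at flat offset r * 5.
        concatenatedTensor->getImpl()->copy(rows[r]->getImpl()->rawPtr(), 5, r * 5);
    }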