Commit 9cc2ed07 authored by Maxence Naud

Merge branch 'improve_tensor_coverage' into create_optimizer

parents 82d8d093 dd27a424
3 merge requests: !105 "version 0.2.0", !88 "Basic supervised learning", !82 "Resolve 'Optimizer to update gradients'"
Pipeline #40318 failed
@@ -191,7 +191,7 @@ public:
* @brief Set every element of the implementation to zero.
*/
virtual void zeros() {
printf("Not implemented yet");
AIDGE_THROW_OR_ABORT(std::runtime_error, "Function not implemented");
}
constexpr const char *backend() const { return mBackend; }
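For context, an error macro of this kind typically throws when exceptions are enabled and calls abort() otherwise. A minimal sketch of the pattern (the actual AIDGE_THROW_OR_ABORT definition lives in Aidge's utility headers and may differ):

#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for AIDGE_THROW_OR_ABORT: throw if compiled with
// exceptions, otherwise print the message and abort.
#ifdef __cpp_exceptions
#define THROW_OR_ABORT(exception, msg) throw exception(msg)
#else
#define THROW_OR_ABORT(exception, msg) \
    do { std::fprintf(stderr, "%s\n", msg); std::abort(); } while (false)
#endif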
@@ -12,10 +12,12 @@
#ifndef AIDGE_CORE_DATA_TENSOR_H_
#define AIDGE_CORE_DATA_TENSOR_H_
#include <cstddef> // std::size_t
#include <cstring>
#include <functional> // std::multiplies
#include <set>
#include <memory>
#include <numeric>     // std::accumulate
#include <string>
#include <type_traits> // std::is_arithmetic
#include <vector>
@@ -35,15 +37,17 @@ namespace Aidge {
class Tensor : public Data,
public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
private:
DataType mDataType; /** enum to specify data type. */
DataType mDataType = DataType::Float32; /** enum to specify data type. */
std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
std::vector<DimSize_t> mStrides; /** Stride dimensions of the tensor. */
std::shared_ptr<TensorImpl> mImpl; /** Pointer to the actual data implementation. */
std::shared_ptr<TensorImpl> mImpl = nullptr; /** Pointer to the actual data implementation. */
std::size_t mImplOffset = 0;
std::shared_ptr<Tensor> mGrad; /** Pointer to the associated gradient Tensor instance. */
std::shared_ptr<Tensor> mGrad = nullptr; /** Pointer to the associated gradient Tensor instance. */
// Cached data
std::size_t mSize = 0; /** Number of elements in the Tensor. */
/// @brief Number of elements in the Tensor.
std::size_t mSize;
/// @brief Whether or not data are contiguous in memory.
bool mContiguous = true;
public:
@@ -51,64 +55,48 @@ class Tensor : public Data,
/**
* @brief Construct a new empty Tensor object.
* @param dtype Sets the type of inserted data.
* The resulting Tensor has the features of an undefined scalar.
*/
Tensor(DataType dataType = DataType::Float32)
Tensor(DataType dtype = DataType::Float32)
: Data(Type),
mDataType(dataType)
mDataType(dtype),
mDims(std::vector<DimSize_t>({})),
mStrides({1}),
mSize(1)
{
// ctor
}
/**
* @brief Construct a new Tensor object from dimensions.
* @brief Construct a new Tensor object from an arithmetic parameter.
*
* @param dims dimensions of the tensor
* @param dataType datatype of the tensor (default = DataType::Float32)
* @tparam T Type of the input parameter.
* @tparam VT Decayed type of the input parameter.
* @param val Input value.
*/
Tensor(const std::vector<DimSize_t>& dims, DataType dataType = DataType::Float32)
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
Tensor(T val)
: Data(Type),
mDataType(dataType),
mDims(dims)
mDataType(NativeType<VT>::type),
mDims({}),
mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
mSize(1)
{
computeSize();
*static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
}
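A short usage sketch for this scalar constructor; it assumes NativeType<int>::type maps to DataType::Int32 and NativeType<float>::type to DataType::Float32, which the tests below rely on:

#include <cassert>
#include "aidge/data/Tensor.hpp"

void scalarConstructionExample() {
    Aidge::Tensor scalarInt(42);       // 0-D tensor, DataType::Int32, cpu-backed
    Aidge::Tensor scalarFloat(3.14f);  // 0-D tensor, DataType::Float32
    assert(scalarInt.size() == 1 && scalarInt.dims().empty());
}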
/**
* @brief Construct a new Tensor object from another one (shallow copy).
* Data memory is not copied, but shared between the new Tensor and the
* initial one.
* @brief Construct a new Tensor object from dimensions.
*
* @param otherTensor
* @param dims dimensions of the tensor
*/
Tensor(const Tensor&) = default;
Tensor(Tensor&&) = default;
/**
* Perform a deep copy of the tensor.
*/
Tensor clone() const {
Tensor newTensor(*this);
if (!newTensor.isContiguous()) {
newTensor.makeContiguous();
}
else {
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
newTensor.setImpl(newImpl);
}
return newTensor;
}
template<typename T,
typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
Tensor(T val)
: Data(Type),
mDataType(NativeType<VT>::type),
mDims({}), mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
mSize(1) {
*static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
Tensor(const std::vector<DimSize_t>& dims)
: Data(Type)
{
// set mDims, mStrides, mContiguous, mSize
resize(dims);
}
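By contrast, the dimension constructor only computes the geometry; a sketch of the intended usage (storage is allocated later, once a backend is set):

#include <cassert>
#include <vector>
#include "aidge/data/Tensor.hpp"

void dimsConstructionExample() {
    Aidge::Tensor t(std::vector<Aidge::DimSize_t>{2, 3, 4});
    assert(!t.hasImpl() && t.size() == 24);  // dims/strides/size set, no data yet
    t.setBackend("cpu");                     // allocates via the registered cpu implementation
}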
/**
@@ -123,20 +111,11 @@ class Tensor : public Data,
mDims({SIZE_0}),
mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
mSize(SIZE_0) {
mSize(SIZE_0)
{
mImpl->copyFromHost(&arr.data[0], SIZE_0);
}
template <typename T, std::size_t SIZE_0>
constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
resize({SIZE_0});
if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0});
}
mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset);
return *this;
}
/**
* @brief Construct a new Tensor object from the 2-dimensions Array helper.
* @tparam T datatype
@@ -154,16 +133,6 @@ class Tensor : public Data,
mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
}
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
resize({SIZE_0, SIZE_1});
if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1});
}
mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset);
return *this;
}
/**
* @brief Construct a new Tensor object from the 3-dimensions Array helper.
* @tparam T datatype
@@ -182,16 +151,6 @@ class Tensor : public Data,
mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
}
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
resize({SIZE_0, SIZE_1, SIZE_2});
if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2});
}
mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset);
return *this;
}
/**
* @brief Construct a new Tensor object from the 4-dimensions Array helper.
* @tparam T datatype
@@ -211,15 +170,19 @@ class Tensor : public Data,
mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
}
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3});
}
mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset);
return *this;
}
/**
* @brief Copy constructor. Construct a new Tensor object from another one
* (shallow copy). Data memory is not copied, but shared between the new
* Tensor and the initial one.
* @param other
*/
Tensor(const Tensor& other) = default;
/**
* @brief Move constructor.
* @param other
*/
Tensor(Tensor&& other) = default;
/**
* @brief Copy dimensions, datatype and data from another Tensor.
@@ -227,24 +190,32 @@ class Tensor : public Data,
* existing implementation. Tensor backend/device remain untouched.
* If current Tensor does not have an implementation, only a shallow copy
* is performed and the Tensor will share data with t.
* @param t other Tensor object.
* @param other Tensor object to copy.
* @return Tensor&
*/
Tensor &operator=(const Tensor &t) {
resize(t.dims(), t.strides());
setDataType(t.dataType(), false); // do not convert existing data
if (t.hasImpl()) {
if (hasImpl()) {
copyFrom(t);
}
else {
// Perform a shallow copy only
setImpl(t.mImpl, t.mImplOffset);
}
}
else {
setImpl(nullptr);
}
Tensor &operator=(const Tensor& other);
template <typename T, std::size_t SIZE_0>
constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
*this = Tensor(std::move(arr));
return *this;
}
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
*this = Tensor(std::move(arr));
return *this;
}
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
*this = Tensor(std::move(arr));
return *this;
}
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
*this = Tensor(std::move(arr));
return *this;
}
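These assignment overloads now delegate to the matching Array constructor and move-assign the temporary, instead of duplicating the resize/copyFromHost logic. Usage is unchanged; a sketch:

#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ArrayHelpers.hpp"

void arrayAssignmentExample() {
    Aidge::Tensor t;
    t = Aidge::Array2D<int, 2, 2>{{{1, 2}, {3, 4}}};  // temporary Tensor, then move-assignment
}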
@@ -260,6 +231,23 @@ class Tensor : public Data,
return *mImpl == *(otherTensor.mImpl);
}
public:
/**
* @brief Perform a deep copy of the tensor.
*/
Tensor clone() const {
Tensor newTensor(*this);
if (!newTensor.isContiguous()) {
newTensor.makeContiguous();
}
else {
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
newTensor.setImpl(newImpl);
}
return newTensor;
}
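A sketch contrasting the shallow copy constructor with clone(), matching the sharing tests further down:

#include <cassert>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ArrayHelpers.hpp"

void cloneExample() {
    Aidge::Tensor a = Aidge::Array1D<int, 3>{{1, 2, 3}};
    Aidge::Tensor b(a);           // shallow copy: shares the implementation
    Aidge::Tensor c = a.clone();  // deep copy: new storage, same values
    assert(b.getImpl() == a.getImpl());
    assert(c.getImpl() != a.getImpl() && c == a);
}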
/**
* @brief Set the backend of the Tensor associated implementation. If there
* was no previous implementation set, data will be allocated, but it will
@@ -292,12 +280,7 @@
* @brief Get a list of available backends.
* @return std::set<std::string>
*/
static std::set<std::string> getAvailableBackends(){
std::set<std::string> backendsList;
for(std::tuple<std::string, DataType> tupleKey : Registrar<Tensor>::getKeys())
backendsList.insert(std::get<0>(tupleKey));
return backendsList;
}
static std::set<std::string> getAvailableBackends();
/**
* @brief Get the data type enum.
@@ -369,13 +352,13 @@
* @brief Get dimensions of the Tensor object.
* @return constexpr const std::vector<DimSize_t>&
*/
constexpr const std::vector<DimSize_t> &dims() const { return mDims; }
constexpr inline const std::vector<DimSize_t>& dims() const noexcept { return mDims; }
/**
* @brief Get strides of the Tensor object.
* @return constexpr const std::vector<DimSize_t>&
*/
constexpr const std::vector<DimSize_t> &strides() const { return mStrides; }
constexpr inline const std::vector<DimSize_t>& strides() const noexcept { return mStrides; }
/**
* @brief Return true if Tensor is contiguous in memory.
@@ -424,6 +407,9 @@
* @return false
*/
bool empty() const { return mDims.empty(); }
// bool newempty() const noexcept {
// return mSize == 0;
// }
/**
* @brief Set each element of the tensor to zero.
@@ -464,12 +450,13 @@
inline void print() const { printf("%s\n", toString().c_str()); }
std::shared_ptr<Tensor> grad() {
if (!mGrad) {
mGrad = std::make_shared<Tensor>(mDataType);
mGrad->resize(mDims);
// if (!mGrad && mImpl) {
// mGrad = std::make_shared<Tensor>(mDims);
// mGrad->setDataType(mDataType);
// mGrad->setBackend(mImpl->backend());
if (mImpl) mGrad->setBackend(mImpl->backend());
}
// // if (mImpl) mGrad->setBackend(mImpl->backend());
// }
return mGrad;
}
@@ -481,14 +468,14 @@
* @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor.
* @return std::vector<DimSize_t>
*/
std::vector<std::size_t> getCoord(const std::size_t flatIdx) const {
std::vector<std::size_t> coordIdx = std::vector<std::size_t>(mDims.size());
std::size_t idx = flatIdx;
for (std::size_t i = mDims.size() - 1; i > 0; --i){
coordIdx[i] = (idx % mDims[i]);
idx/=mDims[i];
std::vector<std::size_t> getCoord(std::size_t flatIdx) const {
std::vector<std::size_t> coordIdx(mDims.size());
std::size_t i = mDims.size();
while (i-- > 0) {
coordIdx[i] = (flatIdx % mDims[i]);
flatIdx/=mDims[i];
}
coordIdx[0] = idx % mDims[0];
return coordIdx;
}
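The rewritten loop unflattens right to left and, unlike the old version, needs no special case for dimension 0 (and degrades gracefully to an empty vector for scalars). A worked example:

// dims {2, 3, 4}, flatIdx = 17:
//   i = 2: 17 % 4 = 1, 17 / 4 = 4
//   i = 1:  4 % 3 = 1,  4 / 3 = 1
//   i = 0:  1 % 2 = 1
// => coordinates {1, 1, 1}; check: 1*(3*4) + 1*4 + 1 == 17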
@@ -506,7 +493,7 @@
AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates do not match number of dimensions");
std::size_t flatIdx = 0;
std::size_t i = 0;
for(; i < coordIdx.size() - 1; ++i){
for(; i < coordIdx.size() - 1; ++i) {
AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinate dimensions do not fit the dimensions of the tensor");
flatIdx = (flatIdx + coordIdx[i]) * mDims[i + 1];
}
@@ -522,21 +509,24 @@
* @return DimSize_t Storage index
*/
std::size_t getStorageIdx(const std::vector<std::size_t>& coordIdx) const {
for(std::size_t i = 0; i < coordIdx.size(); ++i) {
AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
}
AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates do not match number of dimensions");
return std::inner_product(coordIdx.begin(), coordIdx.end(), mStrides.begin(), DimSize_t(0));
return std::inner_product(coordIdx.cbegin(), coordIdx.cend(), mStrides.cbegin(), DimSize_t(0));
}
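The inner product spells out the stride-based indexing; because it uses the tensor's actual strides, it stays valid on non-contiguous views, unlike getIdx(). A standalone sketch of the same computation:

#include <cstddef>
#include <numeric>
#include <vector>

// Same computation as Tensor::getStorageIdx: dot product of coordinates and strides.
std::size_t storageIdx(const std::vector<std::size_t>& coord,
                       const std::vector<std::size_t>& strides) {
    return std::inner_product(coord.cbegin(), coord.cend(), strides.cbegin(), std::size_t(0));
}
// For a contiguous {2, 3, 4} tensor (strides {12, 4, 1}):
// storageIdx({1, 2, 3}, {12, 4, 1}) == 1*12 + 2*4 + 3*1 == 23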
/**
* @brief Returns a sub-tensor with equal or lower number of dimensions.
*
* For instance, ``t.extract({1})`` on a CHW tensor will return the HW tensor
* @note For instance, ``t.extract({1})`` on a CHW tensor will return the HW tensor
* of channel #1.
* Likewise, ``t.extract({0, 1})`` on a NCHW tensor will return the HW tensor
* of batch #0 and channel #1.
* No memory copy is performed, the returned tensor does not own the memory.
* If the number of coordinates matches the number of dimensions, an empty
* @note No memory copy is performed, the returned tensor does not own the memory.
* @note If the number of coordinates matches the number of dimensions, a scalar
* tensor is returned.
* It current tensor was contiguous, the returned tensor is garanteed to be
* @note If the current tensor is contiguous, the returned tensor is guaranteed to be
* contiguous as well.
*
* @param coordIdx Coordinates of the sub-tensor to extract
@@ -547,6 +537,8 @@
/**
* @brief Returns a sub-tensor at some coordinate and with some dimension.
*
* @note Data contiguity of the returned Tensor is not guaranteed.
*
* @param coordIdx First coordinates of the sub-tensor to extract
* @param dims Dimensions of the sub-tensor to extract
* @return Tensor Sub-tensor.
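A usage sketch of both extract() overloads on a hypothetical CHW tensor of dims {3, 32, 32}:

#include "aidge/data/Tensor.hpp"

void extractExample(const Aidge::Tensor& t) {  // t: contiguous CHW tensor, dims {3, 32, 32}
    Aidge::Tensor hw  = t.extract({1});        // HW view of channel #1, shares memory
    Aidge::Tensor val = t.extract({1, 0, 5});  // as many coordinates as dims: scalar tensor
    Aidge::Tensor roi = t.extract({0, 8, 8}, {3, 16, 16});  // strided sub-tensor, possibly non-contiguous
}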
@@ -103,32 +103,11 @@ constexpr std::array<T, N + 1> append(T t, std::array<T, N> a) {
// Generic helper for initializing a Tensor
template <typename T, std::size_t SIZE_0>
struct Array1D {
Array1D(std::initializer_list<T> list) {
auto it = list.begin();
for (std::size_t i = 0; i < SIZE_0; ++i, ++it) {
data[i] = *it;
}
}
Array1D(const T (&dataArray)[SIZE_0]) {
std::copy_n(&dataArray[0], SIZE_0, &data[0]);
}
T data[SIZE_0];
};
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
struct Array2D {
Array2D(std::initializer_list<std::initializer_list<T>> list) {
auto it1 = list.begin();
for (std::size_t i = 0; i < SIZE_0; ++i, ++it1) {
auto it2 = it1->begin();
for (std::size_t j = 0; j < SIZE_1; ++j, ++it2) {
data[i][j] = *it2;
}
}
}
Array2D(const T (&dataArray)[SIZE_0][SIZE_1]) {
std::copy_n(&dataArray[0][0], SIZE_0 * SIZE_1, &data[0][0]);
}
T data[SIZE_0][SIZE_1];
};
@@ -9,9 +9,6 @@
*
********************************************************************************/
#include <vector>
#include <cstddef>
#include "aidge/data/Tensor.hpp"
@@ -21,7 +18,38 @@
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
resize(other.dims(), other.strides());
setDataType(other.dataType(), false); // do not convert existing data
if (other.hasImpl()) {
if (hasImpl()) {
copyFrom(other);
}
else {
// Perform a shallow copy only
setImpl(other.mImpl, other.mImplOffset);
}
}
else {
setImpl(nullptr);
}
return *this;
}
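A sketch of the two branches of this assignment operator (names are illustrative):

#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ArrayHelpers.hpp"

void assignmentExample() {
    Aidge::Tensor src = Aidge::Array1D<float, 4>{{1.f, 2.f, 3.f, 4.f}};

    Aidge::Tensor dst;  // no implementation yet
    dst = src;          // shallow branch: dst shares src's storage

    Aidge::Tensor withImpl(std::vector<Aidge::DimSize_t>{4});
    withImpl.setBackend("cpu");
    withImpl = src;     // copy branch: values are copied into the existing implementation
}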
void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
// TODO: scalar Tensor not handled
if (dims.empty()) { // scalar
mDims = std::vector<DimSize_t>(0);
mStrides = std::vector<DimSize_t>({1});
mContiguous = true;
computeSize();
if (mImpl) {
mImpl->resize(mDims);
}
return;
}
bool checkContiguous = true;
if (strides.empty()) {
strides.resize(dims.size());
@@ -36,7 +64,7 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto
AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
}
if (mImpl.use_count() > 1) {
if (mImpl && mImpl.use_count() > 1) {
// Here we could also create a new storage for this tensor in this case
// But, is it more likely that the user really wants this, or that they made a mistake?
AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
@@ -48,6 +76,11 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto
mContiguous = true;
if (checkContiguous) {
std::size_t expectedStride = 1;
// std::size_t i = dims.size();
// while ((i-- > 0) && (strides[i] == expectedStride)) {
// mContiguous&= (strides[i] == expectedStride);
// expectedStride*= dims[i];
// }
for (std::size_t i = dims.size()-1; i > 0; --i) {
if (strides[i] != expectedStride) {
mContiguous = false;
@@ -153,26 +186,26 @@ std::string Aidge::Tensor::toString() const {
return res;
}
Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const {
Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& fixedCoord) const {
AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
AIDGE_ASSERT(fixedCoord.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
Tensor subTensor(mDataType);
subTensor.resize(std::vector<size_t>(mDims.begin() + coordIdx.size(), mDims.end()),
std::vector<size_t>(mStrides.begin() + coordIdx.size(), mStrides.end()));
subTensor.resize(std::vector<size_t>(mDims.cbegin() + fixedCoord.size(), mDims.cend()),
std::vector<size_t>(mStrides.cbegin() + fixedCoord.size(), mStrides.cend()));
subTensor.setBackend(mImpl->backend(), mImpl->device().second);
subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx));
subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(fixedCoord));
return subTensor;
}
Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const {
Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& startCoord, const std::vector<std::size_t>& dims) const {
AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
AIDGE_ASSERT(coordIdx.size() == mDims.size(), "Coordinates does not match number of dimensions");
AIDGE_ASSERT(startCoord.size() == mDims.size(), "Coordinates do not match number of dimensions");
Tensor subTensor(mDataType);
subTensor.resize(dims, mStrides);
subTensor.setBackend(mImpl->backend(), mImpl->device().second);
subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx));
subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(startCoord));
return subTensor;
}
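Why this overload may return a non-contiguous tensor: the sub-tensor keeps the parent's strides while shrinking the dims, which leaves gaps between logically consecutive elements. For example:

// Parent: dims {2, 3, 4}, strides {12, 4, 1}.
// extract({0, 0, 1}, {2, 3, 3}) keeps strides {12, 4, 1} with dims {2, 3, 3}:
// each logical row holds 3 elements but storage still advances by 4 per row,
// so the resulting view is not contiguous (cf. the extract test below).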
@@ -396,3 +429,10 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const
return *fallback;
}
}
std::set<std::string> Aidge::Tensor::getAvailableBackends() {
std::set<std::string> backendsList;
for(const auto& tupleKey : Registrar<Tensor>::getKeys())
backendsList.insert(std::get<0>(tupleKey));
return backendsList;
}
@@ -23,185 +23,17 @@
using namespace Aidge;
TEST_CASE("[backend/cpu/data] Tensor", "[Tensor]") {
Tensor x;
SECTION("TensorUtils, constructor from const arrays") {
// construction from different types and sizes
REQUIRE_NOTHROW(x = Array1D<int, 2>{{1, 2}});
x.print();
REQUIRE_NOTHROW(x = Array2D<int, 2, 2>{{{1, 2}, {3, 4}}});
x.print();
REQUIRE_NOTHROW(x = Array3D<std::uint8_t, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
REQUIRE_NOTHROW(x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
REQUIRE_NOTHROW(x = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}});
REQUIRE_NOTHROW(x = Array3D<double, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}});
REQUIRE_NOTHROW(x = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},
{{{9,10}, {11,12}}, {{13,14},{15,16}}}}});
}
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
SECTION("Tensor features") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE(x.nbDims() == 3);
REQUIRE(x.dims()[0] == 2);
REQUIRE(x.dims()[1] == 2);
REQUIRE(x.dims()[2] == 2);
REQUIRE(x.size() == 8);
}
TEST_CASE("[backend/cpu/data] Tensor", "[TensorImpl]") {
Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
SECTION("Access to array") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
}
SECTION("get function") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE(x.get<int>({0, 0, 0}) == 1);
REQUIRE(x.get<int>({0, 0, 1}) == 2);
REQUIRE(x.get<int>({0, 1, 1}) == 4);
REQUIRE(x.get<int>({1, 1, 0}) == 7);
x.set<int>({1, 1, 1}, 36);
REQUIRE(x.get<int>(7) == 36);
x.set<int>(7, 40);
REQUIRE(x.get<int>({1, 1, 1}) == 40);
}
SECTION("Pretty printing for debug") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE_NOTHROW(x.print());
}
SECTION("Tensor (in)equality") {
Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
Tensor xFloat = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}};
REQUIRE(x == xCopy);
REQUIRE_FALSE(x == xFloat);
}
constexpr std::uint16_t NBTRIALS = 10;
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dist(1, 10);
std::uniform_int_distribution<std::size_t> nbDims(1, 5);
x.setDataType(DataType::Int32);
x.setBackend("cpu");
SECTION("Tensor sharing") {
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// create Tensor
const std::size_t nb_dims = nbDims(gen) + 1;
std::vector<std::size_t> dims(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims[i] = dist(gen);
}
x.resize(dims);
// copy constructor
Tensor xCopyCtor(x);
REQUIRE(xCopyCtor.getImpl() == x.getImpl());
// copy assignment operator
Tensor xCopyAssignmentOp = x;
REQUIRE(xCopyAssignmentOp.getImpl() == x.getImpl());
Tensor xCloned = x.clone();
REQUIRE(xCloned.getImpl() != x.getImpl());
REQUIRE(xCloned == x);
}
}
SECTION("zeros()") {
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// create Tensor
constexpr std::size_t nb_dims = 3;
const std::size_t dim0 = nbDims(gen);
const std::size_t dim1 = nbDims(gen);
const std::size_t dim2 = nbDims(gen);
const std::vector<std::size_t> dims = {dim0, dim1, dim2};
int array0[dim0][dim1][dim2];
for (std::size_t i = 0; i < dim0; ++i) {
for (std::size_t j = 0; j < dim1; ++j) {
for (std::size_t k = 0; k < dim2; ++k) {
array0[i][j][k] = dist(gen);
}
}
}
x.resize(dims);
x.zeros();
for (std::size_t i = 0; i < dim0; ++i) {
for (std::size_t j = 0; j < dim1; ++j) {
const std::size_t idx = (i * dim1 + j) * dim2;
for (std::size_t k = 0; k < dim2; ++k) {
int val = *static_cast<int*>(x.getImpl()->hostPtr(idx + k));
if (val != 0) {
throw std::runtime_error("Value should be 0");
}
// REQUIRE(*static_cast<int*>(x.getImpl()->hostPtr(idx + k)) == 0);
}
}
}
}
}
SECTION("Tensor extract") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
Tensor y;
Tensor y0;
Tensor y1;
Tensor y2;
Tensor y3;
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// create Tensor
const std::size_t nb_dims = 3;
const std::size_t dim0 = nbDims(gen) + 1; // dim0 >= 2
const std::size_t dim1 = nbDims(gen) + 1;
const std::size_t dim2 = nbDims(gen) + 1;
std::vector<std::size_t> dims = {dim0, dim1, dim2};
int array0[dim0][dim1][dim2];
for (std::size_t i = 0; i < dim0; ++i) {
for (std::size_t j = 0; j < dim1; ++j) {
for (std::size_t k = 0; k < dim2; ++k) {
array0[i][j][k] = dist(gen);
}
}
}
x.resize(dims);
REQUIRE(x.isContiguous());
// extract Tensor slice from one set of coordinates
REQUIRE_NOTHROW(y0 = x.extract({}));
REQUIRE_NOTHROW(y1 = x.extract({nbDims(gen)}));
REQUIRE_NOTHROW(y2 = x.extract({nbDims(gen), nbDims(gen)}));
REQUIRE_NOTHROW(y3 = x.extract({nbDims(gen), nbDims(gen), nbDims(gen)}));
REQUIRE_THROWS(y = x.extract({0, dim0 + 1, 0}));
REQUIRE_NOTHROW(y = x.extract({0, 1}));
REQUIRE(y.getImpl() == x.getImpl()); // shared implem
REQUIRE(!y.isContiguous());
Tensor yClone = y.clone(); // when copying data, they are contiguous in memory
REQUIRE(yClone.isContiguous());
// int yTruth[2][1][1] =
REQUIRE(approxEq<int>(yClone, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
y = x.extract({0, 1});
REQUIRE(y.getImpl() == x.getImpl());
REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
REQUIRE(y.isContiguous());
}
}
}
TEST_CASE("Tensor fill") {
TEST_CASE("Tensor fill", "[TensorImpl][fill]") {
SECTION("Instantiate batches independantly") {
// initialization with 0s
std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{});
@@ -10,23 +10,403 @@
********************************************************************************/
#include <array>
#include <cstddef>
#include <cstdint> //std::uint16_t
#include <random>
#include <cstddef> // std::size_t
#include <cstdint> // std::uint8_t, std::uint16_t, std::int32_t
#include <numeric> // std::accumulate, std::inner_product
#include <functional> // std::multiplies
#include <random> // std::random_device, std::mt19937,
// std::uniform_int_distribution, std::uniform_real_distribution
#include <set>
#include <string>
#include <vector>
#include <catch2/catch_test_macros.hpp>
#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
TEST_CASE("[backend/cpu/data] Tensor", "[Tensor]") {
SECTION("Constructor") {
TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
SECTION("Default constructor") {
Tensor T_default{};
REQUIRE((
(T_default.dataType() == DataType::Float32) &&
(T_default.size() == 1) &&
(T_default.dims() == std::vector<DimSize_t>({})) &&
(T_default.strides() == std::vector<DimSize_t>({1})) &&
(T_default.getImpl() == nullptr) &&
(T_default.grad() == nullptr) &&
(T_default.isContiguous() == true)
));
}
SECTION("scalar constructor") {
Tensor T;
REQUIRE_NOTHROW(T = Tensor(std::int32_t(20)));
REQUIRE((
(T.dataType() == DataType::Int32) &&
(T.size() == 1) &&
(T.dims() == std::vector<DimSize_t>({})) &&
(T.strides() == std::vector<DimSize_t>({1})) &&
(T.getImpl() != nullptr) &&
(T.grad() == nullptr) &&
(T.isContiguous() == true)
));
}
SECTION("dim constructor") {
const std::vector<DimSize_t> Tdims = {1,2,3,4,5,6,7};
Tensor T;
REQUIRE_NOTHROW(T = Tensor(Tdims));
REQUIRE((
(T.dataType() == DataType::Float32) &&
(T.size() == std::accumulate(Tdims.cbegin(), Tdims.cend(), DimSize_t(1), std::multiplies<DimSize_t>())) &&
(T.dims() == Tdims) &&
(T.strides() == std::vector<DimSize_t>({5040,2520,840,210,42,7,1})) &&
(T.getImpl() == nullptr) &&
(T.grad() == nullptr) &&
(T.isContiguous() == true)
));
}
SECTION("TensorUtils, constructor from const arrays") {
Tensor T;
// Construction from different types and sizes
// Set an already constructed Tensor
REQUIRE_NOTHROW(T = Array1D<int, 2>{{1, 2}});
REQUIRE((
(T.dataType() == DataType::Int32) &&
(T.size() == 2) &&
(T.dims() == std::vector<DimSize_t>({2})) &&
(T.strides() == std::vector<DimSize_t>({1})) &&
(T.getImpl() != nullptr) &&
(T.grad() == nullptr) &&
(T.isContiguous() == true)
));
// Change dims
REQUIRE_NOTHROW(T = Array2D<int, 2, 2>{{{1, 2}, {3, 4}}});
// Change data types
REQUIRE_NOTHROW(T = Array3D<std::uint8_t, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
REQUIRE((
(T.dataType() == DataType::UInt8) &&
(T.size() == 8) &&
(T.dims() == std::vector<DimSize_t>({2,2,2})) &&
(T.strides() == std::vector<DimSize_t>({4,2,1})) &&
(T.getImpl() != nullptr) &&
(T.grad() == nullptr) &&
(T.isContiguous() == true)
));
REQUIRE_NOTHROW(T = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
REQUIRE_NOTHROW(T = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}});
REQUIRE_NOTHROW(T = Array3D<double, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}});
// Change dims
REQUIRE_NOTHROW(T = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},
{{{9,10}, {11,12}}, {{13,14},{15,16}}}}});
REQUIRE((
(T.dataType() == DataType::Int32) &&
(T.size() == 16) &&
(T.dims() == std::vector<DimSize_t>({2,2,2,2})) &&
(T.strides() == std::vector<DimSize_t>({8,4,2,1})) &&
(T.getImpl() != nullptr) &&
(T.grad() == nullptr) &&
(T.isContiguous() == true)
));
}
SECTION("copy constructor / copy assignment operator") {
}
SECTION("move constructor / move assignment operator") {
}
SECTION("prototype") {
constexpr std::uint16_t NBTRIALS = 10;
// Create random number generators
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
std::uniform_real_distribution<float> valueDist(0.001f, 1.0f);
for (std::size_t trial = 0; trial < NBTRIALS; ++trial) {
std::vector<std::size_t> Tdims;
const std::size_t Tsize = nbDimsDist(gen);
for (std::size_t i = 0; i < Tsize; ++i) {
Tdims.push_back(dimsDist(gen));
}
Tensor T(Tdims);
// fill the tensor
std::unique_ptr<float[]> array0(new float[T.size()]);
for (std::size_t i = 0; i < T.size(); ++i) {
array0[i] = valueDist(gen);
}
T.setBackend("cpu");
T.getImpl()->setRawPtr(array0.get(), T.size());
Tensor Tclone;
REQUIRE_NOTHROW(Tclone = T.clone());
REQUIRE((
(T.dataType() == Tclone.dataType()) &&
(T.size() == Tclone.size()) &&
(T.dims() == Tclone.dims()) &&
(T.strides() == Tclone.strides()) &&
(T.getImpl() != Tclone.getImpl()) &&
(Tclone.grad() == nullptr) &&
(Tclone.isContiguous() == true)
));
REQUIRE(Tclone == T);
}
}
}
TEST_CASE("[core/data] Tensor(getter/setter)", "[Tensor][Getter][Setter]") {
constexpr std::uint16_t NBTRIALS = 10;
// Create random number generators
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
std::uniform_real_distribution<float> valueDist(0.001f, 1.0f);
for (std::size_t trial = 0; trial < NBTRIALS; ++trial) {
std::vector<std::size_t> Tdims;
const std::size_t Tsize = nbDimsDist(gen);
for (std::size_t i = 0; i < Tsize; ++i) {
Tdims.push_back(dimsDist(gen));
}
// create Tensor
Tensor T(Tdims);
// compute stride
std::vector<std::size_t> Tstrides(Tdims.size(), 1);
std::size_t i = Tdims.size() - 1;
while (i-- > 0) {
Tstrides[i] = Tstrides[i+1]*Tdims[i+1];
}
/////////////////
// dimensions
// nbDims(), dims(), size()
REQUIRE(T.nbDims() == Tdims.size());
REQUIRE(T.dims() == Tdims);
std::size_t trueSize = std::accumulate(Tdims.cbegin(), Tdims.cend(), 1, std::multiplies<std::size_t>());
REQUIRE(T.size() == trueSize);
/////////////////
// implementation
// getImpl(), setImpl(), hasImpl()
REQUIRE(T.hasImpl() == false);
std::shared_ptr<TensorImpl_cpu<float>> tensorImpl = std::make_shared<TensorImpl_cpu<float>>(0, Tdims);
T.setImpl(tensorImpl);
REQUIRE(T.getImpl() == tensorImpl);
REQUIRE(T.hasImpl() == true);
// isContiguous(), stride(),
REQUIRE(T.isContiguous());
REQUIRE(T.strides() == Tstrides);
// fill the tensor
std::unique_ptr<float[]> array0(new float[T.size()]);
for (std::size_t i = 0; i < T.size(); ++i) {
array0[i] = valueDist(gen);
}
tensorImpl->setRawPtr(array0.get(), T.size());
// getCoord(), getIdx(), getStorageIdx()
std::vector<DimSize_t> Tdims_copy = Tdims;
for (auto& val : Tdims_copy) {
val = std::min(DimSize_t(2), std::max(DimSize_t(0), val - 1));
}
DimSize_t true_flatid = std::inner_product(Tdims_copy.cbegin(), Tdims_copy.cend(), Tstrides.cbegin(), DimSize_t(0));
REQUIRE(T.getCoord(true_flatid) == Tdims_copy);
REQUIRE(T.getIdx(Tdims_copy) == true_flatid);
REQUIRE(T.getStorageIdx(Tdims_copy) == true_flatid); // Tensor is not a view
// set(vector), set(size_t), get(vector), get(size_t), getImplOffset()
REQUIRE_NOTHROW(T.set<float>(Tdims_copy, 50.0f));
REQUIRE(T.get<float>(Tdims_copy) == 50.0f);
REQUIRE_NOTHROW(T.set<float>(true_flatid, 40.0f));
REQUIRE(T.get<float>(true_flatid) == 40.0f);
REQUIRE(T.getImplOffset() == 0);
//////////////
// backend
// getAvailableBackends()
REQUIRE(Tensor::getAvailableBackends() == std::set<std::string>({"cpu"}));
// setBackend()
REQUIRE_NOTHROW(T.setBackend("cpu", 0));
// setDataType(), dataType()
REQUIRE_NOTHROW(T.setDataType(DataType::Int16));
REQUIRE(T.dataType() == DataType::Int16);
}
}
TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") {
// extract, makeContiguous
// empty
constexpr std::uint16_t NBTRIALS = 10;
// Create random number generators
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
std::uniform_real_distribution<float> valueDist(0.001f, 1.0f);
// zeros, resize
SECTION("zeros") {
Tensor T;
for (std::size_t trial = 0; trial < NBTRIALS; ++trial) {
std::vector<std::size_t> Tdims;
const std::size_t Tsize = nbDimsDist(gen);
for (std::size_t i = 0; i < Tsize; ++i) {
Tdims.push_back(dimsDist(gen));
}
T.resize(Tdims);
// fill the tensor
std::unique_ptr<float[]> array0(new float[T.size()]);
for (std::size_t i = 0; i < T.size(); ++i) {
array0[i] = valueDist(gen);
}
T.setBackend("cpu");
T.getImpl()->setRawPtr(array0.get(), T.size());
float* res = static_cast<float*>(T.getImpl()->hostPtr());
for (std::size_t i = 0; i < T.size(); ++i) {
REQUIRE(res[i] == array0[i]);
}
T.zeros();
res = static_cast<float*>(T.getImpl()->hostPtr());
for (std::size_t i = 0; i < T.size(); ++i) {
REQUIRE(res[i] == 0.0f);
}
}
}
SECTION("Tensor extract") {
bool equal;
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// create Tensor
const std::size_t nb_dims = 3;
const std::size_t dim0 = dimsDist(gen) + 1; // dim0 >= 2
const std::size_t dim1 = dimsDist(gen) + 1;
const std::size_t dim2 = dimsDist(gen) + 1;
std::vector<std::size_t> dims = {dim0, dim1, dim2};
int array0[dim0*dim1*dim2];
for (std::size_t i = 0; i < dim0; ++i) {
for (std::size_t j = 0; j < dim1; ++j) {
for (std::size_t k = 0; k < dim2; ++k) {
array0[((i * dim1) + j)*dim2 + k] = static_cast<int>(dimsDist(gen)); // int values: valueDist floats in (0,1) would all truncate to 0
}
}
}
Tensor x{dims};
x.setDataType(DataType::Int32);
x.setBackend("cpu");
Tensor y;
Tensor y0;
Tensor y1;
Tensor y2;
Tensor y3;
x.getImpl()->setRawPtr(array0, dim0*dim1*dim2);
REQUIRE(x.isContiguous());
////////////////
// extract contiguous Tensor slice given start coordinates
// the whole Tensor
REQUIRE_NOTHROW(y0 = x.extract({}));
REQUIRE(y0 == x);
int* y0_res = static_cast<int*>(y0.getImpl()->hostPtr());
equal = true;
for (std::size_t i = 0; i < dim0*dim1*dim2; ++i) {
equal &= (y0_res[i] == array0[i]);
}
REQUIRE(equal);
REQUIRE(y0.getImpl() == x.getImpl());
REQUIRE(y0.isContiguous());
// Tensor - 1-D
REQUIRE_NOTHROW(y1 = x.extract({dim0 - 2}));
int* y1_res = static_cast<int*>(y1.getImpl()->hostPtr());
equal = true;
for (std::size_t i = 0; i < dim1*dim2; ++i) {
equal &= (y1_res[i] == array0[(dim0-2)*dim1*dim2 + i]);
}
REQUIRE(equal);
REQUIRE(y1.getImpl() == x.getImpl());
REQUIRE(y1.isContiguous());
// Tensor - 2-D
REQUIRE_NOTHROW(y2 = x.extract({dim0 - 2, dim1 - 2}));
int* y2_res = static_cast<int*>(y2.getImpl()->hostPtr());
equal = true;
for (std::size_t i = 0; i < dim2; ++i) {
equal &= (y2_res[i] == array0[(((dim0 - 2) * dim1) + (dim1 - 2))*dim2 + i]);
}
REQUIRE(equal);
REQUIRE(y2.getImpl() == x.getImpl());
REQUIRE(y2.isContiguous());
// Tensor - 3-D => scalar
REQUIRE_NOTHROW(y3 = x.extract({dim0 - 2, dim1 - 2, dim2 - 2}));
int* y3_res = static_cast<int*>(y3.getImpl()->hostPtr());
REQUIRE(y3_res[0] == array0[(((dim0 - 2) * dim1) + (dim1 - 2))*dim2 + dim2 - 2]);
REQUIRE(y3.getImpl() == x.getImpl());
REQUIRE(y3.isContiguous());
// throw an error
REQUIRE_THROWS(y = x.extract({0, dim1, 0}));
/////////////////
// extract Tensor slice given start coordinates and dimension
REQUIRE_NOTHROW(y = x.extract({0, 0, 1}, {dim0-1, 1, dim2-1}));
REQUIRE(y.getImpl() == x.getImpl()); // shared implem
REQUIRE(!y.isContiguous());
Tensor yClone = y.clone(); // when copying data, they are contiguous in memory
REQUIRE(yClone.isContiguous());
// int yTruth[2][1][1] =
REQUIRE(approxEq<int>(yClone, y, 0.0f, 0.0f));
}
}
// print, toString,
SECTION("Pretty printing for debug") {
Tensor x{};
// Empty Tensor
REQUIRE_THROWS(x.print());
// scalar
x = Tensor(42);
REQUIRE_NOTHROW(x.print());
// 1-D Tensors
x = Array1D<int, 1>{{1}};
REQUIRE_NOTHROW(x.print());
x = Array1D<int, 6>{{1,2,3,4,5,6}};
REQUIRE_NOTHROW(x.print());
// 2-D Tensors
x = Array2D<int, 3, 2>{{{1, 2}, {3, 4}, {5, 6}}};
REQUIRE_NOTHROW(x.print());
// +2-D Tensors
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE_NOTHROW(x.print());
x = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},{{{11, 12}, {13, 14}}, {{15, 16}, {17, 18}}}}};
REQUIRE_NOTHROW(x.print());
}
}
} // namespace Aidge
\ No newline at end of file
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <array>
#include <cstddef>
#include <cstdint> //std::uint16_t
#include <random>
#include <vector>
#include <catch2/catch_test_macros.hpp>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"
using namespace Aidge;
TEST_CASE("[backend/cpu/data] Tensor", "[Tensor]") {
Tensor x;
SECTION("TensorUtils, constructor from const arrays") {
// construction from different types and sizes
REQUIRE_NOTHROW(x = Array1D<int, 2>{{1, 2}});
x.print();
REQUIRE_NOTHROW(x = Array2D<int, 2, 2>{{{1, 2}, {3, 4}}});
x.print();
REQUIRE_NOTHROW(x = Array3D<std::uint8_t, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
REQUIRE_NOTHROW(x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
REQUIRE_NOTHROW(x = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}});
REQUIRE_NOTHROW(x = Array3D<double, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}});
REQUIRE_NOTHROW(x = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},
{{{9,10}, {11,12}}, {{13,14},{15,16}}}}});
}
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
SECTION("Tensor features") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE(x.nbDims() == 3);
REQUIRE(x.dims()[0] == 2);
REQUIRE(x.dims()[1] == 2);
REQUIRE(x.dims()[2] == 2);
REQUIRE(x.size() == 8);
}
SECTION("Access to array") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
}
SECTION("get function") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE(x.get<int>({0, 0, 0}) == 1);
REQUIRE(x.get<int>({0, 0, 1}) == 2);
REQUIRE(x.get<int>({0, 1, 1}) == 4);
REQUIRE(x.get<int>({1, 1, 0}) == 7);
x.set<int>({1, 1, 1}, 36);
REQUIRE(x.get<int>(7) == 36);
x.set<int>(7, 40);
REQUIRE(x.get<int>({1, 1, 1}) == 40);
}
SECTION("Pretty printing for debug") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE_NOTHROW(x.print());
}
TEST_CASE("Tensor fill") {
SECTION("Instantiate batches independantly") {
// initialization with 0s
std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{});
//concatenatedTensor->print();
std::shared_ptr<Tensor> myTensor1 = std::make_shared<Tensor>(Array1D<int, 5>{{1,2,3,4,5}});
std::shared_ptr<Tensor> myTensor2 = std::make_shared<Tensor>(Array1D<int, 5>{{6,7,8,9,10}});
std::shared_ptr<Tensor> myTensor3 = std::make_shared<Tensor>(Array1D<int, 5>{{11,12,13,14,15}});
// use copy function from implementation
concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0);
concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5);
concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10);
// concatenatedTensor->print();
std::shared_ptr<Tensor> expectedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{
{{1,2,3,4,5},
{6,7,8,9,10},
{11,12,13,14,15}}
});
// expectedTensor->print();
REQUIRE(*concatenatedTensor == *expectedTensor);
}
}
TEST_CASE("[core/data] Tensor methods","[Tensor]") {
Tensor x = Array3D<int, 2, 2, 2>{{
{{1, 2},
{3, 4}},
{{5, 6},
{7, 8}}
}};
Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
SECTION("Tensor (in)equality") {
Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
Tensor xFloat = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}};
REQUIRE(x == xCopy);
REQUIRE_FALSE(x == xFloat);
}
constexpr std::uint16_t NBTRIALS = 10;
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dist(1, 10);
std::uniform_int_distribution<std::size_t> nbDims(1, 5);
x.setDataType(DataType::Int32);
x.setBackend("cpu");
SECTION("Tensor sharing") {
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// create Tensor
const std::size_t nb_dims = nbDims(gen) + 1;
std::vector<std::size_t> dims(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims[i] = dist(gen);
}
x.resize(dims);
// copy constructor
Tensor xCopyCtor(x);
REQUIRE(xCopyCtor.getImpl() == x.getImpl());
// copy assignment operator
Tensor xCopyAssignmentOp = x;
REQUIRE(xCopyAssignmentOp.getImpl() == x.getImpl());
Tensor xCloned = x.clone();
REQUIRE(xCloned.getImpl() != x.getImpl());
REQUIRE(xCloned == x);
}
}
SECTION("zeros()") {
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// create Tensor
constexpr std::size_t nb_dims = 3;
const std::size_t dim0 = nbDims(gen);
const std::size_t dim1 = nbDims(gen);
const std::size_t dim2 = nbDims(gen);
const std::vector<std::size_t> dims = {dim0, dim1, dim2};
int array0[dim0][dim1][dim2];
for (std::size_t i = 0; i < dim0; ++i) {
for (std::size_t j = 0; j < dim1; ++j) {
for (std::size_t k = 0; k < dim2; ++k) {
array0[i][j][k] = dist(gen);
}
}
}
x.resize(dims);
x.zeros();
for (std::size_t i = 0; i < dim0; ++i) {
for (std::size_t j = 0; j < dim1; ++j) {
const std::size_t idx = (i * dim1 + j) * dim2;
for (std::size_t k = 0; k < dim2; ++k) {
int val = *static_cast<int*>(x.getImpl()->hostPtr(idx + k));
if (val != 0) {
throw std::runtime_error("Value should be 0");
}
// REQUIRE(*static_cast<int*>(x.getImpl()->hostPtr(idx + k)) == 0);
}
}
}
}
}
SECTION("Tensor extract") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
Tensor y;
Tensor y0;
Tensor y1;
Tensor y2;
Tensor y3;
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// create Tensor
const std::size_t nb_dims = 3;
const std::size_t dim0 = nbDims(gen) + 1; // dim0 >= 2
const std::size_t dim1 = nbDims(gen) + 1;
const std::size_t dim2 = nbDims(gen) + 1;
std::vector<std::size_t> dims = {dim0, dim1, dim2};
int array0[dim0][dim1][dim2];
for (std::size_t i = 0; i < dim0; ++i) {
for (std::size_t j = 0; j < dim1; ++j) {
for (std::size_t k = 0; k < dim2; ++k) {
array0[i][j][k] = dist(gen);
}
}
}
x.resize(dims);
REQUIRE(x.isContiguous());
// extract Tensor slice from one set of coordinates
REQUIRE_NOTHROW(y0 = x.extract({}));
REQUIRE_NOTHROW(y1 = x.extract({nbDims(gen)}));
REQUIRE_NOTHROW(y2 = x.extract({nbDims(gen), nbDims(gen)}));
REQUIRE_NOTHROW(y3 = x.extract({nbDims(gen), nbDims(gen), nbDims(gen)}));
REQUIRE_THROWS(y = x.extract({0, dim0 + 1, 0}));
REQUIRE_NOTHROW(y = x.extract({0, 1}));
REQUIRE(y.getImpl() == x.getImpl()); // shared implem
REQUIRE(!y.isContiguous());
Tensor yClone = y.clone(); // when copying data, they are contiguous in memory
REQUIRE(yClone.isContiguous());
// int yTruth[2][1][1] =
REQUIRE(approxEq<int>(yClone, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
y = x.extract({0, 1});
REQUIRE(y.getImpl() == x.getImpl());
REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
REQUIRE(y.isContiguous());
}
}
}
TEST_CASE("Tensor fill") {
SECTION("Instantiate batches independantly") {
// initialization with 0s
std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{});
//concatenatedTensor->print();
std::shared_ptr<Tensor> myTensor1 = std::make_shared<Tensor>(Array1D<int, 5>{{1,2,3,4,5}});
std::shared_ptr<Tensor> myTensor2 = std::make_shared<Tensor>(Array1D<int, 5>{{6,7,8,9,10}});
std::shared_ptr<Tensor> myTensor3 = std::make_shared<Tensor>(Array1D<int, 5>{{11,12,13,14,15}});
// use copy function from implementation
concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0);
concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5);
concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10);
// concatenatedTensor->print();
std::shared_ptr<Tensor> expectedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{
{{1,2,3,4,5},
{6,7,8,9,10},
{11,12,13,14,15}}
});
// expectedTensor->print();
REQUIRE(*concatenatedTensor == *expectedTensor);
}
}