Commit aed28d94 authored by Maxence Naud

create Tensor test files

parent 38fa94b0
2 merge requests: !105 version 0.2.0, !89 Increase the number of unit-tests for Tensor
Pipeline #39364 canceled
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <array>
#include <cstddef>
#include <cstdint>   // std::uint16_t
#include <random>
#include <stdexcept> // std::runtime_error
#include <vector>
#include <catch2/catch_test_macros.hpp>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"
using namespace Aidge;
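// Unit tests for the Tensor class backed by the CPU implementation.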
TEST_CASE("[backend/cpu/data] Tensor", "[Tensor]") {
Tensor x;
SECTION("TensorUtils, constructor from const arrays") {
// construction from different types and sizes
REQUIRE_NOTHROW(x = Array1D<int, 2>{{1, 2}});
x.print();
REQUIRE_NOTHROW(x = Array2D<int, 2, 2>{{{1, 2}, {3, 4}}});
x.print();
REQUIRE_NOTHROW(x = Array3D<std::uint8_t, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
REQUIRE_NOTHROW(x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
REQUIRE_NOTHROW(x = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}});
REQUIRE_NOTHROW(x = Array3D<double, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}});
REQUIRE_NOTHROW(x = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},
{{{9,10}, {11,12}}, {{13,14},{15,16}}}}});
}
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
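    // Shape-related getters: rank, per-dimension sizes and total number of elements.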
SECTION("Tensor features") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE(x.nbDims() == 3);
REQUIRE(x.dims()[0] == 2);
REQUIRE(x.dims()[1] == 2);
REQUIRE(x.dims()[2] == 2);
REQUIRE(x.size() == 8);
}
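    // Raw, untyped access to the underlying buffer through the implementation.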
SECTION("Access to array") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
}
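    // Typed element access: get()/set() accept either a set of coordinates or a flattened index.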
SECTION("get function") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE(x.get<int>({0, 0, 0}) == 1);
REQUIRE(x.get<int>({0, 0, 1}) == 2);
REQUIRE(x.get<int>({0, 1, 1}) == 4);
REQUIRE(x.get<int>({1, 1, 0}) == 7);
x.set<int>({1, 1, 1}, 36);
REQUIRE(x.get<int>(7) == 36);
x.set<int>(7, 40);
REQUIRE(x.get<int>({1, 1, 1}) == 40);
}
SECTION("Pretty printing for debug") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
REQUIRE_NOTHROW(x.print());
}
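    // Equality compares data type, dimensions and values: the same values stored as float must not compare equal to the int tensor.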
SECTION("Tensor (in)equality") {
Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
Tensor xFloat = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}};
REQUIRE(x == xCopy);
REQUIRE_FALSE(x == xFloat);
}
    constexpr std::uint16_t NBTRIALS = 10;
    // Create random number generators: 'dist' draws sizes/values in [1, 10], 'nbDims' draws numbers of dimensions or small sizes in [1, 5]
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_int_distribution<std::size_t> dist(1, 10);
    std::uniform_int_distribution<std::size_t> nbDims(1, 5);

    x.setDataType(DataType::Int32);
    x.setBackend("cpu");
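    // Copy construction and copy assignment share the underlying implementation, whereas clone() allocates its own copy with identical values.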
SECTION("Tensor sharing") {
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// create Tensor
const std::size_t nb_dims = nbDims(gen) + 1;
std::vector<std::size_t> dims(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims[i] = dist(gen);
}
x.resize(dims);
// copy constructor
Tensor xCopyCtor(x);
REQUIRE(xCopyCtor.getImpl() == x.getImpl());
// copy assignment operator
Tensor xCopyAssignmentOp = x;
REQUIRE(xCopyAssignmentOp.getImpl() == x.getImpl());
Tensor xCloned = x.clone();
REQUIRE(xCloned.getImpl() != x.getImpl());
REQUIRE(xCloned == x);
}
}
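    // zeros() resets every element of the storage to 0, whatever shape the tensor was resized to.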
SECTION("zeros()") {
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// create Tensor
constexpr std::size_t nb_dims = 3;
const std::size_t dim0 = nbDims(gen);
const std::size_t dim1 = nbDims(gen);
const std::size_t dim2 = nbDims(gen);
const std::vector<std::size_t> dims = {dim0, dim1, dim2};
int array0[dim0][dim1][dim2];
for (std::size_t i = 0; i < dim0; ++i) {
for (std::size_t j = 0; j < dim1; ++j) {
for (std::size_t k = 0; k < dim2; ++k) {
array0[i][j][k] = dist(gen);
}
}
}
x.resize(dims);
x.zeros();
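            // verify element by element; the flattened row-major index of element (i, j, k) is (i * dim1 + j) * dim2 + k.
            // A plain check with an explicit throw is used instead of one REQUIRE per element to keep the triple loop cheap;
            // the unexpected exception still fails the test.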
            for (std::size_t i = 0; i < dim0; ++i) {
                for (std::size_t j = 0; j < dim1; ++j) {
                    const std::size_t idx = (i * dim1 + j) * dim2;
                    for (std::size_t k = 0; k < dim2; ++k) {
                        int val = *static_cast<int*>(x.getImpl()->hostPtr(idx + k));
                        if (val != 0) {
                            throw std::runtime_error("Value should be 0");
                        }
                        // REQUIRE(*static_cast<int*>(x.getImpl()->hostPtr(idx + k)) == 0);
                    }
                }
            }
        }
    }
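    // extract() returns a view sharing the implementation of the source tensor (no data copy); cloning such a view yields a contiguous tensor with its own storage.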
SECTION("Tensor extract") {
x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
Tensor y;
Tensor y0;
Tensor y1;
Tensor y2;
Tensor y3;
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// create Tensor
const std::size_t nb_dims = 3;
const std::size_t dim0 = nbDims(gen) + 1; // dim0 >= 2
const std::size_t dim1 = nbDims(gen) + 1;
const std::size_t dim2 = nbDims(gen) + 1;
std::vector<std::size_t> dims = {dim0, dim1, dim2};
int array0[dim0][dim1][dim2];
for (std::size_t i = 0; i < dim0; ++i) {
for (std::size_t j = 0; j < dim1; ++j) {
for (std::size_t k = 0; k < dim2; ++k) {
array0[i][j][k] = dist(gen);
}
}
}
x.resize(dims);
REQUIRE(x.isContiguous());
// extract Tensor slice from one set of coordinates
REQUIRE_NOTHROW(y0 = x.extract({}));
REQUIRE_NOTHROW(y1 = x.extract({nbDims(gen)}));
REQUIRE_NOTHROW(y2 = x.extract({nbDims(gen), nbDims(gen)}));
REQUIRE_NOTHROW(y3 = x.extract({nbDims(gen), nbDims(gen), nbDims(gen)}));
REQUIRE_THROWS(y = x.extract({0, dim0 + 1, 0}));
REQUIRE_NOTHROW(y = x.extract({0, 1}));
REQUIRE(y.getImpl() == x.getImpl()); // shared implem
REQUIRE(!y.isContiguous());
Tensor yClone = y.clone(); // when copying data, they are contiguous in memory
REQUIRE(yClone.isContiguous());
// int yTruth[2][1][1] =
REQUIRE(approxEq<int>(yClone, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
y = x.extract({0, 1});
REQUIRE(y.getImpl() == x.getImpl());
REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
REQUIRE(y.isContiguous());
}
}
}
TEST_CASE("Tensor fill") {
SECTION("Instantiate batches independantly") {
// initialization with 0s
std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{});
//concatenatedTensor->print();
std::shared_ptr<Tensor> myTensor1 = std::make_shared<Tensor>(Array1D<int, 5>{{1,2,3,4,5}});
std::shared_ptr<Tensor> myTensor2 = std::make_shared<Tensor>(Array1D<int, 5>{{6,7,8,9,10}});
std::shared_ptr<Tensor> myTensor3 = std::make_shared<Tensor>(Array1D<int, 5>{{11,12,13,14,15}});
// use copy function from implementation
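        // TensorImpl::copy(src, nbElements, dstOffset): each call writes one 5-element row into the flat 3x5 buffer, at offsets 0, 5 and 10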
        concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0);
        concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5);
        concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10);
        // concatenatedTensor->print();
        std::shared_ptr<Tensor> expectedTensor = std::make_shared<Tensor>(Array2D<int, 3, 5>{
            {{1, 2, 3, 4, 5},
             {6, 7, 8, 9, 10},
             {11, 12, 13, 14, 15}}
        });
        // expectedTensor->print();
        REQUIRE(*concatenatedTensor == *expectedTensor);
    }
}
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <array>
#include <cstddef>
#include <cstdint> //std::uint16_t
#include <random>
#include <vector>
#include <catch2/catch_test_macros.hpp>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
namespace Aidge {

TEST_CASE("[backend/cpu/data] Tensor", "[Tensor]") {
    SECTION("Constructor") {
    }
}

} // namespace Aidge