diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 66352895ca23e498c9ea2a966d78454a5bd558b8..df02f67b0f6573e23efc3cad8dda27b09c4cce79 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -72,8 +72,14 @@ public:
             new TensorImpl_cpu<T>(i_DataType, i_FirstDataCoordinates, i_Dimensions));
     }
 
-    // native interface
-    const std::vector<T> &data() const
+    /// @brief Read-only access to the data, native interface.
+    /// @todo Improve constness management
+    std::vector<T> const &data() const noexcept
+    {
+        return mData;
+    }
+    /// @brief Read-write access to the data, native interface.
+    std::vector<T> &data() noexcept
     {
         return mData;
     }
@@ -95,12 +101,10 @@ public:
     {
         auto ptr = new TensorImpl_cpu<T>(
             getDataType(), getFirstDataCoordinates(), getDimensions());
-        if (ptr)
-        {
-            ptr->cloneProperties(*this);
-            NbElts_t n = getNbElts();
-            ptr->copyFromHost(getDataAddress(), n);
-        }
+        assert(ptr && "Allocation error");
+        ptr->cloneProperties(*this);
+        NbElts_t n = getNbElts();
+        ptr->copyFromHost(getDataAddress(), n);
         return detail::pimpl::ImplPtr_t(ptr);
     }
 
@@ -116,14 +120,79 @@ public:
         std::vector<Coord_t> const &i_FirstDataCoordinates,
         std::vector<DimSize_t> const &i_Dimensions) const override
     {
-        auto ptr = new TensorImpl_cpu<T>(
-            getDataType(), getFirstDataCoordinates(), getDimensions());
-        if (ptr)
+        assert(
+            detail::isAreaValid(i_FirstDataCoordinates, i_Dimensions)
+            && "Requested Tensor area is invalid");
+        assert(
+            detail::isSubTensorIncluded(
+                i_FirstDataCoordinates,
+                i_Dimensions,
+                getFirstDataCoordinates(),
+                getDimensions())
+            && "Requested extract does not fit inside source Tensor");
+
+        auto ptr
+            = new TensorImpl_cpu<T>(getDataType(), i_FirstDataCoordinates, i_Dimensions);
+        assert(ptr && "Allocation error");
+        // clone properties other than the data area
+        ptr->clonePropertiesExceptArea(*this);
+        // restrict the area to the requested extract
+        ptr->setDimensions(i_Dimensions);
+        ptr->setFirstDataCoordinates(i_FirstDataCoordinates);
+        ptr->computeLayout();
+
+        // walk the extract by logical coordinates
+        // D is assumed to be > 1; the D == 1 case is simpler
+        std::vector<Coord_t> coords = ptr->getFirstDataCoordinates();
+        std::size_t D = coords.size();
+        bool finished = false;
+        /// @todo FIXME: handle the D == 1 case
+        /// @todo optimization: find the last consecutive dimensions that are
+        /// contiguous in memory, copy them as one block (memcpy) and do jumps
+        /// for the others
+
+        // force storage allocation before writing
+        ptr->lazyInit();
+        while (!finished)
         {
-            ptr->cloneProperties(*this);
-            NbElts_t n = getNbElts();
-            ptr->copyFromHost(getDataAddress(), n);
+            // copy elements one by one
+            ptr->data()[ptr->getIdx(coords)] = data()[getIdx(coords)];
+
+            if (coords[D - 1]
+                == (ptr->getFirstDataCoordinates()[D - 1] + ptr->getDimensions()[D - 1]
+                    - 1))
+            {
+                std::size_t j = D - 2;
+                while (
+                    coords[j]
+                    == (ptr->getFirstDataCoordinates()[j] + ptr->getDimensions()[j] - 1))
+                {
+                    if (j == 0)
+                    {
+                        finished = true;
+                        break;
+                    }
+                    --j;
+                }
+                if (!finished)
+                {
+                    ++coords[j];
+                    for (std::size_t i = j + 1; i < D; ++i)
+                    {
+                        coords[i] = ptr->getFirstDataCoordinates()[i];
+                    }
+                }
+                else
+                {
+                    break;
+                }
+            }
+            else
+            {
+                ++coords[D - 1];
+            }
         }
+
         return detail::pimpl::ImplPtr_t(ptr);
     }
 
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
index fe387cff9fa0dd4ecbfb61553919cace24375ff6..01f2e1489f8edc573f3dc78b450002144e7d57f4 100644
--- a/unit_tests/data/Test_TensorImpl.cpp
+++ b/unit_tests/data/Test_TensorImpl.cpp
@@ -155,7 +155,9 @@ TEST_CASE("Tensor access")
 {
     SECTION("coordinates manipulations")
     {
-        // clang-format off
+        SECTION("simple tensor access")
+        {
+            // clang-format off
         Tensor y = Array3D<int, 1, 2, 3>{
             {
                 {
@@ -164,21 +166,50 @@ TEST_CASE("Tensor access")
                 }
             }
         };
-        // clang-format on
-        NbElts_t flatId = 0;
-        for (Coord_t a = 0; a < y.dims()[0]; ++a)
+            // clang-format on
+            NbElts_t flatId = 0;
+            for (Coord_t a = 0; a < y.dims()[0]; ++a)
+            {
+                for (Coord_t b = 0; b < y.dims()[1]; ++b)
+                {
+                    for (Coord_t c = 0; c < y.dims()[2]; ++c)
+                    {
+                        REQUIRE(y.getIdx(std::vector<Coord_t>{a, b, c}) == flatId);
+                        std::vector<Coord_t> coords(3);
+                        y.getCoord(flatId, coords);
+                        REQUIRE(coords[0] == a);
+                        REQUIRE(coords[1] == b);
+                        REQUIRE(coords[2] == c);
+                        ++flatId;
+                    }
+                }
+            }
+        }
+        SECTION("offset tensor access")
         {
-            for (Coord_t b = 0; b < y.dims()[1]; ++b)
+            Tensor Rainbow;
+            Rainbow.resize({2, 4, 5});
+            Rainbow.setDatatype(DataType::UInt16);
+            Rainbow.setBackend("cpu");
+            MakeRainbow<std::uint16_t>(Rainbow);
+            Tensor extract(Rainbow, {0, 1, 1}, {2, 2, 3}, false);
+            NbElts_t flatId = 0;
+            for (Coord_t a = 0; a < extract.dims()[0]; ++a)
             {
-                for (Coord_t c = 0; c < y.dims()[2]; ++c)
+                for (Coord_t b = 0; b < extract.dims()[1]; ++b)
                 {
-                    REQUIRE(y.getIdx(std::vector<Coord_t>{a, b, c}) == flatId);
-                    std::vector<Coord_t> coords(3);
-                    y.getCoord(flatId, coords);
-                    REQUIRE(coords[0] == a);
-                    REQUIRE(coords[1] == b);
-                    REQUIRE(coords[2] == c);
-                    ++flatId;
+                    for (Coord_t c = 0; c < extract.dims()[2]; ++c)
+                    {
+                        REQUIRE(
+                            extract.getIdx(std::vector<Coord_t>{a, b + 1, c + 1})
+                            == flatId);
+                        std::vector<Coord_t> coords(3);
+                        extract.getCoord(flatId, coords);
+                        REQUIRE(coords[0] == a);
+                        REQUIRE(coords[1] == b + 1);
+                        REQUIRE(coords[2] == c + 1);
+                        ++flatId;
+                    }
                 }
             }
         }
@@ -193,7 +224,7 @@ TEST_CASE("Tensor extract")
         Rainbow.setDatatype(DataType::UInt16);
         Rainbow.setBackend("cpu");
         MakeRainbow<std::uint16_t>(Rainbow);
-        Tensor view(Rainbow, {2, 2, 3}, {0, 1, 1});
+        Tensor view(Rainbow, {0, 1, 1}, {2, 2, 3});
        for (Coord_t a = 0; a < view.dims()[0]; ++a)
        {
            for (Coord_t b = 0; b < view.dims()[1]; ++b)
@@ -214,7 +245,7 @@ TEST_CASE("Tensor extract")
         Rainbow.setDatatype(DataType::UInt16);
         Rainbow.setBackend("cpu");
         MakeRainbow<std::uint16_t>(Rainbow);
-        Tensor extract(Rainbow, {2, 2, 3}, {0, 1, 1}, false);
+        Tensor extract(Rainbow, {0, 1, 1}, {2, 2, 3}, false);
         /// @todo REQUIRE to be added
         // REQUIRE impl size is same as extract
         for (Coord_t a = 0; a < extract.dims()[0]; ++a)