Commit 370d03f1 authored by Maxence Naud

Merge branch 'view' into 'dev'

New proposal for handling tensor views

See merge request !32
parents 203dcfab bd45aa14
Part of 2 merge requests: !50 version 0.2.0, !32 New proposal for handling tensor views
Pipeline #38070 passed
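
In short, a Tensor can now act as a view on another Tensor's storage: the view shares its source's implementation and addresses it through an element offset (getImplOffset()), while backend implementations are created from a (device, length) pair and no longer hold a reference to the Tensor they back. A minimal sketch of the resulting usage, assembled from the tests added in this commit (includes and extract semantics are taken from those tests and are assumptions beyond them):

#include "aidge/data/Tensor.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"

using namespace Aidge;

int main() {
    Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};

    Tensor y = x.extract({0, 1});  // view: shares x's implementation, non-zero element offset
    // y.getImpl() == x.getImpl(); y.getImplOffset() locates the view inside the shared buffer

    Tensor z = y.clone();          // deep copy: own implementation, contiguous, offset 0
    return 0;
}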
@@ -16,7 +16,8 @@
namespace Aidge {
inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) {
return std::static_pointer_cast<Tensor>(data)->getImpl()->rawPtr();
const auto tensor = std::static_pointer_cast<Tensor>(data);
return tensor->getImpl()->hostPtr(tensor->getImplOffset());
}
} // namespace Aidge
......
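
With the change above, getCPUPtr on a view returns the address of the view's first element rather than the start of the shared buffer. A hedged illustration, reusing the extract behaviour shown in the tests added below (the includes and the wrapping into shared_ptr are assumptions of this sketch):

#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"
// getCPUPtr is provided by the header patched above

using namespace Aidge;

void cpuPtrExample() {
    auto x = std::make_shared<Tensor>(Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
    auto y = std::make_shared<Tensor>(x->extract({0, 1}));  // view starting at element offset 2

    int* p = static_cast<int*>(getCPUPtr(y));
    // p points at the view's first element, i.e. hostPtr(getImplOffset());
    // with the data above and the extract semantics of the tests, p[0] == 3 and p[1] == 4
}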
@@ -25,8 +25,6 @@ namespace Aidge {
template <class T>
class TensorImpl_cpu : public TensorImpl {
private:
const Tensor &mTensor; // Impl needs to access Tensor information, but is not
// supposed to change it!
/// Pointer to the data and its capacity
future_std::span<T> mData;
/// If this instance own the data, std::unique_ptr manages it
@@ -35,88 +33,87 @@ private:
public:
static constexpr const char *Backend = "cpu";
TensorImpl_cpu(const Tensor &tensor) : TensorImpl(Backend), mTensor(tensor) {}
TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {}
bool operator==(const TensorImpl &otherImpl) const override final {
const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mTensor.size());
AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts);
std::size_t i = 0;
for (; i < mTensor.size() &&
for (; i < mNbElts &&
*(mData.data()+i) == *static_cast<const T*>(typedOtherImpl.rawPtr(i));
++i) {
}
return i == mTensor.size();
return i == mNbElts;
}
static std::unique_ptr<TensorImpl_cpu> create(const Tensor &tensor) {
return std::make_unique<TensorImpl_cpu<T>>(tensor);
static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, NbElts_t length) {
return std::make_shared<TensorImpl_cpu<T>>(device, length);
}
inline std::size_t size() const noexcept override final { return mData.size(); }
inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
void setDevice(DeviceIdx_t device) override final {
AIDGE_ASSERT(device == 0, "device cannot be != 0 for CPU backend");
}
void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
static_cast<T *>(rawPtr()) + offset);
const T* srcT = static_cast<const T *>(src);
T* dstT = static_cast<T *>(rawPtr(offset));
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
std::copy(srcT, srcT + length, dstT);
}
void copyCast(const void *src, NbElts_t length, const DataType srcDt) override final {
void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final {
if (length == 0) {
return;
}
AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
T* dstT = static_cast<T *>(rawPtr(offset));
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
switch (srcDt)
{
case DataType::Float64:
std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::Float32:
std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::Float16:
std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::Int64:
std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::UInt64:
std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::Int32:
std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::UInt32:
std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::Int16:
std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::UInt16:
std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::Int8:
std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::UInt8:
std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
default:
AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
@@ -124,21 +121,20 @@ public:
}
}
void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) override final {
void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
AIDGE_ASSERT(device.first == Backend, "backend must match");
AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
copy(src, length);
copy(src, length, offset);
}
inline void copyFromHost(const void *src, NbElts_t length) override final {
copy(src, length);
inline void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
copy(src, length, offset);
}
void copyToHost(void *dst, NbElts_t length) const override final {
AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
const T* src = static_cast<const T*>(rawPtr());
std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
static_cast<T *>(dst));
void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
const T* src = static_cast<const T*>(rawPtr(offset));
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
std::copy(src, src + length, static_cast<T *>(dst));
}
void *rawPtr(NbElts_t offset = 0) override final {
@@ -147,7 +143,7 @@ public:
};
const void *rawPtr(NbElts_t offset = 0) const override final {
AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const rawPtr");
AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const rawPtr");
return (mData.data() + offset);
};
@@ -157,12 +153,12 @@ public:
};
const void *hostPtr(NbElts_t offset = 0) const override final {
AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const hostPtr");
AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const hostPtr");
return (mData.data() + offset);
};
void setRawPtr(void *ptr, NbElts_t length) override final {
AIDGE_ASSERT(length >= mTensor.size(), "trying to set raw pointer of insufficient capacity");
AIDGE_ASSERT(length >= mNbElts, "trying to set raw pointer of insufficient capacity");
mData = future_std::span<T>(static_cast<T *>(ptr), length);
mDataOwner.reset();
};
@@ -171,11 +167,11 @@ public:
private:
void lazyInit() {
if (mData.size() < mTensor.size()) {
if (mData.size() < mNbElts) {
// Need more data, a re-allocation will occur
AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
mDataOwner.reset(new T[mTensor.size()]);
mData = future_std::span<T>(mDataOwner.get(), mTensor.size());
mDataOwner.reset(new T[mNbElts]);
mData = future_std::span<T>(mDataOwner.get(), mNbElts);
}
}
};
......
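
For reference, a direct-use sketch of the reworked implementation interface shown above: an impl is created from a (device, length) pair instead of a Tensor reference, and every copy or pointer accessor takes an element offset. In practice the Tensor class drives these calls; this only illustrates the signatures (the include path and the visibility of DataType through it are assumptions):

#include <memory>
#include "aidge/backend/cpu/data/TensorImpl.hpp"

using namespace Aidge;

void implSketch() {
    // 8-element float buffer on CPU device 0
    std::shared_ptr<TensorImpl> impl = TensorImpl_cpu<float>::create(0, 8);

    const float src[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    impl->copyFromHost(src, 4, 4);                  // write 4 elements starting at element offset 4

    const double dsrc[2] = {5.0, 6.0};
    impl->copyCast(dsrc, DataType::Float64, 2, 0);  // cast-copy 2 doubles into element offset 0

    float* slice = static_cast<float*>(impl->hostPtr(4));  // host pointer to element 4
    (void)slice;  // silence unused-variable warning in this sketch
}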
@@ -14,6 +14,7 @@
#include <catch2/catch_test_macros.hpp>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"
using namespace Aidge;
@@ -57,3 +58,43 @@ TEST_CASE("Tensor creation") {
}
}
}
TEST_CASE("Tensor methods") {
Tensor x = Array3D<int, 2, 2, 2>{{
{{1, 2},
{3, 4}},
{{5, 6},
{7, 8}}
}};
Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
Tensor xFloat =
Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
SECTION("Tensor sharing") {
Tensor xCopyCtor(x);
REQUIRE(xCopyCtor.getImpl() == x.getImpl());
Tensor xEqOp = x;
REQUIRE(xEqOp.getImpl() == x.getImpl());
Tensor xCloned = x.clone();
REQUIRE(xCloned.getImpl() != x.getImpl());
REQUIRE(xCloned == x);
}
SECTION("Tensor extract") {
Tensor y = x.extract({0, 1});
REQUIRE(y.getImpl() == x.getImpl());
REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
REQUIRE(y.isContiguous());
Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
REQUIRE(y2.getImpl() == x.getImpl());
REQUIRE(!y2.isContiguous());
Tensor y3 = y2.clone();
REQUIRE(y3.isContiguous());
REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
}
}