
New proposal for handling tensor views

Merged: Olivier BICHLER requested to merge view into dev
Files changed: 4
@@ -29,6 +29,9 @@ void thrust_copy(const half_float::half* srcData, half_float::half* dstData, siz
* class), but whose data type does not need to be known.
*/
class TensorImpl_cuda_ {
protected:
mutable cudnnTensorDescriptor_t mCudnnTensor = nullptr;
public:
/**
* @brief Return the CuDNN tensor descriptor of the tensor.
@@ -36,7 +39,12 @@ public:
* (which is therefore mutable in the derived class).
* @return cudnnTensorDescriptor_t CuDNN tensor descriptor.
*/
virtual const cudnnTensorDescriptor_t& getCudnnTensorDesc() const = 0;
virtual const cudnnTensorDescriptor_t& getCudnnTensorDesc(const Tensor& tensor) const = 0;
virtual ~TensorImpl_cuda_() {
if (mCudnnTensor != nullptr)
cudnnDestroyTensorDescriptor(mCudnnTensor);
}
};
template <class T>
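The two hunks above move the cuDNN descriptor into the type-erased base class and have it built from the Tensor passed as argument, so the implementation no longer needs to capture a Tensor at construction. Below is a minimal sketch (not part of the diff) of the resulting call pattern, assuming the usual Aidge Tensor::getImpl() accessor; the helper name is illustrative only.

// Sketch: obtain a cuDNN descriptor for a Tensor whose backend is "cuda".
// TensorImpl_cuda<T> derives from both TensorImpl and TensorImpl_cuda_, so a
// dynamic cross-cast from the generic impl reaches the CUDA-only interface.
// Requires <memory> plus the Aidge core and CUDA backend headers.
const cudnnTensorDescriptor_t& cudaDescriptorOf(Aidge::Tensor& tensor) {
    auto cudaImpl = std::dynamic_pointer_cast<Aidge::TensorImpl_cuda_>(tensor.getImpl());
    AIDGE_ASSERT(cudaImpl != nullptr, "tensor is not backed by the CUDA backend");
    // The descriptor is created lazily from the dims and strides of the Tensor
    // given here, not from a Tensor stored inside the implementation.
    return cudaImpl->getCudnnTensorDesc(tensor);
}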
@@ -54,119 +62,116 @@ private:
}
private:
const Tensor &mTensor; // Impl needs to access Tensor information, but is not
// supposed to change it!
/// Pointer to the data and its capacity
future_std::span<T> mData;
/// If this instance owns the data, std::unique_ptr manages it
std::unique_ptr<T, decltype(&cudaDelete)> mDataOwner;
mutable cudnnTensorDescriptor_t mCudnnTensor = nullptr;
public:
static constexpr const char *Backend = "cuda";
TensorImpl_cuda(const Tensor &tensor) : TensorImpl(Backend), mTensor(tensor), mDataOwner(nullptr, cudaDelete) {}
TensorImpl_cuda(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length), mDataOwner(nullptr, cudaDelete) {}
bool operator==(const TensorImpl &otherImpl) const override final;
static std::unique_ptr<TensorImpl_cuda> create(const Tensor &tensor) {
return std::make_unique<TensorImpl_cuda<T>>(tensor);
static std::shared_ptr<TensorImpl_cuda> create(DeviceIdx_t device, NbElts_t length) {
return std::make_shared<TensorImpl_cuda<T>>(device, length);
}
// native interface
const future_std::span<T>& data() const { return mData; }
std::size_t size() const override { return mData.size(); }
std::size_t scalarSize() const override { return sizeof(T); }
void setDevice(DeviceIdx_t device) override {
mDevice = device;
}
void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override {
void* dst = static_cast<void*>(static_cast<T*>(rawPtr()) + offset);
CHECK_CUDA_STATUS(cudaMemcpy(dst, src, length * sizeof(T), cudaMemcpyDeviceToDevice));
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
const T* srcT = static_cast<const T *>(src);
T* dstT = static_cast<T *>(rawPtr(offset));
AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
CHECK_CUDA_STATUS(cudaMemcpy(dstT, srcT, length * sizeof(T), cudaMemcpyDeviceToDevice));
}
void copyCast(const void *src, NbElts_t length, const DataType srcDt) override {
void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override {
if (length == 0) {
return;
}
AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
if (srcDt == DataType::Float64) {
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
switch (srcDt) {
case DataType::Float64:
thrust_copy(static_cast<const double*>(src),
static_cast<T*>(rawPtr()),
static_cast<T*>(rawPtr(offset)),
length);
}
else if (srcDt == DataType::Float32) {
break;
case DataType::Float32:
thrust_copy(static_cast<const float*>(src),
static_cast<T*>(rawPtr()),
static_cast<T*>(rawPtr(offset)),
length);
}
else if (srcDt == DataType::Float16) {
break;
case DataType::Float16:
thrust_copy(static_cast<const half_float::half*>(src),
static_cast<T*>(rawPtr()),
static_cast<T*>(rawPtr(offset)),
length);
}
else if (srcDt == DataType::Int64) {
break;
case DataType::Int64:
thrust_copy(static_cast<const int64_t*>(src),
static_cast<T*>(rawPtr()),
static_cast<T*>(rawPtr(offset)),
length);
}
else if (srcDt == DataType::UInt64) {
break;
case DataType::UInt64:
thrust_copy(static_cast<const uint64_t*>(src),
static_cast<T*>(rawPtr()),
static_cast<T*>(rawPtr(offset)),
length);
}
else if (srcDt == DataType::Int32) {
break;
case DataType::Int32:
thrust_copy(static_cast<const int32_t*>(src),
static_cast<T*>(rawPtr()),
static_cast<T*>(rawPtr(offset)),
length);
}
else if (srcDt == DataType::UInt32) {
break;
case DataType::UInt32:
thrust_copy(static_cast<const uint32_t*>(src),
static_cast<T*>(rawPtr()),
static_cast<T*>(rawPtr(offset)),
length);
}
else if (srcDt == DataType::Int16) {
break;
case DataType::Int16:
thrust_copy(static_cast<const int16_t*>(src),
static_cast<T*>(rawPtr()),
static_cast<T*>(rawPtr(offset)),
length);
}
else if (srcDt == DataType::UInt16) {
break;
case DataType::UInt16:
thrust_copy(static_cast<const uint16_t*>(src),
static_cast<T*>(rawPtr()),
static_cast<T*>(rawPtr(offset)),
length);
}
else if (srcDt == DataType::Int8) {
break;
case DataType::Int8:
thrust_copy(static_cast<const int8_t*>(src),
static_cast<T*>(rawPtr()),
static_cast<T*>(rawPtr(offset)),
length);
}
else if (srcDt == DataType::UInt8) {
break;
case DataType::UInt8:
thrust_copy(static_cast<const uint8_t*>(src),
static_cast<T*>(rawPtr()),
static_cast<T*>(rawPtr(offset)),
length);
}
else {
break;
default:
AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
break;
}
}
void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) override {
AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
CHECK_CUDA_STATUS(cudaMemcpy(rawPtr(), src, length * sizeof(T), cudaMemcpyDeviceToDevice));
void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override {
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
CHECK_CUDA_STATUS(cudaMemcpy(rawPtr(offset), src, length * sizeof(T), cudaMemcpyDeviceToDevice));
}
void copyFromHost(const void *src, NbElts_t length) override {
AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
CHECK_CUDA_STATUS(cudaMemcpy(rawPtr(), src, length * sizeof(T), cudaMemcpyHostToDevice));
void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override {
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
CHECK_CUDA_STATUS(cudaMemcpy(rawPtr(offset), src, length * sizeof(T), cudaMemcpyHostToDevice));
}
void copyToHost(void *dst, NbElts_t length) const override {
AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
CHECK_CUDA_STATUS(cudaMemcpy(dst, rawPtr(), length * sizeof(T), cudaMemcpyDeviceToHost));
void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override {
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
CHECK_CUDA_STATUS(cudaMemcpy(dst, rawPtr(offset), length * sizeof(T), cudaMemcpyDeviceToHost));
}
void *rawPtr(NbElts_t offset = 0) override {
@@ -175,30 +180,27 @@ public:
};
const void *rawPtr(NbElts_t offset = 0) const override {
AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const rawPtr");
AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const rawPtr");
return (mData.data() + offset);
};
const cudnnTensorDescriptor_t& getCudnnTensorDesc() const override {
const cudnnTensorDescriptor_t& getCudnnTensorDesc(const Tensor& tensor) const override {
if (mCudnnTensor == nullptr) {
CHECK_CUDNN_STATUS(cudnnCreateTensorDescriptor(&mCudnnTensor));
if (mTensor.size() > 0) {
if (tensor.size() > 0) {
/**
** cuDNN tensors are restricted to having at least 4 dimensions:
** when working with lower-dimensional data, unused dimensions are set to 1.
** Refer to the cudnnSetTensorNdDescriptor documentation at:
** https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html
**/
std::vector<int> dims(mTensor.dims().begin(), mTensor.dims().end());
std::vector<int> dims(tensor.dims().cbegin(), tensor.dims().cend());
std::vector<int> strides(tensor.strides().cbegin(), tensor.strides().cend());
if (dims.size() < 4)
if (dims.size() < 4) {
dims.resize(4, 1);
std::vector<int> strides(dims.size(), 1);
for (size_t dim = 1; dim < dims.size(); ++dim) {
strides[dims.size() - dim - 1] = strides[dims.size() - dim] * dims[dims.size() - dim];
strides.resize(4, 1);
}
CHECK_CUDNN_STATUS(cudnnSetTensorNdDescriptor(mCudnnTensor,
@@ -213,23 +215,20 @@ public:
}
void setRawPtr(void *ptr, NbElts_t length) override final {
AIDGE_ASSERT(length >= mTensor.size(), "trying to set raw pointer of insufficient capacity");
AIDGE_ASSERT(length >= mNbElts, "trying to set raw pointer of insufficient capacity");
mData = future_std::span<T>(static_cast<T *>(ptr), length);
mDataOwner.reset();
};
virtual ~TensorImpl_cuda() {
if (mCudnnTensor != nullptr)
cudnnDestroyTensorDescriptor(mCudnnTensor);
}
virtual ~TensorImpl_cuda() = default;
private:
void lazyInit() {
if (mData.size() < mTensor.size()) {
if (mData.size() < mNbElts) {
// Need more data, a re-allocation will occur
AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
mDataOwner.reset(cudaAlloc(mTensor.size()));
mData = future_std::span<T>(mDataOwner.get(), mTensor.size());
mDataOwner.reset(cudaAlloc(mNbElts));
mData = future_std::span<T>(mDataOwner.get(), mNbElts);
}
}
};
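The net effect on the implementation class is that it no longer stores a Tensor at all: it is constructed from a device index and an element count (mNbElts, inherited from TensorImpl), and every transfer takes an element offset, which is what lets several views address disjoint slices of one shared buffer. A short usage sketch follows, under the assumption that a CUDA device is available and the backend header is on the include path; the buffer sizes and the 512-element offset are arbitrary illustration values, not taken from the MR.

// Sketch: one 1024-element float buffer on device 0, written and read back
// through element offsets, the way a view over the second half of the storage
// would address it. Device memory is allocated lazily on first access, as
// suggested by lazyInit() above.
#include <vector>

int main() {
    auto impl = Aidge::TensorImpl_cuda<float>::create(/*device=*/0, /*length=*/1024);
    std::vector<float> host(512, 1.0f);
    // Fill elements [512, 1024) of the device buffer.
    impl->copyFromHost(host.data(), host.size(), /*offset=*/512);
    std::vector<float> back(512);
    // Read the same slice back to the host.
    impl->copyToHost(back.data(), back.size(), /*offset=*/512);
    return 0;
}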