Commit 216fa596 authored by Thibault Allenet

Update TensorImpl_opencv to the span + unique_ptr design of TensorImpl, and add copyCast / copyFromHost / copyToHost / copyFromDevice

parent b4d1fdda
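The commit replaces the implementation's directly-owned cv::Mat with a unique_ptr that owns the storage plus a non-owning span view over it, reallocating lazily and re-pointing the view after each allocation. Below is a minimal, dependency-free sketch of that ownership pattern; std::vector stands in for cv::Mat and every name is illustrative, not the Aidge API:

// Sketch: unique_ptr owns the storage; a raw pointer + size (standing in
// for future_std::span) is the non-owning view, re-synced after every
// (re)allocation. As in lazyInit(), old contents are discarded on growth.
#include <cstddef>
#include <memory>
#include <vector>

class OwnedBuffer {
    std::unique_ptr<std::vector<float>> mDataOwner; // owns the storage
    float* mData = nullptr;                         // non-owning view
    std::size_t mSize = 0;

public:
    // Allocate lazily, only when current capacity is too small,
    // then re-point the view at the new owner.
    float* rawPtr(std::size_t required) {
        if (mSize < required) {
            mDataOwner.reset(new std::vector<float>(required));
            mData = mDataOwner->data();
            mSize = required;
        }
        return mData;
    }
};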
@@ -7,12 +7,15 @@
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/future_std/span.hpp"
namespace {
template <typename T> struct OpenCvType { static const int type; };
template <> const int OpenCvType<char>::type = CV_8SC1;
template <> const int OpenCvType<signed char>::type = CV_8SC1;
template <> const int OpenCvType<short>::type = CV_16SC1;
template <> const int OpenCvType<int>::type = CV_32SC1;
template <> const int OpenCvType<unsigned char>::type = CV_8UC1;
@@ -24,34 +27,36 @@ template <> const int OpenCvType<double>::type = CV_64FC1;
namespace Aidge {
class TensorImpl_opencv_ {
public:
    virtual const cv::Mat& getCvMat() const = 0;
    virtual void setCvMat(const cv::Mat& mat) = 0;
    // Virtual destructor so the interface can be used polymorphically
    virtual ~TensorImpl_opencv_() = default;
};
template <class T> class TensorImpl_opencv : public TensorImpl, public TensorImpl_opencv_ {
private:
    const Tensor &mTensor; // Impl needs to access Tensor information, but is not
                           // supposed to change it!
    future_std::span<cv::Mat> mData;
    std::unique_ptr<cv::Mat> mDataOwner = std::unique_ptr<cv::Mat>(new cv::Mat(0, 0, OpenCvType<T>::type));

public:
    static constexpr const char *Backend = "opencv";

    TensorImpl_opencv(const Tensor &tensor)
        : TensorImpl(Backend), mTensor(tensor) {}
    bool operator==(const TensorImpl &otherImpl) const override final {
        // Create iterators for both matrices
        cv::MatConstIterator_<T> it1 = mDataOwner->begin<T>();
        const future_std::span<cv::Mat> tmp = reinterpret_cast<const TensorImpl_opencv<T> &>(otherImpl).data();
        const cv::Mat otherData = *(tmp.data());
        cv::MatConstIterator_<T> it2 = otherData.begin<T>();

        // Iterate over the elements and compare them
        for (; it1 != mDataOwner->end<T>(); ++it1, ++it2) {
            if (*it1 != *it2) {
                return false;
            }
        }
@@ -59,111 +64,175 @@ public:
        return true;
    }
    static std::unique_ptr<TensorImpl_opencv> create(const Tensor &tensor) {
        return std::make_unique<TensorImpl_opencv<T>>(tensor);
    }

    // native interface
    const future_std::span<cv::Mat> data() const { return mData; }

    std::size_t scalarSize() const override { return sizeof(T); }

    std::size_t size() const override { return mData.size(); }

    void setDevice(DeviceIdx_t device) override {
        AIDGE_ASSERT(device == 0, "device cannot be != 0 for Opencv backend");
    }

    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override {
        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
        std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
                  static_cast<T *>(rawPtr()) + offset);
    }
    void copyCast(const void *src, NbElts_t length, const DataType srcDt) override {
        if (length == 0) {
            return;
        }

        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
        if (srcDt == DataType::Float64) {
            std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
                      static_cast<T *>(rawPtr()));
        }
        else if (srcDt == DataType::Float32) {
            std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
                      static_cast<T *>(rawPtr()));
        }
        else if (srcDt == DataType::Float16) {
            std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
                      static_cast<T *>(rawPtr()));
        }
        else if (srcDt == DataType::Int64) {
            std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
                      static_cast<T *>(rawPtr()));
        }
        else if (srcDt == DataType::UInt64) {
            std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
                      static_cast<T *>(rawPtr()));
        }
        else if (srcDt == DataType::Int32) {
            std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
                      static_cast<T *>(rawPtr()));
        }
        else if (srcDt == DataType::UInt32) {
            std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
                      static_cast<T *>(rawPtr()));
        }
        else if (srcDt == DataType::Int16) {
            std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
                      static_cast<T *>(rawPtr()));
        }
        else if (srcDt == DataType::UInt16) {
            std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
                      static_cast<T *>(rawPtr()));
        }
        else if (srcDt == DataType::Int8) {
            std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
                      static_cast<T *>(rawPtr()));
        }
        else if (srcDt == DataType::UInt8) {
            std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
                      static_cast<T *>(rawPtr()));
        }
        else {
            AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
        }
    }
    void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) override {
        AIDGE_ASSERT(device.first == Backend, "backend must match");
        AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for Opencv backend");
        copy(src, length);
    }

    void copyFromHost(const void *src, NbElts_t length) override {
        copy(src, length);
    }

    void copyToHost(void *dst, NbElts_t length) const override {
        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
        const T* src = static_cast<const T*>(rawPtr());
        std::copy(src, src + length, static_cast<T *>(dst));
    }
    void *rawPtr(NbElts_t offset = 0) override {
        lazyInit();
        return (mData.data()->ptr() + offset * sizeof(T));
    }

    const void *rawPtr(NbElts_t offset = 0) const override {
        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const rawPtr");
        return (mData.data()->ptr() + offset * sizeof(T));
    }

    void *hostPtr(NbElts_t offset = 0) override {
        lazyInit();
        return (mData.data()->ptr() + offset * sizeof(T));
    }

    const void *hostPtr(NbElts_t offset = 0) const override {
        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const hostPtr");
        return (mData.data()->ptr() + offset * sizeof(T));
    }

    const cv::Mat& getCvMat() const override { return *mDataOwner.get(); }

    void setCvMat(const cv::Mat& mat) override {
        mDataOwner.reset(new cv::Mat(mat));
        // Keep the non-owning view in sync with the new owner
        mData = future_std::span<cv::Mat>(mDataOwner.get(), mTensor.size());
    }

    virtual ~TensorImpl_opencv() = default;

private:
    void lazyInit() {
        if (mData.size() < mTensor.size()) {
            // Need more data, a re-allocation will occur
            AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");

            cv::Mat myNewMatrix;
            if (mTensor.nbDims() < 3) {
                myNewMatrix = cv::Mat(((mTensor.nbDims() > 1) ? static_cast<int>(mTensor.dims()[1])
                                       : (mTensor.nbDims() > 0) ? 1
                                                                : 0),
                                      (mTensor.nbDims() > 0) ? static_cast<int>(mTensor.dims()[0]) : 0,
                                      OpenCvType<T>::type);
            } else {
                std::vector<cv::Mat> channels;
                for (std::size_t k = 0; k < mTensor.dims()[2]; ++k) {
                    channels.push_back(cv::Mat(static_cast<int>(mTensor.dims()[1]),
                                               static_cast<int>(mTensor.dims()[0]),
                                               OpenCvType<T>::type));
                }
                cv::merge(channels, myNewMatrix);
            }

            mDataOwner.reset(new cv::Mat(std::move(myNewMatrix)));
            mData = future_std::span<cv::Mat>(mDataOwner.get(), mTensor.size());
        }
    }
};
namespace {
static Registrar<Tensor> registrarTensorImpl_opencv_Float64(
    {"opencv", DataType::Float64}, Aidge::TensorImpl_opencv<double>::create);
static Registrar<Tensor> registrarTensorImpl_opencv_Float32(
    {"opencv", DataType::Float32}, Aidge::TensorImpl_opencv<float>::create);
static Registrar<Tensor> registrarTensorImpl_opencv_Int32(
    {"opencv", DataType::Int32}, Aidge::TensorImpl_opencv<int>::create);
static Registrar<Tensor> registrarTensorImpl_opencv_Int16(
    {"opencv", DataType::Int16}, Aidge::TensorImpl_opencv<int16_t>::create);
static Registrar<Tensor> registrarTensorImpl_opencv_UInt16(
    {"opencv", DataType::UInt16}, Aidge::TensorImpl_opencv<uint16_t>::create);
static Registrar<Tensor> registrarTensorImpl_opencv_Int8(
    {"opencv", DataType::Int8}, Aidge::TensorImpl_opencv<int8_t>::create);
static Registrar<Tensor> registrarTensorImpl_opencv_UInt8(
    {"opencv", DataType::UInt8}, Aidge::TensorImpl_opencv<uint8_t>::create);
} // namespace
} // namespace Aidge
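For orientation, here is a self-contained analogy of the registrar pattern used above: static objects register a factory under a (backend, data type) key at load time, and implementations are later looked up by key. This is a sketch of the general technique only; every name below is invented for illustration and is not the Aidge Registrar API.

// Analogy of registrar-based factory lookup; all names are illustrative.
#include <functional>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

struct Impl { virtual ~Impl() = default; };

using Key = std::pair<std::string, int>;              // (backend, dtype id)
using Factory = std::function<std::unique_ptr<Impl>()>;

std::map<Key, Factory>& registry() {
    static std::map<Key, Factory> r;                   // constructed on first use
    return r;
}

struct Registrar {
    Registrar(Key key, Factory f) { registry().emplace(std::move(key), std::move(f)); }
};

// Registration at namespace scope, like registrarTensorImpl_opencv_Float32:
static Registrar regFloat32({"opencv", 0},
                            [] { return std::make_unique<Impl>(); });

std::unique_ptr<Impl> create(const Key& key) {
    auto it = registry().find(key);
    if (it == registry().end()) throw std::runtime_error("no impl registered");
    return it->second();                               // invoke the factory
}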