Change TensorImpl OpenCV `future_std::span<cv::Mat>` to `cv::Mat`

Merged: Thibault Allenet requested to merge `TensorImpl` into `dev`
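
This MR replaces the backend's non-owning `future_std::span<cv::Mat>` plus a separate `std::unique_ptr<cv::Mat> mDataOwner` with a single owning `cv::Mat mData`, and switches construction from a `Tensor` reference to an explicit `(DeviceIdx_t, dims)` pair. One likely motivation (my reading, not stated in the MR): a `cv::Mat` header is already reference-counted, so wrapping it in a span/owner pair is redundant. A minimal standalone sketch of that sharing behavior, not part of the MR:

```cpp
#include <cassert>
#include <opencv2/core.hpp>

int main() {
    // A cv::Mat header reference-counts its buffer: copying the header
    // shares the data instead of duplicating it.
    cv::Mat a(2, 3, CV_32FC1, cv::Scalar(0.f));
    cv::Mat b = a;                      // shallow copy, shared buffer
    b.at<float>(0, 0) = 42.f;           // write through one header...
    assert(a.at<float>(0, 0) == 42.f);  // ...is visible through the other
    return 0;
}
```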
Files changed: 8
```diff
@@ -27,35 +27,38 @@ namespace Aidge {
 class TensorImpl_opencv_ {
 public:
     virtual const cv::Mat& getCvMat() const = 0;
+    virtual const cv::Mat& data() const = 0;
     virtual void setCvMat(const cv::Mat& mat) = 0;
 };
-template <class T> class TensorImpl_opencv : public TensorImpl, public TensorImpl_opencv_ {
+template <class T>
+class TensorImpl_opencv : public TensorImpl, public TensorImpl_opencv_ {
 private:
-    const Tensor &mTensor; // Impl needs to access Tensor information, but is not
-                           // supposed to change it!
-    future_std::span<cv::Mat> mData;
-    std::unique_ptr<cv::Mat> mDataOwner = std::unique_ptr<cv::Mat>(new cv::Mat(0, 0, detail::CV_C1_CPP_v<T>));
+    // Stores the cv::Mat
+    cv::Mat mData;
+protected:
+    std::vector<DimSize_t> mDims;
 public:
     static constexpr const char *Backend = "opencv";
-    TensorImpl_opencv(const Tensor &tensor)
-        : TensorImpl(Backend), mTensor(tensor) {}
+    TensorImpl_opencv() = delete;
+    TensorImpl_opencv(DeviceIdx_t device, std::vector<DimSize_t> dims)
+        : TensorImpl(Backend, device, dims)
+    {
+        mDims = dims;
+    }
     bool operator==(const TensorImpl &otherImpl) const override final {
         // Create iterators for both matrices
-        cv::MatConstIterator_<T> it1 = mDataOwner->begin<T>();
-        const future_std::span<cv::Mat> tmp = reinterpret_cast<const TensorImpl_opencv<T> &>(otherImpl).data();
-        const cv::Mat otherData = *(tmp.data());
+        cv::MatConstIterator_<T> it1 = mData.begin<T>();
+        const cv::Mat& otherData = reinterpret_cast<const TensorImpl_opencv<T> &>(otherImpl).data();
         cv::MatConstIterator_<T> it2 = otherData.begin<T>();
         // Iterate over the elements and compare them
-        for (; it1 != mDataOwner->end<T>(); ++it1, ++it2) {
+        for (; it1 != mData.end<T>(); ++it1, ++it2) {
             if (*it1 != *it2) {
                 return false;
             }
```
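
For context on the comparison above: the rewritten `operator==` walks both matrices with `cv::MatConstIterator_<T>`. A self-contained sketch of the same pattern (the `elementsEqual` helper is illustrative, not part of the MR; it assumes both matrices hold elements of type `T` and have the same total number of elements):

```cpp
#include <opencv2/core.hpp>

// Element-wise equality in the style of operator== above.
template <typename T>
bool elementsEqual(const cv::Mat& lhs, const cv::Mat& rhs) {
    cv::MatConstIterator_<T> it1 = lhs.begin<T>();
    cv::MatConstIterator_<T> it2 = rhs.begin<T>();
    for (; it1 != lhs.end<T>(); ++it1, ++it2) {
        if (*it1 != *it2) {
            return false;
        }
    }
    return true;
}
```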
```diff
@@ -63,157 +66,160 @@ public:
         return true;
     }
-    static std::unique_ptr<TensorImpl_opencv> create(const Tensor &tensor) {
-        return std::make_unique<TensorImpl_opencv<T>>(tensor);
+    static std::unique_ptr<TensorImpl_opencv> create(DeviceIdx_t device, std::vector<DimSize_t> dims) {
+        return std::make_unique<TensorImpl_opencv<T>>(device, dims);
     }
+    void resize(std::vector<DimSize_t> dims) override {
+        mDims = dims;
+        size_t product = 1;
+        for (size_t num : dims) {
+            product *= num;
+        }
+        mNbElts = product;
+    }
     // native interface
-    const future_std::span<cv::Mat> data() const { return mData; }
+    const cv::Mat& data() const override { return mData; }
-    inline std::size_t scalarSize() const override { return sizeof(T); }
+    inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
-    inline std::size_t size() const override { return mData.size(); }
+    void setDevice(DeviceIdx_t device) override {
+        AIDGE_ASSERT(device == 0, "device cannot be != 0 for Opencv backend");
+    }
-    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override {
-        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
-        std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
-                  static_cast<T *>(rawPtr()) + offset);
+    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        const T* srcT = static_cast<const T *>(src);
+        T* dstT = static_cast<T *>(rawPtr(offset));
+        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
+        AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
+        std::copy(srcT, srcT + length, dstT);
     }
-    void copyCast(const void *src, NbElts_t length, const DataType srcDt) override {
+    void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final {
         if (length == 0) {
             return;
         }
-        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
-        switch (srcDt) {
+        T* dstT = static_cast<T *>(rawPtr(offset));
+        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
+        switch (srcDt)
+        {
             case DataType::Float64:
                 std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Float32:
                 std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Float16:
                 std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Int64:
                 std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::UInt64:
                 std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Int32:
                 std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::UInt32:
                 std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Int16:
                 std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::UInt16:
                 std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Int8:
                 std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::UInt8:
                 std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             default:
                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
                 break;
         }
     }
-    void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) override {
+    void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
         AIDGE_ASSERT(device.first == Backend, "backend must match");
         AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
-        copy(src, length);
+        copy(src, length, offset);
     }
-    void copyFromHost(const void *src, NbElts_t length) override {
-        copy(src, length);
+    void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        copy(src, length, offset);
     }
-    void copyToHost(void *dst, NbElts_t length) const override {
-        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
-        const T* src = static_cast<const T*>(rawPtr());
-        std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
-                  static_cast<T *>(dst));
+    void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
+        const T* src = static_cast<const T*>(rawPtr(offset));
+        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
+        std::copy(src, src + length, static_cast<T *>(dst));
     }
-    void *rawPtr(NbElts_t offset = 0) override {
+    void *rawPtr(NbElts_t offset = 0) override final {
         lazyInit();
-        return (mData.data()->ptr() + offset*sizeof(T));
+        return (mData.ptr<T>() + offset);
     };
-    const void *rawPtr(NbElts_t offset = 0) const override {
-        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const rawPtr");
-        return (mData.data()->ptr() + offset*sizeof(T));
+    const void *rawPtr(NbElts_t offset = 0) const override final {
+        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "accessing uninitialized const rawPtr");
+        return (mData.ptr<T>() + offset);
     };
-    void *hostPtr(NbElts_t offset = 0) override {
+    void *hostPtr(NbElts_t offset = 0) override final {
         lazyInit();
-        std::cout << *reinterpret_cast<T *>(mData.data()->ptr()) + offset << std::endl;
-        return (mData.data()->ptr() + offset*sizeof(T));
+        return (mData.ptr<T>() + offset);
     };
     const void *hostPtr(NbElts_t offset = 0) const override {
-        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const hostPtr");
-        return (mData.data()->ptr() + offset*sizeof(T));
+        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "accessing uninitialized const hostPtr");
+        AIDGE_ASSERT(mData.isContinuous(), "CV Matrix not continuous");
+        return (mData.ptr<T>() + offset);
     };
-    const cv::Mat& getCvMat() const override { return *mDataOwner.get(); }
-    void setCvMat(const cv::Mat& mat) override {mDataOwner.reset(new cv::Mat(std::move(mat)));}
+    const cv::Mat& getCvMat() const override { return mData; }
+    void setCvMat(const cv::Mat& mat) override { mData = mat; }
     virtual ~TensorImpl_opencv() = default;
 private:
     void lazyInit() {
-        if (mData.size() < mTensor.size()) {
+        if ((mData.total() * mData.channels()) < mNbElts) {
             // Need more data, a re-allocation will occur
-            AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
-            cv::Mat myNewMatrix;
-            if (mTensor.nbDims() < 3) {
-                myNewMatrix = cv::Mat(((mTensor.nbDims() > 1) ? static_cast<int>(mTensor.dims()[1])
-                                       : (mTensor.nbDims() > 0) ? 1
+            AIDGE_ASSERT(mData.empty(), "trying to enlarge non-owned data");
+            if (mDims.size() < 3) {
+                mData = cv::Mat(((mDims.size() > 1) ? static_cast<int>(mDims[0])
+                                 : (mDims.size() > 0) ? 1
                                  : 0),
-                                (mTensor.nbDims() > 0) ? static_cast<int>(mTensor.dims()[0]) : 0,
+                                (mDims.size() > 0) ? static_cast<int>(mDims[1]) : 0,
                                 detail::CV_C1_CPP_v<T>);
             } else {
                 std::vector<cv::Mat> channels;
-                for (std::size_t k = 0; k < mTensor.dims()[2]; ++k) {
-                    channels.push_back(cv::Mat(static_cast<int>(mTensor.dims()[1]),
-                                               static_cast<int>(mTensor.dims()[0]),
+                for (std::size_t k = 0; k < mDims[2]; ++k) {
+                    channels.push_back(cv::Mat(static_cast<int>(mDims[0]),
+                                               static_cast<int>(mDims[1]),
                                                detail::CV_C1_CPP_v<T>));
                 }
-                cv::merge(channels, myNewMatrix);
+                cv::merge(channels, mData);
             }
-            mDataOwner.reset(new cv::Mat(std::forward<cv::Mat>(myNewMatrix)));
-            mData = future_std::span<cv::Mat>(mDataOwner.get(), mTensor.size());
         }
     }
 };
```
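
A sketch of how the reworked implementation might be driven, based only on the signatures visible in this diff (the `example` function, the include path, and the `{4, 3}` shape are illustrative; Aidge's registration machinery is omitted):

```cpp
#include <vector>
#include "aidge/backend/opencv/data/TensorImpl.hpp"  // path illustrative

void example() {
    // New factory: device index + explicit dims instead of a Tensor reference.
    auto impl = Aidge::TensorImpl_opencv<float>::create(0, {4, 3});
    impl->resize({4, 3});  // recomputes mNbElts from the dims

    std::vector<float> host(4 * 3, 1.0f);
    // copyFromHost -> copy -> rawPtr -> lazyInit(), which allocates the
    // backing 4x3 single-channel cv::Mat on first access.
    impl->copyFromHost(host.data(), host.size());

    const cv::Mat& m = impl->data();  // owning cv::Mat, CV_32FC1, 4x3
    (void)m;
}
```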