diff --git a/include/aidge/backend/opencv/data/TensorImpl.hpp b/include/aidge/backend/opencv/data/TensorImpl.hpp
index d0bbb6d57035b5dbc812f5449cb70e77e1244a03..84a5e54540794ce432e518e43d5ed13ea80db082 100644
--- a/include/aidge/backend/opencv/data/TensorImpl.hpp
+++ b/include/aidge/backend/opencv/data/TensorImpl.hpp
@@ -31,27 +31,30 @@ public:
     virtual void setCvMat(const cv::Mat& mat ) = 0;
 };
 
-template <class T> class TensorImpl_opencv : public TensorImpl, public TensorImpl_opencv_ {
+template <class T>
+class TensorImpl_opencv : public TensorImpl, public TensorImpl_opencv_ {
 private:
-    const Tensor &mTensor; // Impl needs to access Tensor information, but is not
-    // supposed to change it!
-
+    // Stores the cv::Mat
     cv::Mat mData;
-
+
+protected:
+    std::vector<DimSize_t> mDims;
+
 public:
     static constexpr const char *Backend = "opencv";
 
     TensorImpl_opencv() = delete;
-    TensorImpl_opencv(const Tensor &tensor)
-        : TensorImpl(Backend), mTensor(tensor)
-    {}
+    TensorImpl_opencv(DeviceIdx_t device, std::vector<DimSize_t> dims)
+        : TensorImpl(Backend, device, dims)
+    {
+        mDims = dims;
+    }
 
     bool operator==(const TensorImpl &otherImpl) const override final {
         // Create iterators for both matrices
         cv::MatConstIterator_<T> it1 = mData.begin<T>();
 
         const cv::Mat & otherData = reinterpret_cast<const TensorImpl_opencv<T> &>(otherImpl).data();
-
         cv::MatConstIterator_<T> it2 = otherData.begin<T>();
 
         // Iterate over the elements and compare them
@@ -63,117 +66,127 @@ public:
         return true;
     }
 
-    static std::unique_ptr<TensorImpl_opencv> create(const Tensor &tensor) {
-        return std::make_unique<TensorImpl_opencv<T>>(tensor);
+    static std::unique_ptr<TensorImpl_opencv> create(DeviceIdx_t device, std::vector<DimSize_t> dims) {
+        return std::make_unique<TensorImpl_opencv<T>>(device, dims);
+    }
+
+    void resize(std::vector<DimSize_t> dims) override {
+        mDims = dims;
+        size_t product = 1;
+        for (size_t num : dims) {
+            product *= num;
+        }
+        mNbElts = product;
     }
 
     // native interface
     const cv::Mat & data() const override { return mData; }
 
-    inline std::size_t scalarSize() const override { return sizeof(T); }
+    inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
 
-    std::size_t size() const override { return mData.total() * mData.channels();}
+    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        const T* srcT = static_cast<const T *>(src);
+        T* dstT = static_cast<T *>(rawPtr(offset));
 
-    void setDevice(DeviceIdx_t device) override {
-        AIDGE_ASSERT(device == 0, "device cannot be != 0 for Opencv backend");
-    }
+        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
+        AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
+        std::copy(srcT, srcT + length, dstT);
 
-    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override {
-        AIDGE_ASSERT(length <= size() || length <= mTensor.size(), "copy length is above capacity");
-        std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
-                  static_cast<T *>(rawPtr()) + offset);
     }
 
-    void copyCast(const void *src, NbElts_t length, const DataType srcDt) override {
+    void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final {
         if (length == 0) {
             return;
         }
-
-        AIDGE_ASSERT(length <= size() || length <= mTensor.size(), "copy length is above capacity");
-        switch (srcDt) {
-            case DataType::Float64:
-                std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+
+        T* dstT = static_cast<T *>(rawPtr(offset));
+        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
+        switch (srcDt)
+        {
+            case DataType::Float64:
+                std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
+                          dstT);
                 break;
             case DataType::Float32:
                 std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Float16:
                 std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Int64:
                 std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::UInt64:
                 std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Int32:
                 std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::UInt32:
                 std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Int16:
                 std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::UInt16:
                 std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Int8:
                 std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::UInt8:
                 std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             default:
                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
+                break;
         }
     }
 
-    void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) override {
+    void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
         AIDGE_ASSERT(device.first == Backend, "backend must match");
         AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
-        copy(src, length);
+        copy(src, length, offset);
     }
 
-    void copyFromHost(const void *src, NbElts_t length) override {
-        copy(src, length);
+    void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        copy(src, length, offset);
     }
 
-    void copyToHost(void *dst, NbElts_t length) const override {
-        AIDGE_ASSERT(length <= size() || length <= mTensor.size(), "copy length is above capacity");
-        const T* src = static_cast<const T*>(rawPtr());
-        std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
-                  static_cast<T *>(dst));
+    void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
+        const T* src = static_cast<const T*>(rawPtr(offset));
+        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
+        std::copy(src, src + length, static_cast<T *>(dst));
     }
 
-    void *rawPtr(NbElts_t offset = 0) override {
+    void *rawPtr(NbElts_t offset = 0) override final {
         lazyInit();
         return (mData.ptr<T>() + offset);
     };
 
-    const void *rawPtr(NbElts_t offset = 0) const override {
-        AIDGE_ASSERT(size() >= mTensor.size(), "accessing uninitialized const rawPtr");
+    const void *rawPtr(NbElts_t offset = 0) const override final {
+        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "accessing uninitialized const rawPtr");
         return (mData.ptr<T>() + offset);
     };
 
-    void *hostPtr(NbElts_t offset = 0) override {
+    void *hostPtr(NbElts_t offset = 0) override final {
         lazyInit();
         return (mData.ptr<T>() + offset);
     };
 
     const void *hostPtr(NbElts_t offset = 0) const override {
-        AIDGE_ASSERT(size() >= mTensor.size(), "accessing uninitialized const hostPtr");
+        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "accessing uninitialized const hostPtr");
         AIDGE_ASSERT(mData.isContinuous(), "CV Matrix not continuous");
         return (mData.ptr<T>() + offset);
     };
@@ -186,28 +199,27 @@ public:
 
 private:
     void lazyInit() {
-        if (size() < mTensor.size()) {
+        if ((mData.total() * mData.channels()) < mNbElts) {
             // Need more data, a re-allocation will occur
             AIDGE_ASSERT(mData.empty() , "trying to enlarge non-owned data");
 
-            if (mTensor.nbDims() < 3) {
-                mData = cv::Mat(((mTensor.nbDims() > 1) ? static_cast<int>(mTensor.dims()[1])
-                                : (mTensor.nbDims() > 0) ? 1
+            if (mDims.size() < 3) {
+                mData = cv::Mat(((mDims.size() > 1) ? static_cast<int>(mDims[0])
+                                : (mDims.size() > 0) ? 1
                                 : 0),
-                                (mTensor.nbDims() > 0) ? static_cast<int>(mTensor.dims()[0]) : 0,
+                                (mDims.size() > 0) ? static_cast<int>(mDims[1]) : 0,
                                 detail::CV_C1_CPP_v<T>);
             } else {
                 std::vector<cv::Mat> channels;
 
-                for (std::size_t k = 0; k < mTensor.dims()[2]; ++k) {
-                    channels.push_back(cv::Mat(static_cast<int>(mTensor.dims()[1]),
-                                               static_cast<int>(mTensor.dims()[0]),
+                for (std::size_t k = 0; k < mDims[2]; ++k) {
+                    channels.push_back(cv::Mat(static_cast<int>(mDims[0]),
+                                               static_cast<int>(mDims[1]),
                                                detail::CV_C1_CPP_v<T>));
                 }
 
                 cv::merge(channels, mData);
             }
-
         }
     }
 };
diff --git a/unit_tests/Test_TensorImpl.cpp b/unit_tests/Test_TensorImpl.cpp
index 56c4c238ce299ac6c255a1a608bde566cc9cc655..7ef7d5f7cbffe03b5f642897c2f2256caebf77e4 100644
--- a/unit_tests/Test_TensorImpl.cpp
+++ b/unit_tests/Test_TensorImpl.cpp
@@ -103,4 +103,31 @@ TEST_CASE("Tensor creation opencv", "[Tensor][OpenCV]") {
       REQUIRE_FALSE(x == xFloat);
     }
   }
-}
+
+  SECTION("from const array before backend") {
+    Tensor x = Array3D<int,2,2,2>{
+        {
+            {
+                {1, 2},
+                {3, 4}
+            },
+            {
+                {5, 6},
+                {7, 8}
+            }
+        }};
+    x.setBackend("opencv");
+
+    REQUIRE(x.nbDims() == 3);
+    REQUIRE(x.dims()[0] == 2);
+    REQUIRE(x.dims()[1] == 2);
+    REQUIRE(x.dims()[2] == 2);
+    REQUIRE(x.size() == 8);
+
+    REQUIRE(x.get<int>({0,0,0}) == 1);
+    REQUIRE(x.get<int>({0,0,1}) == 2);
+    REQUIRE(x.get<int>({0,1,1}) == 4);
+    REQUIRE(x.get<int>({1,1,1}) == 8);
+  }
+
+}
\ No newline at end of file
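Reviewer note (not part of the patch): a minimal, hypothetical harness sketching the reworked interface. The impl is now built from a (device, dims) pair instead of holding a const Tensor & back-reference, and capacity checks compare against the impl's own mNbElts bookkeeping. It assumes only the members visible in this diff (create, copyFromHost, copyToHost, resize), the Aidge namespace, and the header touched above; the main function and sizes are purely illustrative.

#include <cassert>
#include <vector>

#include "aidge/backend/opencv/data/TensorImpl.hpp"

int main() {
    using namespace Aidge;

    // The factory now takes (device, dims) instead of a Tensor back-reference.
    auto impl = TensorImpl_opencv<float>::create(0, {2, 3, 3});

    // Host -> impl -> host round trip; the first rawPtr() call inside copy()
    // runs lazyInit(), which merges three 2x3 single-channel cv::Mat planes.
    std::vector<float> in(18, 1.5f), out(18, 0.0f);
    impl->copyFromHost(in.data(), in.size());
    impl->copyToHost(out.data(), out.size());
    assert(out == in);

    // resize() only updates the mDims/mNbElts bookkeeping; the cv::Mat is
    // still (re)allocated lazily on the next rawPtr()/hostPtr() access.
    impl->resize({3, 3, 3});
    return 0;
}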