diff --git a/aidge_backend_opencv/unit_tests/test_tensor.py b/aidge_backend_opencv/unit_tests/test_tensor.py
index a47e9a9812b45f71a3a859ddb04a7ce586f0500a..a3cf628588dcaa9ac479f6a61ced9dad624dc75c 100644
--- a/aidge_backend_opencv/unit_tests/test_tensor.py
+++ b/aidge_backend_opencv/unit_tests/test_tensor.py
@@ -19,7 +19,7 @@ class test_tensor(unittest.TestCase):
     #     np_array = np.arange(9).reshape(1,1,3,3)
     #     # Numpy -> Tensor
     #     t = aidge_core.Tensor(np_array)
-    #     self.assertEqual(t.dtype(), aidge_core.DataType.Int32)
+    #     self.assertEqual(t.dtype(), aidge_core.dtype.int32)
     #     for i_t, i_n in zip(t, np_array.flatten()):
     #         self.assertTrue(i_t == i_n)
     #     for i,j in zip(t.dims(), np_array.shape):
@@ -41,7 +41,7 @@ class test_tensor(unittest.TestCase):
     #     np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
     #     # Numpy -> Tensor
     #     t = aidge_core.Tensor(np_array)
-    #     self.assertEqual(t.dtype(), aidge_core.DataType.Float32)
+    #     self.assertEqual(t.dtype(), aidge_core.dtype.float32)
     #     for i_t, i_n in zip(t, np_array.flatten()):
     #         self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference
     #     for i,j in zip(t.dims(), np_array.shape):
diff --git a/include/aidge/backend/opencv/data/TensorImpl.hpp b/include/aidge/backend/opencv/data/TensorImpl.hpp
index 84a5e54540794ce432e518e43d5ed13ea80db082..4b548d2157219a3172b947013e4e53faee65d31a 100644
--- a/include/aidge/backend/opencv/data/TensorImpl.hpp
+++ b/include/aidge/backend/opencv/data/TensorImpl.hpp
@@ -31,7 +31,7 @@ public:
     virtual void setCvMat(const cv::Mat& mat ) = 0;
 };

-template <class T> 
+template <class T>
 class TensorImpl_opencv : public TensorImpl, public TensorImpl_opencv_ {
 private:
     // Stores the cv::Mat
@@ -44,7 +44,7 @@ public:
     static constexpr const char *Backend = "opencv";

     TensorImpl_opencv() = delete;
-    TensorImpl_opencv(DeviceIdx_t device, std::vector<DimSize_t> dims) 
+    TensorImpl_opencv(DeviceIdx_t device, std::vector<DimSize_t> dims)
     : TensorImpl(Backend, device, dims)
     {
         mDims = dims;
@@ -82,14 +82,20 @@ public:
     // native interface
     const cv::Mat & data() const override { return mData; }

+    inline std::size_t capacity() const noexcept override { return (mData.total() * mData.channels()); }
+
     inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }

+    void zeros() override final {
+        mData.setTo(cv::Scalar::all(T(0)));
+    }
+
     void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
         const T* srcT = static_cast<const T *>(src);
         T* dstT = static_cast<T *>(rawPtr(offset));

-        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
-        AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
+        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "TensorImpl_opencv<{}>::copy(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
+        AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "TensorImpl_opencv<{}>::copy(): overlapping copy is not supported", typeid(T).name());
         std::copy(srcT, srcT + length, dstT);
     }

@@ -98,9 +104,9 @@ public:
         if (length == 0) {
             return;
         }
-        
+
         T* dstT = static_cast<T *>(rawPtr(offset));
-        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
+        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "TensorImpl_opencv<{}>::copyCast(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
         switch (srcDt)
         {
             case DataType::Float64:
@@ -148,15 +154,15 @@ public:
                         dstT);
                 break;
             default:
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "TensorImpl_opencv<{}>::copyCast(): unsupported data type {}.", typeid(T).name(), srcDt);
                 break;
         }
     }


     void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
-        AIDGE_ASSERT(device.first == Backend, "backend must match");
-        AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
+        AIDGE_ASSERT(device.first == Backend, "TensorImpl_opencv<{}>::copyFromDevice(): backend must match", typeid(T).name());
+        AIDGE_ASSERT(device.second == 0, "TensorImpl_opencv<{}>::copyFromDevice(): device ({}) cannot be != 0 for CPU backend", typeid(T).name(), device.second);
         copy(src, length, offset);
     }

@@ -166,7 +172,7 @@ public:

     void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
         const T* src = static_cast<const T*>(rawPtr(offset));
-        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
+        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "TensorImpl_opencv<{}>::copyToHost(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
         std::copy(src, src + length, static_cast<T *>(dst));
     }

@@ -176,7 +182,7 @@ public:
     };

     const void *rawPtr(NbElts_t offset = 0) const override final {
-        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "accessing uninitialized const rawPtr");
+        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "TensorImpl_opencv<{}>::rawPtr(): accessing uninitialized const rawPtr", typeid(T).name());
         return (mData.ptr<T>() + offset);
     };

@@ -186,13 +192,13 @@ public:


     const void *hostPtr(NbElts_t offset = 0) const override {
-        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "accessing uninitialized const hostPtr");
-        AIDGE_ASSERT(mData.isContinuous(), "CV Matrix not continuous");
+        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "TensorImpl_opencv<{}>::hostPtr(): accessing uninitialized const hostPtr", typeid(T).name());
+        AIDGE_ASSERT(mData.isContinuous(), "TensorImpl_opencv<{}>::hostPtr(): CV Matrix not continuous", typeid(T).name());
         return (mData.ptr<T>() + offset);
     };

     void setCvMat(const cv::Mat& mat) override {mData=mat;}
-    
+


     virtual ~TensorImpl_opencv() = default;

@@ -201,7 +207,7 @@ private:
     void lazyInit() {
         if ((mData.total() * mData.channels()) < mNbElts) {
             // Need more data, a re-allocation will occur
-            AIDGE_ASSERT(mData.empty() , "trying to enlarge non-owned data");
+            AIDGE_ASSERT(mData.empty(), "TensorImpl_opencv<{}>: trying to enlarge non-owned data", typeid(T).name());
             if (mDims.size() < 3) {
                 mData = cv::Mat(((mDims.size() > 1)
                                     ? static_cast<int>(mDims[0])
diff --git a/version.txt b/version.txt
index 4e379d2bfeab6461d0455bf5bbb8792845d9bbea..bcab45af15a0f1b0166daf8cbf18b17cd8649277 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.0.2
+0.0.3