Skip to content
Snippets Groups Projects
Commit ac47767e authored by Maxence Naud's avatar Maxence Naud
Browse files

Merge branch 'dev' into 'main'

version 0.0.3

Closes aidge#112

See merge request !17
parents fd7466df c6878fd6
No related branches found
No related tags found
1 merge request!17version 0.0.3
Pipeline #49675 passed
...@@ -19,7 +19,7 @@ class test_tensor(unittest.TestCase): ...@@ -19,7 +19,7 @@ class test_tensor(unittest.TestCase):
# np_array = np.arange(9).reshape(1,1,3,3) # np_array = np.arange(9).reshape(1,1,3,3)
# # Numpy -> Tensor # # Numpy -> Tensor
# t = aidge_core.Tensor(np_array) # t = aidge_core.Tensor(np_array)
# self.assertEqual(t.dtype(), aidge_core.DataType.Int32) # self.assertEqual(t.dtype(), aidge_core.dtype.int32)
# for i_t, i_n in zip(t, np_array.flatten()): # for i_t, i_n in zip(t, np_array.flatten()):
# self.assertTrue(i_t == i_n) # self.assertTrue(i_t == i_n)
# for i,j in zip(t.dims(), np_array.shape): # for i,j in zip(t.dims(), np_array.shape):
...@@ -41,7 +41,7 @@ class test_tensor(unittest.TestCase): ...@@ -41,7 +41,7 @@ class test_tensor(unittest.TestCase):
# np_array = np.random.rand(1, 1, 3, 3).astype(np.float32) # np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
# # Numpy -> Tensor # # Numpy -> Tensor
# t = aidge_core.Tensor(np_array) # t = aidge_core.Tensor(np_array)
# self.assertEqual(t.dtype(), aidge_core.DataType.Float32) # self.assertEqual(t.dtype(), aidge_core.dtype.float32)
# for i_t, i_n in zip(t, np_array.flatten()): # for i_t, i_n in zip(t, np_array.flatten()):
# self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference # self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference
# for i,j in zip(t.dims(), np_array.shape): # for i,j in zip(t.dims(), np_array.shape):
......
...@@ -31,7 +31,7 @@ public: ...@@ -31,7 +31,7 @@ public:
virtual void setCvMat(const cv::Mat& mat ) = 0; virtual void setCvMat(const cv::Mat& mat ) = 0;
}; };
template <class T> template <class T>
class TensorImpl_opencv : public TensorImpl, public TensorImpl_opencv_ { class TensorImpl_opencv : public TensorImpl, public TensorImpl_opencv_ {
private: private:
// Stores the cv::Mat // Stores the cv::Mat
...@@ -44,7 +44,7 @@ public: ...@@ -44,7 +44,7 @@ public:
static constexpr const char *Backend = "opencv"; static constexpr const char *Backend = "opencv";
TensorImpl_opencv() = delete; TensorImpl_opencv() = delete;
TensorImpl_opencv(DeviceIdx_t device, std::vector<DimSize_t> dims) TensorImpl_opencv(DeviceIdx_t device, std::vector<DimSize_t> dims)
: TensorImpl(Backend, device, dims) : TensorImpl(Backend, device, dims)
{ {
mDims = dims; mDims = dims;
...@@ -82,14 +82,20 @@ public: ...@@ -82,14 +82,20 @@ public:
// native interface // native interface
const cv::Mat & data() const override { return mData; } const cv::Mat & data() const override { return mData; }
inline std::size_t capacity() const noexcept override { return (mData.total() * mData.channels()); }
inline std::size_t scalarSize() const noexcept override final { return sizeof(T); } inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
void zeros() override final {
mData.setTo(cv::Scalar::all(T(0)));
}
void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final { void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
const T* srcT = static_cast<const T *>(src); const T* srcT = static_cast<const T *>(src);
T* dstT = static_cast<T *>(rawPtr(offset)); T* dstT = static_cast<T *>(rawPtr(offset));
AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity"); AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "TensorImpl_opencv<{}>::copy(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported"); AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "TensorImpl_opencv<{}>::copy(): overlapping copy is not supported", typeid(T).name());
std::copy(srcT, srcT + length, dstT); std::copy(srcT, srcT + length, dstT);
} }
...@@ -98,9 +104,9 @@ public: ...@@ -98,9 +104,9 @@ public:
if (length == 0) { if (length == 0) {
return; return;
} }
T* dstT = static_cast<T *>(rawPtr(offset)); T* dstT = static_cast<T *>(rawPtr(offset));
AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity"); AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "TensorImpl_opencv<{}>::copyCast(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
switch (srcDt) switch (srcDt)
{ {
case DataType::Float64: case DataType::Float64:
...@@ -148,15 +154,15 @@ public: ...@@ -148,15 +154,15 @@ public:
dstT); dstT);
break; break;
default: default:
AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type."); AIDGE_THROW_OR_ABORT(std::runtime_error, "TensorImpl_opencv<{}>::copyCast(): unsupported data type {}.", typeid(T).name(), srcDt);
break; break;
} }
} }
void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final { void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
AIDGE_ASSERT(device.first == Backend, "backend must match"); AIDGE_ASSERT(device.first == Backend, "TensorImpl_opencv<{}>::copyFromDevice(): backend must match", typeid(T).name());
AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend"); AIDGE_ASSERT(device.second == 0, "TensorImpl_opencv<{}>::copyFromDevice(): device ({}) cannot be != 0 for CPU backend", typeid(T).name(), device.second);
copy(src, length, offset); copy(src, length, offset);
} }
...@@ -166,7 +172,7 @@ public: ...@@ -166,7 +172,7 @@ public:
void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final { void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
const T* src = static_cast<const T*>(rawPtr(offset)); const T* src = static_cast<const T*>(rawPtr(offset));
AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity"); AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "TensorImpl_opencv<{}>::copyToHost(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
std::copy(src, src + length, static_cast<T *>(dst)); std::copy(src, src + length, static_cast<T *>(dst));
} }
...@@ -176,7 +182,7 @@ public: ...@@ -176,7 +182,7 @@ public:
}; };
const void *rawPtr(NbElts_t offset = 0) const override final { const void *rawPtr(NbElts_t offset = 0) const override final {
AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "accessing uninitialized const rawPtr"); AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "TensorImpl_opencv<{}>::rawPtr(): accessing uninitialized const rawPtr", typeid(T).name());
return (mData.ptr<T>() + offset); return (mData.ptr<T>() + offset);
}; };
...@@ -186,13 +192,13 @@ public: ...@@ -186,13 +192,13 @@ public:
}; };
const void *hostPtr(NbElts_t offset = 0) const override { const void *hostPtr(NbElts_t offset = 0) const override {
AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "accessing uninitialized const hostPtr"); AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "TensorImpl_opencv<{}>::hostPtr(): accessing uninitialized const hostPtr", typeid(T).name());
AIDGE_ASSERT(mData.isContinuous(), "CV Matrix not continuous"); AIDGE_ASSERT(mData.isContinuous(), "TensorImpl_opencv<{}>::hostPtr(): CV Matrix not continuous", typeid(T).name());
return (mData.ptr<T>() + offset); return (mData.ptr<T>() + offset);
}; };
void setCvMat(const cv::Mat& mat) override {mData=mat;} void setCvMat(const cv::Mat& mat) override {mData=mat;}
virtual ~TensorImpl_opencv() = default; virtual ~TensorImpl_opencv() = default;
...@@ -201,7 +207,7 @@ private: ...@@ -201,7 +207,7 @@ private:
void lazyInit() { void lazyInit() {
if ((mData.total() * mData.channels()) < mNbElts) { if ((mData.total() * mData.channels()) < mNbElts) {
// Need more data, a re-allocation will occur // Need more data, a re-allocation will occur
AIDGE_ASSERT(mData.empty() , "trying to enlarge non-owned data"); AIDGE_ASSERT(mData.empty(), "TensorImpl_opencv<{}>: trying to enlarge non-owned data", typeid(T).name());
if (mDims.size() < 3) { if (mDims.size() < 3) {
mData = cv::Mat(((mDims.size() > 1) ? static_cast<int>(mDims[0]) mData = cv::Mat(((mDims.size() > 1) ? static_cast<int>(mDims[0])
......
0.0.2 0.0.3
0% Loading — or an error occurred.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment