Commit ab2fbc0f authored by Thibault Allenet

Merge branch 'TensorImpl' into 'dev'

Change tensorimpl opencv `future_std::span<cv::Mat>` to `cv::Mat`

See merge request !4
Parents: 2bc94a09, 1e3ce9e5
2 related merge requests: !10 "Update backend_opencv with modifications from aidge_core", !4 "Change tensorimpl opencv `future_std::span<cv::Mat>` to `cv::Mat`"
Pipeline #39329 failed
@@ -27,35 +27,38 @@ namespace Aidge {
 class TensorImpl_opencv_ {
 public:
-    virtual const cv::Mat& getCvMat() const = 0;
+    virtual const cv::Mat& data() const = 0;
     virtual void setCvMat(const cv::Mat& mat ) = 0;
 };
 
-template <class T> class TensorImpl_opencv : public TensorImpl, public TensorImpl_opencv_ {
+template <class T>
+class TensorImpl_opencv : public TensorImpl, public TensorImpl_opencv_ {
 private:
-    const Tensor &mTensor; // Impl needs to access Tensor information, but is not
-                           // supposed to change it!
-    future_std::span<cv::Mat> mData;
-    std::unique_ptr<cv::Mat> mDataOwner = std::unique_ptr<cv::Mat>(new cv::Mat(0, 0, detail::CV_C1_CPP_v<T>));
+    // Stores the cv::Mat
+    cv::Mat mData;
+
+protected:
+    std::vector<DimSize_t> mDims;
 
 public:
     static constexpr const char *Backend = "opencv";
 
-    TensorImpl_opencv(const Tensor &tensor)
-        : TensorImpl(Backend), mTensor(tensor) {}
+    TensorImpl_opencv() = delete;
+    TensorImpl_opencv(DeviceIdx_t device, std::vector<DimSize_t> dims)
+        : TensorImpl(Backend, device, dims)
+    {
+        mDims = dims;
+    }
 
     bool operator==(const TensorImpl &otherImpl) const override final {
         // Create iterators for both matrices
-        cv::MatConstIterator_<T> it1 = mDataOwner->begin<T>();
-        const future_std::span<cv::Mat> tmp = reinterpret_cast<const TensorImpl_opencv<T> &>(otherImpl).data();
-        const cv::Mat otherData = *(tmp.data());
+        cv::MatConstIterator_<T> it1 = mData.begin<T>();
+        const cv::Mat & otherData = reinterpret_cast<const TensorImpl_opencv<T> &>(otherImpl).data();
         cv::MatConstIterator_<T> it2 = otherData.begin<T>();
 
         // Iterate over the elements and compare them
-        for (; it1 != mDataOwner->end<T>(); ++it1, ++it2) {
+        for (; it1 != mData.end<T>(); ++it1, ++it2) {
             if (*it1 != *it2) {
                 return false;
             }
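For orientation, a minimal usage sketch of the reworked class (not part of this commit; the include path and the use of device index 0 are assumptions):

```cpp
// Hypothetical sketch: the implementation is now built from a device index and a
// dimension vector instead of a Tensor reference, and owns its cv::Mat directly.
#include <memory>
#include <vector>
#include "aidge/backend/opencv/data/TensorImpl.hpp"  // assumed include path

using namespace Aidge;

void sketch() {
    // Factory with the new (device, dims) signature; the wrapped cv::Mat is
    // allocated lazily on the first rawPtr()/hostPtr() access (see lazyInit() below).
    std::unique_ptr<TensorImpl_opencv<float>> impl =
        TensorImpl_opencv<float>::create(0, std::vector<DimSize_t>{28, 28});
}
```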
@@ -63,157 +66,160 @@ public:
         return true;
     }
 
-    static std::unique_ptr<TensorImpl_opencv> create(const Tensor &tensor) {
-        return std::make_unique<TensorImpl_opencv<T>>(tensor);
+    static std::unique_ptr<TensorImpl_opencv> create(DeviceIdx_t device, std::vector<DimSize_t> dims) {
+        return std::make_unique<TensorImpl_opencv<T>>(device, dims);
+    }
+
+    void resize(std::vector<DimSize_t> dims) override{
+        mDims = dims;
+        size_t product = 1;
+        for (size_t num : dims) {
+            product *= num;
+        }
+        mNbElts = product;
     }
 
     // native interface
-    const future_std::span<cv::Mat> data() const { return mData; }
+    const cv::Mat & data() const override { return mData; }
 
-    inline std::size_t scalarSize() const override { return sizeof(T); }
+    inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
 
-    inline std::size_t size() const override { return mData.size(); }
-
-    void setDevice(DeviceIdx_t device) override {
-        AIDGE_ASSERT(device == 0, "device cannot be != 0 for Opencv backend");
-    }
-
-    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override {
-        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
-        std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
-                  static_cast<T *>(rawPtr()) + offset);
+    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        const T* srcT = static_cast<const T *>(src);
+        T* dstT = static_cast<T *>(rawPtr(offset));
+        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
+        AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
+        std::copy(srcT, srcT + length, dstT);
     }
 
-    void copyCast(const void *src, NbElts_t length, const DataType srcDt) override {
+    void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final{
         if (length == 0) {
             return;
         }
 
-        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
-        switch (srcDt) {
+        T* dstT = static_cast<T *>(rawPtr(offset));
+        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
+        switch (srcDt)
+        {
             case DataType::Float64:
                 std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Float32:
                 std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Float16:
                 std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Int64:
                 std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::UInt64:
                 std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Int32:
                 std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::UInt32:
                 std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Int16:
                 std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::UInt16:
                 std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::Int8:
                 std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             case DataType::UInt8:
                 std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
-                          static_cast<T *>(rawPtr()));
+                          dstT);
                 break;
             default:
                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
-                break;
         }
     }
 
-    void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) override {
+    void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
         AIDGE_ASSERT(device.first == Backend, "backend must match");
         AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
-        copy(src, length);
+        copy(src, length, offset);
     }
 
-    void copyFromHost(const void *src, NbElts_t length) override {
-        copy(src, length);
+    void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        copy(src, length, offset);
     }
 
-    void copyToHost(void *dst, NbElts_t length) const override {
-        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
-        const T* src = static_cast<const T*>(rawPtr());
-        std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
-                  static_cast<T *>(dst));
+    void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
+        const T* src = static_cast<const T*>(rawPtr(offset));
+        AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
+        std::copy(src, src + length, static_cast<T *>(dst));
     }
 
-    void *rawPtr(NbElts_t offset = 0) override {
+    void *rawPtr(NbElts_t offset = 0) override final {
         lazyInit();
-        return (mData.data()->ptr() + offset*sizeof(T));
+        return (mData.ptr<T>() + offset);
     };
 
-    const void *rawPtr(NbElts_t offset = 0) const override {
-        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const rawPtr");
-        return (mData.data()->ptr() + offset*sizeof(T));
+    const void *rawPtr(NbElts_t offset = 0) const override final {
+        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "accessing uninitialized const rawPtr");
+        return (mData.ptr<T>() + offset);
     };
 
-    void *hostPtr(NbElts_t offset = 0) override {
+    void *hostPtr(NbElts_t offset = 0) override final {
         lazyInit();
-        std::cout << *reinterpret_cast<T *>(mData.data()->ptr()) + offset << std::endl;
-        return (mData.data()->ptr() + offset*sizeof(T));
+        return (mData.ptr<T>() + offset);
     };
 
     const void *hostPtr(NbElts_t offset = 0) const override {
-        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const hostPtr");
-        return (mData.data()->ptr() + offset*sizeof(T));
+        AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "accessing uninitialized const hostPtr");
+        AIDGE_ASSERT(mData.isContinuous(), "CV Matrix not continuous");
+        return (mData.ptr<T>() + offset);
     };
 
-    const cv::Mat& getCvMat() const override { return *mDataOwner.get(); }
-    void setCvMat(const cv::Mat& mat) override {mDataOwner.reset(new cv::Mat(std::move(mat)));}
+    void setCvMat(const cv::Mat& mat) override {mData=mat;}
 
     virtual ~TensorImpl_opencv() = default;
 
 private:
     void lazyInit() {
-        if (mData.size() < mTensor.size()) {
+        if ((mData.total() * mData.channels()) < mNbElts) {
             // Need more data, a re-allocation will occur
-            AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
-            cv::Mat myNewMatrix;
-            if (mTensor.nbDims() < 3) {
-                myNewMatrix = cv::Mat(((mTensor.nbDims() > 1) ? static_cast<int>(mTensor.dims()[1])
-                                       : (mTensor.nbDims() > 0) ? 1
-                                       : 0),
-                                      (mTensor.nbDims() > 0) ? static_cast<int>(mTensor.dims()[0]) : 0,
-                                      detail::CV_C1_CPP_v<T>);
+            AIDGE_ASSERT(mData.empty() , "trying to enlarge non-owned data");
+            if (mDims.size() < 3) {
+                mData = cv::Mat(((mDims.size() > 1) ? static_cast<int>(mDims[0])
+                                 : (mDims.size() > 0) ? 1
+                                 : 0),
+                                (mDims.size() > 0) ? static_cast<int>(mDims[1]) : 0,
+                                detail::CV_C1_CPP_v<T>);
             } else {
                 std::vector<cv::Mat> channels;
-                for (std::size_t k = 0; k < mTensor.dims()[2]; ++k) {
-                    channels.push_back(cv::Mat(static_cast<int>(mTensor.dims()[1]),
-                                               static_cast<int>(mTensor.dims()[0]),
+                for (std::size_t k = 0; k < mDims[2]; ++k) {
+                    channels.push_back(cv::Mat(static_cast<int>(mDims[0]),
+                                               static_cast<int>(mDims[1]),
                                                detail::CV_C1_CPP_v<T>));
                 }
-                cv::merge(channels, myNewMatrix);
+                cv::merge(channels, mData);
             }
-            mDataOwner.reset(new cv::Mat(std::forward<cv::Mat>(myNewMatrix)));
-            mData = future_std::span<cv::Mat>(mDataOwner.get(), mTensor.size());
         }
     }
 };
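The capacity checks and pointer arithmetic above now go through the cv::Mat itself. As a standalone illustration with plain OpenCV (independent of Aidge): `total()` counts pixels and `channels()` counts scalars per pixel, so `total() * channels()` is the element count compared against `mNbElts`, and `ptr<T>() + offset` advances by elements on a typed pointer (the old code computed the same address in bytes via `ptr() + offset*sizeof(T)`).

```cpp
#include <opencv2/core.hpp>
#include <cassert>

int main() {
    cv::Mat m(4, 5, CV_32FC3);               // 4 rows, 5 cols, 3 float channels
    assert(m.total() == 4 * 5);              // number of pixels
    assert(m.total() * m.channels() == 60);  // number of scalar elements (the new capacity check)
    assert(m.isContinuous());                // required by the new const hostPtr()
    float* p = m.ptr<float>();               // typed pointer, as in the new rawPtr()
    p[7] = 1.0f;                             // offset in elements, not bytes
    return 0;
}
```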
@@ -28,12 +28,12 @@ class StimulusImpl_opencv_imread : public StimulusImpl {
 private:
     /// Stimulus data path
     const std::string mDataPath;
-    const int mColorFlag;
+    const int mReadMode;
 
 public:
-    StimulusImpl_opencv_imread(const std::string& dataPath="", std::int32_t colorFlag=cv::IMREAD_COLOR)
+    StimulusImpl_opencv_imread(const std::string& dataPath="", int readMode=cv::IMREAD_UNCHANGED)
         : mDataPath(dataPath),
-          mColorFlag(colorFlag)
+          mReadMode(readMode)
     {
         // ctor
     }
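The default read mode moves from `cv::IMREAD_COLOR` to `cv::IMREAD_UNCHANGED`. A short illustration of the difference (plain OpenCV; the file name is hypothetical):

```cpp
#include <opencv2/imgcodecs.hpp>

// IMREAD_COLOR always decodes to 3 BGR channels, even for a grayscale PGM;
// IMREAD_UNCHANGED keeps the channel count (and depth) stored in the file,
// so the tensor dims derived from the cv::Mat match the image on disk.
cv::Mat forcedColor = cv::imread("digit.pgm", cv::IMREAD_COLOR);      // 3 channels
cv::Mat asStored    = cv::imread("digit.pgm", cv::IMREAD_UNCHANGED);  // 1 channel for a PGM
```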
@@ -23,7 +23,7 @@
 Aidge::StimulusImpl_opencv_imread::~StimulusImpl_opencv_imread() noexcept = default;
 
 std::shared_ptr<Aidge::Tensor> Aidge::StimulusImpl_opencv_imread::load() const {
-    cv::Mat cvImg = cv::imread(mDataPath, mColorFlag);
+    cv::Mat cvImg = cv::imread(mDataPath, mReadMode);
     if (cvImg.empty()) {
         throw std::runtime_error("Could not open images file: " + mDataPath);
     }
@@ -64,12 +64,13 @@ std::shared_ptr<Aidge::Tensor> Aidge::tensorOpencv(cv::Mat mat) {
     const std::vector<DimSize_t> matDims = std::vector<DimSize_t>({static_cast<DimSize_t>(mat.cols),
                                                                    static_cast<DimSize_t>(mat.rows),
                                                                    static_cast<DimSize_t>(mat.channels())});
+    // Get the correct Data Type
+    Aidge::DataType type;
+    type = CVtoAidge(mat.depth());
 
     // Create tensor from the dims of the Cv::Mat
-    std::shared_ptr<Tensor> tensor = std::make_shared<Tensor>(matDims);
+    std::shared_ptr<Tensor> tensor = std::make_shared<Tensor>(matDims,type);
     // Set beackend opencv
     tensor->setBackend("opencv");
-    // Set Data Type
-    tensor->setDataType(CVtoAidge(mat.depth()));
 
     // Cast the tensorImpl to access setCvMat function
     TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor->getImpl().get());
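With this change the data type is resolved through `CVtoAidge(mat.depth())` before the tensor is constructed, so `setBackend("opencv")` is called on a tensor that already has its final dims and dtype. A usage sketch (the image path is hypothetical):

```cpp
// Wrap an image read with OpenCV into an Aidge tensor on the "opencv" backend.
cv::Mat mat = cv::imread("digit.pgm", cv::IMREAD_UNCHANGED);
// Dims follow {cols, rows, channels}; the dtype comes from CVtoAidge(mat.depth())
// at construction time, before the backend implementation is created.
std::shared_ptr<Aidge::Tensor> tensor = Aidge::tensorOpencv(mat);
```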
@@ -100,7 +101,7 @@ std::shared_ptr<Aidge::Tensor> Aidge::convertCpu(std::shared_ptr<Aidge::Tensor>
     // Get the cv::Mat from the tensor backend Opencv
     Aidge::TensorImpl_opencv_* tImplOpencv = dynamic_cast<Aidge::TensorImpl_opencv_*>(tensorOpencv->getImpl().get());
-    cv::Mat dataOpencv = tImplOpencv->getCvMat();
+    cv::Mat dataOpencv = tImplOpencv->data();
 
     // Convert the cv::Mat into a vector of cv::Mat (vector of channels)
     std::vector<cv::Mat> channels;
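Call sites only have to switch the accessor name: `getCvMat()` becomes `data()` and returns a `const cv::Mat&` rather than a span. A sketch of the access pattern (the `cv::split` call is illustrative, standing in for the per-channel conversion `convertCpu` performs next):

```cpp
// Retrieve the cv::Mat held by an "opencv" tensor through the renamed accessor.
auto* impl = dynamic_cast<Aidge::TensorImpl_opencv_*>(tensorOpencv->getImpl().get());
const cv::Mat& dataOpencv = impl->data();   // was: getCvMat()

std::vector<cv::Mat> channels;
cv::split(dataOpencv, channels);            // one single-channel cv::Mat per channel
```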
@@ -25,7 +25,7 @@ using namespace Aidge;
 TEST_CASE("Stimulus creation", "[Stimulus][OpenCV]") {
     SECTION("Instanciation & load an image") {
         // Load image with imread
-        cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm");
+        cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm", cv::IMREAD_UNCHANGED);
         REQUIRE(true_mat.empty()==false);
 
         // Create Stimulus
@@ -39,8 +39,8 @@ TEST_CASE("Stimulus creation", "[Stimulus][OpenCV]") {
         // Access the cv::Mat with the tensor
         TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor_load->getImpl().get());
-        REQUIRE(tImpl_opencv->getCvMat().size() == true_mat.size());
-        REQUIRE(cv::countNonZero(tImpl_opencv->getCvMat() != true_mat) == 0);
+        REQUIRE((tImpl_opencv->data().total() * tImpl_opencv->data().channels()) == (true_mat.total() * true_mat.channels()));
+        REQUIRE(cv::countNonZero(tImpl_opencv->data() != true_mat) == 0);
 
         // This time the tensor is already loaded in memory
@@ -50,8 +50,8 @@ TEST_CASE("Stimulus creation", "[Stimulus][OpenCV]") {
         // Access the cv::Mat with the tensor
         TensorImpl_opencv_* tImpl_opencv_2 = dynamic_cast<TensorImpl_opencv_*>(tensor_load_2->getImpl().get());
-        REQUIRE(tImpl_opencv_2->getCvMat().size() == true_mat.size());
-        REQUIRE(cv::countNonZero(tImpl_opencv_2->getCvMat() != true_mat) == 0);
+        REQUIRE((tImpl_opencv_2->data().total() * tImpl_opencv_2->data().channels()) == (true_mat.total() * true_mat.channels()));
+        REQUIRE(cv::countNonZero(tImpl_opencv_2->data() != true_mat) == 0);
     }
 }
@@ -26,7 +26,7 @@ TEST_CASE("StimulusImpl_opencv_imread creation", "[StimulusImpl_opencv_imread][O
     SECTION("Instanciation & load an image") {
         // Load image with imread
         // cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/Lenna.png");
-        cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm");
+        cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm", cv::IMREAD_UNCHANGED);
         REQUIRE(true_mat.empty()==false);
 
         // Create StimulusImpl_opencv_imread
@@ -38,8 +38,8 @@ TEST_CASE("StimulusImpl_opencv_imread creation", "[StimulusImpl_opencv_imread][O
         // Access the cv::Mat with the tensor
         TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor_load->getImpl().get());
-        REQUIRE(tImpl_opencv->getCvMat().size() == true_mat.size());
-        REQUIRE(cv::countNonZero(tImpl_opencv->getCvMat() != true_mat) == 0);
+        REQUIRE((tImpl_opencv->data().total() * tImpl_opencv->data().channels()) == (true_mat.total() * true_mat.channels()));
+        REQUIRE(cv::countNonZero(tImpl_opencv->data() != true_mat) == 0);
     }
 }
@@ -73,11 +73,11 @@ TEST_CASE("Tensor creation opencv", "[Tensor][OpenCV]") {
         }
 
         SECTION("OpenCV tensor features") {
-            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->getCvMat().rows == 2);
-            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->getCvMat().cols == 2);
-            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->getCvMat().dims == 2);
-            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->getCvMat().total() == 4);
-            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->getCvMat().channels() == 2);
+            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().rows == 2);
+            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().cols == 2);
+            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().dims == 2);
+            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().total() == 4);
+            REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().channels() == 2);
         }
 
         SECTION("Access to array") {
@@ -103,4 +103,31 @@ TEST_CASE("Tensor creation opencv", "[Tensor][OpenCV]") {
             REQUIRE_FALSE(x == xFloat);
         }
     }
-}
+
+    SECTION("from const array before backend") {
+        Tensor x = Array3D<int,2,2,2>{
+        {
+            {
+                {1, 2},
+                {3, 4}
+            },
+            {
+                {5, 6},
+                {7, 8}
+            }
+        }};
+
+        x.setBackend("opencv");
+
+        REQUIRE(x.nbDims() == 3);
+        REQUIRE(x.dims()[0] == 2);
+        REQUIRE(x.dims()[1] == 2);
+        REQUIRE(x.dims()[2] == 2);
+        REQUIRE(x.size() == 8);
+
+        REQUIRE(x.get<int>({0,0,0}) == 1);
+        REQUIRE(x.get<int>({0,0,1}) == 2);
+        REQUIRE(x.get<int>({0,1,1}) == 4);
+        REQUIRE(x.get<int>({1,1,1}) == 8);
+    }
+}
\ No newline at end of file
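The "OpenCV tensor features" assertions above pin down the storage layout: a 2x2x2 integer tensor is held as a 2x2 `cv::Mat` with 2 channels. The same invariants can be checked with plain OpenCV, outside the test suite (the `CV_32SC2` element type is an assumption for `int` elements):

```cpp
#include <opencv2/core.hpp>
#include <cassert>

int main() {
    cv::Mat m(2, 2, CV_32SC2);   // 2 rows, 2 cols, 2 int channels (the third tensor dim)
    assert(m.dims == 2);         // still a 2-D matrix
    assert(m.total() == 4);      // 2 * 2 pixels
    assert(m.channels() == 2);   // the remaining dimension is carried by the channels
    return 0;
}
```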
@@ -75,8 +75,8 @@ TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", signed char, unsigned char
         // Check the matrix inside the tensor coorresponds to the matrix
         TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensorOcv->getImpl().get());
-        auto mat_tensor = tImpl_opencv->getCvMat();
+        auto mat_tensor = tImpl_opencv->data();
 
         REQUIRE(mat_tensor.size() == mat.size());
         REQUIRE(cv::countNonZero(mat_tensor != mat) == 0);