Skip to content
Snippets Groups Projects
Commit ab2fbc0f authored by Thibault Allenet's avatar Thibault Allenet
Browse files

Merge branch 'TensorImpl' into 'dev'

Change tensorimpl opencv `future_std::span<cv::Mat>` to `cv::Mat`

See merge request !4
parents 2bc94a09 1e3ce9e5
No related branches found
No related tags found
2 merge requests!10Update backend_opencv with modifications from aidge_core,!4Change tensorimpl opencv `future_std::span<cv::Mat>` to `cv::Mat`
Pipeline #39329 failed
......@@ -27,35 +27,38 @@ namespace Aidge {
class TensorImpl_opencv_ {
public:
virtual const cv::Mat& getCvMat() const = 0;
virtual const cv::Mat& data() const = 0;
virtual void setCvMat(const cv::Mat& mat ) = 0;
};
template <class T> class TensorImpl_opencv : public TensorImpl, public TensorImpl_opencv_ {
template <class T>
class TensorImpl_opencv : public TensorImpl, public TensorImpl_opencv_ {
private:
const Tensor &mTensor; // Impl needs to access Tensor information, but is not
// supposed to change it!
// Stores the cv::Mat
cv::Mat mData;
future_std::span<cv::Mat> mData;
std::unique_ptr<cv::Mat> mDataOwner = std::unique_ptr<cv::Mat>(new cv::Mat(0, 0, detail::CV_C1_CPP_v<T>));
protected:
std::vector<DimSize_t> mDims;
public:
static constexpr const char *Backend = "opencv";
TensorImpl_opencv(const Tensor &tensor)
: TensorImpl(Backend), mTensor(tensor) {}
TensorImpl_opencv() = delete;
TensorImpl_opencv(DeviceIdx_t device, std::vector<DimSize_t> dims)
: TensorImpl(Backend, device, dims)
{
mDims = dims;
}
bool operator==(const TensorImpl &otherImpl) const override final {
// Create iterators for both matrices
cv::MatConstIterator_<T> it1 = mDataOwner->begin<T>();
const future_std::span<cv::Mat> tmp = reinterpret_cast<const TensorImpl_opencv<T> &>(otherImpl).data();
cv::MatConstIterator_<T> it1 = mData.begin<T>();
const cv::Mat otherData = *(tmp.data());
const cv::Mat & otherData = reinterpret_cast<const TensorImpl_opencv<T> &>(otherImpl).data();
cv::MatConstIterator_<T> it2 = otherData.begin<T>();
// Iterate over the elements and compare them
for (; it1 != mDataOwner->end<T>(); ++it1, ++it2) {
for (; it1 != mData.end<T>(); ++it1, ++it2) {
if (*it1 != *it2) {
return false;
}
......@@ -63,157 +66,160 @@ public:
return true;
}
static std::unique_ptr<TensorImpl_opencv> create(const Tensor &tensor) {
return std::make_unique<TensorImpl_opencv<T>>(tensor);
static std::unique_ptr<TensorImpl_opencv> create(DeviceIdx_t device, std::vector<DimSize_t> dims) {
return std::make_unique<TensorImpl_opencv<T>>(device, dims);
}
void resize(std::vector<DimSize_t> dims) override{
mDims = dims;
size_t product = 1;
for (size_t num : dims) {
product *= num;
}
mNbElts = product;
}
// native interface
const future_std::span<cv::Mat> data() const { return mData; }
const cv::Mat & data() const override { return mData; }
inline std::size_t scalarSize() const override { return sizeof(T); }
inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
inline std::size_t size() const override { return mData.size(); }
void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
const T* srcT = static_cast<const T *>(src);
T* dstT = static_cast<T *>(rawPtr(offset));
void setDevice(DeviceIdx_t device) override {
AIDGE_ASSERT(device == 0, "device cannot be != 0 for Opencv backend");
}
AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
std::copy(srcT, srcT + length, dstT);
void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override {
AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
static_cast<T *>(rawPtr()) + offset);
}
void copyCast(const void *src, NbElts_t length, const DataType srcDt) override {
void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final{
if (length == 0) {
return;
}
AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
switch (srcDt) {
T* dstT = static_cast<T *>(rawPtr(offset));
AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
switch (srcDt)
{
case DataType::Float64:
std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::Float32:
std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::Float16:
std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::Int64:
std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::UInt64:
std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::Int32:
std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::UInt32:
std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::Int16:
std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::UInt16:
std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::Int8:
std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
case DataType::UInt8:
std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
static_cast<T *>(rawPtr()));
dstT);
break;
default:
AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
break;
}
}
void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) override {
void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
AIDGE_ASSERT(device.first == Backend, "backend must match");
AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
copy(src, length);
copy(src, length, offset);
}
void copyFromHost(const void *src, NbElts_t length) override {
copy(src, length);
void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
copy(src, length, offset);
}
void copyToHost(void *dst, NbElts_t length) const override {
AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
const T* src = static_cast<const T*>(rawPtr());
std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
static_cast<T *>(dst));
void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
const T* src = static_cast<const T*>(rawPtr(offset));
AIDGE_ASSERT(length <= (mData.total() * mData.channels()) || length <= mNbElts, "copy length is above capacity");
std::copy(src, src + length, static_cast<T *>(dst));
}
void *rawPtr(NbElts_t offset = 0) override {
void *rawPtr(NbElts_t offset = 0) override final {
lazyInit();
return (mData.data()->ptr() + offset*sizeof(T));
return (mData.ptr<T>() + offset);
};
const void *rawPtr(NbElts_t offset = 0) const override {
AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const rawPtr");
return (mData.data()->ptr() + offset*sizeof(T));
const void *rawPtr(NbElts_t offset = 0) const override final {
AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "accessing uninitialized const rawPtr");
return (mData.ptr<T>() + offset);
};
void *hostPtr(NbElts_t offset = 0) override {
void *hostPtr(NbElts_t offset = 0) override final {
lazyInit();
std::cout << *reinterpret_cast<T *>(mData.data()->ptr()) + offset << std::endl;
return (mData.data()->ptr() + offset*sizeof(T));
return (mData.ptr<T>() + offset);
};
const void *hostPtr(NbElts_t offset = 0) const override {
AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const hostPtr");
return (mData.data()->ptr() + offset*sizeof(T));
AIDGE_ASSERT((mData.total() * mData.channels()) >= mNbElts, "accessing uninitialized const hostPtr");
AIDGE_ASSERT(mData.isContinuous(), "CV Matrix not continuous");
return (mData.ptr<T>() + offset);
};
const cv::Mat& getCvMat() const override { return *mDataOwner.get(); }
void setCvMat(const cv::Mat& mat) override {mDataOwner.reset(new cv::Mat(std::move(mat)));}
void setCvMat(const cv::Mat& mat) override {mData=mat;}
virtual ~TensorImpl_opencv() = default;
private:
void lazyInit() {
if (mData.size() < mTensor.size()) {
if ((mData.total() * mData.channels()) < mNbElts) {
// Need more data, a re-allocation will occur
AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
AIDGE_ASSERT(mData.empty() , "trying to enlarge non-owned data");
cv::Mat myNewMatrix;
if (mTensor.nbDims() < 3) {
myNewMatrix = cv::Mat(((mTensor.nbDims() > 1) ? static_cast<int>(mTensor.dims()[1])
: (mTensor.nbDims() > 0) ? 1
if (mDims.size() < 3) {
mData = cv::Mat(((mDims.size() > 1) ? static_cast<int>(mDims[0])
: (mDims.size() > 0) ? 1
: 0),
(mTensor.nbDims() > 0) ? static_cast<int>(mTensor.dims()[0]) : 0,
(mDims.size() > 0) ? static_cast<int>(mDims[1]) : 0,
detail::CV_C1_CPP_v<T>);
} else {
std::vector<cv::Mat> channels;
for (std::size_t k = 0; k < mTensor.dims()[2]; ++k) {
channels.push_back(cv::Mat(static_cast<int>(mTensor.dims()[1]),
static_cast<int>(mTensor.dims()[0]),
for (std::size_t k = 0; k < mDims[2]; ++k) {
channels.push_back(cv::Mat(static_cast<int>(mDims[0]),
static_cast<int>(mDims[1]),
detail::CV_C1_CPP_v<T>));
}
cv::merge(channels, myNewMatrix);
cv::merge(channels, mData);
}
mDataOwner.reset(new cv::Mat(std::forward<cv::Mat>(myNewMatrix)));
mData = future_std::span<cv::Mat>(mDataOwner.get(), mTensor.size());
}
}
};
......
......@@ -28,12 +28,12 @@ class StimulusImpl_opencv_imread : public StimulusImpl {
private:
/// Stimulus data path
const std::string mDataPath;
const int mColorFlag;
const int mReadMode;
public:
StimulusImpl_opencv_imread(const std::string& dataPath="", std::int32_t colorFlag=cv::IMREAD_COLOR)
StimulusImpl_opencv_imread(const std::string& dataPath="", int readMode=cv::IMREAD_UNCHANGED)
: mDataPath(dataPath),
mColorFlag(colorFlag)
mReadMode(readMode)
{
// ctor
}
......
......@@ -23,7 +23,7 @@
Aidge::StimulusImpl_opencv_imread::~StimulusImpl_opencv_imread() noexcept = default;
std::shared_ptr<Aidge::Tensor> Aidge::StimulusImpl_opencv_imread::load() const {
cv::Mat cvImg = cv::imread(mDataPath, mColorFlag);
cv::Mat cvImg = cv::imread(mDataPath, mReadMode);
if (cvImg.empty()) {
throw std::runtime_error("Could not open images file: " + mDataPath);
}
......
......@@ -64,12 +64,13 @@ std::shared_ptr<Aidge::Tensor> Aidge::tensorOpencv(cv::Mat mat) {
const std::vector<DimSize_t> matDims = std::vector<DimSize_t>({static_cast<DimSize_t>(mat.cols),
static_cast<DimSize_t>(mat.rows),
static_cast<DimSize_t>(mat.channels())});
// Get the correct Data Type
Aidge::DataType type;
type = CVtoAidge(mat.depth());
// Create tensor from the dims of the Cv::Mat
std::shared_ptr<Tensor> tensor = std::make_shared<Tensor>(matDims);
std::shared_ptr<Tensor> tensor = std::make_shared<Tensor>(matDims,type);
// Set backend opencv
tensor->setBackend("opencv");
// Set Data Type
tensor->setDataType(CVtoAidge(mat.depth()));
// Cast the tensorImpl to access setCvMat function
TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor->getImpl().get());
......@@ -100,7 +101,7 @@ std::shared_ptr<Aidge::Tensor> Aidge::convertCpu(std::shared_ptr<Aidge::Tensor>
// Get the cv::Mat from the tensor backend Opencv
Aidge::TensorImpl_opencv_* tImplOpencv = dynamic_cast<Aidge::TensorImpl_opencv_*>(tensorOpencv->getImpl().get());
cv::Mat dataOpencv = tImplOpencv->getCvMat();
cv::Mat dataOpencv = tImplOpencv->data();
// Convert the cv::Mat into a vector of cv::Mat (vector of channels)
std::vector<cv::Mat> channels;
......
......@@ -25,7 +25,7 @@ using namespace Aidge;
TEST_CASE("Stimulus creation", "[Stimulus][OpenCV]") {
SECTION("Instanciation & load an image") {
// Load image with imread
cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm");
cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm", cv::IMREAD_UNCHANGED);
REQUIRE(true_mat.empty()==false);
// Create Stimulus
......@@ -39,8 +39,8 @@ TEST_CASE("Stimulus creation", "[Stimulus][OpenCV]") {
// Access the cv::Mat with the tensor
TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor_load->getImpl().get());
REQUIRE(tImpl_opencv->getCvMat().size() == true_mat.size());
REQUIRE(cv::countNonZero(tImpl_opencv->getCvMat() != true_mat) == 0);
REQUIRE((tImpl_opencv->data().total() * tImpl_opencv->data().channels()) == (true_mat.total() * true_mat.channels()));
REQUIRE(cv::countNonZero(tImpl_opencv->data() != true_mat) == 0);
// This time the tensor is already loaded in memory
......@@ -50,8 +50,8 @@ TEST_CASE("Stimulus creation", "[Stimulus][OpenCV]") {
// Access the cv::Mat with the tensor
TensorImpl_opencv_* tImpl_opencv_2 = dynamic_cast<TensorImpl_opencv_*>(tensor_load_2->getImpl().get());
REQUIRE(tImpl_opencv_2->getCvMat().size() == true_mat.size());
REQUIRE(cv::countNonZero(tImpl_opencv_2->getCvMat() != true_mat) == 0);
REQUIRE((tImpl_opencv_2->data().total() * tImpl_opencv_2->data().channels()) == (true_mat.total() * true_mat.channels()));
REQUIRE(cv::countNonZero(tImpl_opencv_2->data() != true_mat) == 0);
}
}
......@@ -26,7 +26,7 @@ TEST_CASE("StimulusImpl_opencv_imread creation", "[StimulusImpl_opencv_imread][O
SECTION("Instanciation & load an image") {
// Load image with imread
// cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/Lenna.png");
cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm");
cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm", cv::IMREAD_UNCHANGED);
REQUIRE(true_mat.empty()==false);
// Create StimulusImpl_opencv_imread
......@@ -38,8 +38,8 @@ TEST_CASE("StimulusImpl_opencv_imread creation", "[StimulusImpl_opencv_imread][O
// Access the cv::Mat with the tensor
TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor_load->getImpl().get());
REQUIRE(tImpl_opencv->getCvMat().size() == true_mat.size());
REQUIRE(cv::countNonZero(tImpl_opencv->getCvMat() != true_mat) == 0);
REQUIRE((tImpl_opencv->data().total() * tImpl_opencv->data().channels()) == (true_mat.total() * true_mat.channels()));
REQUIRE(cv::countNonZero(tImpl_opencv->data() != true_mat) == 0);
}
}
......@@ -73,11 +73,11 @@ TEST_CASE("Tensor creation opencv", "[Tensor][OpenCV]") {
}
SECTION("OpenCV tensor features") {
REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->getCvMat().rows == 2);
REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->getCvMat().cols == 2);
REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->getCvMat().dims == 2);
REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->getCvMat().total() == 4);
REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->getCvMat().channels() == 2);
REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().rows == 2);
REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().cols == 2);
REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().dims == 2);
REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().total() == 4);
REQUIRE(static_cast<TensorImpl_opencv<int>*>(x.getImpl().get())->data().channels() == 2);
}
SECTION("Access to array") {
......@@ -103,4 +103,31 @@ TEST_CASE("Tensor creation opencv", "[Tensor][OpenCV]") {
REQUIRE_FALSE(x == xFloat);
}
}
}
SECTION("from const array before backend") {
Tensor x = Array3D<int,2,2,2>{
{
{
{1, 2},
{3, 4}
},
{
{5, 6},
{7, 8}
}
}};
x.setBackend("opencv");
REQUIRE(x.nbDims() == 3);
REQUIRE(x.dims()[0] == 2);
REQUIRE(x.dims()[1] == 2);
REQUIRE(x.dims()[2] == 2);
REQUIRE(x.size() == 8);
REQUIRE(x.get<int>({0,0,0}) == 1);
REQUIRE(x.get<int>({0,0,1}) == 2);
REQUIRE(x.get<int>({0,1,1}) == 4);
REQUIRE(x.get<int>({1,1,1}) == 8);
}
}
\ No newline at end of file
......@@ -75,8 +75,8 @@ TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", signed char, unsigned char
// Check the matrix inside the tensor corresponds to the matrix
TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensorOcv->getImpl().get());
auto mat_tensor = tImpl_opencv->getCvMat();
auto mat_tensor = tImpl_opencv->data();
REQUIRE(mat_tensor.size() == mat.size());
REQUIRE(cv::countNonZero(mat_tensor != mat) == 0);
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment