Skip to content
Snippets Groups Projects
Commit 5fb9b7f9 authored by Thibault Allenet's avatar Thibault Allenet
Browse files

Remove convertToCpu in order to use setBackend generic function

parent 907e1771
No related branches found
No related tags found
No related merge requests found
......@@ -73,28 +73,6 @@ namespace Aidge {
tImpl_opencv->setCvMat(mat);
return tensor;
}
/**
 * @brief Copy the data from a source 2D cv::Mat to a destination pointer with an offset
 *
 * @tparam CV_T The standard C++ type corresponding to the OpenCV element depth (e.g. CV_8U -> unsigned char)
 * @param mat opencv 2D mat to copy the data from
 * @param data destination pointer (interpreted as an array of CV_T)
 * @param offset offset, in elements of CV_T, at the destination data pointer
 */
template <class CV_T>
void convert(const cv::Mat& mat, void* data, size_t offset);
/**
 * @brief Convert a tensor with backend opencv into a tensor with backend cpu
 *
 * @param tensorOpencv tensor with backend opencv (contains a cv::Mat)
 * @return std::shared_ptr<Tensor> tensor with backend cpu (contains a std::vector)
 */
std::shared_ptr<Tensor> convertCpu(std::shared_ptr<Aidge::Tensor> tensorOpencv);
} // namespace
#endif // AIDGE_BACKEND_OPENCV_UTILS_ATTRIBUTES_H_
\ No newline at end of file
#include "aidge/backend/opencv/utils/Utils.hpp"
/**
 * @brief Copy the data of a 2D cv::Mat into a destination buffer at an element offset.
 *
 * @tparam CV_T The standard C++ type corresponding to the OpenCV element depth
 * @param mat source 2D mat; must be continuous in memory (single-block layout)
 * @param data destination pointer, interpreted as an array of CV_T
 * @param offset offset, in elements of CV_T, applied to the destination pointer
 * @throws std::runtime_error if the matrix is not continuous (a single memcpy
 *         would then copy padding bytes between rows)
 */
template <class CV_T>
void Aidge::convert(const cv::Mat& mat, void* data, size_t offset)
{
    if (mat.isContinuous()) {
        // Widen rows/cols to size_t BEFORE multiplying to avoid int overflow on large mats.
        const std::size_t elemCount =
            static_cast<std::size_t>(mat.rows) * static_cast<std::size_t>(mat.cols);
        std::memcpy(reinterpret_cast<void*>(reinterpret_cast<CV_T*>(data) + offset),
                    mat.ptr<CV_T>(),
                    sizeof(CV_T) * elemCount);
    }
    else {
        throw std::runtime_error(
            "convert: cv::Mat must be continuous in memory to be copied.");
    }
}
/**
 * @brief Convert a tensor with an OpenCV backend into a tensor with a cpu backend.
 *
 * The cv::Mat held by the source tensor is split into its channels; each channel
 * is copied contiguously, channel after channel, into the cpu tensor's raw buffer.
 *
 * @param tensorOpencv tensor whose implementation backend is "opencv"
 * @return std::shared_ptr<Tensor> a new tensor with backend "cpu" holding a copy of the data
 * @throws std::runtime_error if the implementation is not a TensorImpl_opencv_
 *         or if the cv::Mat depth has no matching Aidge::DataType
 */
std::shared_ptr<Aidge::Tensor> Aidge::convertCpu(std::shared_ptr<Aidge::Tensor> tensorOpencv){
    // Assert the tensorOpencv is backend Opencv
    assert(std::strcmp(tensorOpencv->getImpl()->backend(), "opencv") == 0 && "Cannot convert tensor backend from opencv to cpu : tensor is not backend opencv.");

    // Create a tensor backend cpu from the dimensions of the tensor backend opencv
    std::shared_ptr<Aidge::Tensor> tensorCpu = std::make_shared<Aidge::Tensor>(tensorOpencv->dims());

    // Get the cv::Mat from the tensor backend Opencv.
    // Guard the downcast: dereferencing a failed dynamic_cast would be UB.
    Aidge::TensorImpl_opencv_* tImplOpencv = dynamic_cast<Aidge::TensorImpl_opencv_*>(tensorOpencv->getImpl().get());
    if (tImplOpencv == nullptr) {
        throw std::runtime_error(
            "Cannot convert tensor backend from opencv to cpu : implementation is not TensorImpl_opencv_.");
    }
    cv::Mat dataOpencv = tImplOpencv->getCvMat();

    // Convert the cv::Mat into a vector of cv::Mat (vector of channels)
    std::vector<cv::Mat> channels;
    cv::split(dataOpencv, channels);

    // Set the datatype of the cpu tensor from the OpenCV depth.
    // All channels of a cv::Mat share the same depth, so channel 0 is representative.
    switch (channels[0].depth()) {
        case CV_8U:
            tensorCpu->setDataType(Aidge::DataType::UInt8);
            break;
        case CV_8S:
            tensorCpu->setDataType(Aidge::DataType::Int8);
            break;
        case CV_16U:
            tensorCpu->setDataType(Aidge::DataType::UInt16);
            break;
        case CV_16S:
            tensorCpu->setDataType(Aidge::DataType::Int16);
            break;
        case CV_32S:
            tensorCpu->setDataType(Aidge::DataType::Int32);
            break;
        case CV_32F:
            tensorCpu->setDataType(Aidge::DataType::Float32);
            break;
        case CV_64F:
            tensorCpu->setDataType(Aidge::DataType::Float64);
            break;
        default:
            throw std::runtime_error(
                "Cannot convert cv::Mat to Tensor: incompatible types.");
    }

    // Set backend cpu (allocates the cpu buffer for the chosen datatype)
    tensorCpu->setBackend("cpu");

    // Copy each channel into the cpu buffer. The offset is kept cumulatively in
    // elements, widened to size_t before multiplication to avoid int overflow.
    std::size_t offset = 0;
    for (const cv::Mat& channel : channels) {
        const std::size_t channelElems =
            static_cast<std::size_t>(channel.rows) * static_cast<std::size_t>(channel.cols);
        switch (channel.depth()) {
            case CV_8U:
                convert<unsigned char>(channel, tensorCpu->getImpl()->rawPtr(), offset);
                break;
            case CV_8S:
                convert<char>(channel, tensorCpu->getImpl()->rawPtr(), offset);
                break;
            case CV_16U:
                convert<unsigned short>(channel, tensorCpu->getImpl()->rawPtr(), offset);
                break;
            case CV_16S:
                convert<short>(channel, tensorCpu->getImpl()->rawPtr(), offset);
                break;
            case CV_32S:
                convert<int>(channel, tensorCpu->getImpl()->rawPtr(), offset);
                break;
            case CV_32F:
                convert<float>(channel, tensorCpu->getImpl()->rawPtr(), offset);
                break;
            case CV_64F:
                convert<double>(channel, tensorCpu->getImpl()->rawPtr(), offset);
                break;
            default:
                throw std::runtime_error(
                    "Cannot convert cv::Mat to Tensor: incompatible types.");
        }
        offset += channelElems;
    }
    return tensorCpu;
}
......@@ -24,7 +24,7 @@ cv::Mat createRandomMat(int rows, int cols) {
// TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", char, unsigned char, short, unsigned short, int, float, double) {
// TODO : perform test for char and double
TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", signed char, unsigned char, short, unsigned short, int, float, double) {
TEMPLATE_TEST_CASE("Opencv Create a tensor from cv::Mat", "[Utils][OpenCV]", signed char, unsigned char, short, unsigned short, int, float, double) {
constexpr int num_test_matrices = 50;
......@@ -68,30 +68,6 @@ TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", signed char, unsigned char
REQUIRE(mat_tensor.size() == mat.size());
REQUIRE(cv::countNonZero(mat_tensor != mat) == 0);
// Convert opencv tensor to cpu tensor
auto tensorCpu = convertCpu(tensorOcv);
// Split the mat from tensor opencv into channels
std::vector<cv::Mat> channels_split;
cv::split(mat_tensor, channels_split);
// Get the ptr to the std::vector<TestType> as a void * with rawPtr()
auto cpu_ptr = static_cast<TestType*>(tensorCpu->getImpl()->rawPtr());
// Compare the tensor cpu values with the cv mat in an elementwise fashion
// Loop over channels
for (int c = 0; c < ch; ++c) {
// Loop over rows
for (int i = 0; i < rows; ++i) {
// Loop over columns
for (int j = 0; j < cols; ++j) {
TestType elementValue = channels_split[c].at<TestType>(i, j);
TestType elementValue_cpu = cpu_ptr[c*(rows*cols)+i*cols+j];
REQUIRE(elementValue == elementValue_cpu);
}
}
}
}
}
}
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment