Commit f4e82ab8 authored by Olivier BICHLER

Use setBackend

parent 7de335da
2 merge requests: !29 Update 0.1.5 -> 0.1.6, !5 Use setBackend instead of ConvertToCpu workaround
Pipeline #70386 passed
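
Note on the change: as the merge request title says, the dedicated ConvertToCpu/convertCpu workaround is replaced by the core Tensor API. A minimal sketch of the new pattern, assuming `ocvTensor` is any tensor produced by `tensorOpencv` and that `setBackend` performs the cross-backend data copy (which is what the MNIST change below relies on):

    // Before (helper removed by this commit):
    //     std::shared_ptr<Aidge::Tensor> cpuTensor = Aidge::convertCpu(ocvTensor);
    // After: the tensor migrates its own data between backends
    ocvTensor->setBackend("cpu");                     // copy the data out of the cv::Mat impl
    ocvTensor->setDataFormat(Aidge::DataFormat::CHW); // transpose from OpenCV's HWC layout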
@@ -33,27 +33,6 @@ namespace Aidge {
  */
 std::shared_ptr<Tensor> tensorOpencv(cv::Mat mat);
-/**
- * @brief Copy the data from a source 2D cv::mat to a destination pointer with an offset
- *
- * @tparam CV_T The standard type corresponding to the opencv data type
- * @param mat opencv 2D mat to copy the data from
- * @param data destination pointer
- * @param offset offset an the destination data pointer
- */
-template <class CV_T>
-void convert(const cv::Mat& mat, void* data, std::size_t offset);
-/**
- * @brief Convert a tensor backend opencv into a tensor backend cpu
- *
- * @param tensorOpencv tensor with backend opencv (contains a cv::mat)
- * @return std::shared_ptr<Tensor> tensor backend cpu (contains a std::vector)
- */
-std::shared_ptr<Tensor> convertCpu(std::shared_ptr<Aidge::Tensor> tensorOpencv);
 } // namespace
 #endif // AIDGE_OPENCV_UTILS_UTILS_H_
\ No newline at end of file
@@ -153,7 +153,9 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::MNIST::getItem(const std::siz
     std::vector<std::shared_ptr<Tensor>> item;
     // Load the digit tensor
     // TODO : Currently converts the tensor Opencv but this operation will be carried by a convert operator in the preprocessing graph
-    item.push_back(Aidge::convertCpu((std::get<0>(mStimuli.at(index))).load()));
+    item.push_back((std::get<0>(mStimuli.at(index))).load());
+    item.back()->setBackend("cpu");
+    item.back()->setDataFormat(DataFormat::CHW);
     // item.push_back((std::get<0>(mStimuli.at(index))).load());
     // Load the label tensor
     item.push_back((std::get<1>(mStimuli.at(index))).load());
......
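
For callers of MNIST::getItem, the digit tensor now arrives already on the cpu backend in CHW format. A hedged consumer sketch (the `mnist` instance, the `index` value and the unsigned char data type are illustrative assumptions):

    std::vector<std::shared_ptr<Aidge::Tensor>> item = mnist.getItem(index);
    std::shared_ptr<Aidge::Tensor> digit = item[0]; // backend "cpu", DataFormat::CHW
    std::shared_ptr<Aidge::Tensor> label = item[1];
    // Raw access, mirroring what the test below does with rawPtr()
    auto* raw = static_cast<unsigned char*>(digit->getImpl()->rawPtr());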
@@ -60,94 +60,19 @@ static Aidge::DataType CVtoAidge(const int matDepth) {
 }
 std::shared_ptr<Aidge::Tensor> Aidge::tensorOpencv(cv::Mat mat) {
-    // Get Mat dims
-    const std::vector<DimSize_t> matDims = std::vector<DimSize_t>({static_cast<DimSize_t>(mat.channels()),
-                                                                   static_cast<DimSize_t>(mat.rows),
-                                                                   static_cast<DimSize_t>(mat.cols)});
-    // Get the correct Data Type
-    Aidge::DataType type;
-    type = CVtoAidge(mat.depth());
     // Create tensor from the dims of the Cv::Mat
-    std::shared_ptr<Tensor> tensor = std::make_shared<Tensor>(matDims);
-    // Set beackend opencv
+    auto tensor = std::make_shared<Tensor>(std::vector<DimSize_t>{static_cast<DimSize_t>(mat.rows),
+                                                                  static_cast<DimSize_t>(mat.cols),
+                                                                  static_cast<DimSize_t>(mat.channels())});
+    // cv::Mat data format is always HWC
+    tensor->setDataFormat(DataFormat::HWC);
+    // Set backend opencv
     tensor->setBackend("opencv");
     // Set datatype
-    tensor->setDataType(type);
+    tensor->setDataType(CVtoAidge(mat.depth()));
     // Cast the tensorImpl to access setCvMat function
     TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor->getImpl().get());
     tImpl_opencv->setCvMat(mat);
     return tensor;
 }
-template <class CV_T>
-void Aidge::convert(const cv::Mat& mat, void* data, std::size_t offset)
-{
-    if (mat.isContinuous())
-        std::memcpy(reinterpret_cast<void*>(reinterpret_cast<CV_T*>(data) + offset), mat.ptr<CV_T>(), sizeof(CV_T)*(mat.cols*mat.rows));
-    else {
-        throw std::runtime_error(
-                "Poui pwoup convert not support if matrix not contiguous");
-    }
-}
-std::shared_ptr<Aidge::Tensor> Aidge::convertCpu(std::shared_ptr<Aidge::Tensor> tensorOpencv){
-    // Assert the tensorOpencv is backend Opencv
-    AIDGE_ASSERT(tensorOpencv->getImpl()->backend() == "opencv", "Cannot convert tensor backend from opencv to cpu : tensor is not backend opencv.");
-    // Create a tensor backend cpu from the dimensions of the tensor backend opencv
-    std::shared_ptr<Aidge::Tensor> tensorCpu = std::make_shared<Aidge::Tensor>(tensorOpencv->dims());
-    // Get the cv::Mat from the tensor backend Opencv
-    Aidge::TensorImpl_opencv_* tImplOpencv = dynamic_cast<Aidge::TensorImpl_opencv_*>(tensorOpencv->getImpl().get());
-    cv::Mat dataOpencv = tImplOpencv->data();
-    // Convert the cv::Mat into a vector of cv::Mat (vector of channels)
-    std::vector<cv::Mat> channels;
-    cv::split(dataOpencv, channels);
-    // set the datatype of the cpu tensor
-    tensorCpu->setDataType(CVtoAidge(channels[0].depth()));
-    // Set backend cpu
-    tensorCpu->setBackend("cpu");
-    // Convert & copy the cv::Mat into the tensor using the rawPtr of tensor cpu
-    std::size_t count = 0;
-    for (std::vector<cv::Mat>::const_iterator itChannel = channels.cbegin();
-         itChannel != channels.cend();
-         ++itChannel)
-    {
-        switch ((*itChannel).depth()) {
-        case CV_8U:
-            convert<unsigned char>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
-            break;
-        case CV_8S:
-            convert<char>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
-            break;
-        case CV_16U:
-            convert<unsigned short>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
-            break;
-        case CV_16S:
-            convert<short>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
-            break;
-        case CV_32S:
-            convert<int>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
-            break;
-        case CV_32F:
-            convert<float>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
-            break;
-        case CV_64F:
-            convert<double>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
-            break;
-        default:
-            throw std::runtime_error(
-                "Cannot convert cv::Mat to Tensor: incompatible types.");
-        }
-        ++count;
-    }
-    return tensorCpu;
-}
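
Putting the reworked tensorOpencv together with the backend switch gives the full replacement for the deleted convert/convertCpu pair. A self-contained sketch (the two include paths are assumptions; every API call is one visible in the diff):

    #include <memory>
    #include <opencv2/core.hpp>
    #include "aidge/data/Tensor.hpp"                // assumed include path
    #include "aidge/backend/opencv/utils/Utils.hpp" // assumed include path for tensorOpencv

    int main() {
        // A 3-channel 4x5 image; cv::Mat stores it interleaved (HWC)
        cv::Mat mat(4, 5, CV_8UC3, cv::Scalar(1, 2, 3));
        std::shared_ptr<Aidge::Tensor> t = Aidge::tensorOpencv(mat);
        // t->dims() is now {rows, cols, channels} = {4, 5, 3}, tagged DataFormat::HWC
        t->setBackend("cpu");                     // replaces the deleted convertCpu()
        t->setDataFormat(Aidge::DataFormat::CHW); // channel-planar, like the old cv::split copy
        return 0;
    }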
@@ -61,9 +61,9 @@ TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", signed char, unsigned char
     auto tensorOcv = tensorOpencv(mat);
     // Check the size of the tensor
-    REQUIRE(mat.channels() == tensorOcv->dims()[0]);
-    REQUIRE(mat.rows == tensorOcv->dims()[1]);
-    REQUIRE(mat.cols == tensorOcv->dims()[2]);
+    REQUIRE(mat.rows == tensorOcv->dims()[0]);
+    REQUIRE(mat.cols == tensorOcv->dims()[1]);
+    REQUIRE(mat.channels() == tensorOcv->dims()[2]);
     //Get the matrix inside the tensor
@@ -74,10 +74,12 @@ TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", signed char, unsigned char
     cv::split(mat_tensor, channels_split);
     // Convert opencv tensor to cpu tensor
-    auto tensorCpu = convertCpu(tensorOcv);
+    auto tensorCpu = tensorOcv->clone();
+    tensorCpu.setBackend("cpu");
+    tensorCpu.setDataFormat(DataFormat::CHW);
     // Get the cpu ptr of the converted tensor
-    auto cpu_ptr = static_cast<TestType*>(tensorCpu->getImpl()->rawPtr());
+    auto cpu_ptr = static_cast<TestType*>(tensorCpu.getImpl()->rawPtr());
     // Compare the tensor cpu values with the cv mat in an elementwise fashion
     // Loop over channels
......
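
The elementwise comparison loop itself is collapsed above; a hypothetical continuation consistent with the CHW conversion and the channels_split vector (illustrative, not the actual elided code):

    // Hypothetical: after CHW conversion, channel c is one contiguous plane
    for (int c = 0; c < mat.channels(); ++c)
        for (int i = 0; i < mat.rows; ++i)
            for (int j = 0; j < mat.cols; ++j)
                REQUIRE(cpu_ptr[(c * mat.rows + i) * mat.cols + j]
                        == channels_split[c].at<TestType>(i, j));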