#include "aidge/backend/opencv/utils/Utils.hpp"

/**
 * @brief Copy the elements of a cv::Mat into a raw destination buffer.
 *
 * The destination is treated as a flat array of CV_T; @p offset is expressed
 * in elements (not bytes). Contiguous matrices are copied with a single
 * memcpy; non-contiguous matrices (e.g. ROI views) are copied row by row.
 *
 * @tparam CV_T   element type matching the matrix depth (e.g. float for CV_32F)
 * @param mat     source matrix (single channel after cv::split, but any
 *                channel count is handled via mat.channels())
 * @param data    destination buffer, large enough for offset + mat elements
 * @param offset  element offset into the destination buffer
 */
template <class CV_T>
void Aidge::convert(const cv::Mat& mat, void* data, size_t offset) {
    CV_T* const dst = reinterpret_cast<CV_T*>(data) + offset;
    // Elements per row; cv::split yields single-channel mats (channels()==1),
    // but accounting for channels() keeps this correct for any input.
    const std::size_t rowElems =
        static_cast<std::size_t>(mat.cols) * static_cast<std::size_t>(mat.channels());
    if (mat.isContinuous()) {
        // One contiguous copy of the whole matrix.
        std::memcpy(reinterpret_cast<void*>(dst), mat.ptr<CV_T>(),
                    sizeof(CV_T) * rowElems * static_cast<std::size_t>(mat.rows));
    } else {
        // Non-contiguous storage (e.g. a sub-matrix view): copy row by row.
        for (int r = 0; r < mat.rows; ++r) {
            std::memcpy(reinterpret_cast<void*>(dst + static_cast<std::size_t>(r) * rowElems),
                        mat.ptr<CV_T>(r), sizeof(CV_T) * rowElems);
        }
    }
}

/**
 * @brief Convert a tensor backed by the "opencv" backend into a "cpu" tensor.
 *
 * Builds a new cpu tensor with the same dimensions, maps the cv::Mat depth to
 * the matching Aidge::DataType, then copies each channel's data (channel-major)
 * into the cpu tensor's raw buffer.
 *
 * @param tensorOpencv tensor whose implementation must be the opencv backend
 * @return newly allocated cpu tensor holding a copy of the data
 * @throws std::runtime_error if the implementation is not an opencv one, the
 *         matrix is empty, or the cv::Mat depth has no matching DataType
 */
std::shared_ptr<Aidge::Tensor> Aidge::convertCpu(std::shared_ptr<Aidge::Tensor> tensorOpencv) {
    // Assert the tensorOpencv is backend Opencv
    assert(std::strcmp(tensorOpencv->getImpl()->backend(), "opencv") == 0 &&
           "Cannot convert tensor backend from opencv to cpu : tensor is not backend opencv.");

    // Create a tensor backend cpu from the dimensions of the tensor backend opencv
    std::shared_ptr<Aidge::Tensor> tensorCpu =
        std::make_shared<Aidge::Tensor>(tensorOpencv->dims());

    // Get the cv::Mat from the tensor backend Opencv.
    // dynamic_cast may fail: check before dereferencing instead of invoking UB.
    Aidge::TensorImpl_opencv_* tImplOpencv =
        dynamic_cast<Aidge::TensorImpl_opencv_*>(tensorOpencv->getImpl().get());
    if (tImplOpencv == nullptr) {
        throw std::runtime_error(
            "Cannot convert tensor backend from opencv to cpu: "
            "implementation is not a TensorImpl_opencv_.");
    }
    cv::Mat dataOpencv = tImplOpencv->getCvMat();

    // Convert the cv::Mat into a vector of cv::Mat (vector of channels)
    std::vector<cv::Mat> channels;
    cv::split(dataOpencv, channels);
    if (channels.empty()) {
        throw std::runtime_error("Cannot convert cv::Mat to Tensor: empty matrix.");
    }

    // Set the datatype of the cpu tensor from the OpenCV depth.
    switch (channels.front().depth()) {
    case CV_8U:
        tensorCpu->setDataType(Aidge::DataType::UInt8);
        break;
    case CV_8S:
        tensorCpu->setDataType(Aidge::DataType::Int8);
        break;
    case CV_16U:
        tensorCpu->setDataType(Aidge::DataType::UInt16);
        break;
    case CV_16S:
        tensorCpu->setDataType(Aidge::DataType::Int16);
        break;
    case CV_32S:
        tensorCpu->setDataType(Aidge::DataType::Int32);
        break;
    case CV_32F:
        tensorCpu->setDataType(Aidge::DataType::Float32);
        break;
    case CV_64F:
        tensorCpu->setDataType(Aidge::DataType::Float64);
        break;
    default:
        throw std::runtime_error("Cannot convert cv::Mat to Tensor: incompatible types.");
    }

    // Set backend cpu (allocates the destination buffer).
    tensorCpu->setBackend("cpu");

    // Copy each channel into the cpu tensor, channel-major.
    // Loop invariants hoisted: the raw pointer and the per-channel element
    // count (all channels produced by cv::split share the same geometry).
    void* const rawPtr = tensorCpu->getImpl()->rawPtr();
    const std::size_t channelElems =
        static_cast<std::size_t>(channels.front().rows) *
        static_cast<std::size_t>(channels.front().cols);
    std::size_t count = 0;
    for (const cv::Mat& channel : channels) {
        const std::size_t offset = count * channelElems;
        switch (channel.depth()) {
        case CV_8U:
            convert<unsigned char>(channel, rawPtr, offset);
            break;
        case CV_8S:
            convert<char>(channel, rawPtr, offset);
            break;
        case CV_16U:
            convert<unsigned short>(channel, rawPtr, offset);
            break;
        case CV_16S:
            convert<short>(channel, rawPtr, offset);
            break;
        case CV_32S:
            convert<int>(channel, rawPtr, offset);
            break;
        case CV_32F:
            convert<float>(channel, rawPtr, offset);
            break;
        case CV_64F:
            convert<double>(channel, rawPtr, offset);
            break;
        default:
            throw std::runtime_error("Cannot convert cv::Mat to Tensor: incompatible types.");
        }
        ++count;
    }
    return tensorCpu;
}