Skip to content
Snippets Groups Projects
Commit 471e2cb9 authored by Thibault Allenet's avatar Thibault Allenet
Browse files

Initial commit: Stimuli, MNIST database, conversion of OpenCV tensor to CPU

parents
No related branches found
No related tags found
No related merge requests found
#include "aidge/backend/opencv/utils/Utils.hpp"
/**
 * @brief Copy the elements of a 2D cv::Mat plane into a destination buffer.
 * @tparam CV_T  element type matching the cv::Mat depth.
 * @param mat    source matrix (a single channel plane).
 * @param data   destination buffer, interpreted as an array of CV_T.
 * @param offset element (not byte) offset into the destination buffer.
 *
 * Previously threw std::runtime_error (with a garbled message) for
 * non-contiguous matrices; since cv::Mat rows are always individually
 * contiguous, a row-by-row copy handles that case instead.
 */
template <class CV_T>
void Aidge::convert(const cv::Mat& mat, void* data, size_t offset)
{
    CV_T* dst = reinterpret_cast<CV_T*>(data) + offset;
    const std::size_t rowElems = static_cast<std::size_t>(mat.cols);
    if (mat.isContinuous()) {
        // Whole plane is one contiguous span: a single memcpy suffices.
        std::memcpy(static_cast<void*>(dst), mat.ptr<CV_T>(),
                    sizeof(CV_T) * rowElems * static_cast<std::size_t>(mat.rows));
    } else {
        // View/ROI matrices have padded row strides: copy one row at a time.
        for (int r = 0; r < mat.rows; ++r) {
            std::memcpy(static_cast<void*>(dst + static_cast<std::size_t>(r) * rowElems),
                        mat.ptr<CV_T>(r),
                        sizeof(CV_T) * rowElems);
        }
    }
}
// Convert a tensor backed by the OpenCV implementation (cv::Mat storage) into
// a new tensor backed by the "cpu" backend, copying the data channel by
// channel. Throws std::runtime_error when the cv::Mat depth has no matching
// Aidge::DataType (only CV_8U / CV_16U / CV_16S / CV_32S / CV_32F handled;
// CV_8S and CV_64F are commented out pending support).
std::shared_ptr<Aidge::Tensor> Aidge::convertCpu(std::shared_ptr<Aidge::Tensor> tensorOpencv){
// Assert the tensorOpencv is backend Opencv
assert(std::strcmp(tensorOpencv->getImpl()->backend(), "opencv") == 0 && "Cannot convert tensor backend from opencv to cpu : tensor is not backend opencv.");
// Create a tensor backend cpu from the dimensions of the tensor backend opencv
std::shared_ptr<Aidge::Tensor> tensorCpu = std::make_shared<Aidge::Tensor>(tensorOpencv->dims());
// Get the cv::Mat from the tensor backend Opencv.
// NOTE(review): the dynamic_cast result is dereferenced without a null check;
// this relies on the backend assert above guaranteeing the concrete impl type.
Aidge::TensorImpl_opencv_* tImplOpencv = dynamic_cast<Aidge::TensorImpl_opencv_*>(tensorOpencv->getImpl().get());
cv::Mat dataOpencv = tImplOpencv->getCvMat();
// Convert the cv::Mat into a vector of cv::Mat (vector of channels); after
// cv::split every channel plane has identical rows/cols.
std::vector<cv::Mat> channels;
cv::split(dataOpencv, channels);
// Set the datatype of the cpu tensor from the OpenCV element depth of the
// first channel (all channels of one cv::Mat share the same depth).
switch (channels[0].depth()) {
case CV_8U:
tensorCpu->setDatatype(Aidge::DataType::UInt8);
break;
// case CV_8S:
// tensorCpu->setDatatype(Aidge::DataType::Int8);
// break;
case CV_16U:
tensorCpu->setDatatype(Aidge::DataType::UInt16);
break;
case CV_16S:
tensorCpu->setDatatype(Aidge::DataType::Int16);
break;
case CV_32S:
tensorCpu->setDatatype(Aidge::DataType::Int32);
break;
case CV_32F:
tensorCpu->setDatatype(Aidge::DataType::Float32);
break;
// case CV_64F:
// tensorCpu->setDatatype(Aidge::DataType::Float64);
// break;
default:
throw std::runtime_error(
"Cannot convert cv::Mat to Tensor: incompatible types.");
}
// Set backend cpu (must happen after setDatatype so the impl allocates the
// right element size).
tensorCpu->setBackend("cpu");
// Convert & copy the cv::Mat into the tensor using the rawPtr of tensor cpu.
// `count` indexes the current channel: each channel plane of rows*cols
// elements is written at offset count*rows*cols, i.e. channel planes are laid
// out consecutively (channel-major) in the cpu buffer — presumably matching
// the dims ordering chosen by tensorOpencv(); TODO confirm.
std::size_t count = 0;
for (std::vector<cv::Mat>::const_iterator itChannel = channels.begin();
itChannel != channels.end();
++itChannel)
{
// Second switch mirrors the datatype switch above: dispatch the templated
// convert<>() on the concrete element type of this channel.
switch ((*itChannel).depth()) {
case CV_8U:
convert<unsigned char>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
break;
// case CV_8S:
// convert<char>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
// break;
case CV_16U:
convert<unsigned short>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
break;
case CV_16S:
convert<short>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
break;
case CV_32S:
convert<int>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
break;
case CV_32F:
convert<float>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
break;
// case CV_64F:
// convert<double>(*itChannel, tensorCpu->getImpl()->rawPtr(), count*static_cast<std::size_t>((*itChannel).rows*(*itChannel).cols));
// break;
default:
throw std::runtime_error(
"Cannot convert cv::Mat to Tensor: incompatible types.");
}
++count;
}
return tensorCpu;
}
# Fetch Catch2 (unit-test framework) at configure time.
# Fixed: `Include(...)` -> `include(...)` for consistency with the lowercase
# command style used everywhere else in this file (CMake is case-insensitive,
# but mixed casing is against convention).
include(FetchContent)
FetchContent_Declare(
    Catch2
    GIT_REPOSITORY https://github.com/catchorg/Catch2.git
    GIT_TAG v3.0.1 # or a later release
)
FetchContent_MakeAvailable(Catch2)

# Build one test executable from every .cpp file under this directory.
file(GLOB_RECURSE src_files "*.cpp")
add_executable(tests${module_name} ${src_files})
# Link the library under test, then Catch2's bundled main().
target_link_libraries(tests${module_name} PUBLIC ${module_name})
target_link_libraries(tests${module_name} PRIVATE Catch2::Catch2WithMain)

# Expose Catch2's CMake helpers and register every TEST_CASE with CTest.
list(APPEND CMAKE_MODULE_PATH ${catch2_SOURCE_DIR}/extras)
include(CTest)
include(Catch)
catch_discover_tests(tests${module_name})
#include <catch2/catch_test_macros.hpp>
#include "aidge/backend/opencv/database/MNIST.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/DataProvider.hpp"
// #include "aidge/backend/opencv/data/TensorImpl.hpp"
// #include "aidge/backend/cpu/data/TensorImpl.hpp"
using namespace Aidge;
// Verify that every sample served by DataProvider in batches matches the same
// sample fetched directly from the MNIST database, element by element.
TEST_CASE("DataProvider instanciation & test mnist","[Data][OpenCV]") {
    // Create database (test split).
    std::string path = "/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/test_mnist_database";
    bool train = false;
    MNIST mnist(path, train);

    // DataProvider settings.
    unsigned int batchSize = 256;
    // BUG FIX: std::ceil(getLen() / batchSize) was a no-op — the integer
    // division truncates before ceil runs, silently skipping a partial last
    // batch. Use integer ceil-division instead.
    const unsigned int datasetLen = static_cast<unsigned int>(mnist.getLen());
    unsigned int number_batch = (datasetLen + batchSize - 1) / batchSize;

    // Instanciate the dataloader
    DataProvider provider(mnist, batchSize);

    // Perform the tests on the batches
    for (unsigned int i = 0; i < number_batch; ++i){
        auto batch = provider.readBatch(i*batchSize);
        auto data_batch_ptr = static_cast<uint8_t*>(batch[0]->getImpl()->rawPtr());
        auto label_batch_ptr = static_cast<int*>(batch[1]->getImpl()->rawPtr());

        // The last batch may hold fewer than batchSize samples: clamp so we
        // never index past the end of the dataset.
        const unsigned int remaining = datasetLen - i*batchSize;
        const unsigned int currentBatchSize = (remaining < batchSize) ? remaining : batchSize;

        for (unsigned int s = 0; s < currentBatchSize; ++s){
            // Reference sample fetched directly from the database.
            auto data = mnist.getItem(i*batchSize+s)[0];
            auto label = mnist.getItem(i*batchSize+s)[1];
            unsigned int size_data = data->size();
            unsigned int size_label = label->size();
            auto data_ptr = static_cast<uint8_t*>(data->getImpl()->rawPtr());
            auto label_ptr = static_cast<int*>(label->getImpl()->rawPtr());

            // Element-wise comparison of the data tensor against its slot in
            // the batch buffer (samples are laid out back to back).
            for (unsigned int j = 0; j < size_data; ++j){
                auto element_data = data_ptr[j];
                auto element_data_batch = data_batch_ptr[size_data*s+j];
                REQUIRE(element_data == element_data_batch);
            }
            // Same element-wise comparison for the label tensor.
            for (unsigned int j = 0; j < size_label; ++j){
                auto element_label = label_ptr[j];
                auto element_label_batch = label_batch_ptr[size_label*s+j];
                REQUIRE(element_label == element_label_batch);
            }
        }
    }
}
\ No newline at end of file
#include <catch2/catch_test_macros.hpp>
#include "opencv2/core.hpp"
#include <opencv2/imgcodecs.hpp>
#include <memory>
#include <iostream>
#include "aidge/stimuli/Stimuli.hpp"
#include "aidge/backend/opencv/data/TensorImpl.hpp"
#include "aidge/data/Tensor.hpp"
using namespace Aidge;
// Verify that Stimuli loads an image identical to a direct cv::imread, both on
// the first load (from disk) and on the second load (from memory cache).
TEST_CASE("Stimuli creation", "[Stimuli][OpenCV]") {
    SECTION("Instanciation & load an image") {
        // Reference image loaded directly with OpenCV.
        cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm");
        REQUIRE(true_mat.empty()==false);

        // Create Stimuli on the same file with the OpenCV backend.
        Stimuli stimg("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm", true);
        stimg.setBackend("opencv");

        // First load: reads the image from disk into a tensor.
        std::shared_ptr<Tensor> tensor_load;
        tensor_load = stimg.load();
        // BUG FIX: guard the downcast — a failed dynamic_cast returns nullptr
        // and the old code dereferenced it unconditionally.
        TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor_load->getImpl().get());
        REQUIRE(tImpl_opencv != nullptr);
        REQUIRE(tImpl_opencv->getCvMat().size() == true_mat.size());
        REQUIRE(cv::countNonZero(tImpl_opencv->getCvMat() != true_mat) == 0);

        // Second load: the tensor is already in memory; content must match.
        std::shared_ptr<Tensor> tensor_load_2;
        tensor_load_2 = stimg.load();
        TensorImpl_opencv_* tImpl_opencv_2 = dynamic_cast<TensorImpl_opencv_*>(tensor_load_2->getImpl().get());
        REQUIRE(tImpl_opencv_2 != nullptr);
        REQUIRE(tImpl_opencv_2->getCvMat().size() == true_mat.size());
        REQUIRE(cv::countNonZero(tImpl_opencv_2->getCvMat() != true_mat) == 0);
    }
}
#include <catch2/catch_test_macros.hpp>
#include "opencv2/core.hpp"
#include <opencv2/imgcodecs.hpp>
#include <memory>
#include <iostream>
#include "aidge/backend/opencv/stimuli/StimuliImpl_opencv_imread.hpp"
#include "aidge/backend/opencv/data/TensorImpl.hpp"
#include "aidge/data/Tensor.hpp"
using namespace Aidge;
// Verify that StimuliImpl_opencv_imread loads an image identical to a direct
// cv::imread of the same file.
TEST_CASE("StimuliImpl_opencv_imread creation", "[StimuliImpl_opencv_imread][OpenCV]") {
    SECTION("Instanciation & load an image") {
        // Reference image loaded directly with OpenCV.
        // cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/Lenna.png");
        cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm");
        REQUIRE(true_mat.empty()==false);

        // Create StimuliImpl_opencv_imread on the same file and load it.
        // StimuliImpl_opencv_imread stImpl("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/Lenna.png");
        StimuliImpl_opencv_imread stImpl("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm");
        std::shared_ptr<Tensor> tensor_load;
        tensor_load = stImpl.load();

        // BUG FIX: guard the downcast — a failed dynamic_cast returns nullptr
        // and the old code dereferenced it unconditionally.
        TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor_load->getImpl().get());
        REQUIRE(tImpl_opencv != nullptr);
        REQUIRE(tImpl_opencv->getCvMat().size() == true_mat.size());
        REQUIRE(cv::countNonZero(tImpl_opencv->getCvMat() != true_mat) == 0);
    }
}
#include <catch2/catch_test_macros.hpp>
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/opencv/data/TensorImpl.hpp"
using namespace Aidge;
// Exercises OpenCV-backed Tensor construction from constant arrays, plus
// shape queries, raw access, element get/set, printing, and (in)equality.
TEST_CASE("Tensor creation opencv", "[Tensor][OpenCV]") {
    SECTION("from const array") {
        // Tensor under test, an identical copy, and a float variant with the
        // same values — all 2x2x2, all on the "opencv" backend.
        Tensor x;
        x.setBackend("opencv");
        x = Array3D<int,2,2,2>{
            {{{1, 2}, {3, 4}},
             {{5, 6}, {7, 8}}}};

        Tensor xCopy;
        xCopy.setBackend("opencv");
        xCopy = Array3D<int,2,2,2>{
            {{{1, 2}, {3, 4}},
             {{5, 6}, {7, 8}}}};

        Tensor xFloat;
        xFloat.setBackend("opencv");
        xFloat = Array3D<float,2,2,2>{
            {{{1., 2.}, {3., 4.}},
             {{5., 6.}, {7., 8.}}}};

        SECTION("Tensor features") {
            REQUIRE(x.nbDims() == 3);
            REQUIRE(x.dims()[0] == 2);
            REQUIRE(x.dims()[1] == 2);
            REQUIRE(x.dims()[2] == 2);
            REQUIRE(x.size() == 8);
        }

        SECTION("OpenCV tensor features") {
            // Inspect the underlying cv::Mat through the typed implementation:
            // a 2x2 two-channel matrix backs the 2x2x2 tensor.
            auto* ocvImpl = static_cast<TensorImpl_opencv<int>*>(x.getImpl().get());
            REQUIRE(ocvImpl->data().rows == 2);
            REQUIRE(ocvImpl->data().cols == 2);
            REQUIRE(ocvImpl->data().dims == 2);
            REQUIRE(ocvImpl->data().total() == 4);
            REQUIRE(ocvImpl->data().channels() == 2);
        }

        SECTION("Access to array") {
            // Raw buffer holds the flattened values in order.
            auto* raw = static_cast<int*>(x.getImpl()->rawPtr());
            REQUIRE(raw[0] == 1);
            REQUIRE(raw[7] == 8);
        }

        SECTION("get function") {
            REQUIRE(x.get<int>({0,0,0}) == 1);
            REQUIRE(x.get<int>({0,0,1}) == 2);
            REQUIRE(x.get<int>({0,1,1}) == 4);
            REQUIRE(x.get<int>({1,1,0}) == 7);
            // get<> returns a mutable reference: a write through it must stick.
            x.get<int>({1,1,1}) = 36;
            REQUIRE(x.get<int>({1,1,1}) == 36);
        }

        SECTION("Pretty printing for debug") {
            REQUIRE_NOTHROW(x.print());
        }

        SECTION("Tensor (in)equality") {
            REQUIRE(x == xCopy);
            REQUIRE_FALSE(x == xFloat);
        }
    }
}
#include <catch2/catch_test_macros.hpp>
#include <catch2/catch_template_test_macros.hpp>
#include <memory>
#include <string>
#include "opencv2/core.hpp"
#include "aidge/backend/opencv/utils/Utils.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/opencv/data/TensorImpl.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"
using namespace Aidge;
// Build a rows x cols single-channel cv::Mat whose element type is T, filled
// with uniformly-distributed values in [0, 255).
template <typename T>
cv::Mat createRandomMat(int rows, int cols) {
    cv::Mat result(rows, cols, cv::DataType<T>::type);
    cv::randu(result, cv::Scalar::all(0), cv::Scalar::all(255));
    return result;
}
// TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", char, unsigned char, short, unsigned short, int, float, double) {
// TODO : perform test for char and double
// Round-trip test: build a random multi-channel cv::Mat, wrap it in an
// OpenCV-backed tensor via tensorOpencv(), convert it to the cpu backend via
// convertCpu(), then compare every element.
// NOTE(review): std::rand() is never seeded, so the "random" shapes/values are
// identical on every run — deterministic, but not varied across runs.
TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", unsigned char, short, unsigned short, int, float) {
constexpr int num_test_matrices = 1000;
SECTION("Test create tensor from opencv and convert to cpu") {
// Generate random cv::mat
for (int i = 0; i < num_test_matrices; ++i) {
// Opencv mat have maximum 512 channels
int ch = std::rand() % 512 + 1;
int rows = std::rand() % 10 + 1;
int cols = std::rand() % 10 + 1;
std::vector<cv::Mat> channels;
cv::Mat mat;
for (int c = 0; c < ch; ++c){
// Create a random matrix
cv::Mat randomMat = createRandomMat<TestType>(rows, cols);
// Add each random matrix to the vector
channels.push_back(randomMat);
}
// Merge the vector of cv mat into one cv mat with `ch` channels
cv::merge(channels, mat);
// Check the size and datatype of the matrix
REQUIRE(mat.rows == rows);
REQUIRE(mat.cols == cols);
REQUIRE(mat.channels() == ch);
REQUIRE(mat.depth() == cv::DataType<TestType>::type);
// Instanciate a tensor opencv
auto tensorOcv = tensorOpencv(mat);
// Check the size of the tensor.
// NOTE(review): per these REQUIREs the convention is dims()[0] == cols,
// dims()[1] == rows, dims()[2] == channels — confirm this ordering is the
// one intended by tensorOpencv().
REQUIRE(mat.rows == tensorOcv->dims()[1]);
REQUIRE(mat.cols == tensorOcv->dims()[0]);
REQUIRE(mat.channels() == tensorOcv->dims()[2]);
// Check the matrix inside the tensor coorresponds to the matrix
REQUIRE(tensorOcv->getImpl() != nullptr);
TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensorOcv->getImpl().get());
auto mat_tensor = tImpl_opencv->getCvMat();
REQUIRE(mat_tensor.size() == mat.size());
REQUIRE(cv::countNonZero(mat_tensor != mat) == 0);
// Convert opencv tensor to cpu tensor
auto tensorCpu = convertCpu(tensorOcv);
// Split the mat from tensor opencv into channels
std::vector<cv::Mat> channels_split;
cv::split(mat_tensor, channels_split);
// Get the ptr to the std::vector<TestType> as a void * with rawPtr()
auto cpu_ptr = static_cast<TestType*>(tensorCpu->getImpl()->rawPtr());
// Compare the tensor cpu values with the cv mat in an elementwise fashion.
// Index math assumes the cpu buffer stores whole channel planes back to
// back (element (c,i,j) at c*rows*cols + i*cols + j), matching the
// per-channel copy performed by convertCpu.
// Loop over channels
for (int c = 0; c < ch; ++c) {
// Loop over rows
for (int i = 0; i < rows; ++i) {
// Loop over columns
for (int j = 0; j < cols; ++j) {
TestType elementValue = channels_split[c].at<TestType>(i, j);
TestType elementValue_cpu = cpu_ptr[c*(rows*cols)+i*cols+j];
REQUIRE(elementValue == elementValue_cpu);
}
}
}
}
}
}
\ No newline at end of file
0.0.1
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment