Commit 2bc94a09 authored by Thibault Allenet

Merge branch 'rework_code' into 'master'

[Upd] Code rework

See merge request !7
parents 81651d84 e3a46aa8
Related merge requests: !7 [Upd] Code rework, !3 Update master to v0.1.0
Pipeline #38927 failed
Showing with 882 additions and 300 deletions
@@ -25,7 +25,9 @@
################################################################################
stages:
# Build
# Analyse code
- static_analysis
# Build Aidge
- build
# Unit test stage
- test
@@ -34,6 +36,7 @@ stages:
include:
- local: '/.gitlab/ci/_global.gitlab-ci.yml'
# - local: '/.gitlab/ci/static_analysis.gitlab-ci.yml'
- local: '/.gitlab/ci/build.gitlab-ci.yml'
- local: '/.gitlab/ci/test.gitlab-ci.yml'
- local: '/.gitlab/ci/coverage.gitlab-ci.yml'
# - local: '/.gitlab/ci/coverage.gitlab-ci.yml'
################################################################################
# Centralized definitions of common job parameter values. #
# Parameters with many optional configurations may be in separate files. #
# #
################################################################################
variables:
GIT_SUBMODULE_STRATEGY: recursive
OMP_NUM_THREADS: 4
GIT_SSL_NO_VERIFY: 1
DEBIAN_FRONTEND: noninteractive
# See https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
workflow:
rules:
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
when: never
- if: $CI_COMMIT_BRANCH
default:
image: nvidia/cuda:12.2.0-devel-ubuntu22.04
before_script:
- apt update
- apt install -y cmake cppcheck python-is-python3 pip git gcovr unzip curl libopencv-dev
build:ubuntu_cpp:
stage: build
needs: []
tags:
- docker
script:
# Download dependencies
- DEPENDENCY_JOB="build:ubuntu_cpp"
# aidge_core
- DEPENDENCY_NAME="aidge_core"
- !reference [.download_dependency, script]
# aidge_backend_cpu
- DEPENDENCY_NAME="aidge_backend_cpu"
- !reference [.download_dependency, script]
# Build current module
- export CMAKE_PREFIX_PATH=../install_cpp
- mkdir -p build_cpp
- cd build_cpp
- cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
- make -j4 all install
artifacts:
expire_in: 1 week
paths:
- build_cpp/
- install_cpp/
build:ubuntu_cpp_g++10:
stage: build
needs: []
tags:
- docker
script:
# Download dependencies
- DEPENDENCY_JOB="build:ubuntu_cpp"
# aidge_core
- DEPENDENCY_NAME="aidge_core"
- !reference [.download_dependency, script]
# aidge_backend_cpu
- DEPENDENCY_NAME="aidge_backend_cpu"
- !reference [.download_dependency, script]
# Build current module
- export CMAKE_PREFIX_PATH=../install_cpp
- apt install -y g++-10
- mkdir -p build_cpp
- mkdir -p install_cpp
- cd build_cpp
- export CXX=/usr/bin/g++-10
- cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
- make -j4 all install
build:ubuntu_cpp_g++12:
stage: build
needs: []
tags:
- docker
script:
# Download dependencies
- DEPENDENCY_JOB="build:ubuntu_cpp"
# aidge_core
- DEPENDENCY_NAME="aidge_core"
- !reference [.download_dependency, script]
# aidge_backend_cpu
- DEPENDENCY_NAME="aidge_backend_cpu"
- !reference [.download_dependency, script]
# Build current module
- export CMAKE_PREFIX_PATH=../install_cpp
- apt install -y g++-12
- mkdir -p build_cpp
- mkdir -p install_cpp
- cd build_cpp
- export CXX=/usr/bin/g++-12
- cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
- make -j4 all install
build:ubuntu_cpp_clang12:
stage: build
needs: []
tags:
- docker
script:
# Download dependencies
- DEPENDENCY_JOB="build:ubuntu_cpp"
# aidge_core
- DEPENDENCY_NAME="aidge_core"
- !reference [.download_dependency, script]
# aidge_backend_cpu
- DEPENDENCY_NAME="aidge_backend_cpu"
- !reference [.download_dependency, script]
# Build current module
- export CMAKE_PREFIX_PATH=../install_cpp
- apt install -y clang-12
- mkdir -p build_cpp
- mkdir -p install_cpp
- cd build_cpp
- export CXX=/usr/bin/clang++-12
- cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
- make -j4 all install
build:ubuntu_cpp_clang15:
stage: build
needs: []
tags:
- docker
script:
# Download dependencies
- DEPENDENCY_JOB="build:ubuntu_cpp"
# aidge_core
- DEPENDENCY_NAME="aidge_core"
- !reference [.download_dependency, script]
# aidge_backend_cpu
- DEPENDENCY_NAME="aidge_backend_cpu"
- !reference [.download_dependency, script]
# Build current module
- export CMAKE_PREFIX_PATH=../install_cpp
- apt install -y clang-15
- mkdir -p build_cpp
- mkdir -p install_cpp
- cd build_cpp
- export CXX=/usr/bin/clang++-15
- cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
- make -j4 all install
build:ubuntu_python:
stage: build
needs: []
tags:
- docker
script:
# Download dependencies
- DEPENDENCY_JOB="build:ubuntu_python"
# aidge_core
- DEPENDENCY_NAME="aidge_core"
- !reference [.download_dependency, script]
# aidge_backend_cpu
- DEPENDENCY_NAME="aidge_backend_cpu"
- !reference [.download_dependency, script]
- python3 -m pip install virtualenv
- virtualenv venv
- source venv/bin/activate
- python3 -m pip install -r requirements.txt
- python3 -m pip install .
artifacts:
expire_in: 1 week
paths:
- venv/
# build:windows_cpp:
# stage: build
# needs: []
# tags:
# - windows
# image: buildtools
# before_script:
# # Install Chocolatey
# - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# # Install dependencies
# - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
# - choco install git -Y
# - choco install python -Y
# - choco install cuda -Y
# # Update PATH
# - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
# script:
# # Download dependencies
# # aidge_core
# - 'curl "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:windows_cpp" -o build_artifacts.zip'
# - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force
# - Remove-Item .\build_cpp\ -Recurse
# # aidge_backend_cpu
# - 'curl "https://gitlab.eclipse.org/api/v4/projects/5140/jobs/artifacts/master/download?job=build:windows_cpp" -o build_artifacts.zip'
# - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force
# - Remove-Item .\build_cpp\ -Recurse
# - $env:CMAKE_PREFIX_PATH = '../install_cpp'
# - mkdir -p build_cpp
# - cd build_cpp
# - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
# - cmake --build . -j2
# - cmake --install . --config Debug
# artifacts:
# expire_in: 1 week
# paths:
# - build_cpp/
# - install_cpp/
# build:windows_python:
# stage: build
# needs: []
# tags:
# - windows
# image: buildtools
# before_script:
# # Install Chocolatey
# - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# # Install dependencies
# - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
# - choco install git -Y
# - choco install python -Y
# - choco install cuda -Y
# # Update PATH
# - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
# script:
# # Download dependencies
# # aidge_core (Python)
# - 'curl "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:windows_python" -o build_artifacts.zip'
# - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force
# # aidge_backend_cpu (Python)
# - 'curl "https://gitlab.eclipse.org/api/v4/projects/5140/jobs/artifacts/master/download?job=build:windows_python" -o build_artifacts.zip'
# - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force
# - python -m pip install virtualenv
# - virtualenv venv
# - venv\Scripts\Activate.ps1
# - python -m pip install -r requirements.txt
# - python -m pip install .
# artifacts:
# expire_in: 1 week
# paths:
# - venv/
coverage:ubuntu_cpp:
stage: coverage
needs: ["build:ubuntu_cpp"]
tags:
- docker
script:
- cd build_cpp
- ctest --output-on-failure
# HTML report for visualization
- gcovr --html-details --exclude-unreachable-branches -o coverage.html --root ${CI_PROJECT_DIR} --filter '\.\./include/' --filter '\.\./src/'
# Cobertura XML report for GitLab integration
- gcovr --xml-pretty --exclude-unreachable-branches --print-summary -o coverage.xml --root ${CI_PROJECT_DIR} --filter '\.\./include/' --filter '\.\./src/'
coverage: /^\s*lines:\s*\d+.\d+\%/
artifacts:
name: ${CI_JOB_NAME}-${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA}
expire_in: 2 days
reports:
coverage_report:
coverage_format: cobertura
path: build_cpp/coverage.xml
coverage:ubuntu_python:
stage: coverage
needs: ["build:ubuntu_python"]
tags:
- docker
script:
- source venv/bin/activate
- python3 -m pip install numpy coverage
- cd ${CI_PROJECT_NAME}
# Retrieve the installation path of the module, since it is installed with pip.
- export MODULE_LOCATION=`python -c "import ${CI_PROJECT_NAME} as _; print(_.__path__[0])"`
- python3 -m coverage run --source=$MODULE_LOCATION -m unittest discover -s unit_tests/ -v -b
- python3 -m coverage report
- python3 -m coverage xml
coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
artifacts:
reports:
coverage_report:
coverage_format: cobertura
path: ${CI_PROJECT_NAME}/coverage.xml
test:ubuntu_cpp:
stage: test
needs: ["build:ubuntu_cpp"]
tags:
- docker
script:
- cd build_cpp
- ctest --output-junit ctest-results.xml --output-on-failure
artifacts:
reports:
junit: build_cpp/ctest-results.xml
test:ubuntu_python:
stage: test
needs: ["build:ubuntu_python"]
tags:
- docker
script:
- source venv/bin/activate
- cd ${CI_PROJECT_NAME}
- python3 -m pip install numpy unittest-xml-reporting
- python3 -m pip list
# Discover and run all tests located in unit_tests/
- python3 -m xmlrunner discover -s unit_tests/ -v -b --output-file xmlrunner-results.xml
artifacts:
reports:
junit: ${CI_PROJECT_NAME}/xmlrunner-results.xml
# test:windows_cpp:
# stage: test
# needs: ["build:windows_cpp"]
# tags:
# - windows
# image: buildtools
# before_script:
# # Install Chocolatey
# - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# # Install dependencies
# - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
# - choco install python -Y
# # Update PATH
# - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
# script:
# - cd build_cpp
# - ctest --output-junit ctest-results.xml --output-on-failure
# artifacts:
# reports:
# junit: build_cpp/ctest-results.xml
@@ -9,12 +9,13 @@
*
********************************************************************************/
#ifndef AIDGE_OPENCV_IMPORTS_H_
#define AIDGE_OPENCV_IMPORTS_H_
#ifndef AIDGE_OPENCV_OPENCV_H_
#define AIDGE_OPENCV_OPENCV_H_
#include "aidge/backend/opencv/data/DataUtils.hpp"
#include "aidge/backend/opencv/data/TensorImpl.hpp"
#include "aidge/backend/opencv/stimuli/StimuliImpl_opencv_imread.hpp"
#include "aidge/backend/opencv/database/MNIST.hpp"
#include "aidge/backend/opencv/stimuli/StimulusImpl_opencv_imread.hpp"
#include "aidge/backend/opencv/utils/Utils.hpp"
#endif /* AIDGE_OPENCV_IMPORTS_H_ */
\ No newline at end of file
#endif /* AIDGE_OPENCV_OPENCV_H_ */
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_OPENCV_DATA_DATAUTILS_H_
#define AIDGE_OPENCV_DATA_DATAUTILS_H_
#include "opencv2/core.hpp"
#include <cstdint>
namespace Aidge {
namespace detail {
template <typename T> struct CV_C1_CPP { static constexpr int value = -1; };
template <> struct CV_C1_CPP<std::int8_t> {
static constexpr int value = CV_8SC1;
};
template <> struct CV_C1_CPP<std::int16_t> {
static constexpr int value = CV_16SC1;
};
template <> struct CV_C1_CPP<std::int32_t> {
static constexpr int value = CV_32SC1;
};
template <> struct CV_C1_CPP<std::uint8_t> {
static constexpr int value = CV_8UC1;
};
template <> struct CV_C1_CPP<std::uint16_t> {
static constexpr int value = CV_16UC1;
};
template <> struct CV_C1_CPP<float> {
static constexpr int value = CV_32FC1;
};
template <> struct CV_C1_CPP<double> {
static constexpr int value = CV_64FC1;
};
template <typename T> constexpr int CV_C1_CPP_v = CV_C1_CPP<T>::value;
} // namespace detail
} // namespace Aidge
#endif /* AIDGE_OPENCV_DATA_DATAUTILS_H_ */
\ No newline at end of file
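For reference, a minimal standalone sketch (not part of this commit) of how the CV_C1_CPP_v trait above maps a C++ scalar type to the matching single-channel OpenCV type constant at compile time:
// Hedged sketch, not from the diff: compile-time type mapping via Aidge::detail::CV_C1_CPP_v.
#include <cstdint>
#include <opencv2/core.hpp>
#include "aidge/backend/opencv/data/DataUtils.hpp"
int main() {
    static_assert(Aidge::detail::CV_C1_CPP_v<std::uint8_t> == CV_8UC1, "uint8 maps to CV_8UC1");
    static_assert(Aidge::detail::CV_C1_CPP_v<float> == CV_32FC1, "float maps to CV_32FC1");
    cv::Mat m(28, 28, Aidge::detail::CV_C1_CPP_v<float>);   // 28x28 single-channel float matrix
    return (m.type() == CV_32FC1) ? 0 : 1;
}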
#ifndef TensorImpl_opencv_H_
#define TensorImpl_opencv_H_
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_OPENCV_DATA_TENSORIMPL_H_
#define AIDGE_OPENCV_DATA_TENSORIMPL_H_
#include "opencv2/core.hpp"
@@ -9,23 +20,11 @@
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/future_std/span.hpp"
#include "aidge/backend/opencv/data/DataUtils.hpp"
#include <iostream>
namespace {
template <typename T> struct OpenCvType { static const int type; };
template <> const int OpenCvType<char>::type = CV_8SC1;
template <> const int OpenCvType<signed char>::type = CV_8SC1;
template <> const int OpenCvType<short>::type = CV_16SC1;
template <> const int OpenCvType<int>::type = CV_32SC1;
template <> const int OpenCvType<unsigned char>::type = CV_8UC1;
template <> const int OpenCvType<unsigned short>::type = CV_16UC1;
template <> const int OpenCvType<float>::type = CV_32FC1;
template <> const int OpenCvType<double>::type = CV_64FC1;
} // namespace
namespace Aidge {
class TensorImpl_opencv_ {
public:
virtual const cv::Mat& getCvMat() const = 0;
@@ -39,8 +38,8 @@ private:
future_std::span<cv::Mat> mData;
std::unique_ptr<cv::Mat> mDataOwner = std::unique_ptr<cv::Mat>(new cv::Mat(0,0,OpenCvType<T>::type));
std::unique_ptr<cv::Mat> mDataOwner = std::unique_ptr<cv::Mat>(new cv::Mat(0, 0, detail::CV_C1_CPP_v<T>));
public:
static constexpr const char *Backend = "opencv";
@@ -51,7 +50,7 @@ public:
// Create iterators for both matrices
cv::MatConstIterator_<T> it1 = mDataOwner->begin<T>();
const future_std::span<cv::Mat> tmp = reinterpret_cast<const TensorImpl_opencv<T> &>(otherImpl).data();
const cv::Mat otherData = *(tmp.data());
cv::MatConstIterator_<T> it2 = otherData.begin<T>();
@@ -71,9 +70,9 @@ public:
// native interface
const future_std::span<cv::Mat> data() const { return mData; }
std::size_t scalarSize() const override { return sizeof(T); }
inline std::size_t scalarSize() const override { return sizeof(T); }
std::size_t size() const override { return mData.size(); }
inline std::size_t size() const override { return mData.size(); }
void setDevice(DeviceIdx_t device) override {
AIDGE_ASSERT(device == 0, "device cannot be != 0 for Opencv backend");
@@ -91,56 +90,57 @@ public:
}
AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
if (srcDt == DataType::Float64) {
std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
switch (srcDt) {
case DataType::Float64:
std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
static_cast<T *>(rawPtr()));
}
else if (srcDt == DataType::Float32) {
std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
break;
case DataType::Float32:
std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
static_cast<T *>(rawPtr()));
}
else if (srcDt == DataType::Float16) {
std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
break;
case DataType::Float16:
std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
static_cast<T *>(rawPtr()));
}
else if (srcDt == DataType::Int64) {
std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
break;
case DataType::Int64:
std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
static_cast<T *>(rawPtr()));
}
else if (srcDt == DataType::UInt64) {
std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
break;
case DataType::UInt64:
std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
static_cast<T *>(rawPtr()));
}
else if (srcDt == DataType::Int32) {
std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
break;
case DataType::Int32:
std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
static_cast<T *>(rawPtr()));
}
else if (srcDt == DataType::UInt32) {
std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
break;
case DataType::UInt32:
std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
static_cast<T *>(rawPtr()));
}
else if (srcDt == DataType::Int16) {
std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
break;
case DataType::Int16:
std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
static_cast<T *>(rawPtr()));
}
else if (srcDt == DataType::UInt16) {
std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
break;
case DataType::UInt16:
std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
static_cast<T *>(rawPtr()));
}
else if (srcDt == DataType::Int8) {
std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
break;
case DataType::Int8:
std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
static_cast<T *>(rawPtr()));
}
else if (srcDt == DataType::UInt8) {
std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
break;
case DataType::UInt8:
std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
static_cast<T *>(rawPtr()));
}
else {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
break;
default:
AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
}
}
void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) override {
AIDGE_ASSERT(device.first == Backend, "backend must match");
AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
@@ -181,7 +181,7 @@ public:
const cv::Mat& getCvMat() const override { return *mDataOwner.get(); }
void setCvMat(const cv::Mat& mat) override {mDataOwner.reset(new cv::Mat(std::move(mat)));}
virtual ~TensorImpl_opencv() = default;
@@ -198,14 +198,14 @@ private:
: (mTensor.nbDims() > 0) ? 1
: 0),
(mTensor.nbDims() > 0) ? static_cast<int>(mTensor.dims()[0]) : 0,
OpenCvType<T>::type);
detail::CV_C1_CPP_v<T>);
} else {
std::vector<cv::Mat> channels;
for (std::size_t k = 0; k < mTensor.dims()[2]; ++k) {
channels.push_back(cv::Mat(static_cast<int>(mTensor.dims()[1]),
static_cast<int>(mTensor.dims()[0]),
OpenCvType<T>::type));
detail::CV_C1_CPP_v<T>));
}
cv::merge(channels, myNewMatrix);
@@ -213,7 +213,7 @@ private:
mDataOwner.reset(new cv::Mat(std::forward<cv::Mat>(myNewMatrix)));
mData = future_std::span<cv::Mat>(mDataOwner.get(), mTensor.size());
}
}
};
@@ -224,16 +224,16 @@ static Registrar<Tensor> registrarTensorImpl_opencv_Float64(
static Registrar<Tensor> registrarTensorImpl_opencv_Float32(
{"opencv", DataType::Float32}, Aidge::TensorImpl_opencv<float>::create);
static Registrar<Tensor> registrarTensorImpl_opencv_Int32(
{"opencv", DataType::Int32}, Aidge::TensorImpl_opencv<int>::create);
{"opencv", DataType::Int32}, Aidge::TensorImpl_opencv<std::int32_t>::create);
static Registrar<Tensor> registrarTensorImpl_opencv_Int16(
{"opencv", DataType::Int16}, Aidge::TensorImpl_opencv<int16_t>::create);
{"opencv", DataType::Int16}, Aidge::TensorImpl_opencv<std::int16_t>::create);
static Registrar<Tensor> registrarTensorImpl_opencv_UInt16(
{"opencv", DataType::UInt16}, Aidge::TensorImpl_opencv<uint16_t>::create);
{"opencv", DataType::UInt16}, Aidge::TensorImpl_opencv<std::uint16_t>::create);
static Registrar<Tensor> registrarTensorImpl_opencv_Int8(
{"opencv", DataType::Int8}, Aidge::TensorImpl_opencv<int8_t>::create);
{"opencv", DataType::Int8}, Aidge::TensorImpl_opencv<std::int8_t>::create);
static Registrar<Tensor> registrarTensorImpl_opencv_UInt8(
{"opencv", DataType::UInt8}, Aidge::TensorImpl_opencv<uint8_t>::create);
{"opencv", DataType::UInt8}, Aidge::TensorImpl_opencv<std::uint8_t>::create);
} // namespace
} // namespace Aidge
#endif /* TensorImpl_opencv_H_ */
#endif /* AIDGE_OPENCV_DATA_TENSORIMPL_H_ */
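For context, a minimal sketch (not part of this commit) of how the registrars above are exercised: selecting the "opencv" backend on a tensor dispatches to the TensorImpl_opencv<T> matching its data type; the dimensions here are hypothetical.
// Hedged sketch, not from the diff.
#include <memory>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/opencv/data/TensorImpl.hpp"
int main() {
    auto tensor = std::make_shared<Aidge::Tensor>(std::vector<Aidge::DimSize_t>{28, 28, 1});
    tensor->setBackend("opencv");                 // picks the {"opencv", DataType} registrar
    tensor->setDataType(Aidge::DataType::UInt8);  // switches to TensorImpl_opencv<std::uint8_t>
    return tensor->getImpl() ? 0 : 1;
}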
#ifndef MNIST_H
#define MNIST_H
#include <fstream>
#include <iomanip>
#include <tuple>
#include "opencv2/core.hpp"
#include <opencv2/opencv.hpp>
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_OPENCV_DATABASE_MNIST_H_
#define AIDGE_OPENCV_DATABASE_MNIST_H_
#include <algorithm> // std::reverse
#include <cstddef>  // std::size_t
#include <cstdint> // std::uint32_t
#include <string>
#include <tuple> // std::tuple_size
#include <vector>
#include "aidge/data/Database.hpp"
#include "aidge/stimuli/Stimuli.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/scheduler/Scheduler.hpp"
#include "aidge/stimuli/Stimulus.hpp"
// #include "aidge/graph/GraphView.hpp"
// #include "aidge/scheduler/Scheduler.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/opencv/utils/Utils.hpp"
// #include "aidge/backend/opencv/data/TensorImpl.hpp"
@@ -21,7 +31,7 @@ namespace Aidge {
template <class T> void swapEndian(T& obj)
{
unsigned char* memp = reinterpret_cast<unsigned char*>(&obj);
std::uint8_t* memp = reinterpret_cast<std::uint8_t*>(&obj);
std::reverse(memp, memp + sizeof(T));
}
@@ -35,40 +45,12 @@
return bint.c[0] == 1;
}
class MNIST : public Database {
public:
MNIST(const std::string& dataPath,
// const GraphView transformations,
bool train,
bool loadDataInMemory = false)
: Database(),
mDataPath(dataPath),
// mDataTransformations(transformations),
mTrain(train),
mLoadDataInMemory(loadDataInMemory)
{
// Uncompress train database
if (mTrain){
MNIST::uncompress(mDataPath + "/train-images-idx3-ubyte",
dataPath + "/train-labels-idx1-ubyte");
}else { // Uncompress test database
MNIST::uncompress(mDataPath + "/t10k-images-idx3-ubyte",
dataPath + "/t10k-labels-idx1-ubyte");
}
}
std::vector<std::shared_ptr<Tensor>> getItem(std::size_t index) override;
void uncompress(const std::string& dataPath,
const std::string& labelPath);
std::size_t getLen() override;
std::size_t getNbModalities() override;
union MagicNumber {
unsigned int value;
unsigned char byte[4];
std::uint32_t value;
std::uint8_t byte[4];
};
enum DataType {
@@ -78,25 +60,24 @@
Int = 0x0C,
Float = 0x0D,
Double = 0x0E
};
};
protected:
/// Stimulus data path
const std::string mDataPath;
/// Stimuli data path
std::string mDataPath;
// True selects the train database, false selects the test database
bool mTrain;
// True loads the images in memory, false reloads them at each call
bool mLoadDataInMemory;
/// Stimuli data
/// Stimulus data
// Each index of the vector is one item of the database
// One item of the MNIST database is the tuple <Image,label>
// The first stimulus of the tuple is a grayscale image of a handwritten digit
// The second stimulus of the tuple is the label associated with the digit: an unsigned integer 0-9
std::vector<std::tuple<Stimuli,Stimuli>> mStimulis;
mutable std::vector<std::tuple<Stimulus,Stimulus>> mStimuli;
/// Data Transformations
// Data transformations use the GraphView mechanism
@@ -106,7 +87,43 @@ protected:
// Scheduler to run the graph of data transformations
// Scheduler mScheduler;
public:
MNIST(const std::string& dataPath,
// const GraphView transformations,
bool train,
bool loadDataInMemory = false)
: Database(),
mDataPath(dataPath),
// mDataTransformations(transformations),
mTrain(train),
mLoadDataInMemory(loadDataInMemory)
{
// Uncompress train database
if (mTrain) {
uncompress(mDataPath + "/train-images-idx3-ubyte",
dataPath + "/train-labels-idx1-ubyte");
} else { // Uncompress test database
uncompress(mDataPath + "/t10k-images-idx3-ubyte",
dataPath + "/t10k-labels-idx1-ubyte");
}
}
~MNIST() noexcept;
public:
std::vector<std::shared_ptr<Tensor>> getItem(const std::size_t index) const override final;
inline std::size_t getLen() const noexcept override final {
return mStimuli.size();
}
inline std::size_t getNbModalities() const noexcept override final {
return std::tuple_size<decltype(mStimuli)::value_type>::value;
}
private:
void uncompress(const std::string& dataPath, const std::string& labelPath);
};
}
#endif // MNIST_H
#endif // AIDGE_OPENCV_DATABASE_MNIST_H_
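A minimal usage sketch of the reworked MNIST database (not part of this commit), assuming the uncompressed MNIST IDX files are present under a hypothetical path:
// Hedged sketch, not from the diff.
#include <iostream>
#include "aidge/backend/opencv/database/MNIST.hpp"
int main() {
    Aidge::MNIST mnist("/path/to/mnist", /*train=*/false);    // hypothetical path, test split
    std::cout << "items: " << mnist.getLen()
              << ", modalities per item: " << mnist.getNbModalities() << '\n';
    const auto item = mnist.getItem(0);                        // {image tensor, label tensor}
    return (item.size() == mnist.getNbModalities()) ? 0 : 1;
}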
@@ -9,51 +9,53 @@
*
********************************************************************************/
#ifndef LOAD_H_
#define LOAD_H_
#ifndef AIDGE_OPENCV_STIMULI_STIMULUSIMPLOPENCVIMREAD_H_
#define AIDGE_OPENCV_STIMULI_STIMULUSIMPLOPENCVIMREAD_H_
#include <cstring>
#include <string>
#include <memory>
#include <iostream>
#include "opencv2/core.hpp"
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgcodecs.hpp> // cv::IMREAD_COLOR
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/StimuliImpl.hpp"
#include "aidge/stimuli/Stimuli.hpp"
#include "aidge/backend/StimulusImpl.hpp"
#include "aidge/stimuli/Stimulus.hpp"
#include "aidge/backend/opencv/data/TensorImpl.hpp"
#include "aidge/backend/opencv/utils/Utils.hpp"
namespace Aidge {
class StimuliImpl_opencv_imread : public StimuliImpl {
public:
StimuliImpl_opencv_imread(const std::string& dataPath="", int colorFlag=cv::IMREAD_COLOR) :
mDataPath(dataPath),
mColorFlag(colorFlag) {}
virtual ~StimuliImpl_opencv_imread() {};
std::shared_ptr<Tensor> load() override;
class StimulusImpl_opencv_imread : public StimulusImpl {
private:
/// Stimulus data path
const std::string mDataPath;
const int mColorFlag;
static std::unique_ptr<StimuliImpl_opencv_imread> create(const std::string& dataPath) {
return std::make_unique<StimuliImpl_opencv_imread>(dataPath);
public:
StimulusImpl_opencv_imread(const std::string& dataPath="", std::int32_t colorFlag=cv::IMREAD_COLOR)
: mDataPath(dataPath),
mColorFlag(colorFlag)
{
// ctor
}
protected:
~StimulusImpl_opencv_imread() noexcept;
/// Stimuli data path
std::string mDataPath;
int mColorFlag;
public:
static std::unique_ptr<StimulusImpl_opencv_imread> create(const std::string& dataPath) {
return std::make_unique<StimulusImpl_opencv_imread>(dataPath);
}
public:
std::shared_ptr<Tensor> load() const override;
};
namespace {
static Registrar<Aidge::Stimuli> registrarStimuliImpl_opencv_png(
{"opencv", "png"}, Aidge::StimuliImpl_opencv_imread::create);
static Registrar<Aidge::Stimuli> registrarStimuliImpl_opencv_pgm(
{"opencv", "pgm"}, Aidge::StimuliImpl_opencv_imread::create);
static Registrar<Aidge::Stimulus> registrarStimulusImpl_opencv_png(
{"opencv", "png"}, Aidge::StimulusImpl_opencv_imread::create);
static Registrar<Aidge::Stimulus> registrarStimulusImpl_opencv_pgm(
{"opencv", "pgm"}, Aidge::StimulusImpl_opencv_imread::create);
} // namespace
} // namespace Aidge
#endif /* LOAD_H_ */
#endif /* AIDGE_OPENCV_STIMULI_STIMULUSIMPLOPENCVIMREAD_H_ */
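A minimal usage sketch of the renamed stimulus implementation (not part of this commit, mirroring the unit test later in this diff); the image path is hypothetical:
// Hedged sketch, not from the diff.
#include <memory>
#include "aidge/backend/opencv/stimuli/StimulusImpl_opencv_imread.hpp"
#include "aidge/data/Tensor.hpp"
int main() {
    Aidge::StimulusImpl_opencv_imread stimulus("/path/to/digit.pgm");   // hypothetical path
    std::shared_ptr<Aidge::Tensor> tensor = stimulus.load();            // cv::imread + tensorOpencv
    return tensor ? 0 : 1;
}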
@@ -9,92 +9,49 @@
*
********************************************************************************/
#ifndef AIDGE_BACKEND_OPENCV_UTILS_ATTRIBUTES_H_
#define AIDGE_BACKEND_OPENCV_UTILS_ATTRIBUTES_H_
#ifndef AIDGE_OPENCV_UTILS_UTILS_H_
#define AIDGE_OPENCV_UTILS_UTILS_H_
#include <opencv2/core/mat.hpp> // cv::Mat
#include <memory>
#include <tuple>
#include <vector>
#include "opencv2/core.hpp"
#include <opencv2/opencv.hpp>
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/opencv/data/TensorImpl.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
/**
* @brief Instantiate an Aidge tensor with backend "opencv" from an OpenCV matrix
*
* @param mat the cv::Mat to instantiate the tensor from
* @return std::shared_ptr<Tensor> aidge tensor
*/
inline std::shared_ptr<Tensor> tensorOpencv(cv::Mat mat){
// Get Mat dims
std::vector<DimSize_t> matDims = std::vector<DimSize_t>({static_cast<DimSize_t>(mat.cols),
static_cast<DimSize_t>(mat.rows),
static_cast<DimSize_t>(mat.channels())});
// Create tensor from the dims of the Cv::Mat
std::shared_ptr<Tensor> tensor = std::make_shared<Tensor>(matDims);
// Set backend opencv
tensor->setBackend("opencv");
// Set Data Type
switch (mat.depth()) {
case CV_8U:
tensor->setDataType(Aidge::DataType::UInt8);
break;
case CV_8S:
tensor->setDataType(Aidge::DataType::Int8);
break;
case CV_16U:
tensor->setDataType(Aidge::DataType::UInt16);
break;
case CV_16S:
tensor->setDataType(Aidge::DataType::Int16);
break;
case CV_32S:
tensor->setDataType(Aidge::DataType::Int32);
break;
case CV_32F:
tensor->setDataType(Aidge::DataType::Float32);
break;
case CV_64F:
tensor->setDataType(Aidge::DataType::Float64);
break;
default:
throw std::runtime_error(
"Cannot convert cv::Mat to Tensor: incompatible types.");
}
/**
* @brief Instantiate an Aidge tensor with backend "opencv" from an OpenCV matrix
*
* @param mat the cv::Mat to instantiate the tensor from
* @return std::shared_ptr<Tensor> aidge tensor
*/
std::shared_ptr<Tensor> tensorOpencv(cv::Mat mat);
/**
* @brief Copy the data from a source 2D cv::Mat to a destination pointer with an offset
*
* @tparam CV_T The standard type corresponding to the OpenCV data type
* @param mat OpenCV 2D mat to copy the data from
* @param data destination pointer
* @param offset offset on the destination data pointer
*/
template <class CV_T>
void convert(const cv::Mat& mat, void* data, std::size_t offset);
// Cast the tensorImpl to access setCvMat function
TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor->getImpl().get());
tImpl_opencv->setCvMat(mat);
return tensor;
}
/**
* @brief Copy the data from a source 2D cv::Mat to a destination pointer with an offset
*
* @tparam CV_T The standard type corresponding to the OpenCV data type
* @param mat OpenCV 2D mat to copy the data from
* @param data destination pointer
* @param offset offset on the destination data pointer
*/
template <class CV_T>
void convert(const cv::Mat& mat, void* data, size_t offset);
/**
* @brief Convert a tensor backend opencv into a tensor backend cpu
*
* @param tensorOpencv tensor with backend opencv (contains a cv::mat)
* @return std::shared_ptr<Tensor> tensor backend cpu (contains a std::vector)
*/
std::shared_ptr<Tensor> convertCpu(std::shared_ptr<Aidge::Tensor> tensorOpencv);
/**
* @brief Convert a tensor backend opencv into a tensor backend cpu
*
* @param tensorOpencv tensor with backend opencv (contains a cv::mat)
* @return std::shared_ptr<Tensor> tensor backend cpu (contains a std::vector)
*/
std::shared_ptr<Tensor> convertCpu(std::shared_ptr<Aidge::Tensor> tensorOpencv);
} // namespace
#endif // AIDGE_BACKEND_OPENCV_UTILS_ATTRIBUTES_H_
\ No newline at end of file
#endif // AIDGE_OPENCV_UTILS_UTILS_H_
\ No newline at end of file
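A minimal round-trip sketch for the two helpers declared above (not part of this commit); the matrix dimensions are hypothetical:
// Hedged sketch, not from the diff.
#include <memory>
#include <opencv2/core.hpp>
#include "aidge/backend/opencv/utils/Utils.hpp"
int main() {
    cv::Mat mat = cv::Mat::ones(28, 28, CV_8UC1);                     // hypothetical 8-bit image
    std::shared_ptr<Aidge::Tensor> ocv = Aidge::tensorOpencv(mat);    // backend "opencv", dims {cols, rows, channels}
    std::shared_ptr<Aidge::Tensor> cpu = Aidge::convertCpu(ocv);      // backend "cpu", contiguous storage
    return (cpu->size() == ocv->size()) ? 0 : 1;
}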
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/backend/opencv/database/MNIST.hpp"
#include <cstdint>
#include <fstream>
#include <iomanip>
#include <tuple>
#include "opencv2/core.hpp"
#include <opencv2/opencv.hpp>
#include "aidge/backend/opencv/utils/Utils.hpp"
Aidge::MNIST::~MNIST() noexcept = default;
void Aidge::MNIST::uncompress(const std::string& dataPath,
const std::string& labelPath)
{
@@ -10,9 +33,9 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
throw std::runtime_error("Could not open images file: " + dataPath);
MagicNumber magicNumber;
unsigned int nbImages;
unsigned int nbRows;
unsigned int nbColumns;
std::uint32_t nbImages;
std::uint32_t nbRows;
std::uint32_t nbColumns;
images.read(reinterpret_cast<char*>(&magicNumber.value),
sizeof(magicNumber));
@@ -27,8 +50,9 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
Aidge::swapEndian(nbColumns);
}
if (magicNumber.byte[3] != 0 || magicNumber.byte[2] != 0
|| magicNumber.byte[1] != Unsigned || magicNumber.byte[0] != 3) {
// if (magicNumber.byte[3] != 0 || magicNumber.byte[2] != 0
// || magicNumber.byte[1] != Unsigned || magicNumber.byte[0] != 3) {
if (magicNumber.value != 0x00000803) { // 0, 0, unsigned, 3
throw std::runtime_error("Wrong file format for images file: "
+ dataPath);
}
@@ -40,7 +64,7 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
throw std::runtime_error("Could not open labels file: " + labelPath);
MagicNumber magicNumberLabels;
unsigned int nbItemsLabels;
std::uint32_t nbItemsLabels;
labels.read(reinterpret_cast<char*>(&magicNumberLabels.value),
sizeof(magicNumberLabels));
@@ -51,9 +75,10 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
Aidge::swapEndian(nbItemsLabels);
}
if (magicNumberLabels.byte[3] != 0 || magicNumberLabels.byte[2] != 0
|| magicNumberLabels.byte[1] != Unsigned
|| magicNumberLabels.byte[0] != 1) {
// if (magicNumberLabels.byte[3] != 0 || magicNumberLabels.byte[2] != 0
// || magicNumberLabels.byte[1] != Unsigned
// || magicNumberLabels.byte[0] != 1) {
if (magicNumberLabels.value != 0x00000801) { // 0, 0, unsigned, 1
throw std::runtime_error("Wrong file format for labels file: "
+ labelPath);
}
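// Illustrative only, not part of this diff: the magic values checked above follow the
// standard MNIST IDX header layout, read big-endian:
//   bytes 0-1 : 0x00 0x00 (reserved)
//   byte  2   : data type code (0x08 = unsigned byte, the `Unsigned` enum value)
//   byte  3   : number of dimensions (3 for the images file, 1 for the labels file)
static_assert(0x00000803 == ((0x08 << 8) | 0x03), "images magic: unsigned byte, 3 dims");
static_assert(0x00000801 == ((0x08 << 8) | 0x01), "labels magic: unsigned byte, 1 dim");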
@@ -63,8 +88,8 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
"The number of images and the number of labels does not match.");
// For each image...
for (unsigned int i = 0; i < nbImages; ++i) {
unsigned char buff;
for (std::uint32_t i = 0; i < nbImages; ++i) {
std::uint8_t buff;
std::ostringstream nameStr;
nameStr << dataPath << "[" << std::setfill('0') << std::setw(5) << i
@@ -74,10 +99,10 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
if (!std::ifstream(nameStr.str()).good()) {
cv::Mat frame(cv::Size(nbColumns, nbRows), CV_8UC1);
for (unsigned int y = 0; y < nbRows; ++y) {
for (unsigned int x = 0; x < nbColumns; ++x) {
for (std::uint32_t y = 0; y < nbRows; ++y) {
for (std::uint32_t x = 0; x < nbColumns; ++x) {
images.read(reinterpret_cast<char*>(&buff), sizeof(buff));
frame.at<unsigned char>(y, x) = buff;
frame.at<std::uint8_t>(y, x) = buff;
}
}
@@ -90,18 +115,18 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
}
// Create the stimuli of the image
Aidge::Stimuli StimuliImg(nameStr.str(), mLoadDataInMemory);
StimuliImg.setBackend("opencv");
// Create the stimuli of the corresponding label by filing integer to the stimuli directly
Aidge::Stimulus StimulusImg(nameStr.str(), mLoadDataInMemory);
StimulusImg.setBackend("opencv");
// Create the stimulus of the corresponding label by filling the integer into the stimulus directly
labels.read(reinterpret_cast<char*>(&buff), sizeof(buff));
int label = static_cast<int>(buff);
const std::int32_t label = static_cast<std::int32_t>(buff);
std::shared_ptr<Tensor> lbl = std::make_shared<Tensor>(Array1D<int, 1>{label});
Aidge::Stimuli StimuliLabel(lbl);
Aidge::Stimulus StimulusLabel(lbl);
// Push back the corresponding image & label in the vector
mStimulis.push_back(std::make_tuple(StimuliImg, StimuliLabel));
mStimuli.push_back(std::make_tuple(StimulusImg, StimulusLabel));
}
if (images.eof())
@@ -124,23 +149,14 @@ void Aidge::MNIST::uncompress(const std::string& dataPath,
}
std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::MNIST::getItem(std::size_t index) {
std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::MNIST::getItem(const std::size_t index) const {
std::vector<std::shared_ptr<Tensor>> item;
// Load the digit tensor
// Load the digit tensor
// TODO: currently converts the OpenCV tensor, but this operation will be carried out by a convert operator in the preprocessing graph
item.push_back(Aidge::convertCpu((std::get<0>(mStimulis.at(index))).load()));
// item.push_back((std::get<0>(mStimulis.at(index))).load());
// Load the label tensor
item.push_back((std::get<1>(mStimulis.at(index))).load());
return item;
}
item.push_back(Aidge::convertCpu((std::get<0>(mStimuli.at(index))).load()));
// item.push_back((std::get<0>(mStimuli.at(index))).load());
// Load the label tensor
item.push_back((std::get<1>(mStimuli.at(index))).load());
std::size_t Aidge::MNIST::getLen(){
return mStimulis.size();
}
std::size_t Aidge::MNIST::getNbModalities(){
size_t tupleSize = std::tuple_size<decltype(mStimulis)::value_type>::value;
return tupleSize;
return item;
}
\ No newline at end of file
#include "aidge/backend/opencv/stimuli/StimuliImpl_opencv_imread.hpp"
std::shared_ptr<Aidge::Tensor> Aidge::StimuliImpl_opencv_imread::load() {
cv::Mat cvImg = cv::imread(mDataPath, mColorFlag);
if (cvImg.empty()) {
throw std::runtime_error("Could not open images file: " + mDataPath);
}
return tensorOpencv(cvImg);
}
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/backend/opencv/stimuli/StimulusImpl_opencv_imread.hpp"
#include <memory>
#include <stdexcept>
#include <string>
#include "opencv2/core.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/opencv/utils/Utils.hpp"
Aidge::StimulusImpl_opencv_imread::~StimulusImpl_opencv_imread() noexcept = default;
std::shared_ptr<Aidge::Tensor> Aidge::StimulusImpl_opencv_imread::load() const {
cv::Mat cvImg = cv::imread(mDataPath, mColorFlag);
if (cvImg.empty()) {
throw std::runtime_error("Could not open images file: " + mDataPath);
}
return tensorOpencv(cvImg);
}
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <opencv2/core.hpp> // cv::Mat, cv::split
#include <cstddef>
#include <cstdint>
#include <cstring> // std::memcpy, std::strcmp
#include <stdexcept> // std::runtime_error
#include <memory>
#include <vector>
#include "aidge/backend/opencv/utils/Utils.hpp"
#include "aidge/backend/opencv/data/DataUtils.hpp" // detail::CvtoAidge
#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/backend/opencv/data/TensorImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/utils/Types.h" // DimSize_t
static Aidge::DataType CVtoAidge(const int matDepth) {
Aidge::DataType res;
switch (matDepth) {
case CV_8U:
res = Aidge::DataType::UInt8;
break;
case CV_8S:
res = Aidge::DataType::Int8;
break;
case CV_16U:
res = Aidge::DataType::UInt16;
break;
case CV_16S:
res = Aidge::DataType::Int16;
break;
case CV_16F:
res = Aidge::DataType::Float16;
break;
case CV_32S:
res = Aidge::DataType::Int32;
break;
case CV_32F:
res = Aidge::DataType::Float32;
break;
case CV_64F:
res = Aidge::DataType::Float64;
break;
default:
throw std::runtime_error(
"Cannot convert cv::Mat to Tensor: incompatible types.");
}
return res;
}
std::shared_ptr<Aidge::Tensor> Aidge::tensorOpencv(cv::Mat mat) {
// Get Mat dims
const std::vector<DimSize_t> matDims = std::vector<DimSize_t>({static_cast<DimSize_t>(mat.cols),
static_cast<DimSize_t>(mat.rows),
static_cast<DimSize_t>(mat.channels())});
// Create tensor from the dims of the Cv::Mat
std::shared_ptr<Tensor> tensor = std::make_shared<Tensor>(matDims);
// Set backend opencv
tensor->setBackend("opencv");
// Set Data Type
tensor->setDataType(CVtoAidge(mat.depth()));
// Cast the tensorImpl to access setCvMat function
TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor->getImpl().get());
tImpl_opencv->setCvMat(mat);
return tensor;
}
template <class CV_T>
void Aidge::convert(const cv::Mat& mat, void* data, size_t offset)
{
void Aidge::convert(const cv::Mat& mat, void* data, std::size_t offset)
{
if (mat.isContinuous())
std::memcpy(reinterpret_cast<void*>(reinterpret_cast<CV_T*>(data) + offset), mat.ptr<CV_T>(), sizeof(CV_T)*(mat.cols*mat.rows));
else {
throw std::runtime_error(
"Poui pwoup convert not support if matrix not contiguous");
}
}
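A brief usage sketch (illustrative, not part of the commit) of the convert helper above, written as if it lived in this same translation unit; the dimensions are hypothetical:
// Hedged sketch: copy one continuous single-channel plane into a raw buffer,
// the same per-channel call pattern convertCpu() uses below.
static void convertExampleSketch() {                           // hypothetical helper
    cv::Mat plane = cv::Mat::ones(28, 28, CV_32FC1);           // 28x28 float plane
    std::vector<float> dst(28 * 28);
    Aidge::convert<float>(plane, dst.data(), /*offset=*/0);    // memcpy of 28*28 floats
}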
@@ -29,40 +107,15 @@ std::shared_ptr<Aidge::Tensor> Aidge::convertCpu(std::shared_ptr<Aidge::Tensor>
cv::split(dataOpencv, channels);
// set the datatype of the cpu tensor
switch (channels[0].depth()) {
case CV_8U:
tensorCpu->setDataType(Aidge::DataType::UInt8);
break;
case CV_8S:
tensorCpu->setDataType(Aidge::DataType::Int8);
break;
case CV_16U:
tensorCpu->setDataType(Aidge::DataType::UInt16);
break;
case CV_16S:
tensorCpu->setDataType(Aidge::DataType::Int16);
break;
case CV_32S:
tensorCpu->setDataType(Aidge::DataType::Int32);
break;
case CV_32F:
tensorCpu->setDataType(Aidge::DataType::Float32);
break;
case CV_64F:
tensorCpu->setDataType(Aidge::DataType::Float64);
break;
default:
throw std::runtime_error(
"Cannot convert cv::Mat to Tensor: incompatible types.");
}
// Set backend cpu
tensorCpu->setDataType(CVtoAidge(channels[0].depth()));
// Set backend cpu
tensorCpu->setBackend("cpu");
// Convert & copy the cv::Mat into the tensor using the rawPtr of tensor cpu
std::size_t count = 0;
for (std::vector<cv::Mat>::const_iterator itChannel = channels.begin();
itChannel != channels.end();
for (std::vector<cv::Mat>::const_iterator itChannel = channels.cbegin();
itChannel != channels.cend();
++itChannel)
{
switch ((*itChannel).depth()) {
@@ -94,4 +147,4 @@ std::shared_ptr<Aidge::Tensor> Aidge::convertCpu(std::shared_ptr<Aidge::Tensor>
++count;
}
return tensorCpu;
}
}
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include "aidge/backend/opencv/database/MNIST.hpp"
@@ -11,7 +22,7 @@
using namespace Aidge;
TEST_CASE("DataProvider instanciation & test mnist","[Data][OpenCV]") {
// Create database
std::string path = "/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/test_mnist_database";
bool train = false;
@@ -20,7 +31,7 @@ TEST_CASE("DataProvider instanciation & test mnist","[Data][OpenCV]") {
// DataProvider settings
unsigned int batchSize = 256;
unsigned int number_batch = std::ceil(mnist.getLen() / batchSize);
// Instanciate the dataloader
DataProvider provider(mnist, batchSize);
@@ -29,7 +40,7 @@ TEST_CASE("DataProvider instanciation & test mnist","[Data][OpenCV]") {
auto batch = provider.readBatch(i*batchSize);
auto data_batch_ptr = static_cast<uint8_t*>(batch[0]->getImpl()->rawPtr());
auto label_batch_ptr = static_cast<int*>(batch[1]->getImpl()->rawPtr());
for (unsigned int s = 0; s < batchSize; ++s){
auto data = mnist.getItem(i*batchSize+s)[0];
auto label = mnist.getItem(i*batchSize+s)[1];
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include "opencv2/core.hpp"
#include <opencv2/imgcodecs.hpp>
#include <memory>
#include <iostream>
#include "aidge/stimuli/Stimuli.hpp"
#include "aidge/stimuli/Stimulus.hpp"
#include "aidge/backend/opencv/data/TensorImpl.hpp"
#include "aidge/data/Tensor.hpp"
using namespace Aidge;
TEST_CASE("Stimuli creation", "[Stimuli][OpenCV]") {
TEST_CASE("Stimulus creation", "[Stimulus][OpenCV]") {
SECTION("Instanciation & load an image") {
// Load image with imread
cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm");
REQUIRE(true_mat.empty()==false);
// Create Stimuli
Stimuli stimg("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm", true);
// Create Stimulus
Stimulus stimg("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm", true);
stimg.setBackend("opencv");
// Load the image in a tensor & save it in memory
@@ -35,7 +46,7 @@ TEST_CASE("Stimuli creation", "[Stimuli][OpenCV]") {
// This time the tensor is already loaded in memory
std::shared_ptr<Tensor> tensor_load_2;
tensor_load_2 = stimg.load();
// Access the cv::Mat with the tensor
TensorImpl_opencv_* tImpl_opencv_2 = dynamic_cast<TensorImpl_opencv_*>(tensor_load_2->getImpl().get());
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include "opencv2/core.hpp"
#include <opencv2/imgcodecs.hpp>
#include <memory>
#include <iostream>
#include "aidge/backend/opencv/stimuli/StimuliImpl_opencv_imread.hpp"
#include "aidge/backend/opencv/stimuli/StimulusImpl_opencv_imread.hpp"
#include "aidge/backend/opencv/data/TensorImpl.hpp"
#include "aidge/data/Tensor.hpp"
using namespace Aidge;
TEST_CASE("StimuliImpl_opencv_imread creation", "[StimuliImpl_opencv_imread][OpenCV]") {
TEST_CASE("StimulusImpl_opencv_imread creation", "[StimulusImpl_opencv_imread][OpenCV]") {
SECTION("Instanciation & load an image") {
// Load image with imread
// cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/Lenna.png");
cv::Mat true_mat = cv::imread("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm");
REQUIRE(true_mat.empty()==false);
// Create StimuliImpl_opencv_imread
// StimuliImpl_opencv_imread stImpl("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/Lenna.png");
StimuliImpl_opencv_imread stImpl("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm");
// Create StimulusImpl_opencv_imread
// StimulusImpl_opencv_imread stImpl("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/Lenna.png");
StimulusImpl_opencv_imread stImpl("/data1/is156025/tb256203/dev/eclipse_aidge/aidge/user_tests/train-images-idx3-ubyte[00001].pgm");
std::shared_ptr<Tensor> tensor_load;
tensor_load = stImpl.load();
// Access the cv::Mat with the tensor
TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensor_load->getImpl().get());
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include "aidge/data/Tensor.hpp"
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <catch2/catch_template_test_macros.hpp>
#include <memory>
@@ -25,7 +36,7 @@ cv::Mat createRandomMat(int rows, int cols) {
// TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", char, unsigned char, short, unsigned short, int, float, double) {
// TODO : perform test for char and double
TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", signed char, unsigned char, short, unsigned short, int, float, double) {
constexpr int num_test_matrices = 50;
SECTION("Test create tensor from opencv and convert to cpu") {
@@ -42,7 +53,7 @@ TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", signed char, unsigned char
for (int c = 0; c < ch; ++c){
// Create a random matrix
cv::Mat randomMat = createRandomMat<TestType>(rows, cols);
// Add each random matrix to the vector
// Add each random matrix to the vector
channels.push_back(randomMat);
}
// Merge the vector of cv mat into one cv mat
@@ -65,7 +76,7 @@ TEMPLATE_TEST_CASE("Opencv Utils", "[Utils][OpenCV]", signed char, unsigned char
// Check that the matrix inside the tensor corresponds to the original matrix
TensorImpl_opencv_* tImpl_opencv = dynamic_cast<TensorImpl_opencv_*>(tensorOcv->getImpl().get());
auto mat_tensor = tImpl_opencv->getCvMat();
REQUIRE(mat_tensor.size() == mat.size());
REQUIRE(cv::countNonZero(mat_tensor != mat) == 0);
......