Commit 662ecccd authored by Maxence Naud

Merge branch 'fix/GenericOp' into standardization

parents c0acb6b5 3a091045
1 merge request: !15 Remove CParameter memory leak
Pipeline #31158 failed
Showing 971 additions and 81 deletions
@@ -10,9 +10,12 @@ stages:
   - build
   # Unit test stage
   - test
+  # Code coverage
+  - coverage
 
 include:
   - local: '/.gitlab/ci/_global.gitlab-ci.yml'
   - local: '/.gitlab/ci/static_analysis.gitlab-ci.yml'
   - local: '/.gitlab/ci/build.gitlab-ci.yml'
   - local: '/.gitlab/ci/test.gitlab-ci.yml'
+  - local: '/.gitlab/ci/coverage.gitlab-ci.yml'
@@ -9,5 +9,8 @@ variables:
   GIT_SSL_NO_VERIFY: 1
   DEBIAN_FRONTEND: noninteractive
 
 default:
-  image: n2d2-ci/ubuntu20.04/cpu:latest
\ No newline at end of file
+  image: nvidia/cuda:12.2.0-devel-ubuntu22.04
+  before_script:
+    - apt update
+    - apt install -y cmake cppcheck python-is-python3 pip git gcovr
 build:ubuntu_cpp:
   stage: build
+  needs: []
   tags:
     - docker
-  image: n2d2-ci/ubuntu20.04/cpu:latest
   script:
     - mkdir -p build_cpp
     - mkdir -p install_cpp
     - cd build_cpp
-    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON ..
+    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
     - make -j4 all install
   artifacts:
@@ -18,9 +18,9 @@ build:ubuntu_cpp:
 build:ubuntu_python:
   stage: build
+  needs: []
   tags:
     - docker
-  image: n2d2-ci/ubuntu20.04/cpu:latest
   script:
     - python3 -m pip install virtualenv
@@ -30,4 +30,33 @@ build:ubuntu_python:
     - python3 -m pip install .
   artifacts:
     paths:
       - venv/
\ No newline at end of file
+build:windows_cpp:
+  stage: build
+  needs: []
+  tags:
+    - windows
+  image: buildtools
+  before_script:
+    # Install Chocolatey
+    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install git -Y
+    - choco install python -Y
+    # Update PATH
+    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+  script:
+    - mkdir -p build_cpp
+    - mkdir -p install_cpp
+    - cd build_cpp
+    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
+    - cmake --build . -j2
+    - cmake --install . --config Debug
+  artifacts:
+    paths:
+      - build_cpp/
+      - install_cpp/
+coverage:ubuntu_cpp:
+  stage: coverage
+  needs: ["build:ubuntu_cpp"]
+  tags:
+    - docker
+  script:
+    - cd build_cpp
+    - ctest --output-on-failure
+    - gcovr --xml-pretty --exclude-unreachable-branches --print-summary -o coverage.xml --root ${CI_PROJECT_DIR} --filter '\.\./include/' --filter '\.\./src/'
+  coverage: /^\s*lines:\s*\d+.\d+\%/
+  artifacts:
+    name: ${CI_JOB_NAME}-${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA}
+    expire_in: 2 days
+    reports:
+      coverage_report:
+        coverage_format: cobertura
+        path: build_cpp/coverage.xml
+
+coverage:ubuntu_python:
+  stage: coverage
+  needs: ["build:ubuntu_python"]
+  tags:
+    - docker
+  script:
+    - source venv/bin/activate
+    - python3 -m pip install numpy coverage
+    - cd aidge_core
+    - python3 -m coverage run --source=. -m unittest discover -s unit_tests/ -v -b
+    - python3 -m coverage report
+    - python3 -m coverage xml
+  coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
+  artifacts:
+    reports:
+      coverage_report:
+        coverage_format: cobertura
+        path: aidge_core/coverage.xml
@@ -3,21 +3,45 @@ test:ubuntu_cpp:
   needs: ["build:ubuntu_cpp"]
   tags:
     - docker
-  image: n2d2-ci/ubuntu20.04/cpu:latest
   script:
     - cd build_cpp
-    - ctest --output-on-failure
+    - ctest --output-junit ctest-results.xml --output-on-failure
+  artifacts:
+    reports:
+      junit: build_cpp/ctest-results.xml
 test:ubuntu_python:
   stage: test
   needs: ["build:ubuntu_python"]
   tags:
     - docker
-  image: n2d2-ci/ubuntu20.04/cpu:latest
   script:
     - source venv/bin/activate
     - cd aidge_core
+    - python3 -m pip install unittest-xml-reporting
     - python3 -m pip list
-    # Run on discovery all tests located in core/unit_tests/python and discard the stdout
-    # only to show the errors/warnings and the results of the tests
-    - python3 -m unittest discover -s unit_tests/ -v -b 1> /dev/null
+    # Run on discovery all tests located in core/unit_tests/python
+    - python3 -m xmlrunner discover -s unit_tests/ -v -b --output-file xmlrunner-results.xml
+  artifacts:
+    reports:
+      junit: aidge_core/xmlrunner-results.xml
+test:windows_cpp:
+  stage: test
+  needs: ["build:windows_cpp"]
+  tags:
+    - windows
+  image: buildtools
+  before_script:
+    # Install Chocolatey
+    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    # Update PATH
+    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+  script:
+    - cd build_cpp
+    - ctest --output-junit ctest-results.xml --output-on-failure
+  artifacts:
+    reports:
+      junit: build_cpp/ctest-results.xml
@@ -12,20 +12,27 @@ set(module_name _${project}) # target name
 project(${project})
 
-##############################################
-# Import utils CMakeLists
-set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
-include(PybindModuleCreation)
-
 ##############################################
 # Define options
 option(PYBIND "python binding" ON)
 option(WERROR "Warning as error" OFF)
 option(TEST "Enable tests" ON)
+option(COVERAGE "Enable coverage" OFF)
+
+##############################################
+# Import utils CMakeLists
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
+include(PybindModuleCreation)
+
+if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
+    Include(CodeCoverage)
+endif()
 
 ##############################################
 # Find system dependencies
 ##############################################
 
 # Create target and set properties
@@ -76,6 +83,10 @@ else()
         /W4>)
 endif()
 
+if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
+    append_coverage_compiler_flags()
+endif()
+
 ##############################################
 # Installation instructions
......
+![Pipeline status](https://gitlab.eclipse.org/eclipse/aidge/aidge_core/badges/main/pipeline.svg?ignore_skipped=true) ![C++ coverage](https://gitlab.eclipse.org/eclipse/aidge/aidge_core/badges/main/coverage.svg?job=coverage:ubuntu_cpp&key_text=C%2B%2B+coverage&key_width=90) ![Python coverage](https://gitlab.eclipse.org/eclipse/aidge/aidge_core/badges/main/coverage.svg?job=coverage:ubuntu_python&key_text=Python+coverage&key_width=100)
 # Aidge Core library
 You can find here the C++ code of the Core library of Aidge.
......
This diff is collapsed.
@@ -93,14 +93,15 @@ public:
         assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
         return *(mInputs[inputIdx].get());
     }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
     inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
         return mInputs[inputIdx];
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "Add Operators has only 1 outputs");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
@@ -108,8 +109,9 @@ public:
         assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
         return std::static_pointer_cast<Data>(mInputs[inputIdx]);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
......
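Note: the same replacement pattern recurs in every operator header of this commit. The GCC-specific __attribute__((unused)) qualifier is dropped in favour of either commenting out the parameter name or keeping the name for the assert and then discarding it with a (void) cast, which is portable (useful for the new Windows/MSVC jobs). A minimal standalone sketch of the two idioms, with a hypothetical ExampleOp and an assumed IOIndex_t alias, for illustration only:

#include <cassert>
#include <cstddef>

using IOIndex_t = std::size_t;   // assumption: stands in for Aidge's index type

struct ExampleOp {
    int mOutput = 42;

    // Idiom 1: comment out the name when the parameter is never read.
    int output(const IOIndex_t /*outputIdx*/) const { return mOutput; }

    // Idiom 2: keep the name for the assert, then silence the
    // "unused parameter" warning in release builds, where NDEBUG
    // compiles the assert away.
    int getOutput(const IOIndex_t outputIdx) const {
        assert(outputIdx == 0 && "operator supports only 1 output");
        (void) outputIdx; // avoid unused warning
        return mOutput;
    }
};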
@@ -63,8 +63,9 @@ public:
         setDatatype(DataType::Float32);
     }
 
-    constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 1 && "operators supports only 3 inputs");
+        (void) inputIdx; // avoid unused warning
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
         mInput = std::dynamic_pointer_cast<Tensor>(data);
@@ -91,29 +92,34 @@ public:
     bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
 
-    inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
         assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
         return *(mInput.get());
     }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
-    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx == 0 && "AvgPooling Operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
         return mInput;
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "AvgPooling Operators has only 1 outputs");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
-    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mInput);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
......
@@ -65,7 +65,7 @@ public:
     //     return *in;
     // }
 
-    constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 5 && "operators supports only 5 inputs");
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
@@ -90,15 +90,16 @@ public:
         assert(inputIdx < 5 && "operators supports only 5 inputs");
         return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
     inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 5 && "BatchNorm Operators supports only 5 inputs");
         return mInputs[inputIdx];
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "BatchNorm Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
@@ -107,8 +108,9 @@ public:
         assert(inputIdx < 5 && "operators supports only 5 inputs");
         return std::static_pointer_cast<Data>(mInputs[inputIdx]);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
......
@@ -79,7 +79,7 @@ public:
     // }
 
-    constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
@@ -114,15 +114,16 @@ public:
     inline Tensor& input(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
     inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 3 && "Conv Operators supports only 3 inputs");
         return mInputs[inputIdx];
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "Conv Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
@@ -131,8 +132,9 @@ public:
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         return std::static_pointer_cast<Data>(mInputs[inputIdx]);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
......
@@ -71,7 +71,7 @@ class ConvDepthWise_Op : public Operator,
         setDatatype(DataType::Float32);
     }
 
-    constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
@@ -114,15 +114,16 @@ class ConvDepthWise_Op : public Operator,
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         return *(mInputs[inputIdx].get());
     }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
     inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 3 && "ConvDepthWise Operators supports only 3 inputs");
         return mInputs[inputIdx];
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "ConvDepthWise Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
@@ -131,8 +132,9 @@ class ConvDepthWise_Op : public Operator,
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         return std::static_pointer_cast<Data>(mInputs[inputIdx]);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
......
@@ -57,7 +57,7 @@ public:
         setDatatype(DataType::Float32);
     }
 
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         if (inputIdx == 2) {
@@ -89,15 +89,16 @@ public:
     inline Tensor& input(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*inputIdx*/) const override final { return *(mOutput.get()); }
 
     inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 3 && "FC Operators supports only 3 inputs");
         return mInputs[inputIdx];
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "FC Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
@@ -106,8 +107,9 @@ public:
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         return std::static_pointer_cast<Data>(mInputs[inputIdx]);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
......
@@ -62,7 +62,12 @@ class GenericOperator_Op
      * @return template<class T> The parameter.
      */
     template <class T>
-    T getParameter(std::string const &key) const {
+    const T& getParameter(std::string const &key) const {
+        return mParams.Get<const T>(key);
+    }
+
+    template <class T>
+    T& getParameter(std::string const &key) {
         return mParams.Get<T>(key);
     }
@@ -75,8 +80,8 @@ class GenericOperator_Op
     /// internal buffer in a new location (previous value is still in memory at
     /// its previous location)
     template <class T>
-    void addParameter(std::string const &key, T const &value) {
-        mParams.Add<T>(key, value);
+    void addParameter(std::string const &key, T&& value) {
+        mParams.Add<T>(key, std::forward<T>(value));
     }
@@ -85,7 +90,7 @@ class GenericOperator_Op
     std::vector<std::string> getParametersName() { return mParams.getParametersName(); }
 
     // Override Virtual Opertor methods
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, __attribute__((unused)) std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final {
        printf("Info: using associateInput() on a GenericOperator.\n");
    }
......
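Note on this GenericOperator change: the const getter now returns a const reference, so reading a parameter no longer copies it, and the forwarding-reference overload of addParameter lets callers move values into the store instead of copying them. A minimal self-contained sketch of the same pattern, using a hypothetical AnyStore as a stand-in for Aidge's CParameter container (assumption, illustration only, not the library's actual implementation):

#include <any>
#include <cassert>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for the parameter container used by GenericOperator_Op.
class AnyStore {
public:
    template <class T>
    void Add(const std::string& key, T&& value) {
        // std::forward preserves the value category: lvalues are copied,
        // rvalues are moved, so large parameters are not copied twice.
        mValues[key] = std::forward<T>(value);
    }

    template <class T>
    const T& Get(const std::string& key) const {
        return std::any_cast<const T&>(mValues.at(key)); // read-only, no copy
    }

    template <class T>
    T& Get(const std::string& key) {
        return std::any_cast<T&>(mValues.at(key));       // mutable access
    }

private:
    std::map<std::string, std::any> mValues;
};

int main() {
    AnyStore params;
    params.Add("kernel_dims", std::vector<int>{3, 3});               // rvalue: moved in
    const auto& dims = params.Get<std::vector<int>>("kernel_dims");  // no copy on read
    assert(dims.size() == 2);
    params.Get<std::vector<int>>("kernel_dims").push_back(1);        // mutate in place
    return 0;
}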
@@ -53,8 +53,9 @@ public:
         setDatatype(DataType::Float32);
     }
 
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         mInput = std::dynamic_pointer_cast<Tensor>(data);
     }
@@ -69,26 +70,30 @@ public:
     }
 
-    inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mInput.get()); }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
-    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert((inputIdx == 0) && "LeakyReLU Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
         return mInput;
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "LeakyReLU Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
-    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mInput);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
......
@@ -55,7 +55,7 @@ public:
         setDatatype(DataType::Float32);
     }
 
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 2 && "operators supports only 2 inputs");
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
@@ -81,15 +81,16 @@ public:
     inline Tensor& input(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 2 && "operators supports only 2 inputs");
         return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
     inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 2 && "MatMul Operators has 2 inputs");
         return mInputs[inputIdx];
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "MatMul Operators has 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
@@ -98,8 +99,9 @@ public:
         assert(inputIdx < 2 && "operators supports only 2 inputs");
         return std::static_pointer_cast<Data>(mInputs[inputIdx]);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
        return std::static_pointer_cast<Data>(mOutput);
    }
......
@@ -51,39 +51,41 @@ public:
         setDatatype(tensor->dataType());
     }
 
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, __attribute__((unused)) std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final {
         assert(false && "Producer operator takes no input");
     }
 
-    constexpr void computeOutputDims() override final {}
+    void computeOutputDims() override final {}
 
-    constexpr bool outputDimsForwarded() const override final {return true;}
+    bool outputDimsForwarded() const override final {return true;}
 
-    [[noreturn]] inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    [[noreturn]] inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final {
         assert(false);
         exit(-1);
     }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
-    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t /*inputIdx*/) const override final {
         assert(false && "Producer Operator has no input");
         return nullptr;
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "Producer Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
-    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    std::shared_ptr<Data> getRawInput(const IOIndex_t /*inputIdx*/) const override final {
         assert(false && "Producer operator takes no input");
         return nullptr;
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
......
@@ -42,8 +42,9 @@ public:
         setDatatype(DataType::Float32);
     }
 
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         mInput = std::dynamic_pointer_cast<Tensor>(data);
     }
@@ -58,26 +59,30 @@ public:
     }
 
-    inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mInput.get()); }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
-    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert((inputIdx == 0) && "ReLU Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
         return mInput;
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "ReLU Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
-    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mInput);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
......
@@ -42,8 +42,9 @@ public:
         setDatatype(DataType::Float32);
     }
 
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         mInput = std::dynamic_pointer_cast<Tensor>(data);
     }
@@ -58,26 +59,30 @@ public:
     }
 
-    inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mInput.get()); }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
-    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert((inputIdx == 0) && "Softmax Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
         return mInput;
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "Softmax Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
-    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mInput);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
        return std::static_pointer_cast<Data>(mOutput);
    }
......