Commit 17f62d96 authored by Inna Kucher

Merge branch 'master' into 'test_LeNet'

# Conflicts:
#   README.md
parents 2f7d46f1 ebda3e10
Merge request !1: Adding LeNet generation scripts
cmake_minimum_required(VERSION 3.15)
file(READ "${CMAKE_SOURCE_DIR}/version.txt" version)
file(READ "${CMAKE_SOURCE_DIR}/project_name.txt" project)
message(STATUS "Project name: ${project}")
message(STATUS "Project version: ${version}")
# Note : project name is {project} and python module name is also {project}
set(module_name _${project}) # target name
project(${project})
##############################################
# Import utils CMakeLists
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
include(PybindModuleCreation) # defines generate_python_binding(), used below
##############################################
# Define options
option(PYBIND "python binding" ON)
option(WERROR "Warning as error" OFF)
option(TEST "Enable tests" ON)
option(COVERAGE "Enable coverage" OFF)
##############################################
if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
include(CodeCoverage)
endif()
##############################################
# Find system dependencies
find_package(aidge_core REQUIRED)
find_package(aidge_backend_cpu REQUIRED)
##############################################
# Create target and set properties
file(GLOB_RECURSE src_files "src/*.cpp")
file(GLOB_RECURSE inc_files "include/*.hpp")
add_library(${module_name} ${src_files} ${inc_files})
target_link_libraries(${module_name}
PUBLIC
_aidge_core # _ is added because we link the target not the project
_aidge_backend_cpu
)
#Set target properties
set_property(TARGET ${module_name} PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(${module_name}
PUBLIC
$<INSTALL_INTERFACE:include>
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/src
)
# PYTHON BINDING
if (PYBIND)
generate_python_binding(${project} ${module_name})
# Handles Python + pybind11 headers dependencies
target_link_libraries(${module_name}
PUBLIC
pybind11::pybind11
PRIVATE
Python::Python
)
endif()
target_compile_features(${module_name} PRIVATE cxx_std_14)
target_compile_options(${module_name} PRIVATE
$<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror>>)
target_compile_options(${module_name} PRIVATE
$<$<CXX_COMPILER_ID:MSVC>:
/W4>)
if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
append_coverage_compiler_flags()
endif()
##############################################
# Installation instructions
include(GNUInstallDirs)
set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR}/cmake/${project})
install(TARGETS ${module_name} EXPORT ${project}-targets
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
)
install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
#Export the targets to a script
install(EXPORT ${project}-targets
FILE "${project}-targets.cmake"
DESTINATION ${INSTALL_CONFIGDIR}
COMPONENT ${module_name}
)
#Create a ConfigVersion.cmake file
include(CMakePackageConfigHelpers)
write_basic_package_version_file(
"${CMAKE_CURRENT_BINARY_DIR}/${project}-config-version.cmake"
VERSION ${version}
COMPATIBILITY AnyNewerVersion
)
configure_package_config_file("${project}-config.cmake.in"
"${CMAKE_CURRENT_BINARY_DIR}/${project}-config.cmake"
INSTALL_DESTINATION ${INSTALL_CONFIGDIR}
)
#Install the config, configversion and custom find modules
install(FILES
"${CMAKE_CURRENT_BINARY_DIR}/${project}-config.cmake"
"${CMAKE_CURRENT_BINARY_DIR}/${project}-config-version.cmake"
DESTINATION ${INSTALL_CONFIGDIR}
)
##############################################
## Exporting from the build tree
export(EXPORT ${project}-targets
FILE "${CMAKE_CURRENT_BINARY_DIR}/${project}-targets.cmake")
##############################################
## Add test
if(TEST)
enable_testing()
add_subdirectory(unit_tests)
endif()
# Aidge module quantization

[![License-badge](https://img.shields.io/badge/License-EPL%202.0-blue.svg)](LICENSE) [![Documentation Status](https://readthedocs.org/projects/eclipse-aidge/badge/?version=latest)](https://eclipse-aidge.readthedocs.io/en/latest/?badge=latest)

For the moment this is a C++ module for post-training quantization (PTQ) only. Python bindings and quantization-aware training will be added later.

The Eclipse Aidge platform is a comprehensive solution for fast and accurate Deep Neural Network (DNN) simulation and for fully automated building of DNN-based applications. The platform integrates database construction, data pre-processing, network building, benchmarking and hardware export to various targets. It is particularly useful for DNN design and exploration, allowing simple and fast prototyping of DNNs with different topologies. It is possible to define and learn multiple network topology variations and compare their performance (in terms of recognition rate and computational cost) automatically. Hardware export targets include CPU, DSP and GPU with the OpenMP, OpenCL, CUDA, cuDNN and TensorRT programming models, as well as custom hardware IP code generation with High-Level Synthesis for FPGAs and a dedicated configurable DNN accelerator IP.

| Module | Status | Coverage |
| -------- | ------- | ------- |
| [aidge_core](https://gitlab.eclipse.org/eclipse/aidge/aidge_core) | ![Pipeline status](https://gitlab.eclipse.org/eclipse/aidge/aidge_core/badges/main/pipeline.svg?ignore_skipped=true) | ![C++ coverage](https://gitlab.eclipse.org/eclipse/aidge/aidge_core/badges/main/coverage.svg?job=coverage:ubuntu_cpp&key_text=C%2B%2B+coverage&key_width=90) ![Python coverage](https://gitlab.eclipse.org/eclipse/aidge/aidge_core/badges/main/coverage.svg?job=coverage:ubuntu_python&key_text=Python+coverage&key_width=100) |
| [aidge_backend_cpu](https://gitlab.eclipse.org/eclipse/aidge/aidge_backend_cpu) | ![Pipeline status](https://gitlab.eclipse.org/eclipse/aidge/aidge_backend_cpu/badges/master/pipeline.svg?ignore_skipped=true) | ![C++ coverage](https://gitlab.eclipse.org/eclipse/aidge/aidge_backend_cpu/badges/master/coverage.svg?job=coverage:ubuntu_cpp&key_text=C%2B%2B+coverage&key_width=90) ![Python coverage](https://gitlab.eclipse.org/eclipse/aidge/aidge_backend_cpu/badges/master/coverage.svg?job=coverage:ubuntu_python&key_text=Python+coverage&key_width=100) |
| [aidge_onnx](https://gitlab.eclipse.org/eclipse/aidge/aidge_onnx) | ![Pipeline status](https://gitlab.eclipse.org/eclipse/aidge/aidge_onnx/badges/master/pipeline.svg?ignore_skipped=true) | ![Python coverage](https://gitlab.eclipse.org/eclipse/aidge/aidge_onnx/badges/master/coverage.svg?job=coverage:ubuntu_python&key_text=Python+coverage&key_width=100) |

## Installation

### Build on Linux using pip

Each Aidge module is built independently from the others.
To install Aidge on Linux using pip, follow these steps:
1. Create your Python environment with Python >= 3.7, for example using virtualenv:
``` bash
virtualenv -p python3.8 py_env_aidge
source py_env_aidge/bin/activate
```
2. Set the desired install path:
``` bash
export AIDGE_INSTALL='<path_to_aidge>/install'
```
3. First build aidge_core:
``` bash
cd aidge/aidge_core/
pip install . -v
```
4. Then build the other modules (for example aidge_backend_cpu, aidge_onnx):
``` bash
cd aidge/aidge_backend_cpu
pip install . -v
```

## Docker Image

Feel free to use one of the Dockerfiles available in the [`docker`](docker) folder.

To build the image, run the following in the directory containing your Dockerfile:
```
docker build --pull --rm -f "name_of_os.Dockerfile" -t aidge:myenv .
```
Then, to run a container:
```
docker run --name mycontainer aidge:myenv
```

## General steps for PTQ development

1) Simple case: conv/fc cells only, no bias.
2) Simple case with bias: conv/fc cells only, with bias.
3) More complex case: support for all the usual types of networks: mobilenet/resnet.

## Technical part

We separate the normalization of the network, which is step 0 of PTQ, from the quantization part. This separation will make it easier to debug different networks in the future.
The methods themselves are based on the ones written in the [DeepNetQuantization class](https://github.com/CEA-LIST/N2D2/blob/master/src/DeepNetQuantization.cpp).

## Weights (and bias) quantization

1) Normalization between -1 and 1;
2) Quantization to the correct range with the scaling factor 2^(nbBits-1) - 1;
## Activations quantization

1) Normalization between -1 and 1 (or 0 and 1);
2) Quantization to the correct range with the scaling factor;

Both for normalization and quantization we need the cell threshold: the maximum value of the cell outputs. This is the most naive approach; it can be replaced with more sophisticated methods that find an optimal (rather than the maximum) cut-off value.

To get the correct value of the activation scaling we need to take three factors into account:

1) Bias scaling
2) Scaling factor of the current cell
3) Parent scaling factor
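As a minimal illustration of the naive threshold described above (and of what the `output_range` hook collects during a forward pass), assuming the cell outputs are available as a flat float buffer:

``` cpp
#include <algorithm>
#include <cmath>
#include <vector>

// Sketch: the naive cell threshold is the maximum absolute output value
// observed on calibration data. In the module, the "output_range" hook
// records this during a forward pass; a flat buffer is assumed here.
float cellThresholdSketch(const std::vector<float>& outputs) {
    float threshold = 0.0f;
    for (float v : outputs)
        threshold = std::max(threshold, std::fabs(v));
    return threshold;
}
```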
## Additional methods

There are additional methods called during the [PTQ method](https://github.com/CEA-LIST/N2D2/blob/master/src/utils/Helper.cpp#L640):

1) [remove dropout](https://github.com/CEA-LIST/N2D2/blob/master/src/utils/Helper.cpp#L646)
2) [batch normalization fusion with conv](https://github.com/CEA-LIST/N2D2/blob/master/src/utils/Helper.cpp#L649) (see the sketch below)
3) [fuse padding](https://github.com/CEA-LIST/N2D2/blob/master/src/utils/Helper.cpp#L694)
4) [cross-layer equalization](https://github.com/CEA-LIST/N2D2/blob/master/src/utils/Helper.cpp#L699)
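To make item 2 concrete, here is a hedged sketch of the standard fusion of a batch-normalization layer into the preceding convolution, per output channel. The function name and the flat-array layout are assumptions for the example; this is not the N2D2 or Aidge API.

``` cpp
#include <cmath>
#include <cstddef>
#include <vector>

// Sketch of the standard BN-into-Conv fusion, per output channel c:
//   w'[c] = w[c] * gamma[c] / sqrt(var[c] + eps)
//   b'[c] = (b[c] - mean[c]) * gamma[c] / sqrt(var[c] + eps) + beta[c]
void fuseBatchNormSketch(std::vector<float>& weights,        // [outCh * weightsPerCh]
                         std::vector<float>& bias,           // [outCh]
                         const std::vector<float>& gamma,
                         const std::vector<float>& beta,
                         const std::vector<float>& mean,
                         const std::vector<float>& var,
                         float eps = 1.0e-5f) {
    const std::size_t outCh = bias.size();
    const std::size_t weightsPerCh = weights.size() / outCh;
    for (std::size_t c = 0; c < outCh; ++c) {
        const float factor = gamma[c] / std::sqrt(var[c] + eps);
        for (std::size_t k = 0; k < weightsPerCh; ++k)
            weights[c * weightsPerCh + k] *= factor;
        bias[c] = (bias[c] - mean[c]) * factor + beta[c];
    }
}
```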
## Contributing

If you would like to contribute to the Aidge project, we're happy to have your help!
Everyone is welcome to contribute code via merge requests, to file issues on GitLab,
to help people asking for help, to fix bugs that people have filed,
to add to our documentation, or to help out in any other way.
We grant commit access (which includes full rights to the issue database, such as being able to edit labels)
to people who have gained our trust and demonstrated a commitment to Aidge.
## License

Aidge is released under the Eclipse Public License 2.0, as found in [LICENSE](LICENSE).
@PACKAGE_INIT@
include(${CMAKE_CURRENT_LIST_DIR}/aidge_quantization-config-version.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/aidge_quantization-targets.cmake)
///\file QuantPTQ.h
///\brief Post-training quantization (PTQ) helper functions.
///\version file 1.0.0
///\date Creation 14 February 2023
///\par ChangeLog
///\par
/// v1.0.0, 14 February 2023<br>
/// - Initial version.
///\author ik243221
///\copyright
/// Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
/// rights reserved.
#ifndef QuantPTQ_H_
#define QuantPTQ_H_
//#include <aidge/aidge.hpp>
#include "aidge/backend/cpu.hpp"
#include <numeric>
#include <vector>
#include <cmath>
#include <cstdint>
#include <unordered_map>
using namespace Aidge;
namespace Aidge_HELPER{

/// Returns the threshold of a cell: the maximum output value recorded by its
/// "output_range" hook.
float getCellThreshold(std::shared_ptr<Node> node);

/// Returns the maximum scaling factor among the parents of the node.
float getMaxParentsScaling(std::shared_ptr<Node> node);

/// Rescales the parents of the node so that they all match the given scaling.
void rescaleParentsToScaling(std::shared_ptr<Node> node, std::unordered_map<std::string, long double>& scalingForCells, long double scaling);

/// Quantizes the free parameters (weights and biases) of the node over nbBits bits.
long double quantizeFreeParams(std::shared_ptr<Node> node, std::size_t nbBits, std::unordered_map<std::string, long double> biasScalings, bool verbose);

/// Quantizes the activation of the node over nbBits bits, taking the bias and
/// parent scalings into account.
long double quantizeActivation(std::shared_ptr<Node> node, std::size_t nbBits, std::unordered_map<std::string, long double> biasScalings, std::unordered_map<std::string, long double> activationScalings, bool verbose);

/// Quantizes a whole network given as a list of nodes in scheduling order.
void quantizeNetwork(std::vector<std::shared_ptr<Node>> orderedGraphView, std::size_t nbBits, bool verbose);
//void quantizeNetwork(std::shared_ptr<GraphView> graphView, std::size_t nbBits, bool verbose);

}
#endif /* QuantPTQ_H_ */
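A minimal usage sketch of this API, mirroring the unit tests below: schedule the graph, run a calibration forward pass so that the `output_range` hooks record the cell thresholds, then quantize. It assumes only the Aidge calls that appear in the tests of this commit.

``` cpp
#include <memory>
#include <vector>

#include "aidge/QuantPTQ.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/scheduler/Scheduler.hpp"

// Sketch: quantize a whole graph over 8 bits after one calibration pass.
void quantizeGraphSketch(std::shared_ptr<Aidge::GraphView> graph) {
    Aidge::SequentialScheduler scheduler(graph);
    scheduler.forward();                          // populates the output_range hooks
    auto ordered = scheduler.getStaticScheduling(); // nodes in scheduling order
    Aidge_HELPER::quantizeNetwork(ordered, 8, /*verbose=*/true);
}
```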
aidge_quantization
include(FetchContent)
FetchContent_Declare(
Catch2
GIT_REPOSITORY https://github.com/catchorg/Catch2.git
GIT_TAG v3.0.1 # or a later release
)
FetchContent_MakeAvailable(Catch2)
file(GLOB_RECURSE src_files "*.cpp")
add_executable(tests${module_name} ${src_files})
target_link_libraries(tests${module_name} PUBLIC ${module_name})
target_link_libraries(tests${module_name} PRIVATE Catch2::Catch2WithMain)
list(APPEND CMAKE_MODULE_PATH ${catch2_SOURCE_DIR}/extras)
include(CTest)
include(Catch)
catch_discover_tests(tests${module_name})
#include <catch2/catch_test_macros.hpp>
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/backend/cpu.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/Scaling.hpp"
#include "aidge/operator/GenericOperator.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/QuantPTQ.hpp"
#include "aidge/scheduler/Scheduler.hpp"
#include "aidge/hook/outputRange.hpp"
#include "aidge/operator/Producer.hpp"
#include <unordered_map>
using namespace Aidge;
using namespace Aidge_HELPER;
TEST_CASE("[aidge_module_template/ref_cpp/quantization] PTQ : Quantize Graph") {
std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
bool verbose = true;
std::shared_ptr<Node> myConv1 = Conv(3,4,{3,3}, "myConv1");
myConv1->getOperator()->setDatatype(DataType::Float32);
myConv1->getOperator()->setBackend("cpu");
Tensor myWeights = Array4D<float,4,3,3,3> {
{
{
{{ 0., 1., 2.},
{ 3., 4., 5.},
{ 6., 7., 8.}},
{{ 9., 10., 11.},
{ 12., 13., 14.},
{ 15., 16., 17.}},
{{ 18., 19., 20.},
{ 21., 22., 23.},
{ 24., 25., 26.}}
},
{
{{ 27., 28., 29.},
{ 30., 31., 32.},
{ 33., 34., 35.}},
{{ 36., 37., 38.},
{ 39., 40., 41.},
{ 42., 43., 44.}},
{{ 45., 46., 47.},
{ 48., 49., 50.},
{ 51., 52., 53.}}
},
{
{{ 54., 55., 56.},
{ 57., 58., 59.},
{ 60., 61., 62.}},
{{ 63., 64., 65.},
{ 66., 67., 68.},
{ 69., 70., 71.}},
{{ 72., 73., 74.},
{ 75., 76., 77.},
{ 78., 79., 80.}}
},
{
{{ 81., 82., 83.},
{ 84., 85., 86.},
{ 87., 88., 89.}},
{{ 90., 91., 92.},
{ 93., 94., 95.},
{ 96., 97., 98.}},
{{ 99., 100., 101.},
{102., 103., 104.},
{105., 106., 107.}}
}
}
};
Tensor myBias = Array1D<float,4> {{7.,0.,9.,0.}};
std::shared_ptr<Tensor> myInput =
std::make_shared<Tensor>(
Array4D<float,2,3,5,5> {
{
{
{{ 0., 1., 2., 3., 4.},
{ 5., 6., 7., 8., 9.},
{ 10, 11, 12, 13, 14.},
{ 15, 16, 17, 18, 19.},
{ 20, 21, 22, 23, 24.}},
{{ 25, 26., 27., 28., 29.},
{ 30., 31., 32., 33., 34.},
{ 35., 36., 37., 38., 39.},
{ 40., 41., 42., 43., 44.},
{ 45., 46., 47., 48., 49.}},
{{ 50., 51., 52., 53., 54.},
{ 55., 56., 57., 58., 59.},
{ 60., 61., 62., 63., 64.},
{ 65., 66., 67., 68., 69.},
{ 70., 71., 72., 73., 74.}}
},
{
{{ 75., 76., 77., 78., 79.},
{ 80., 81., 82., 83., 84.},
{ 85., 86., 87., 88., 89.},
{ 90., 91., 92., 93., 94.},
{ 95., 96., 97., 98., 99.}},
{{100, 101, 102, 103, 104.},
{105, 106, 107, 108, 109.},
{110, 111, 112, 113, 114.},
{115, 116, 117, 118, 119.},
{120, 121, 122, 123, 124.}},
{{125, 126, 127, 128, 129.},
{130, 131, 132, 133, 134.},
{135, 136, 137, 138, 139.},
{140, 141, 142, 143, 144.},
{145, 146, 147, 148, 149.}}
}
}
}
);
auto dataProvider = Producer(myInput, "dataProvider");
Tensor myOutput = Array4D<float,2,4,3,3> {
{
{
{{ 15226., 15577., 15928.},
{ 16981., 17332., 17683.},
{ 18736., 19087., 19438.}},
{{ 37818., 38898., 39978.},
{ 43218., 44298., 45378.},
{ 48618., 49698., 50778.}},
{{ 60426., 62235., 64044.},
{ 69471., 71280., 73089.},
{ 78516., 80325., 82134.}},
{{ 83016., 85554., 88092.},
{ 95706., 98244., 100782.},
{108396., 110934., 113472.}}
},
{
{{ 41551., 41902., 42253.},
{ 43306., 43657., 44008.},
{ 45061., 45412., 45763.}},
{{118818., 119898., 120978.},
{124218., 125298., 126378.},
{129618., 130698., 131778.}},
{{196101., 197910., 199719.},
{205146., 206955., 208764.},
{214191., 216000., 217809.}},
{{273366., 275904., 278442.},
{286056., 288594., 291132.},
{298746., 301284., 303822.}}
}
}
};
myConv1->getOperator()->input(0) = *myInput;
myConv1->getOperator()->input(1) = myWeights;
myConv1->getOperator()->input(2) = myBias;
myConv1->getOperator()->computeOutputDims();
myConv1->getOperator()->addHook("output_range");
dataProvider->getOperator()->setDatatype(DataType::Float32);
dataProvider->getOperator()->setBackend("cpu");
dataProvider->addChild(myConv1, 0);
g1->add(myConv1);
std::shared_ptr<Node> myReLU1 = ReLU("ReLu1");
myReLU1->getOperator()->setDatatype(DataType::Float32);
myReLU1->getOperator()->setBackend("cpu");
myReLU1->getOperator()->computeOutputDims();
myReLU1->getOperator()->addHook("output_range");
g1->addChild(myReLU1);
g1->setBackend("cpu");
g1->forwardDims();
//check hook functioning
SequentialScheduler scheduler(g1);
scheduler.forward();
float max_output_conv = std::static_pointer_cast<OutputRange>(myConv1->getOperator()->getHook("output_range"))->getOutput(0);
if(verbose) {
printf("[hook] OutputRange(forward) :: Conv output max: "
"\x1b[1;37m"
"%f"
"\n",
max_output_conv);
}
float max_output_relu = std::static_pointer_cast<OutputRange>(myReLU1->getOperator()->getHook("output_range"))->getOutput(0);
if(verbose) {
printf("[hook] OutputRange(forward) :: ReLU output max: "
"\x1b[1;37m"
"%f"
"\n",
max_output_relu);
}
//no need to do this anymore, forward does it automatically now ...
//scheduler.generateScheduling(true);
std::vector<std::shared_ptr<Node>> ordered_graph_view = scheduler.getStaticScheduling();
printf("Going to quantize network :\n");
quantizeNetwork(ordered_graph_view, 8, verbose);
printf("After quantize network !!! \n");
if(verbose) {
printf("Graph after quantization :\n");
for (const std::shared_ptr<Node>& nodePtr : g1->getNodes()) {
printf("\t- node type: "
"\x1b[1;37m"
"%s"
" , node name: "
"\x1b[1;37m"
"%s"
"\n",
(nodePtr->type()).c_str(), (nodePtr->name()).c_str());
}
}
SequentialScheduler scheduler_v2(g1);
scheduler_v2.forward();
scheduler_v2.generateScheduling(false);
std::vector<std::shared_ptr<Node>> ordered_graph_view_v2 = scheduler_v2.getStaticScheduling();
if(verbose) {
printf("Ordered graph after quantization :\n");
for (const std::shared_ptr<Node>& nodePtr : ordered_graph_view_v2) {
printf("\t- node type: "
"\x1b[1;37m"
"%s"
" , node name: "
"\x1b[1;37m"
"%s"
"\n",
(nodePtr->type()).c_str(), (nodePtr->name()).c_str());
}
}
}
#include <catch2/catch_test_macros.hpp>
//#include "aidge/aidge.hpp"
#include "aidge/backend/cpu.hpp"
#include "aidge/QuantPTQ.hpp"
#include "aidge/hook/execTime.hpp"
using namespace Aidge;
using namespace Aidge_HELPER;
#include <iostream>
#include <ctime>
#include <chrono>
#include <iomanip>
TEST_CASE("[hook] ExecTime(forward)") {
std::shared_ptr<Node> myConv1 = Conv(3,4,{3,3}, "myConv1");
myConv1->getOperator()->setDatatype(DataType::Float32);
myConv1->getOperator()->setBackend("cpu");
Tensor myWeights = Array4D<float,4,3,3,3> {
{
{
{{ 0., 1., 2.},
{ 3., 4., 5.},
{ 6., 7., 8.}},
{{ 9., 10., 11.},
{ 12., 13., 14.},
{ 15., 16., 17.}},
{{ 18., 19., 20.},
{ 21., 22., 23.},
{ 24., 25., 26.}}
},
{
{{ 27., 28., 29.},
{ 30., 31., 32.},
{ 33., 34., 35.}},
{{ 36., 37., 38.},
{ 39., 40., 41.},
{ 42., 43., 44.}},
{{ 45., 46., 47.},
{ 48., 49., 50.},
{ 51., 52., 53.}}
},
{
{{ 54., 55., 56.},
{ 57., 58., 59.},
{ 60., 61., 62.}},
{{ 63., 64., 65.},
{ 66., 67., 68.},
{ 69., 70., 71.}},
{{ 72., 73., 74.},
{ 75., 76., 77.},
{ 78., 79., 80.}}
},
{
{{ 81., 82., 83.},
{ 84., 85., 86.},
{ 87., 88., 89.}},
{{ 90., 91., 92.},
{ 93., 94., 95.},
{ 96., 97., 98.}},
{{ 99., 100., 101.},
{102., 103., 104.},
{105., 106., 107.}}
}
}
};
Tensor myBias = Array1D<float,4> {{7.,0.,9.,0.}};
Tensor myInput = Array4D<float,2,3,5,5> {
{
{
{{ 0., 1., 2., 3., 4.},
{ 5., 6., 7., 8., 9.},
{ 10, 11, 12, 13, 14.},
{ 15, 16, 17, 18, 19.},
{ 20, 21, 22, 23, 24.}},
{{ 25, 26., 27., 28., 29.},
{ 30., 31., 32., 33., 34.},
{ 35., 36., 37., 38., 39.},
{ 40., 41., 42., 43., 44.},
{ 45., 46., 47., 48., 49.}},
{{ 50., 51., 52., 53., 54.},
{ 55., 56., 57., 58., 59.},
{ 60., 61., 62., 63., 64.},
{ 65., 66., 67., 68., 69.},
{ 70., 71., 72., 73., 74.}}
},
{
{{ 75., 76., 77., 78., 79.},
{ 80., 81., 82., 83., 84.},
{ 85., 86., 87., 88., 89.},
{ 90., 91., 92., 93., 94.},
{ 95., 96., 97., 98., 99.}},
{{100, 101, 102, 103, 104.},
{105, 106, 107, 108, 109.},
{110, 111, 112, 113, 114.},
{115, 116, 117, 118, 119.},
{120, 121, 122, 123, 124.}},
{{125, 126, 127, 128, 129.},
{130, 131, 132, 133, 134.},
{135, 136, 137, 138, 139.},
{140, 141, 142, 143, 144.},
{145, 146, 147, 148, 149.}}
}
}
};
Tensor myOutput = Array4D<float,2,4,3,3> {
{
{
{{ 15226., 15577., 15928.},
{ 16981., 17332., 17683.},
{ 18736., 19087., 19438.}},
{{ 37818., 38898., 39978.},
{ 43218., 44298., 45378.},
{ 48618., 49698., 50778.}},
{{ 60426., 62235., 64044.},
{ 69471., 71280., 73089.},
{ 78516., 80325., 82134.}},
{{ 83016., 85554., 88092.},
{ 95706., 98244., 100782.},
{108396., 110934., 113472.}}
},
{
{{ 41551., 41902., 42253.},
{ 43306., 43657., 44008.},
{ 45061., 45412., 45763.}},
{{118818., 119898., 120978.},
{124218., 125298., 126378.},
{129618., 130698., 131778.}},
{{196101., 197910., 199719.},
{205146., 206955., 208764.},
{214191., 216000., 217809.}},
{{273366., 275904., 278442.},
{286056., 288594., 291132.},
{298746., 301284., 303822.}}
}
}
};
myConv1->getOperator()->input(0) = myInput;
myConv1->getOperator()->input(1) = myWeights;
myConv1->getOperator()->input(2) = myBias;
myConv1->getOperator()->computeOutputDims();
myConv1->getOperator()->addHook("execution_time");
myConv1->forward();
//std::static_pointer_cast<Tensor>(myConv1->getOperator()->getOutput(0))->print();
REQUIRE(*std::static_pointer_cast<Tensor>(myConv1->getOperator()->getOutput(0)) == myOutput);
//std::static_pointer_cast<Tensor>(myConv1->getOperator()->getInput(1))->print();
std::chrono::time_point<std::chrono::system_clock> time_conv = std::static_pointer_cast<ExecTime>(myConv1->getOperator()->getHook("execution_time"))->getTime(0);
const std::time_t t_c = std::chrono::system_clock::to_time_t(time_conv);
//std::cout << "the execution time of the module was " << std::put_time(std::localtime(&t_c), "%F %T.\n") << std::flush;
}
#include <catch2/catch_test_macros.hpp>
#include <iostream>
#include <ctime>
#include <chrono>
#include <iomanip>
//#include "aidge/aidge.hpp"
#include "aidge/backend/cpu.hpp"
#include "aidge/QuantPTQ.hpp"
#include "aidge/hook/outputRange.hpp"
using namespace Aidge;
using namespace Aidge_HELPER;
TEST_CASE("[hook] OutputRange(forward)") {
std::shared_ptr<Node> myConv1 = Conv(3,4,{3,3}, "myConv1");
myConv1->getOperator()->setDatatype(DataType::Float32);
myConv1->getOperator()->setBackend("cpu");
Tensor myWeights = Array4D<float,4,3,3,3> {
{
{
{{ 0., 1., 2.},
{ 3., 4., 5.},
{ 6., 7., 8.}},
{{ 9., 10., 11.},
{ 12., 13., 14.},
{ 15., 16., 17.}},
{{ 18., 19., 20.},
{ 21., 22., 23.},
{ 24., 25., 26.}}
},
{
{{ 27., 28., 29.},
{ 30., 31., 32.},
{ 33., 34., 35.}},
{{ 36., 37., 38.},
{ 39., 40., 41.},
{ 42., 43., 44.}},
{{ 45., 46., 47.},
{ 48., 49., 50.},
{ 51., 52., 53.}}
},
{
{{ 54., 55., 56.},
{ 57., 58., 59.},
{ 60., 61., 62.}},
{{ 63., 64., 65.},
{ 66., 67., 68.},
{ 69., 70., 71.}},
{{ 72., 73., 74.},
{ 75., 76., 77.},
{ 78., 79., 80.}}
},
{
{{ 81., 82., 83.},
{ 84., 85., 86.},
{ 87., 88., 89.}},
{{ 90., 91., 92.},
{ 93., 94., 95.},
{ 96., 97., 98.}},
{{ 99., 100., 101.},
{102., 103., 104.},
{105., 106., 107.}}
}
}
};
Tensor myBias = Array1D<float,4> {{7.,0.,9.,0.}};
Tensor myInput = Array4D<float,2,3,5,5> {
{
{
{{ 0., 1., 2., 3., 4.},
{ 5., 6., 7., 8., 9.},
{ 10, 11, 12, 13, 14.},
{ 15, 16, 17, 18, 19.},
{ 20, 21, 22, 23, 24.}},
{{ 25, 26., 27., 28., 29.},
{ 30., 31., 32., 33., 34.},
{ 35., 36., 37., 38., 39.},
{ 40., 41., 42., 43., 44.},
{ 45., 46., 47., 48., 49.}},
{{ 50., 51., 52., 53., 54.},
{ 55., 56., 57., 58., 59.},
{ 60., 61., 62., 63., 64.},
{ 65., 66., 67., 68., 69.},
{ 70., 71., 72., 73., 74.}}
},
{
{{ 75., 76., 77., 78., 79.},
{ 80., 81., 82., 83., 84.},
{ 85., 86., 87., 88., 89.},
{ 90., 91., 92., 93., 94.},
{ 95., 96., 97., 98., 99.}},
{{100, 101, 102, 103, 104.},
{105, 106, 107, 108, 109.},
{110, 111, 112, 113, 114.},
{115, 116, 117, 118, 119.},
{120, 121, 122, 123, 124.}},
{{125, 126, 127, 128, 129.},
{130, 131, 132, 133, 134.},
{135, 136, 137, 138, 139.},
{140, 141, 142, 143, 144.},
{145, 146, 147, 148, 149.}}
}
}
};
Tensor myOutput = Array4D<float,2,4,3,3> {
{
{
{{ 15226., 15577., 15928.},
{ 16981., 17332., 17683.},
{ 18736., 19087., 19438.}},
{{ 37818., 38898., 39978.},
{ 43218., 44298., 45378.},
{ 48618., 49698., 50778.}},
{{ 60426., 62235., 64044.},
{ 69471., 71280., 73089.},
{ 78516., 80325., 82134.}},
{{ 83016., 85554., 88092.},
{ 95706., 98244., 100782.},
{108396., 110934., 113472.}}
},
{
{{ 41551., 41902., 42253.},
{ 43306., 43657., 44008.},
{ 45061., 45412., 45763.}},
{{118818., 119898., 120978.},
{124218., 125298., 126378.},
{129618., 130698., 131778.}},
{{196101., 197910., 199719.},
{205146., 206955., 208764.},
{214191., 216000., 217809.}},
{{273366., 275904., 278442.},
{286056., 288594., 291132.},
{298746., 301284., 303822.}}
}
}
};
myConv1->getOperator()->input(0) = myInput;
myConv1->getOperator()->input(1) = myWeights;
myConv1->getOperator()->input(2) = myBias;
myConv1->getOperator()->computeOutputDims();
myConv1->getOperator()->addHook("output_range");
myConv1->forward();
//std::static_pointer_cast<Tensor>(myConv->getOperator()->getOutput(0))->print();
REQUIRE(*std::static_pointer_cast<Tensor>(myConv1->getOperator()->getOutput(0)) == myOutput);
float max_output = std::static_pointer_cast<OutputRange>(myConv1->getOperator()->getHook("output_range"))->getOutput(0);
//std::cout << "the output of the conv was " << max_output << std::flush;
}