diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml index b9fdd937b358b714fd83a36d8417ad2b417d0385..18963ced1084c56c1e4c04dceec735126bba962a 100644 --- a/.gitlab/ci/build.gitlab-ci.yml +++ b/.gitlab/ci/build.gitlab-ci.yml @@ -1,3 +1,6 @@ +include: + - remote: 'https://gitlab.eclipse.org/eclipse/aidge/gitlab_shared_files/-/raw/main/.gitlab/ci/shared_script.gitlab-ci.yml' + build:ubuntu_cpp: stage: build needs: [] @@ -6,9 +9,9 @@ build:ubuntu_cpp: script: # Download dependencies # aidge_core - - 'curl --location --output build_artifacts.zip "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:ubuntu_cpp"' - - unzip -o build_artifacts.zip -d . - - rm -rf build_cpp + - DEPENDENCY_NAME="aidge_core" + - DEPENDENCY_JOB="build:ubuntu_cpp" + - !reference [.download_dependency, script] # Build current module - export CMAKE_PREFIX_PATH=../install_cpp @@ -32,9 +35,9 @@ build:ubuntu_cpp_g++10: script: # Download dependencies # aidge_core - - 'curl --location --output build_artifacts.zip "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:ubuntu_cpp"' - - unzip -o build_artifacts.zip -d . - - rm -rf build_cpp + - DEPENDENCY_NAME="aidge_core" + - DEPENDENCY_JOB="build:ubuntu_cpp" + - !reference [.download_dependency, script] # Build current module - export CMAKE_PREFIX_PATH=../install_cpp @@ -55,9 +58,9 @@ build:ubuntu_cpp_g++12: script: # Download dependencies # aidge_core - - 'curl --location --output build_artifacts.zip "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:ubuntu_cpp"' - - unzip -o build_artifacts.zip -d . - - rm -rf build_cpp + - DEPENDENCY_NAME="aidge_core" + - DEPENDENCY_JOB="build:ubuntu_cpp" + - !reference [.download_dependency, script] # Build current module - export CMAKE_PREFIX_PATH=../install_cpp @@ -78,9 +81,9 @@ build:ubuntu_cpp_clang12: script: # Download dependencies # aidge_core - - 'curl --location --output build_artifacts.zip "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:ubuntu_cpp"' - - unzip -o build_artifacts.zip -d . - - rm -rf build_cpp + - DEPENDENCY_NAME="aidge_core" + - DEPENDENCY_JOB="build:ubuntu_cpp" + - !reference [.download_dependency, script] # Build current module - export CMAKE_PREFIX_PATH=../install_cpp @@ -101,9 +104,9 @@ build:ubuntu_cpp_clang15: script: # Download dependencies # aidge_core - - 'curl --location --output build_artifacts.zip "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:ubuntu_cpp"' - - unzip -o build_artifacts.zip -d . - - rm -rf build_cpp + - DEPENDENCY_NAME="aidge_core" + - DEPENDENCY_JOB="build:ubuntu_cpp" + - !reference [.download_dependency, script] # Build current module - export CMAKE_PREFIX_PATH=../install_cpp @@ -120,86 +123,92 @@ build:ubuntu_python: needs: [] tags: - docker + script: # Download dependencies # aidge_core (Python) - - 'curl --location --output build_artifacts.zip "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:ubuntu_python"' - - unzip -o build_artifacts.zip -d . + - DEPENDENCY_NAME="aidge_core" + - DEPENDENCY_JOB="build:ubuntu_python" + - !reference [.download_dependency, script] - python3 -m pip install virtualenv - virtualenv venv - source venv/bin/activate - python3 -m pip install -r requirements.txt - python3 -m pip install . 
+ - python3 -m pip install numpy unittest-xml-reporting + - python3 -m pip list artifacts: expire_in: 1 week paths: - venv/ -# build:windows_cpp: -# stage: build -# needs: [] -# tags: -# - windows - -# image: buildtools -# before_script: -# # Install Chocolatey -# - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) -# # Install dependencies -# - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y -# - choco install git -Y -# - choco install python -Y -# # Update PATH -# - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") -# script: -# # Download dependencies -# # aidge_core -# - 'curl "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:windows_cpp" -o build_artifacts.zip' -# - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force -# - Remove-Item .\build_cpp\ -Recurse - -# - $env:CMAKE_PREFIX_PATH = '../install_cpp' -# - mkdir -p build_cpp -# - cd build_cpp -# - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug .. -# - cmake --build . -j2 -# - cmake --install . --config Debug - -# artifacts: -# expire_in: 1 week -# paths: -# - build_cpp/ -# - install_cpp/ - -# build:windows_python: -# stage: build -# needs: [] -# tags: -# - windows - -# image: buildtools -# before_script: -# # Install Chocolatey -# - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) -# # Install dependencies -# - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y -# - choco install git -Y -# - choco install python -Y -# # Update PATH -# - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") -# script: -# # Download dependencies -# # aidge_core (Python) -# - 'curl "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:windows_python" -o build_artifacts.zip' -# - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force - -# - python -m pip install virtualenv -# - virtualenv venv -# - venv\Scripts\Activate.ps1 -# - python -m pip install -r requirements.txt -# - python -m pip install . 
-# artifacts: -# expire_in: 1 week -# paths: -# - venv/ +build:windows_cpp: + stage: build + needs: [] + tags: + - windows + + image: buildtools + before_script: + # Install Chocolatey + - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + # Install dependencies + - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y + - choco install git -Y + - choco install python -Y + # Update PATH + - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") + script: + # Download dependencies + # aidge_core + - $DEPENDENCY_NAME="aidge_core" + - $DEPENDENCY_JOB="build:windows_cpp" + - !reference [.download_dependency_windows, script] + - Remove-Item .\build_cpp\ -Recurse -Force -ErrorAction Ignore + + - $env:CMAKE_PREFIX_PATH = '../install_cpp' + - mkdir -p build_cpp + - cd build_cpp + - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug .. + - cmake --build . -j2 + - cmake --install . --config Debug + + artifacts: + expire_in: 1 week + paths: + - build_cpp/ + - install_cpp/ + +build:windows_python: + stage: build + needs: [] + tags: + - windows + + image: buildtools + before_script: + # Install Chocolatey + - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + # Install dependencies + - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y + - choco install git -Y + - choco install python -Y + # Update PATH + - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") + script: + # Download dependencies + # aidge_core (Python) + - $DEPENDENCY_NAME="aidge_core" + - $DEPENDENCY_JOB="build:windows_python" + - !reference [.download_dependency_windows, script] + + - python -m pip install virtualenv + - virtualenv venv + - venv\Scripts\Activate.ps1 + - python -m pip install -r requirements.txt + - python -m pip install . 
+  artifacts:
+    expire_in: 1 week
+    paths:
+      - venv/
diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml
index 8f6b1e54109c4c2dcfa026fd477a93b6c0a1c641..3cada635eb25b3eb87e8318eb6e26723f7a27dd6 100644
--- a/.gitlab/ci/test.gitlab-ci.yml
+++ b/.gitlab/ci/test.gitlab-ci.yml
@@ -18,32 +18,31 @@ test:ubuntu_python:
   script:
     - source venv/bin/activate
     - cd ${CI_PROJECT_NAME}
-    - python3 -m pip install numpy unittest-xml-reporting
-    - python3 -m pip list
-    # Run on discovery all tests located in core/unit_tests/python and discard the stdout
+
+    # Discover and run all tests located in core/unit_tests/python and discard the stdout
     # only to show the errors/warnings and the results of the tests
     - python3 -m xmlrunner discover -s unit_tests/ -v -b --output-file xmlrunner-results.xml
   artifacts:
     reports:
       junit: ${CI_PROJECT_NAME}/xmlrunner-results.xml

-# test:windows_cpp:
-#   stage: test
-#   needs: ["build:windows_cpp"]
-#   tags:
-#     - windows
-#   image: buildtools
-#   before_script:
-#     # Install Chocolatey
-#     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-#     # Install dependencies
-#     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-#     - choco install python -Y
-#     # Update PATH
-#     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-#   script:
-#     - cd build_cpp
-#     - ctest --output-junit ctest-results.xml --output-on-failure
-#   artifacts:
-#     reports:
-#       junit: build_cpp/ctest-results.xml
+test:windows_cpp:
+  stage: test
+  needs: ["build:windows_cpp"]
+  tags:
+    - windows
+  image: buildtools
+  before_script:
+    # Install Chocolatey
+    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install python -Y
+    # Update PATH
+    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+  script:
+    - cd build_cpp
+    - ctest --output-junit ctest-results.xml --output-on-failure
+  artifacts:
+    reports:
+      junit: build_cpp/ctest-results.xml
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 51a6ebe10d7b8d03fcb94898de55734dbabf9b0c..229110d9c1a5b8b202a6811a0a2276f91ba6b73a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -7,10 +7,11 @@ file(READ "${CMAKE_SOURCE_DIR}/project_name.txt" project)
 message(STATUS "Project name: ${project}")
 message(STATUS "Project version: ${version}")
-# Note : project name is {project} and python module name is also {project}
+# Note: project name is {project} and python module name is also {project}
 set(module_name _${project}) # target name
 project(${project})
+set(CMAKE_CXX_STANDARD 14)

 ##############################################
 # Define options
@@ -18,6 +19,7 @@ option(PYBIND "python binding" ON)
 option(WERROR "Warning as error" OFF)
 option(TEST "Enable tests" ON)
 option(COVERAGE "Enable coverage" OFF)
+option(ENABLE_ASAN "Enable ASan (AddressSanitizer) for runtime analysis of memory use (over/underflow, memory leak, ...)"
OFF) ############################################## # Import utils CMakeLists @@ -34,7 +36,6 @@ find_package(aidge_core REQUIRED) ############################################## # Create target and set properties - file(GLOB_RECURSE src_files "src/*.cpp") file(GLOB_RECURSE inc_files "include/*.hpp") @@ -43,9 +44,23 @@ target_link_libraries(${module_name} PUBLIC _aidge_core # _ is added because we link the target not the project ) + #Set target properties set_property(TARGET ${module_name} PROPERTY POSITION_INDEPENDENT_CODE ON) +if( ${ENABLE_ASAN} ) + message("Building ${module_name} with ASAN.") + set(SANITIZE_FLAGS -fsanitize=address -fno-omit-frame-pointer) + target_link_libraries(${module_name} + PUBLIC + -fsanitize=address + ) + target_compile_options(${module_name} + PRIVATE + ${SANITIZE_FLAGS} + ) +endif() + target_include_directories(${module_name} PUBLIC $<INSTALL_INTERFACE:include> @@ -60,7 +75,7 @@ if (PYBIND) # Handles Python + pybind11 headers dependencies target_link_libraries(${module_name} - PUBLIC + PUBLIC pybind11::pybind11 PRIVATE Python::Python @@ -99,8 +114,8 @@ install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) install(EXPORT ${project}-targets FILE "${project}-targets.cmake" DESTINATION ${INSTALL_CONFIGDIR} - COMPONENT ${module_name} -) + COMPONENT ${module_name} +) #Create a ConfigVersion.cmake file include(CMakePackageConfigHelpers) diff --git a/aidge_backend_cpu/unit_tests/test_recipies.py b/aidge_backend_cpu/unit_tests/test_recipes.py similarity index 90% rename from aidge_backend_cpu/unit_tests/test_recipies.py rename to aidge_backend_cpu/unit_tests/test_recipes.py index e343fad1aeda82555a57778a394a4590b1e8772e..5586ab246e61d04b5754421b90ef3cd30629c1c3 100644 --- a/aidge_backend_cpu/unit_tests/test_recipies.py +++ b/aidge_backend_cpu/unit_tests/test_recipes.py @@ -15,7 +15,7 @@ import aidge_backend_cpu from functools import reduce import numpy as np -class test_recipies(unittest.TestCase): +class test_recipes(unittest.TestCase): def setUp(self): pass @@ -33,12 +33,9 @@ class test_recipies(unittest.TestCase): conv = aidge_core.Conv2D(1, 1, [3, 3], name="Conv0") bn = aidge_core.BatchNorm2D(1, name="Add0") - graph_view = aidge_core.sequential([conv, bn]) + graph_view = aidge_core.sequential([input_node, conv, bn]) # Add random values to conv and BatchNorm parameters - input_node.add_child(graph_view) - input_node.get_operator().set_datatype(aidge_core.DataType.Float32) - input_node.get_operator().set_backend("cpu") graph_view.set_datatype(aidge_core.DataType.Float32) graph_view.set_backend("cpu") diff --git a/aidge_backend_cpu/unit_tests/test_scheduler.py b/aidge_backend_cpu/unit_tests/test_scheduler.py index 2f174efed32fc814010ff61cd42c1bae1105674e..0c41d59963c7633151745f2efe1f1fac3ee07815 100644 --- a/aidge_backend_cpu/unit_tests/test_scheduler.py +++ b/aidge_backend_cpu/unit_tests/test_scheduler.py @@ -40,18 +40,14 @@ class test_scheduler(unittest.TestCase): input_data = np.array([0]).astype(np.float32) input_tensor = aidge_core.Tensor(input_data) - input_node = aidge_core.Producer(input_tensor, "X") - graph_view = aidge_core.sequential([ + aidge_core.Producer(input_tensor, "X"), aidge_core.FC(1, 50, name='0'), aidge_core.FC(50, 50, name='1'), aidge_core.FC(50, 10, name='2'), ]) EXPECTED_SCHEDULE = ['0', '1', '2'] - input_node.add_child(graph_view) - input_node.get_operator().set_datatype(aidge_core.DataType.Float32) - input_node.get_operator().set_backend("cpu") graph_view.set_datatype(aidge_core.DataType.Float32) graph_view.set_backend("cpu") @@ 
-60,15 +56,17 @@ class test_scheduler(unittest.TestCase): scheduler = aidge_core.SequentialScheduler(graph_view) scheduler.generate_scheduling() - self.assertListEqual([i.name() for i in scheduler.get_static_scheduling()], EXPECTED_SCHEDULE) + self.assertEqual(len(scheduler.get_static_scheduling()), 10) + # Do not care about the order of execution of the producers + self.assertListEqual([i.name() for i in scheduler.get_static_scheduling()[-3:]], EXPECTED_SCHEDULE) def test_parallel_scheduling(self): input_data = np.array([0]).astype(np.float32) input_tensor = aidge_core.Tensor(input_data) - input_node = aidge_core.Producer(input_tensor, "X") graph_view = aidge_core.sequential([ + aidge_core.Producer(input_tensor, "X"), aidge_core.FC(1, 50, name='0'), aidge_core.parallel([aidge_core.FC(50, 50, name='1'), aidge_core.FC(50, 50, name='3')]), aidge_core.Add(2, name='2'), @@ -76,9 +74,6 @@ class test_scheduler(unittest.TestCase): EXPECTED_SCHEDULE = [['0', '1', '3', '2'], ['0', '3', '1', '2']] # Both scheduling are valid ! - input_node.add_child(graph_view) - input_node.get_operator().set_datatype(aidge_core.DataType.Float32) - input_node.get_operator().set_backend("cpu") graph_view.set_datatype(aidge_core.DataType.Float32) graph_view.set_backend("cpu") @@ -87,7 +82,9 @@ class test_scheduler(unittest.TestCase): scheduler = aidge_core.SequentialScheduler(graph_view) scheduler.generate_scheduling() - self.assertTrue([i.name() for i in scheduler.get_static_scheduling()] in EXPECTED_SCHEDULE) + self.assertEqual(len(scheduler.get_static_scheduling()), 11) + # Do not care about the order of execution of the producers + self.assertTrue([i.name() for i in scheduler.get_static_scheduling()[-4:]] in EXPECTED_SCHEDULE) if __name__ == '__main__': unittest.main() diff --git a/aidge_backend_cpu/unit_tests/test_tensor.py b/aidge_backend_cpu/unit_tests/test_tensor.py deleted file mode 100644 index 37531b43cf7755dfb760e575450b70bfa9a6ff68..0000000000000000000000000000000000000000 --- a/aidge_backend_cpu/unit_tests/test_tensor.py +++ /dev/null @@ -1,71 +0,0 @@ -import unittest -import aidge_core -import aidge_backend_cpu -import numpy as np - - -class test_tensor(unittest.TestCase): - """Test tensor binding - """ - def setUp(self): - pass - def tearDown(self): - pass - - def test_getavailable_backends(self): - self.assertTrue("cpu" in aidge_core.Tensor.get_available_backends()) - - def test_numpy_int_to_tensor(self): - np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32) - # Numpy -> Tensor - t = aidge_core.Tensor(np_array) - self.assertEqual(t.dtype(), aidge_core.DataType.Int32) - for i_t, i_n in zip(t, np_array.flatten()): - self.assertTrue(i_t == i_n) - for i,j in zip(t.dims(), np_array.shape): - self.assertEqual(i,j) - def test_tensor_int_to_numpy(self): - np_array = np.arange(9).reshape(1,1,3,3) - # Numpy -> Tensor - t = aidge_core.Tensor(np_array) - # Tensor -> Numpy - nnarray = np.array(t) - for i_nn, i_n in zip(nnarray.flatten(), np_array.flatten()): - self.assertTrue(i_nn == i_n) - for i,j in zip(t.dims(), nnarray.shape): - self.assertEqual(i,j) - - def test_numpy_int64_to_tensor(self): - np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64) - # Numpy -> Tensor - t = aidge_core.Tensor(np_array) - self.assertEqual(t.dtype(), aidge_core.DataType.Int64) - for i_t, i_n in zip(t, np_array.flatten()): - self.assertTrue(i_t == i_n) - for i,j in zip(t.dims(), np_array.shape): - self.assertEqual(i,j) - - def test_numpy_float_to_tensor(self): - t = aidge_core.Tensor() - np_array = np.random.rand(1, 1, 3, 
3).astype(np.float32) - # Numpy -> Tensor - t = aidge_core.Tensor(np_array) - self.assertEqual(t.dtype(), aidge_core.DataType.Float32) - for i_t, i_n in zip(t, np_array.flatten()): - self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference - for i,j in zip(t.dims(), np_array.shape): - self.assertEqual(i,j) - - def test_get_set(self): - dims = [2,2,2] - - np_array = np.arange(8).reshape(dims).astype(np.int32) - # Numpy -> Tensor - t = aidge_core.Tensor(np_array) - for i in range(8): - self.assertEqual(t[i], i) - t[i] = 5 - self.assertEqual(t[i], 5) - -if __name__ == '__main__': - unittest.main() diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp index f78598057cafe0b5b02d268bd5a73ede5a2981d8..6b8b7b9208abd95f312ee53e5909f7de2b163624 100644 --- a/include/aidge/backend/cpu.hpp +++ b/include/aidge/backend/cpu.hpp @@ -12,7 +12,6 @@ #ifndef AIDGE_CPU_IMPORTS_H_ #define AIDGE_CPU_IMPORTS_H_ -#include "aidge/backend/cpu/data/TensorImpl.hpp" #include "aidge/backend/cpu/operator/AddImpl.hpp" #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp" #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp" @@ -21,18 +20,30 @@ #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp" #include "aidge/backend/cpu/operator/ConvImpl.hpp" #include "aidge/backend/cpu/operator/DivImpl.hpp" +#include "aidge/backend/cpu/operator/ErfImpl.hpp" #include "aidge/backend/cpu/operator/FCImpl.hpp" +#include "aidge/backend/cpu/operator/GatherImpl.hpp" +#include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp" #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp" #include "aidge/backend/cpu/operator/MatMulImpl.hpp" +#include "aidge/backend/cpu/operator/MemorizeImpl.hpp" #include "aidge/backend/cpu/operator/MulImpl.hpp" #include "aidge/backend/cpu/operator/PadImpl.hpp" +#include "aidge/backend/cpu/operator/PopImpl.hpp" #include "aidge/backend/cpu/operator/PowImpl.hpp" -#include "aidge/backend/cpu/operator/ProducerImpl.hpp" +#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp" #include "aidge/backend/cpu/operator/ReLUImpl.hpp" +#include "aidge/backend/cpu/operator/ReshapeImpl.hpp" #include "aidge/backend/cpu/operator/ScalingImpl.hpp" +#include "aidge/backend/cpu/operator/SigmoidImpl.hpp" #include "aidge/backend/cpu/operator/SliceImpl.hpp" #include "aidge/backend/cpu/operator/SqrtImpl.hpp" #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp" #include "aidge/backend/cpu/operator/SubImpl.hpp" +#include "aidge/backend/cpu/operator/TanhImpl.hpp" +#include "aidge/backend/cpu/operator/TransposeImpl.hpp" + +#include "aidge/backend/cpu/data/TensorImpl.hpp" + +#endif /* AIDGE_CPU_IMPORTS_H_ */ -#endif /* AIDGE_CPU_IMPORTS_H_ */ \ No newline at end of file diff --git a/include/aidge/backend/cpu/data/Broadcasting.hpp b/include/aidge/backend/cpu/data/Broadcasting.hpp new file mode 100644 index 0000000000000000000000000000000000000000..cb969cb54806a204072763a1672ee5266fb6347e --- /dev/null +++ b/include/aidge/backend/cpu/data/Broadcasting.hpp @@ -0,0 +1,49 @@ +/******************************************************************************** + * Copyright (c) 2024 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_DATA_BROADCASTING_H_
+#define AIDGE_CPU_DATA_BROADCASTING_H_
+
+#include <vector>
+
+namespace Aidge {
+
+// Function to broadcast an input dims vector into the same size as an outputDims vector
+
+    /**
+     * @brief Broadcast an input dims vector into the same size as an outputDims vector
+     * @details The missing dimensions are filled with 1
+     * @param outputDims The vector of dimensions to follow
+     * @param dimsToBroadcast The vector of dimensions to broadcast
+     * @return std::vector<std::size_t> the broadcasted vector, with 1 added for the missing dimensions.
+     */
+    std::vector<std::size_t> getBroadcastedDims(const std::vector<std::size_t>& outputDims, const std::vector<std::size_t>& dimsToBroadcast);
+
+    /**
+     * @brief Get a vector of indexes along the dimensions vector from a flattened index
+     * @param dimensions The vector of dimensions we want the indexes on
+     * @param idx The flattened index
+     * @return std::vector<std::size_t> vector of indexes along dimensions.
+     */
+    std::vector<std::size_t> getMultiDimIndices(const std::vector<std::size_t>& dimensions, std::size_t idx);
+
+    // Function to get a flattened index from multi-dimensional indices
+    /**
+     * @brief Get a flattened index in the dimensions vector from a given vector of indices on a broadcasted vector
+     * @param dimensions The vector of dimensions we want the flattened index on
+     * @param indices The vector of indices we want to flatten
+     * @return std::size_t The flattened index on the dimensions vector
+     */
+    std::size_t getFlattenedIndex(const std::vector<std::size_t>& dimensions, const std::vector<std::size_t>& indices);
+
+} // namespace Aidge
+
+#endif // AIDGE_CPU_DATA_BROADCASTING_H_
\ No newline at end of file
diff --git a/include/aidge/backend/cpu/data/GetCPUPtr.h b/include/aidge/backend/cpu/data/GetCPUPtr.h
deleted file mode 100644
index 38ea848afc29fa4c23ff500f97e0c57954695021..0000000000000000000000000000000000000000
--- a/include/aidge/backend/cpu/data/GetCPUPtr.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- * - * SPDX-License-Identifier: EPL-2.0 - * - ********************************************************************************/ - -#ifndef AIDGE_CPU_DATA_GETCPUPTR_H_ -#define AIDGE_CPU_DATA_GETCPUPTR_H_ - -#include "aidge/data/Tensor.hpp" - -namespace Aidge { -inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) { - return std::static_pointer_cast<Tensor>(data)->getImpl()->rawPtr(); -} -} // namespace Aidge - -#endif // AIDGE_CPU_DATA_GETCPUPTR_H_ \ No newline at end of file diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp deleted file mode 100644 index c451b4a5beccacb7980c834d56b979c1b76cdd3f..0000000000000000000000000000000000000000 --- a/include/aidge/backend/cpu/data/TensorImpl.hpp +++ /dev/null @@ -1,197 +0,0 @@ -/******************************************************************************** - * Copyright (c) 2023 CEA-List - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License 2.0 which is available at - * http://www.eclipse.org/legal/epl-2.0. - * - * SPDX-License-Identifier: EPL-2.0 - * - ********************************************************************************/ - -#ifndef AIDGE_CPU_DATA_TENSORIMPL_H_ -#define AIDGE_CPU_DATA_TENSORIMPL_H_ - -#include "aidge/backend/TensorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/half.hpp" -#include "aidge/utils/Registrar.hpp" -#include "aidge/utils/Types.h" -#include "aidge/utils/ErrorHandling.hpp" -#include "aidge/utils/future_std/span.hpp" - -namespace Aidge { - -template <class T> -class TensorImpl_cpu : public TensorImpl { -private: - const Tensor &mTensor; // Impl needs to access Tensor information, but is not - // supposed to change it! 
- /// Pointer to the data and its capacity - future_std::span<T> mData; - /// If this instance own the data, std::unique_ptr manages it - std::unique_ptr<T[]> mDataOwner; - -public: - static constexpr const char *Backend = "cpu"; - - TensorImpl_cpu(const Tensor &tensor) : TensorImpl(Backend), mTensor(tensor) {} - - bool operator==(const TensorImpl &otherImpl) const override final { - const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl); - AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mTensor.size()); - - std::size_t i = 0; - for (; i < mTensor.size() && - *(mData.data()+i) == *static_cast<const T*>(typedOtherImpl.rawPtr(i)); - ++i) { - } - return i == mTensor.size(); - } - - static std::unique_ptr<TensorImpl_cpu> create(const Tensor &tensor) { - return std::make_unique<TensorImpl_cpu<T>>(tensor); - } - - inline std::size_t size() const noexcept override final { return mData.size(); } - inline std::size_t scalarSize() const noexcept override final { return sizeof(T); } - - void setDevice(DeviceIdx_t device) override final { - AIDGE_ASSERT(device == 0, "device cannot be != 0 for CPU backend"); - } - - void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final { - AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity"); - std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length, - static_cast<T *>(rawPtr()) + offset); - } - - void copyCast(const void *src, NbElts_t length, const DataType srcDt) override final { - if (length == 0) { - return; - } - - AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity"); - switch (srcDt) - { - case DataType::Float64: - std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length, - static_cast<T *>(rawPtr())); - break; - case DataType::Float32: - std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length, - static_cast<T *>(rawPtr())); - break; - case DataType::Float16: - std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length, - static_cast<T *>(rawPtr())); - break; - case DataType::Int64: - std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length, - static_cast<T *>(rawPtr())); - break; - case DataType::UInt64: - std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length, - static_cast<T *>(rawPtr())); - break; - case DataType::Int32: - std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length, - static_cast<T *>(rawPtr())); - break; - case DataType::UInt32: - std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length, - static_cast<T *>(rawPtr())); - break; - case DataType::Int16: - std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length, - static_cast<T *>(rawPtr())); - break; - case DataType::UInt16: - std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length, - static_cast<T *>(rawPtr())); - break; - case DataType::Int8: - std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length, - static_cast<T *>(rawPtr())); - break; - case DataType::UInt8: - std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length, - static_cast<T *>(rawPtr())); - break; - default: - AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type."); - break; - } - } - - void copyFromDevice(const void *src, 
NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) override final { - AIDGE_ASSERT(device.first == Backend, "backend must match"); - AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend"); - copy(src, length); - } - - inline void copyFromHost(const void *src, NbElts_t length) override final { - copy(src, length); - } - - void copyToHost(void *dst, NbElts_t length) const override final { - AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity"); - const T* src = static_cast<const T*>(rawPtr()); - std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length, - static_cast<T *>(dst)); - } - - void *rawPtr(NbElts_t offset = 0) override final { - lazyInit(); - return (mData.data() + offset); - }; - - const void *rawPtr(NbElts_t offset = 0) const override final { - AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const rawPtr"); - return (mData.data() + offset); - }; - - void *hostPtr(NbElts_t offset = 0) override final { - lazyInit(); - return (mData.data() + offset); - }; - - const void *hostPtr(NbElts_t offset = 0) const override final { - AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const hostPtr"); - return (mData.data() + offset); - }; - - void setRawPtr(void *ptr, NbElts_t length) override final { - AIDGE_ASSERT(length >= mTensor.size(), "trying to set raw pointer of insufficient capacity"); - mData = future_std::span<T>(static_cast<T *>(ptr), length); - mDataOwner.reset(); - }; - - virtual ~TensorImpl_cpu() = default; - -private: - void lazyInit() { - if (mData.size() < mTensor.size()) { - // Need more data, a re-allocation will occur - AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data"); - mDataOwner.reset(new T[mTensor.size()]); - mData = future_std::span<T>(mDataOwner.get(), mTensor.size()); - } - } -}; - -namespace { -static Registrar<Tensor> registrarTensorImpl_cpu_Float64( - {"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create); -static Registrar<Tensor> registrarTensorImpl_cpu_Float32( - {"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create); -static Registrar<Tensor> registrarTensorImpl_cpu_Float16( - {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create); -static Registrar<Tensor> registrarTensorImpl_cpu_Int32( - {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create); -static Registrar<Tensor> registrarTensorImpl_cpu_Int64( - {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create); -} // namespace -} // namespace Aidge - -#endif /* AIDGE_CPU_DATA_TENSORIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp index 0299148d086ae6e2be967232e8157c6a6229b0f7..7a1497a2f4a2ae0e6005897ae504502505bbe60a 100644 --- a/include/aidge/backend/cpu/operator/AddImpl.hpp +++ b/include/aidge/backend/cpu/operator/AddImpl.hpp @@ -12,34 +12,35 @@ #ifndef AIDGE_CPU_OPERATOR_ADDIMPL_H_ #define AIDGE_CPU_OPERATOR_ADDIMPL_H_ +#include <cstddef> // std::size_t +#include <memory> // std::unique_ptr, std::make_unique +#include <string> +#include <vector> + #include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/Add.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" -#include "aidge/backend/cpu/data/GetCPUPtr.h" -#include <memory> -#include <vector> namespace Aidge { -// class Add_Op<2>; // compute kernel registry for forward and backward class AddImplForward_cpu - : public 
Registrable<AddImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const std::vector<const void*>, void*)> {}; + : public Registrable<AddImplForward_cpu, std::tuple<DataType, DataType>, void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)> {}; class AddImplBackward_cpu - : public Registrable<AddImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const std::vector<const void*>, void*)> {}; + : public Registrable<AddImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)> {}; class AddImpl_cpu : public OperatorImpl { public: - AddImpl_cpu(const Add_Op& op) : OperatorImpl(op) {} + AddImpl_cpu(const Add_Op& op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<AddImpl_cpu> create(const Add_Op& op) { return std::make_unique<AddImpl_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final; + Elts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final; void forward() override; }; diff --git a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp index 631ad44a562c17d41ad019a1da112dbf8a69185c..478a0226f43ccbc64d567a56ab89a558179438c5 100644 --- a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp @@ -14,12 +14,13 @@ #include "aidge/utils/Registrar.hpp" +#include "aidge/backend/cpu/data/Broadcasting.hpp" #include "aidge/backend/cpu/operator/AddImpl.hpp" namespace Aidge { template <class I, class O> -void AddImpl_cpu_forward_kernel(const std::size_t inputLength, const std::vector<const void*> inputs_, void* output_) { +void AddImpl_cpu_forward_kernel(const std::vector<const void*> inputs_, const std::vector<std::vector<std::size_t>>& inputDims, const std::size_t outputLength, const std::vector<std::size_t>& outDims, void* output_) { // FIXME: missing Add attributes as arguments std::vector<const I*> inputs; for (const auto& input_ : inputs_) { @@ -27,12 +28,15 @@ void AddImpl_cpu_forward_kernel(const std::size_t inputLength, const std::vector } O* output = static_cast<O*>(output_); - for (std::size_t oIndex = 0; oIndex < inputLength; ++oIndex) { + for (std::size_t oIndex = 0; oIndex < outputLength; ++oIndex) + { output[oIndex] = 0; - for (std::size_t iIndex = 0; iIndex < inputs.size(); ++iIndex) { - output[oIndex] += inputs[iIndex][oIndex]; - } - } + std::vector<size_t> indexes = getMultiDimIndices(outDims, oIndex); + for(std::size_t iIndex = 0; iIndex < inputs.size(); ++iIndex) { + std::size_t idx = getFlattenedIndex(inputDims[iIndex], indexes); + output[oIndex] += inputs[iIndex][idx]; + } + } } namespace { diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp index bfb2b1947281fc30e38fd1fe1663bd5de415d3ee..ce126dc2b870d6ac767c15bc6fbca2deb07e8772 100644 --- a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp +++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp @@ -38,13 +38,13 @@ class AvgPoolingImpl2DBackward_cpu class AvgPoolingImpl2D_cpu : public OperatorImpl { public: - AvgPoolingImpl2D_cpu(const AvgPooling_Op<2> &op) : OperatorImpl(op) {} + AvgPoolingImpl2D_cpu(const AvgPooling_Op<2> &op) : OperatorImpl(op, "cpu") {} static 
std::unique_ptr<AvgPoolingImpl2D_cpu> create(const AvgPooling_Op<2> &op) { return std::make_unique<AvgPoolingImpl2D_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; void forward() override; }; diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp index a599aeb7b427161eb7541829242820c0306d0d31..8bd567dab3d564ccdeffdc581585e404fc4697a4 100644 --- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp +++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp @@ -53,13 +53,13 @@ class BatchNormImpl2DBackward_cpu class BatchNormImpl2D_cpu : public OperatorImpl { public: - BatchNormImpl2D_cpu(const BatchNorm_Op<2> &op) : OperatorImpl(op) {} + BatchNormImpl2D_cpu(const BatchNorm_Op<2> &op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<BatchNormImpl2D_cpu> create(const BatchNorm_Op<2> &op) { return std::make_unique<BatchNormImpl2D_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; void forward() override; }; diff --git a/include/aidge/backend/cpu/operator/ConcatImpl.hpp b/include/aidge/backend/cpu/operator/ConcatImpl.hpp index d0d3e06365c524da1af485583dda6d6208ef3fb9..a997ffa9860f87fe0d9bc4e64239a656053416a6 100644 --- a/include/aidge/backend/cpu/operator/ConcatImpl.hpp +++ b/include/aidge/backend/cpu/operator/ConcatImpl.hpp @@ -41,25 +41,13 @@ class ConcatImplBackward_cpu class ConcatImpl_cpu : public OperatorImpl { public: - ConcatImpl_cpu(const Concat_Op& op) : OperatorImpl(op) {} + ConcatImpl_cpu(const Concat_Op& op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<ConcatImpl_cpu> create(const Concat_Op& op) { return std::make_unique<ConcatImpl_cpu>(op); } public: - NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final; - - NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final; - - NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final; - - NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final; - - NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final; - - void updateConsummerProducer() override final; - void forward() override; void backward() override; diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp index f72890d8903ca4a9876809759587ed4b1ac22e67..a61a7299ed6bd5c5a3e41c09e9d5b5f1f7ae3326 100644 --- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp +++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp @@ -40,13 +40,13 @@ class ConvDepthWiseImpl2DBackward_cpu class ConvDepthWiseImpl2D_cpu : public OperatorImpl { public: - ConvDepthWiseImpl2D_cpu(const ConvDepthWise_Op<2> &op) : OperatorImpl(op) {} + ConvDepthWiseImpl2D_cpu(const ConvDepthWise_Op<2> &op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<ConvDepthWiseImpl2D_cpu> create(const ConvDepthWise_Op<2> &op) { return std::make_unique<ConvDepthWiseImpl2D_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; void forward() override; }; diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp 
b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp index 95a1aaeccbe728eb2bb957913a5b79f4b8a9548b..801bd315f9e5058ffade574fc92179b1e3c513e4 100644 --- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp @@ -64,7 +64,7 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Attrs &at for (std::size_t batch = 0; batch < dims[0]; ++batch) { for (std::size_t ch = 0; ch < std::get<2>(attrs); ++ch) { const std::size_t oIndex = (ch + batch*std::get<2>(attrs)) * oxSize * oySize; - B biasVal = (biases != nullptr) ? biases[ch] : B(0); + B biasVal = ((!std::get<4>(attrs)) && biases != nullptr) ? biases[ch] : B(0); std::fill(output + oIndex, output+(oIndex+oxSize*oySize), biasVal); const std::size_t iIndex = (ch + batch*dims[1]) * dims[2] * dims[3]; const std::size_t wIndex = ch * std::get<3>(attrs)[0] * std::get<3>(attrs)[1]; diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp index 9bc2f27412f388a7fd03db06ac97c612044fab5f..e7ce0892a6241009a8e80821e341b3209a19faa4 100644 --- a/include/aidge/backend/cpu/operator/ConvImpl.hpp +++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp @@ -40,14 +40,14 @@ class ConvImpl2DBackward_cpu class ConvImpl2D_cpu : public OperatorImpl { public: - ConvImpl2D_cpu(const Conv_Op<2>& op) : OperatorImpl(op) {} + ConvImpl2D_cpu(const Conv_Op<2>& op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<ConvImpl2D_cpu> create(const Conv_Op<2> &op) { return std::make_unique<ConvImpl2D_cpu>(op); } public: - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; void forward() override; }; diff --git a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp index 83607f280f53e5e477db7d8bbbbd1634dd9c584d..00d34f6596780f42aa5864058ea543f046f8edb1 100644 --- a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp @@ -106,7 +106,8 @@ void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Attrs &attrs, const std::ar for (std::size_t batch = 0; batch < dims[0]; ++batch) { for (std::size_t outCh = 0; outCh < std::get<3>(attrs); ++outCh) { const std::size_t oIndex = (outCh + batch*std::get<3>(attrs)) * oxSize * oySize; - B biasVal = (biases != nullptr) ? biases[outCh] : B(0); + // If NoBias or bias = nullptr, set B(0) + B biasVal = ((!std::get<5>(attrs)) && biases != nullptr) ? 
biases[outCh] : B(0); std::fill(output + oIndex, output+(oIndex+oxSize*oySize), biasVal); for (std::size_t inCh = 0; inCh < dims[1]; ++inCh) { const std::size_t iIndex = (inCh + batch*dims[1]) * dims[2] * dims[3]; diff --git a/include/aidge/backend/cpu/operator/DivImpl.hpp b/include/aidge/backend/cpu/operator/DivImpl.hpp index 73809ee81e26fff23e40763405857ddd2c95db0c..3a19d7303464e3543bd1ce83e334c4a6bdb713a2 100644 --- a/include/aidge/backend/cpu/operator/DivImpl.hpp +++ b/include/aidge/backend/cpu/operator/DivImpl.hpp @@ -12,35 +12,37 @@ #ifndef AIDGE_CPU_OPERATOR_DIVIMPL_H_ #define AIDGE_CPU_OPERATOR_DIVIMPL_H_ +#include <memory> +#include <tuple> +#include <vector> + #include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/Div.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" -#include "aidge/backend/cpu/data/GetCPUPtr.h" -#include <memory> -#include <vector> namespace Aidge { -// class Div_Op; // compute kernel registry for forward and backward class DivImplForward_cpu - : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> { + // : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> { + : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const std::size_t, const void*, const void*,void*)> { }; class DivImplBackward_cpu - : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> { + : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> { }; class DivImpl_cpu : public OperatorImpl { public: - DivImpl_cpu(const Div_Op& op) : OperatorImpl(op) {} + DivImpl_cpu(const Div_Op& op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<DivImpl_cpu> create(const Div_Op& op) { return std::make_unique<DivImpl_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; - void forward() override; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + + void forward() override final; }; namespace { diff --git a/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp index e2ead9ca8de3ed8328b659906336766fbfbb6a47..3cdcefa9e1c865f66b64ed527605d46af31be8af 100644 --- a/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp @@ -12,42 +12,64 @@ #ifndef AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_ #define AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_ +#include <numeric> // std::accumulate +#include <cstddef> // std::size_t +#include <functional> // std::multiplies + #include "aidge/utils/Registrar.hpp" +#include "aidge/backend/cpu/data/Broadcasting.hpp" #include "aidge/backend/cpu/operator/DivImpl.hpp" namespace Aidge { +// template <class I1, class I2, class O> +// void DivImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims, +// const std::vector<std::size_t>& input2Dims, +// const std::vector<std::size_t>& outputDims, +// const void* input1_, +// 
const void* input2_, +// void* output_) { + +// const I1* input_1 = static_cast<const I1*>(input1_); +// const I2* input_2 = static_cast<const I2*>(input2_); +// O* output = static_cast<O*>(output_); + +// const std::size_t totalElements = std::accumulate(outputDims.cbegin(), outputDims.cend(), std::size_t(1), std::multiplies<std::size_t>()); + +// for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex) +// { +// std::vector<std::size_t> indexes = getMultiDimIndices(outputDims, oIndex); + +// std::size_t idx1 = getFlattenedIndex(input1Dims, indexes); +// std::size_t idx2 = getFlattenedIndex(input2Dims, indexes); + +// // TODO assert if input_2 is bad? +// output[oIndex] = input_1[idx1] / input_2[idx2]; +// } +// } + template <class I1, class I2, class O> -void DivImpl_cpu_forward_kernel(std::size_t input1Length, - std::size_t input2Length, - const void* input1_, - const void* input2_, - void* output_) { +constexpr void DivImpl_cpu_forward_kernel(const std::size_t input1size_, + const std::size_t input2size_, + const std::size_t output1size_, + const void* input1_, + const void* input2_, + void* output_) { const I1* input_1 = static_cast<const I1*>(input1_); const I2* input_2 = static_cast<const I2*>(input2_); O* output = static_cast<O*>(output_); - if (input2Length == input1Length) - { - for (std::size_t i = 0; i < input1Length; ++i) { - output[i] = input_1[i] / input_2[i]; - } - } - else if (input2Length == 1) - { - for (std::size_t i = 0; i < input1Length; ++i) { - output[i] = input_1[i] / input_2[0]; - } - } - else // input_2 is 1d and of size the number of channels of input_1 - { - for (std::size_t i = 0; i < input1Length; ++i) { - std::size_t channelIdx = i % input2Length; - output[i] = input_1[i] / input_2[channelIdx]; - } + +// suppose values are contiguous in memory + for (std::size_t i = 0; i < output1size_; ++i) { + const std::size_t in1_id = (input1size_ != 1) ? i : 0; + const std::size_t in2_id = (input2size_ != 1) ? i : 0; + output[i] = static_cast<O>(input_1[in1_id] / input_2[in2_id]); } } + + namespace { static Registrar<DivImplForward_cpu> registrarDivImplForward_cpu_Float32( {DataType::Float32, DataType::Float32, DataType::Float32}, diff --git a/include/aidge/backend/cpu/operator/ErfImpl.hpp b/include/aidge/backend/cpu/operator/ErfImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..6864803a542e4beed0259be9c4722d4215bec449 --- /dev/null +++ b/include/aidge/backend/cpu/operator/ErfImpl.hpp @@ -0,0 +1,50 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_ERFIMPL_H_ +#define AIDGE_CPU_OPERATOR_ERFIMPL_H_ + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/Erf.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include <memory> +#include <vector> + +namespace Aidge { +// class Erf_Op; + +// compute kernel registry for forward and backward +class ErfImplForward_cpu + : public Registrable<ErfImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> { +}; +class ErfImplBackward_cpu + : public Registrable<ErfImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> { +}; + +class ErfImpl_cpu : public OperatorImpl { +public: + ErfImpl_cpu(const Erf_Op& op) : OperatorImpl(op, "cpu") {} + + static std::unique_ptr<ErfImpl_cpu> create(const Erf_Op& op) { + return std::make_unique<ErfImpl_cpu>(op); + } + + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; + +namespace { +static Registrar<Erf_Op> registrarErfImpl_cpu("cpu", Aidge::ErfImpl_cpu::create); +} +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_ERFIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..bb92401b6e72b1528d0342474bf394a7c29a4042 --- /dev/null +++ b/include/aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp @@ -0,0 +1,45 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_
+
+#include <cmath>
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/ErfImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void ErfImpl_cpu_forward_kernel(std::size_t inputLength,
+                                const void* input_,
+                                void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
+    for (std::size_t i = 0; i < inputLength; ++i) {
+        output[i] = std::erf(input[i]);
+    }
+}
+
+namespace {
+static Registrar<ErfImplForward_cpu> registrarErfImplForward_cpu_Float32(
+    {DataType::Float32, DataType::Float32}, Aidge::ErfImpl_cpu_forward_kernel<float, float>);
+static Registrar<ErfImplForward_cpu> registrarErfImplForward_cpu_Int32(
+    {DataType::Int32, DataType::Int32}, Aidge::ErfImpl_cpu_forward_kernel<int, int>);
+static Registrar<ErfImplForward_cpu> registrarErfImplForward_cpu_Float64(
+    {DataType::Float64, DataType::Float64}, Aidge::ErfImpl_cpu_forward_kernel<double, double>);
+} // namespace
+} // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp
index 86bb7fd1271e5857b595dda8efc0354851c94b7e..fedd8b38b2dbee9e5fd288a07d5cd722470723e5 100644
--- a/include/aidge/backend/cpu/operator/FCImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl.hpp
@@ -26,23 +26,42 @@ namespace Aidge {
 // compute kernel registry for forward and backward
 class FCImplForward_cpu : public Registrable<FCImplForward_cpu,
-                                             std::tuple<DataType, DataType, DataType, DataType>,
-                                             void(const FC_Op::Attrs &, const DimSize_t, const DimSize_t,
-                                                  const void *, const void *, const void *, void *)> {};
+                                             std::tuple<DataType,
+                                                        DataType,
+                                                        DataType,
+                                                        DataType>,
+                                             void(const FC_Op::Attrs&,
+                                                  const DimSize_t,
+                                                  const DimSize_t,
+                                                  const void *,
+                                                  const void *,
+                                                  const void *,
+                                                  void *)> {};
 class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu,
-                                              std::tuple<DataType, DataType, DataType, DataType>,
-                                              void(const FC_Op::Attrs &, const DimSize_t, const DimSize_t,
-                                                   const void *, const void *, const void *, void *)> {};
+                                              std::tuple<DataType,
+                                                         DataType,
+                                                         DataType,
+                                                         DataType>,
+                                              void(const FC_Op::Attrs&,
+                                                   const DimSize_t,
+                                                   const DimSize_t,
+                                                   const void *,
+                                                   const void *,
+                                                   const void *,
+                                                   void *,
+                                                   void *,
+                                                   void *)> {};
 
 class FCImpl_cpu : public OperatorImpl {
 public:
-    FCImpl_cpu(const FC_Op &op) : OperatorImpl(op) {}
+    FCImpl_cpu(const FC_Op &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<FCImpl_cpu> create(const FC_Op &op) {
         return std::make_unique<FCImpl_cpu>(op);
     }
 
-    void forward() override;
+    void forward() override final;
+    void backward() override final;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..50fb5f49033cccd3c554d692bc336c7d5d677384
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp
@@ -0,0 +1,84 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_FCIMPL_BACKWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_FCIMPL_BACKWARD_KERNEL_H_ + +#include "aidge/utils/Registrar.hpp" +#include <algorithm> + +#include "aidge/backend/cpu/operator/FCImpl.hpp" + +namespace Aidge { +template <class I, class O, class W, class B> +void FCImpl_cpu_backward_kernel(const FC_Op::Attrs& attrs, const DimSize_t batchSize, const DimSize_t oneInputSize, + const void* input_, const void* originalInput_, const void* weight_, void* output_, void* weightGrad_, void* biasesGrad_) { + // FIXME: missing FC attributes as arguments + const I* input = static_cast<const I*>(input_); + const I* originalInput = static_cast<const I*>(originalInput_); + const W* weight = static_cast<const W*>(weight_); + O* output = static_cast<O*>(output_); + W* weightGrad = static_cast<W*>(weightGrad_); + B* biasesGrad = static_cast<B*>(biasesGrad_); + + + // bias grad + if (std::get<1>(attrs)) { // no bias + std::fill(biasesGrad, biasesGrad + std::get<0>(attrs), B(0)); + } else { + for (std::size_t o = 0; o < std::get<0>(attrs); ++o) { // nb outputs + B sum{0}; + for (std::size_t b = 0; b < batchSize; ++b) { + sum += input[b*std::get<0>(attrs) + o]; + } + biasesGrad[o] = sum; + } + } + + // weight grad + for (std::size_t o = 0; o < std::get<0>(attrs); ++o) { + for (std::size_t c = 0; c < oneInputSize; ++c) { + W sum{0}; + for (std::size_t b = 0; b < batchSize; ++b) { + sum += originalInput[b*oneInputSize + c]*input[b*std::get<0>(attrs) + o]; + } + weightGrad[o*oneInputSize + c] = sum; + } + } + + // input grad + for (std::size_t b = 0; b < batchSize; ++b) { + for (std::size_t c = 0; c < oneInputSize; ++c) { + O sum{0}; + for (std::size_t o = 0; o < std::get<0>(attrs); ++o) { + sum += weight[o*oneInputSize + c] * input[b*std::get<0>(attrs) + o]; + } + output[b*oneInputSize + c] = sum; + } + } +} + + +namespace { +static Registrar<FCImplBackward_cpu> registrarFCImpl2DBackward_cpu_Float32( + {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32}, + Aidge::FCImpl_cpu_backward_kernel<float, float, float, float>); +static Registrar<FCImplBackward_cpu> registrarFCImpl2DBackward_cpu_Int32( + {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32}, + Aidge::FCImpl_cpu_backward_kernel<int, int, int, int>); +static Registrar<FCImplBackward_cpu> registrarFCImpl2DBackward_cpu_Float64( + {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64}, + Aidge::FCImpl_cpu_backward_kernel<double, double, double, double>); +} // namespace + +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_FCIMPL_BACKWARD_KERNEL_H_ */ diff --git a/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp index 91e2558a7ef1079cbc9fb11f78fab53ef4246149..64f3b3e18f7255b74decad5137cbb5ccd6966123 100644 --- a/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp @@ -12,10 +12,10 @@ #ifndef AIDGE_CPU_OPERATOR_FCIMPL_FORWARD_KERNEL_H_ #define AIDGE_CPU_OPERATOR_FCIMPL_FORWARD_KERNEL_H_ -#include "aidge/utils/Registrar.hpp" #include <algorithm> #include "aidge/backend/cpu/operator/FCImpl.hpp" +#include "aidge/utils/Registrar.hpp" namespace Aidge { // template <class I, class W, class B, class O> diff --git 
a/include/aidge/backend/cpu/operator/GatherImpl.hpp b/include/aidge/backend/cpu/operator/GatherImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..2164f6c4f26dca64c672f62bc8fdc0895c642ae4 --- /dev/null +++ b/include/aidge/backend/cpu/operator/GatherImpl.hpp @@ -0,0 +1,49 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_GATHERIMPL_H_ +#define AIDGE_CPU_OPERATOR_GATHERIMPL_H_ + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/Gather.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include <memory> +#include <vector> + +namespace Aidge { +// class Gather_Op; + +// compute kernel registry for forward and backward +class GatherImplForward_cpu + : public Registrable<GatherImplForward_cpu, std::tuple<DataType, DataType>, void(const typename Gather_Op::Attrs&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class GatherImplBackward_cpu + : public Registrable<GatherImplBackward_cpu, std::tuple<DataType, DataType>, void(const typename Gather_Op::Attrs&, const std::vector<DimSize_t>&, const void*, void*)> { +}; + +class GatherImpl_cpu : public OperatorImpl { +public: + GatherImpl_cpu(const Gather_Op& op) : OperatorImpl(op, "cpu") {} + + static std::unique_ptr<GatherImpl_cpu> create(const Gather_Op& op) { + return std::make_unique<GatherImpl_cpu>(op); + } + + void forward() override; +}; + +namespace { +static Registrar<Gather_Op> registrarGatherImpl_cpu("cpu", Aidge::GatherImpl_cpu::create); +} +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_GATHERIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..0d312e3c143720c7d920128c8d484d4c68439a24 --- /dev/null +++ b/include/aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp @@ -0,0 +1,66 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_GATHERIMPL_FORWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_GATHERIMPL_FORWARD_KERNEL_H_ + +#include "aidge/utils/Registrar.hpp" +#include <cstddef> +#include <cmath> +#include "aidge/data/Data.hpp" +#include "aidge/utils/Types.h" + +#include "aidge/backend/cpu/operator/GatherImpl.hpp" + +namespace Aidge { +template <class I, class O> +void GatherImpl_cpu_forward_kernel(const typename Gather_Op::Attrs& attrs, const std::vector<DimSize_t>& inputDims, const void* input_, void* output_) +{ + const I* input = static_cast<const I*>(input_); + O* output = static_cast<O*>(output_); + + const std::size_t axisIdx = std::get<2>(attrs)>=0 ? 
+ std::get<2>(attrs) : + static_cast<std::size_t>(std::get<2>(attrs)) + inputDims.size(); + + std::size_t postAxisElems = 1; + for (std::size_t i = axisIdx + 1; i < inputDims.size(); ++i) { + postAxisElems *= inputDims[i]; + } + std::size_t preAxisElems = 1; + for (std::size_t i = 0; i < axisIdx; ++i) { + preAxisElems *= inputDims[i]; + } + + const std::vector<std::int64_t> indices = std::get<0>(attrs); + for (std::size_t i=0; i<preAxisElems; ++i) + { + for(std::size_t j=0; j<indices.size(); ++j) + { + const std::size_t idx = indices[j] >= 0 ? indices[j] : static_cast<std::size_t>(indices[j]) + inputDims[axisIdx]; + const I* startPtr = std::next(input, i * postAxisElems * inputDims[axisIdx] + idx * postAxisElems); + std::copy_n(startPtr, postAxisElems, output); + output += postAxisElems; + } + } +} + +namespace { +static Registrar<GatherImplForward_cpu> registrarGatherImplForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::GatherImpl_cpu_forward_kernel<float, float>); +static Registrar<GatherImplForward_cpu> registrarGatherImplForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::GatherImpl_cpu_forward_kernel<int, int>); +static Registrar<GatherImplForward_cpu> registrarGatherImplForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::GatherImpl_cpu_forward_kernel<double, double>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_GATHERIMPL_FORWARD_KERNEL_H_ */ diff --git a/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..758535de4cc506b8de4adf7004afbbfdd8185941 --- /dev/null +++ b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp @@ -0,0 +1,55 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
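// A worked pass through the gather loop above: with a 2x3 input and axis = 1,
// each index selects a column (postAxisElems = 1, preAxisElems = 2). A negative
// index is normalised by adding the axis length; in the kernel this happens via
// the unsigned wrap-around of casting a negative int64 to size_t before adding
// inputDims[axisIdx], which lands on the same value for in-range indices.
// Standalone sketch (illustrative only, not the Aidge API):
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    const std::vector<int> input{1, 2, 3,
                                 4, 5, 6};            // dims = {2, 3}, axis = 1
    const std::vector<std::int64_t> indices{2, -3};   // -3 -> -3 + 3 == 0
    const std::size_t pre = 2, axisDim = 3, post = 1;
    std::vector<int> output;
    for (std::size_t i = 0; i < pre; ++i)
        for (const std::int64_t raw : indices) {
            const std::size_t idx = raw >= 0 ? static_cast<std::size_t>(raw)
                                             : static_cast<std::size_t>(raw) + axisDim;
            for (std::size_t j = 0; j < post; ++j)
                output.push_back(input[i*axisDim*post + idx*post + j]);
        }
    for (const int v : output) std::cout << v << ' ';  // 3 1 6 4
    std::cout << '\n';
}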
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_H_ +#define AIDGE_CPU_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_H_ + +#include <memory> +#include <vector> + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/GlobalAveragePooling.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +namespace Aidge { +// class GlobalAveragePooling_Op; + +class GlobalAveragePoolingImplForward_cpu + : public Registrable< + GlobalAveragePoolingImplForward_cpu, std::tuple<DataType, DataType>, + void(const std::vector<DimSize_t> &, const void *, void *)> {}; + +class GlobalAveragePoolingImplBackward_cpu + : public Registrable< + GlobalAveragePoolingImplBackward_cpu, std::tuple<DataType, DataType>, + void(const std::vector<DimSize_t> &, const void *, void *)> {}; + +class GlobalAveragePoolingImpl_cpu : public OperatorImpl { +public: + GlobalAveragePoolingImpl_cpu(const GlobalAveragePooling_Op &op) + : OperatorImpl(op, "cpu") {} + + static std::unique_ptr<GlobalAveragePoolingImpl_cpu> + create(const GlobalAveragePooling_Op &op) { + return std::make_unique<GlobalAveragePoolingImpl_cpu>(op); + } + + void forward() override; +}; + +namespace { +static Registrar<GlobalAveragePooling_Op> registrarGlobalAveragePoolingImpl_cpu( + "cpu", Aidge::GlobalAveragePoolingImpl_cpu::create); +} +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl_forward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..81f10975cc107a23448da3df14b88f6b31d55146 --- /dev/null +++ b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl_forward_kernels.hpp @@ -0,0 +1,80 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
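// The file-scope Registrar<GlobalAveragePooling_Op> object above runs its
// constructor during static initialisation, so the "cpu" factory is in place
// before any graph code executes. A sketch of that idiom with simplified,
// illustrative types (an assumption about the mechanism; the real registry
// lives in aidge/utils/Registrar.hpp):
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Impl { virtual ~Impl() = default; virtual void forward() = 0; };
struct CpuImpl : Impl { void forward() override { std::cout << "cpu forward\n"; } };

using Factory = std::function<std::unique_ptr<Impl>()>;
std::map<std::string, Factory>& registry() {
    static std::map<std::string, Factory> r;  // function-local static avoids the
    return r;                                 // static-initialisation-order fiasco
}
struct Registrar {
    Registrar(const std::string& backend, Factory f) { registry()[backend] = std::move(f); }
};
static const Registrar registrarCpu("cpu", [] { return std::make_unique<CpuImpl>(); });

int main() {
    auto impl = registry().at("cpu")();  // roughly what selecting the "cpu"
    impl->forward();                     // backend for an operator boils down to
}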
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_FORWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_FORWARD_KERNEL_H_ + +#include <cmath> // std::fmaf +#include <cstddef> +#include <functional> // std::multiplies +#include <numeric> // std::accumulate +#include <vector> + +#include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp" +#include "aidge/data/Data.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + + +namespace Aidge { +template <class I, class O> +void GlobalAveragePoolingImpl_cpu_forward_kernel( + const std::vector<DimSize_t> &dims, const void *input_, void *output_) { + // error checking + AIDGE_ASSERT(dims.size() >= 3, "GlobalAveragePooling needs at least a 3-dimensional " + "input, got {} dimensions", + dims.size()); + + // computation + const I *input = static_cast<const I *>(input_); + O *output = static_cast<O *>(output_); + + DimSize_t nb_elems = std::accumulate(dims.begin(), dims.end(), std::size_t(1), + std::multiplies<std::size_t>()); + + const DimSize_t in_batch_nb_elems{nb_elems / dims[0]}; + const DimSize_t in_channel_nb_elems{in_batch_nb_elems / dims[1]}; + const DimSize_t out_batch_nb_elems{dims[1]}; + // iterate channel by channel and fill each output with the average of the + // values in the channel + for (DimSize_t batch = 0; batch < dims[0]; ++batch) { + for (DimSize_t channel = 0; channel < dims[1]; ++channel) { + const I *filter_start = std::next( + input, (batch * in_batch_nb_elems) + (channel * in_channel_nb_elems)); + I mean = 0; + for (size_t i = 0; i < in_channel_nb_elems; ++i) { + // Single-pass numerically stable running mean, fused by std::fmaf + mean = std::fmaf(filter_start[i] - mean, 1.0f/(i+1), mean); + } + output[batch * out_batch_nb_elems + channel] = mean; + } + } +} + +// Register the kernel for each supported input/output datatype pair +namespace { +static Registrar<GlobalAveragePoolingImplForward_cpu> + registrarGlobalAveragePoolingImplForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, + Aidge::GlobalAveragePoolingImpl_cpu_forward_kernel<float, float>); +static Registrar<GlobalAveragePoolingImplForward_cpu> + registrarGlobalAveragePoolingImplForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, + Aidge::GlobalAveragePoolingImpl_cpu_forward_kernel<int, int>); +static Registrar<GlobalAveragePoolingImplForward_cpu> + registrarGlobalAveragePoolingImplForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, + Aidge::GlobalAveragePoolingImpl_cpu_forward_kernel<double, double>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_FORWARD_KERNEL_H_ */ diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp index 4a1da034935e6b1f6c2069b4f91153b77a9f0636..880a59b3aeae2598f6b1ed5e287af18fd7bcfd6f 100644 --- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp +++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp @@ -12,17 +12,17 @@ #ifndef AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_ #define AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_ +#include <memory> +#include <tuple> +#include <vector> + #include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/LeakyReLU.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" #include "aidge/backend/cpu/data/GetCPUPtr.h" -#include <memory> -#include <vector> namespace Aidge { -// class 
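// The recurrence in the pooling loop above is the incremental mean
// m_{i+1} = m_i + (x_i - m_i) / (i + 1), fused into a single rounding by fmaf,
// which avoids carrying a large running sum. One caveat worth noting: fmaf
// operates in single precision, so the double and int instantiations of the
// kernel also accumulate in float. A minimal demonstration:
#include <cmath>
#include <iostream>

int main() {
    const float x[4] = {1.f, 2.f, 3.f, 4.f};
    float mean = 0.f;
    for (int i = 0; i < 4; ++i)
        mean = std::fmaf(x[i] - mean, 1.0f / (i + 1), mean);
    std::cout << mean << '\n';  // 2.5, identical to (1+2+3+4)/4
}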
LeakyReLU_Op; - // compute kernel registry for forward and backward class LeakyReLUImplForward_cpu : public Registrable<LeakyReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Attrs&, std::size_t, const void*, void*)> { @@ -33,14 +33,17 @@ class LeakyReLUImplBackward_cpu class LeakyReLUImpl_cpu : public OperatorImpl { public: - LeakyReLUImpl_cpu(const LeakyReLU_Op& op) : OperatorImpl(op) {} + LeakyReLUImpl_cpu(const LeakyReLU_Op& op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<LeakyReLUImpl_cpu> create(const LeakyReLU_Op& op) { return std::make_unique<LeakyReLUImpl_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; - void forward() override; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + + void forward() override final; + + void backward() override final; }; namespace { diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..949e6af66a476693b347f38a45edea10e21bc933 --- /dev/null +++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp @@ -0,0 +1,45 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_BACKWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_BACKWARD_KERNEL_H_ + +#include "aidge/utils/Registrar.hpp" + +#include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp" + +namespace Aidge { +template <class I, class O> +void LeakyReLUImpl_cpu_backward_kernel(const LeakyReLU_Op::Attrs& attrs, + std::size_t inputLenght, + const void* input_, + void* output_) { + + const I* input = static_cast<const I*>(input_); + O* output = static_cast<O*>(output_); + I negativeSlope = static_cast<I>(std::get<0>(attrs)); + + for (std::size_t i = 0; i < inputLenght; ++i) { + output[i] = input[i] > 0 ? 
input[i] : negativeSlope*input[i]; + } +} + +namespace { +static Registrar<LeakyReLUImplBackward_cpu> registrarLeakyReLUImplBackward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::LeakyReLUImpl_cpu_backward_kernel<float, float>); +static Registrar<LeakyReLUImplBackward_cpu> registrarLeakyReLUImplBackward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::LeakyReLUImpl_cpu_backward_kernel<int, int>); +static Registrar<LeakyReLUImplBackward_cpu> registrarLeakyReLUImplBackward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::LeakyReLUImpl_cpu_backward_kernel<double, double>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_BACKWARD_KERNEL_H_ */ diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp index 761b9579c3c3dc187e4b0fac24812fa77f916e65..d10b32e18ee983fc1270bc4a7cce35e18f601071 100644 --- a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp @@ -25,7 +25,7 @@ void LeakyReLUImpl_cpu_forward_kernel(const LeakyReLU_Op::Attrs& attrs, const I* input = static_cast<const I*>(input_); O* output = static_cast<O*>(output_); - I negativeSlope = static_cast<I>(std::get<0>(attrs)); + const I negativeSlope = static_cast<const I>(std::get<0>(attrs)); for (std::size_t i = 0; i < inputLenght; ++i) { output[i] = input[i] >= 0 ? input[i] : input[i] * negativeSlope; diff --git a/include/aidge/backend/cpu/operator/MatMulImpl.hpp b/include/aidge/backend/cpu/operator/MatMulImpl.hpp index e8654c6e9cc8fab9080bbb5ed57ea78ee0b7978c..e4b76d64baadbcb1baa7d24180c4bb13ed47215b 100644 --- a/include/aidge/backend/cpu/operator/MatMulImpl.hpp +++ b/include/aidge/backend/cpu/operator/MatMulImpl.hpp @@ -23,21 +23,19 @@ #include "aidge/backend/cpu/data/GetCPUPtr.h" namespace Aidge { -// class MatMul_Op; -// compute kernel registry for forward and backward class MatMulImplForward_cpu - : public Registrable<MatMulImplForward_cpu, std::tuple<DataType, DataType, DataType>, - void(const MatMul_Op::Attrs &, const DimSize_t, const DimSize_t, + : public Registrable<MatMulImplForward_cpu, std::tuple<DataType, DataType>, + void(const std::size_t, const std::size_t, const std::size_t, const void *, const void *, void *)> {}; class MatMulImplBackward_cpu - : public Registrable<MatMulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, - void(const MatMul_Op::Attrs &, const DimSize_t, const DimSize_t, + : public Registrable<MatMulImplBackward_cpu, std::tuple<DataType, DataType>, + void(const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void *, const void *, void *)> {}; class MatMulImpl_cpu : public OperatorImpl { public: - MatMulImpl_cpu(const MatMul_Op &op): OperatorImpl(op) {} + MatMulImpl_cpu(const MatMul_Op &op): OperatorImpl(op, "cpu") {} static std::unique_ptr<MatMulImpl_cpu> create(const MatMul_Op &op) { return std::make_unique<MatMulImpl_cpu>(op); diff --git a/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp index bc52779eff274379a853ea84fb839c9486652433..5045580fa599aac64f2c1414bfdf2b87ea57e313 100644 --- a/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp @@ -12,45 +12,39 @@ #ifndef AIDGE_CPU_OPERATOR_MATMULIMPL_FORWARD_KERNEL_H_ #define 
AIDGE_CPU_OPERATOR_MATMULIMPL_FORWARD_KERNEL_H_ -#include "aidge/utils/Registrar.hpp" -#include <algorithm> - #include "aidge/backend/cpu/operator/MatMulImpl.hpp" namespace Aidge { -template <class I, class W, class O> -void MatMulImpl_cpu_forward_kernel(const MatMul_Op::Attrs& attrs, const DimSize_t batchSize, const DimSize_t oneInputSize, - const void* input_, const void* weights_, void* output_) { +template <class I, class O> +void MatMulImpl_cpu_forward_kernel(const std::size_t n, const std::size_t k, const std::size_t m, + const void* input1_, const void* input2_, void* output_) { // FIXME: missing MatMul parameters as arguments - const I* input = static_cast<const I*>(input_); - const W* weights = static_cast<const W*>(weights_); + const I* input1 = static_cast<const I*>(input1_); + const I* input2 = static_cast<const I*>(input2_); O* output = static_cast<O*>(output_); - - std::fill(output, output+(batchSize*std::get<0>(attrs)), O(0)); - - for (std::size_t batch = 0; batch < batchSize; ++batch) { - for (std::size_t out = 0; out < std::get<0>(attrs); ++out) { - output[out + batch*std::get<0>(attrs)] = std::inner_product(input + batch*oneInputSize, - input + (batch + 1)*oneInputSize, - weights + out*oneInputSize, - output[out + batch*std::get<0>(attrs)]); + for (std::size_t i = 0; i < n; ++i) { + for (std::size_t j = 0; j < m; ++j) { + O sum = O(0); + for (std::size_t l = 0; l < k; ++l) { + sum += static_cast<O>(input1[i*k + l] * input2[l*m + j]); + } + output[i*m + j] = sum; } } } - namespace { static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Float32( - {DataType::Float32, DataType::Float32, DataType::Float32}, - Aidge::MatMulImpl_cpu_forward_kernel<float, float, float>); + {DataType::Float32, DataType::Float32}, + Aidge::MatMulImpl_cpu_forward_kernel<float, float>); static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Int32( - {DataType::Int32, DataType::Int32, DataType::Int32}, - Aidge::MatMulImpl_cpu_forward_kernel<int, int, int>); + {DataType::Int32, DataType::Int32}, + Aidge::MatMulImpl_cpu_forward_kernel<int, int>); static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Float64( - {DataType::Float64, DataType::Float64, DataType::Float64}, - Aidge::MatMulImpl_cpu_forward_kernel<double, double, double>); + {DataType::Float64, DataType::Float64}, + Aidge::MatMulImpl_cpu_forward_kernel<double, double>); } // namespace } // namespace Aidge diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp index 6cde34d9b123b4f83cbfce412ffa62e0144af8d4..d2d30aa7db5b1522712faa846ef33e1b21772d5e 100644 --- a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp +++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp @@ -38,13 +38,13 @@ class MaxPoolingImpl2DBackward_cpu class MaxPoolingImpl2D_cpu : public OperatorImpl { public: - MaxPoolingImpl2D_cpu(const MaxPooling_Op<2> &op) : OperatorImpl(op) {} + MaxPoolingImpl2D_cpu(const MaxPooling_Op<2> &op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<MaxPoolingImpl2D_cpu> create(const MaxPooling_Op<2> &op) { return std::make_unique<MaxPoolingImpl2D_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; void forward() override; }; diff --git a/include/aidge/backend/cpu/operator/MemorizeImpl.hpp b/include/aidge/backend/cpu/operator/MemorizeImpl.hpp new file mode 100644 index 
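// The rewritten MatMul kernel above is the textbook (n,k) x (k,m) triple loop,
// accumulating each dot product in a local variable before a single store.
// A standalone check with small integer matrices:
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    const std::size_t n = 2, k = 3, m = 2;
    const std::vector<int> a{1, 2, 3,
                             4, 5, 6};      // 2x3
    const std::vector<int> b{ 7,  8,
                              9, 10,
                             11, 12};       // 3x2
    std::vector<int> c(n * m, 0);
    for (std::size_t i = 0; i < n; ++i)
        for (std::size_t j = 0; j < m; ++j)
            for (std::size_t l = 0; l < k; ++l)
                c[i*m + j] += a[i*k + l] * b[l*m + j];
    std::cout << c[0] << ' ' << c[1] << '\n'   //  58  64
              << c[2] << ' ' << c[3] << '\n';  // 139 154
}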
0000000000000000000000000000000000000000..5ea0c9d4f3802490e5b41b5ea1c8454c87c65b28 --- /dev/null +++ b/include/aidge/backend/cpu/operator/MemorizeImpl.hpp @@ -0,0 +1,44 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_MEMORIZEIMPL_H_ +#define AIDGE_CPU_OPERATOR_MEMORIZEIMPL_H_ + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/Memorize.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include "aidge/backend/cpu/data/GetCPUPtr.h" +#include <memory> +#include <vector> + +namespace Aidge { +class MemorizeImpl_cpu : public OperatorImpl { +public: + MemorizeImpl_cpu(const Memorize_Op& op) : OperatorImpl(op, "cpu") {} + + static std::unique_ptr<MemorizeImpl_cpu> create(const Memorize_Op& op) { + return std::make_unique<MemorizeImpl_cpu>(op); + } + + Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final; + Elts_t getRequiredMemory(const Aidge::IOIndex_t outputIdx, + const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const override final; + void updateConsummerProducer() override final; + void forward() override; +}; + +namespace { +static Registrar<Memorize_Op> registrarMemorizeImpl_cpu("cpu", Aidge::MemorizeImpl_cpu::create); +} +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_MEMORIZEIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/MulImpl.hpp b/include/aidge/backend/cpu/operator/MulImpl.hpp index f1b58e59b9ac1d3a1d34162a1054534830b8d508..2d42194c417bd7d57c00f4325a4585cf59d95b24 100644 --- a/include/aidge/backend/cpu/operator/MulImpl.hpp +++ b/include/aidge/backend/cpu/operator/MulImpl.hpp @@ -25,21 +25,21 @@ namespace Aidge { // compute kernel registry for forward and backward class MulImplForward_cpu - : public Registrable<MulImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> { + : public Registrable<MulImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> { }; class MulImplBackward_cpu - : public Registrable<MulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> { + : public Registrable<MulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> { }; class MulImpl_cpu : public OperatorImpl { public: - MulImpl_cpu(const Mul_Op& op) : OperatorImpl(op) {} + MulImpl_cpu(const Mul_Op& op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<MulImpl_cpu> create(const Mul_Op& op) { return std::make_unique<MulImpl_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; void forward() override; }; diff --git a/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp index 
9caef8b88af3ca779309b60eba984a72db35f84a..e1387768ea02e2a9f35790c64c7674c321a1faa7 100644 --- a/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp @@ -14,37 +14,35 @@ #include "aidge/utils/Registrar.hpp" +#include "aidge/backend/cpu/data/Broadcasting.hpp" #include "aidge/backend/cpu/operator/MulImpl.hpp" namespace Aidge { template <class I1, class I2, class O> -void MulImpl_cpu_forward_kernel(std::size_t input1Length, - std::size_t input2Length, - const void* input1_, - const void* input2_, - void* output_) { +void MulImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims, + const std::vector<std::size_t>& input2Dims, + const std::vector<std::size_t>& outputDims, + const void* input1_, + const void* input2_, + void* output_) { const I1* input_1 = static_cast<const I1*>(input1_); const I2* input_2 = static_cast<const I2*>(input2_); O* output = static_cast<O*>(output_); - if (input2Length == input1Length) - { - for (std::size_t i = 0; i < input1Length; ++i) { - output[i] = input_1[i] * input_2[i]; - } - } - else if (input2Length == 1) - { - for (std::size_t i = 0; i < input1Length; ++i) { - output[i] = input_1[i] * input_2[0]; - } + + size_t totalElements = 1; + for (size_t dimSize : outputDims) { + totalElements *= dimSize; } - else // input_2 is 1d and of size the number of channels of input_1 - { - for (std::size_t i = 0; i < input1Length; ++i) { - std::size_t channelIdx = i % input2Length; - output[i] = input_1[i] * input_2[channelIdx]; - } + + for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex) + { + std::vector<size_t> indexes = getMultiDimIndices(outputDims, oIndex); + + std::size_t idx1 = getFlattenedIndex(input1Dims, indexes); + std::size_t idx2 = getFlattenedIndex(input2Dims, indexes); + + output[oIndex] = input_1[idx1] * input_2[idx2]; } } diff --git a/include/aidge/backend/cpu/operator/PadImpl.hpp b/include/aidge/backend/cpu/operator/PadImpl.hpp index 2320662710f9802878811e51ec4439bd812aea67..b3c91a43419e9a5e9e1299f4a2118a51b6b64fc7 100644 --- a/include/aidge/backend/cpu/operator/PadImpl.hpp +++ b/include/aidge/backend/cpu/operator/PadImpl.hpp @@ -40,13 +40,13 @@ class PadImpl2DBackward_cpu class PadImpl2D_cpu : public OperatorImpl { public: - PadImpl2D_cpu(const Pad_Op<2> &op) : OperatorImpl(op) {} + PadImpl2D_cpu(const Pad_Op<2> &op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<PadImpl2D_cpu> create(const Pad_Op<2> &op) { return std::make_unique<PadImpl2D_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; void forward() override; }; diff --git a/include/aidge/backend/cpu/operator/PopImpl.hpp b/include/aidge/backend/cpu/operator/PopImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..19d5903973da378ce003daf4de9e1ae54d7b1b0e --- /dev/null +++ b/include/aidge/backend/cpu/operator/PopImpl.hpp @@ -0,0 +1,51 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
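// The broadcast-aware Mul kernel above delegates the index arithmetic to
// getMultiDimIndices / getFlattenedIndex from Broadcasting.hpp. Below is a
// plausible re-implementation of both helpers -- an assumption made for
// illustration; the authoritative versions live in
// aidge/backend/cpu/data/Broadcasting.hpp -- with a worked {2,3} output
// against a broadcast {1,3} input:
#include <cstddef>
#include <iostream>
#include <vector>

// Flat output offset -> per-dimension coordinates (row-major).
std::vector<std::size_t> getMultiDimIndices(const std::vector<std::size_t>& dims,
                                            std::size_t flat) {
    std::vector<std::size_t> idx(dims.size());
    for (std::size_t i = dims.size(); i-- > 0;) {
        idx[i] = flat % dims[i];
        flat  /= dims[i];
    }
    return idx;
}

// Coordinates -> flat offset in an input, clamping size-1 dimensions to 0 so
// that the input is reused along every broadcast dimension.
std::size_t getFlattenedIndex(const std::vector<std::size_t>& dims,
                              const std::vector<std::size_t>& idx) {
    std::size_t flat = 0;
    for (std::size_t i = 0; i < dims.size(); ++i)
        flat = flat * dims[i] + (dims[i] == 1 ? 0 : idx[i]);
    return flat;
}

int main() {
    const std::vector<std::size_t> outDims{2, 3}, in2Dims{1, 3};
    for (std::size_t o = 0; o < 6; ++o)
        std::cout << getFlattenedIndex(in2Dims, getMultiDimIndices(outDims, o)) << ' ';
    std::cout << '\n';  // 0 1 2 0 1 2 : the single row is reused for both output rows
}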
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_POPIMPL_H_ +#define AIDGE_CPU_OPERATOR_POPIMPL_H_ + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/Pop.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include "aidge/backend/cpu/data/GetCPUPtr.h" +#include <memory> +#include <vector> + +namespace Aidge { +// class Pop_Op; + +// compute kernel registry for forward and backward +class PopImplForward_cpu + : public Registrable<PopImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> { +}; +class PopImplBackward_cpu + : public Registrable<PopImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> { +}; + +class PopImpl_cpu : public OperatorImpl { +public: + PopImpl_cpu(const Pop_Op& op) : OperatorImpl(op, "cpu") {} + + static std::unique_ptr<PopImpl_cpu> create(const Pop_Op& op) { + return std::make_unique<PopImpl_cpu>(op); + } + + Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final; + void forward() override; +}; + +namespace { +static Registrar<Pop_Op> registrarPopImpl_cpu("cpu", Aidge::PopImpl_cpu::create); +} +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_POPIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/PowImpl.hpp b/include/aidge/backend/cpu/operator/PowImpl.hpp index d3cafa7e7380e31dd331950e381e08210c3f3a4c..514e63af5ae5d1d1d00f7f328f5367df2bfa163d 100644 --- a/include/aidge/backend/cpu/operator/PowImpl.hpp +++ b/include/aidge/backend/cpu/operator/PowImpl.hpp @@ -25,22 +25,23 @@ namespace Aidge { // compute kernel registry for forward and backward class PowImplForward_cpu - : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> { + : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> { }; class PowImplBackward_cpu - : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> { + : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> { }; class PowImpl_cpu : public OperatorImpl { public: - PowImpl_cpu(const Pow_Op& op) : OperatorImpl(op) {} + PowImpl_cpu(const Pow_Op& op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<PowImpl_cpu> create(const Pow_Op& op) { return std::make_unique<PowImpl_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; void forward() override; + void backward() override; }; namespace { diff --git a/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp index c9c5db7e9aef07d24ba8f80c94b8f2494865e004..1146cfa77464f8bd1c33a0ec0113415dcf599b53 100644 --- a/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp @@ -15,39 +15,36 @@ #include "aidge/utils/Registrar.hpp" #include <cmath> +#include 
"aidge/backend/cpu/data/Broadcasting.hpp" #include "aidge/backend/cpu/operator/PowImpl.hpp" namespace Aidge { template <class I1, class I2, class O> -void PowImpl_cpu_forward_kernel(std::size_t input1Length, - std::size_t input2Length, - const void* input1_, - const void* input2_, - void* output_) { +void PowImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims, + const std::vector<std::size_t>& input2Dims, + const std::vector<std::size_t>& outputDims, + const void* input1_, + const void* input2_, + void* output_) { const I1* input_1 = static_cast<const I1*>(input1_); const I2* input_2 = static_cast<const I2*>(input2_); O* output = static_cast<O*>(output_); - if (input2Length == input1Length) - { - for (std::size_t i = 0; i < input1Length; ++i) { - output[i] = std::pow(input_1[i], input_2[i]); - } - } - else if (input2Length == 1) - { - for (std::size_t i = 0; i < input1Length; ++i) { - output[i] = std::pow(input_1[i], input_2[0]); - } - } - else // input_2 is 1d and of size the number of channels of input_1 - { - for (std::size_t i = 0; i < input1Length; ++i) { - std::size_t channelIdx = i % input2Length; - output[i] = std::pow(input_1[i], input_2[channelIdx]); - } + size_t totalElements = 1; + for (size_t dimSize : outputDims) { + totalElements *= dimSize; } + + for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex) + { + std::vector<size_t> indexes = getMultiDimIndices(outputDims, oIndex); + + std::size_t idx1 = getFlattenedIndex(input1Dims, indexes); + std::size_t idx2 = getFlattenedIndex(input2Dims, indexes); + + output[oIndex] = std::pow(input_1[idx1], input_2[idx2]); + } } namespace { diff --git a/include/aidge/backend/cpu/operator/ProducerImpl.hpp b/include/aidge/backend/cpu/operator/ProducerImpl.hpp deleted file mode 100644 index c1d27f7efc4457fd3b02b6cde006401e2ca71661..0000000000000000000000000000000000000000 --- a/include/aidge/backend/cpu/operator/ProducerImpl.hpp +++ /dev/null @@ -1,41 +0,0 @@ -/******************************************************************************** - * Copyright (c) 2023 CEA-List - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License 2.0 which is available at - * http://www.eclipse.org/legal/epl-2.0. 
- * - * SPDX-License-Identifier: EPL-2.0 - * - ********************************************************************************/ - -#ifndef AIDGE_CPU_OPERATOR_PRODUCERIMPL_H_ -#define AIDGE_CPU_OPERATOR_PRODUCERIMPL_H_ - -#include <memory> - -#include "aidge/backend/OperatorImpl.hpp" -#include "aidge/operator/Producer.hpp" -#include "aidge/utils/Registrar.hpp" -#include "aidge/utils/Types.h" -#include "aidge/backend/cpu/data/GetCPUPtr.h" - -namespace Aidge { -class ProducerImpl_cpu : public OperatorImpl { -public: - ProducerImpl_cpu(const Producer_Op &op) : OperatorImpl(op) {} - - static std::unique_ptr<ProducerImpl_cpu> create(const Producer_Op &op) { - return std::make_unique<ProducerImpl_cpu>(op); - } - - NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final; - void forward() override; -}; - -namespace { -static Registrar<Producer_Op> registrarProducerImpl_cpu("cpu", Aidge::ProducerImpl_cpu::create); -} // namespace -} // namespace Aidge - -#endif /* AIDGE_CPU_OPERATOR_PRODUCERIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/ReLUImpl.hpp b/include/aidge/backend/cpu/operator/ReLUImpl.hpp index 3338d0c40c057995fe37b1652966241bf4a96b59..cef82482813757312c638aebac9f2afd738493db 100644 --- a/include/aidge/backend/cpu/operator/ReLUImpl.hpp +++ b/include/aidge/backend/cpu/operator/ReLUImpl.hpp @@ -12,13 +12,15 @@ #ifndef AIDGE_CPU_OPERATOR_RELUIMPL_H_ #define AIDGE_CPU_OPERATOR_RELUIMPL_H_ +#include <cstddef> // std::size_t +#include <memory> +#include <tuple> // std::tuple +#include <vector> + #include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/ReLU.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" -#include "aidge/backend/cpu/data/GetCPUPtr.h" -#include <memory> -#include <vector> namespace Aidge { // class ReLU_Op; @@ -33,14 +35,17 @@ class ReLUImplBackward_cpu class ReLUImpl_cpu : public OperatorImpl { public: - ReLUImpl_cpu(const ReLU_Op& op) : OperatorImpl(op) {} + ReLUImpl_cpu(const ReLU_Op& op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<ReLUImpl_cpu> create(const ReLU_Op& op) { return std::make_unique<ReLUImpl_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; - void forward() override; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + + void forward() override final; + + void backward() override final; }; namespace { diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..b68ea076cb94eb9550b4a7af89ef58162ee15aea --- /dev/null +++ b/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp @@ -0,0 +1,45 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_ + +#include <cstddef> // std::size_t + +#include "aidge/utils/Registrar.hpp" + +#include "aidge/backend/cpu/operator/ReLUImpl.hpp" + +namespace Aidge { +template <class I, class O> +void ReLUImpl_cpu_backward_kernel(const std::size_t inputLenght, + const void* input_, + void* output_) { + + const I* input = static_cast<const I*>(input_); + O* output = static_cast<O*>(output_); + + for (std::size_t i = 0; i < inputLenght; ++i) { + output[i] = (input[i] > I(0)) ? static_cast<O>(input[i]) : O(0); + } +} + +namespace { +static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::ReLUImpl_cpu_backward_kernel<float, float>); +static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::ReLUImpl_cpu_backward_kernel<int, int>); +static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::ReLUImpl_cpu_backward_kernel<double, double>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_ */ diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp index 955099a6fe76352e6ea692b99a2a2d1561a30a6d..aa533786d3ce5b6f5cd501b6ba74b1be2823d407 100644 --- a/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp @@ -25,6 +25,7 @@ void ReLUImpl_cpu_forward_kernel(std::size_t inputLenght, const I* input = static_cast<const I*>(input_); O* output = static_cast<O*>(output_); +//#pragma omp parallel for if (inputLenght > 1024) for (std::size_t i = 0; i < inputLenght; ++i) { output[i] = input[i] > 0 ? input[i] : 0; } diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..7355a2bd46f45ab5019a31832001ae3335c1d8e8 --- /dev/null +++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp @@ -0,0 +1,124 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
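// The commented-out pragma in the ReLU forward kernel above hints at OpenMP's
// conditional parallelism: the if-clause only spawns a thread team past a size
// threshold, because thread startup costs more than a small elementwise loop.
// A minimal sketch, assuming compilation with -fopenmp (without it the pragma
// is silently ignored and the loop stays serial):
#include <cstddef>
#include <vector>

void relu(const float* in, float* out, std::size_t n) {
    #pragma omp parallel for if (n > 1024)
    for (std::ptrdiff_t i = 0; i < static_cast<std::ptrdiff_t>(n); ++i)
        out[i] = in[i] > 0.f ? in[i] : 0.f;
}

int main() {
    std::vector<float> in(4096, -1.f), out(4096);
    in[7] = 3.5f;
    relu(in.data(), out.data(), in.size());
    return out[7] == 3.5f ? 0 : 1;  // 0 on success
}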
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_H_ +#define AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_H_ + +#include <array> +#include <memory> +#include <tuple> +#include <vector> + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/ReduceMean.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +namespace Aidge { +// class ReduceMean_Op; + +// Every DIM +class ReduceMeanImplForward_cpu + : public Registrable<ReduceMeanImplForward_cpu, + std::tuple<DataType, DataType>, + void(const ReduceMean_Op::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; +class ReduceMeanImpl1DBackward_cpu + : public Registrable<ReduceMeanImpl1DBackward_cpu, + std::tuple<DataType, DataType>, + void(const ReduceMean_Op::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; + +class ReduceMeanImpl_cpu : public OperatorImpl { + public: + ReduceMeanImpl_cpu(const ReduceMean_Op& op) : OperatorImpl(op, "cpu") {} + + static std::unique_ptr<ReduceMeanImpl_cpu> create(const ReduceMean_Op &op) { + return std::make_unique<ReduceMeanImpl_cpu>(op); + } + + public: + void forward() override; +}; + +// // compute kernel registry for forward and backward +// // DIM 1 +// class ReduceMeanImpl1DForward_cpu +// : public Registrable<ReduceMeanImpl1DForward_cpu, +// std::tuple<DataType, DataType>, +// void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; +// class ReduceMeanImpl1DBackward_cpu +// : public Registrable<ReduceMeanImpl1DBackward_cpu, +// std::tuple<DataType, DataType>, +// void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; + +// // DIM 2 +// class ReduceMeanImpl2DForward_cpu +// : public Registrable<ReduceMeanImpl2DForward_cpu, +// std::tuple<DataType, DataType>, +// void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; +// class ReduceMeanImpl2DBackward_cpu +// : public Registrable<ReduceMeanImpl2DBackward_cpu, +// std::tuple<DataType, DataType>, +// void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; +// // DIM 3 +// class ReduceMeanImpl3DForward_cpu +// : public Registrable<ReduceMeanImpl3DForward_cpu, +// std::tuple<DataType, DataType>, +// void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; +// class ReduceMeanImpl3DBackward_cpu +// : public Registrable<ReduceMeanImpl3DBackward_cpu, +// std::tuple<DataType, DataType>, +// void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {}; + +// class ReduceMeanImpl1D_cpu : public OperatorImpl { +// public: +// ReduceMeanImpl1D_cpu(const ReduceMean_Op<1>& op) : OperatorImpl(op, "cpu") {} + +// static std::unique_ptr<ReduceMeanImpl1D_cpu> create(const ReduceMean_Op<1> &op) { +// return std::make_unique<ReduceMeanImpl1D_cpu>(op); +// } + +// public: +// void forward() override; +// }; + +// class ReduceMeanImpl2D_cpu : public OperatorImpl { +// public: +// ReduceMeanImpl2D_cpu(const ReduceMean_Op<2>& op) : OperatorImpl(op, "cpu") {} + +// static std::unique_ptr<ReduceMeanImpl2D_cpu> create(const ReduceMean_Op<2> &op) { +// return std::make_unique<ReduceMeanImpl2D_cpu>(op); +// } + +// public: +// void forward() override; +// }; + +// class ReduceMeanImpl3D_cpu : public OperatorImpl { +// public: +// ReduceMeanImpl3D_cpu(const ReduceMean_Op<3>& 
op) : OperatorImpl(op, "cpu") {} + +// static std::unique_ptr<ReduceMeanImpl3D_cpu> create(const ReduceMean_Op<3> &op) { +// return std::make_unique<ReduceMeanImpl3D_cpu>(op); +// } + +// public: +// void forward() override; +// }; +namespace { +// add cpu backend to ReduceMean_Op<2> implementation registry +static Registrar<ReduceMean_Op> registrarReduceMeanImpl_cpu("cpu", Aidge::ReduceMeanImpl_cpu::create); +// static Registrar<ReduceMean_Op<1>> registrarReduceMeanImpl1D_cpu("cpu", Aidge::ReduceMeanImpl1D_cpu::create); +// static Registrar<ReduceMean_Op<2>> registrarReduceMeanImpl2D_cpu("cpu", Aidge::ReduceMeanImpl2D_cpu::create); +// static Registrar<ReduceMean_Op<3>> registrarReduceMeanImpl3D_cpu("cpu", Aidge::ReduceMeanImpl3D_cpu::create); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..6533f7b19eac07d429cd8c5ed05ea082457b9e7b --- /dev/null +++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp @@ -0,0 +1,141 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_ + +#include <algorithm> // std::for_each +#include <cstddef> // std::size_t +#include <cstdint> // std::int32_t +#include <functional> //std::multiplies +#include <numeric> //std::accumulate +#include <vector> + +#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp" +#include "aidge/data/Data.hpp" +#include "aidge/operator/ReduceMean.hpp" +#include "aidge/utils/Registrar.hpp" + +namespace Aidge { +template <class I, class O> +void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op::Attrs& attrs, + const std::vector<DimSize_t>& inputDims, + const void* input_, + void* output_) { + + const I* input = static_cast<const I*>(input_); + O* output = static_cast<O*>(output_); + + const std::vector<std::int32_t>& axes = std::get<0>(attrs); + const std::size_t nb_dims = inputDims.size(); + const std::size_t totalElements = std::accumulate(inputDims.cbegin(), inputDims.cend(), 1, std::multiplies<std::size_t>()); + + if (axes.size() == 1) { + const std::size_t stride_pre = std::accumulate(inputDims.cbegin(), inputDims.cbegin() + axes[0], 1, std::multiplies<std::size_t>()); + const std::size_t stride_post = std::accumulate(inputDims.crbegin(), inputDims.crbegin() + nb_dims -1 - axes[0], 1, std::multiplies<std::size_t>()); + + const std::size_t dim_i = inputDims[axes[0]]; + for (std::size_t pre = 0; pre < stride_pre; ++pre) { + for (std::size_t post = 0; post < stride_post; ++post) { + const std::size_t idx_i = pre * dim_i * stride_post + post; + const std::size_t idx_o = pre * stride_post + post; + O mean = 0; + for (std::size_t i = 0; i < dim_i; ++i) { + // Single pass numerically stable mean, using the fmaf + mean = fmaf(input[idx_i + i*stride_post] - mean, 1.0f/(i+1), mean); + } + output[idx_o] = mean; + } + } + } else { + std::size_t 
outputElements = totalElements; + + auto stride_post = std::unique_ptr<std::size_t[]>(new std::size_t[nb_dims]); + stride_post[nb_dims - 1] = 1; + for (std::size_t i = nb_dims-2; i != static_cast<std::size_t>(-1); --i) { + stride_post[i] = stride_post[i+1]*inputDims[i+1]; + } + auto stride_pre = std::unique_ptr<std::size_t[]>(new std::size_t[nb_dims]); + stride_pre[0] = 1; + for (std::size_t i = 1; i < nb_dims; ++i) { + stride_pre[i] = stride_pre[i-1]*inputDims[i-1]; + } + + const I* inputAccumulation = input; + I* outputAccumulation = nullptr; + + for (const auto& axisInt : axes) { + const std::size_t a = static_cast<std::size_t>(axisInt); + outputElements /= inputDims[a]; + outputAccumulation = new I[outputElements]; + const std::size_t dim_i = inputDims[a]; + for (std::size_t pre = 0; pre < stride_pre[a]; ++pre) { + for (std::size_t post = 0; post < stride_post[a]; ++post) { + const std::size_t idx_i = pre * dim_i * stride_post[a] + post; + const std::size_t idx_o = pre * stride_post[a] + post; + I mean = 0; + for (std::size_t i = 0; i < dim_i; ++i) { + // Single pass numerically stable mean, using the fmaf + mean = fmaf(inputAccumulation[idx_i + i*stride_post[a]] - mean, 1.0f/(i+1), mean); + } + outputAccumulation[idx_o] = mean; + } + } + std::for_each(stride_pre.get()+a+1, stride_pre.get()+nb_dims, [dim_i] (std::size_t& val) { val /= dim_i; }); + if (inputAccumulation != input) { + delete[] inputAccumulation; + } + inputAccumulation = outputAccumulation; + } + + // Copy elements from inputAccumulation to output while dividing by divisor + std::copy(inputAccumulation, inputAccumulation + outputElements, output); + if (outputAccumulation) { + delete[] outputAccumulation; + } + } +} + +namespace { +static Registrar<ReduceMeanImplForward_cpu> registrarReduceMeanImplForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float>); +static Registrar<ReduceMeanImplForward_cpu> registrarReduceMeanImplForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int>); +static Registrar<ReduceMeanImplForward_cpu> registrarReduceMeanImplForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double>); + +// // DIM = 1 +// static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float32( +// {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,1>); +// static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Int32( +// {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,1>); +// static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float64( +// {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,1>); + +// // DIM = 2 +// static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float32( +// {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,2>); +// static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Int32( +// {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,2>); +// static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float64( +// {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,2>); + +// // DIM = 3 +// static 
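// The multi-axis branch of the kernel above reduces one axis per pass, so by
// the time of the final std::copy every element already holds a fully averaged
// value; no further division is needed there. A sketch of the same composition,
// reducing a 2x2x2 tensor over axes {0, 2} as two single-axis passes (meanAxis
// is a hypothetical helper mirroring the kernel's pre/post stride walk):
#include <cstddef>
#include <iostream>
#include <vector>

std::vector<float> meanAxis(const std::vector<float>& in,
                            std::vector<std::size_t>& dims, std::size_t axis) {
    std::size_t pre = 1, post = 1;
    for (std::size_t i = 0; i < axis; ++i) pre *= dims[i];
    for (std::size_t i = axis + 1; i < dims.size(); ++i) post *= dims[i];
    const std::size_t d = dims[axis];
    std::vector<float> out(pre * post, 0.f);
    for (std::size_t p = 0; p < pre; ++p)
        for (std::size_t q = 0; q < post; ++q) {
            for (std::size_t i = 0; i < d; ++i)
                out[p*post + q] += in[p*d*post + i*post + q];
            out[p*post + q] /= static_cast<float>(d);
        }
    dims.erase(dims.begin() + axis);  // remaining axes shift down, as the
    return out;                       // kernel models by rescaling its strides
}

int main() {
    std::vector<float> t{1, 2, 3, 4, 5, 6, 7, 8};  // dims {2, 2, 2}
    std::vector<std::size_t> dims{2, 2, 2};
    std::vector<float> r = meanAxis(t, dims, 0);   // -> {3, 4, 5, 6}, dims {2, 2}
    r = meanAxis(r, dims, 1);                      // original axis 2 is now axis 1
    std::cout << r[0] << ' ' << r[1] << '\n';      // 3.5 5.5
}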
Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float32( +// {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,3>); +// static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Int32( +// {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,3>); +// static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float64( +// {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,3>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_ */ diff --git a/include/aidge/backend/cpu/operator/ReshapeImpl.hpp b/include/aidge/backend/cpu/operator/ReshapeImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..1dc5fa2a09533494568ffea78153887d01368a7d --- /dev/null +++ b/include/aidge/backend/cpu/operator/ReshapeImpl.hpp @@ -0,0 +1,50 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_ +#define AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_ + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/Reshape.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include <memory> +#include <vector> + +namespace Aidge { +// class Reshape_Op; + +// compute kernel registry for forward and backward +class ReshapeImplForward_cpu + : public Registrable<ReshapeImplForward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const void*, void*)> { +}; +class ReshapeImplBackward_cpu + : public Registrable<ReshapeImplBackward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const void*, void*)> { +}; + +class ReshapeImpl_cpu : public OperatorImpl { +public: + ReshapeImpl_cpu(const Reshape_Op& op) : OperatorImpl(op, "cpu") {} + + static std::unique_ptr<ReshapeImpl_cpu> create(const Reshape_Op& op) { + return std::make_unique<ReshapeImpl_cpu>(op); + } + + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; + +namespace { +static Registrar<Reshape_Op> registrarReshapeImpl_cpu("cpu", Aidge::ReshapeImpl_cpu::create); +} +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_RESHAPEIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..cefdab57ee41ffab0b98a87698d95f5d89a0206d --- /dev/null +++ b/include/aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp @@ -0,0 +1,45 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_ + +#include "aidge/utils/Registrar.hpp" +#include <cmath> + +#include "aidge/backend/cpu/operator/ReshapeImpl.hpp" + +namespace Aidge { +template <class I, class O> +void ReshapeImpl_cpu_forward_kernel(std::size_t inputLength, + const void* input_, + void* output_) { + + const I* input = static_cast<const I*>(input_); + O* output = static_cast<O*>(output_); + + std::copy_n(input, inputLength, output); +} + +namespace { +static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, + Aidge::ReshapeImpl_cpu_forward_kernel<float, float>); +static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, + Aidge::ReshapeImpl_cpu_forward_kernel<int, int>); +static Registrar<ReshapeImplForward_cpu> registrarReshapeImplForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, + Aidge::ReshapeImpl_cpu_forward_kernel<double, double>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_RESHAPEIMPL_FORWARD_KERNEL_H_ */ diff --git a/include/aidge/backend/cpu/operator/ScalingImpl.hpp b/include/aidge/backend/cpu/operator/ScalingImpl.hpp index bbcb4553d7aa4b17d733e0f455373bebb9c3581c..66bb42f7fb909ee9b6c91a6321ee3fa32c977626 100644 --- a/include/aidge/backend/cpu/operator/ScalingImpl.hpp +++ b/include/aidge/backend/cpu/operator/ScalingImpl.hpp @@ -34,13 +34,13 @@ class ScalingImplBackward_cpu class ScalingImpl_cpu : public OperatorImpl { public: - ScalingImpl_cpu(const Scaling_Op& op) : OperatorImpl(op) {} + ScalingImpl_cpu(const Scaling_Op& op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<ScalingImpl_cpu> create(const Scaling_Op& op) { return std::make_unique<ScalingImpl_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; void forward() override; }; diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..2e43023d678c8a4258c80fb91d82d2858fcdf188 --- /dev/null +++ b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp @@ -0,0 +1,51 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
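// Reshape on contiguous row-major storage never reorders elements; only the
// dims metadata changes, which is why the forward kernel above reduces to a
// bare std::copy_n. A two-line illustration:
#include <algorithm>
#include <iostream>

int main() {
    const int in[6] = {1, 2, 3, 4, 5, 6};  // viewed as 2x3: (1 2 3) (4 5 6)
    int out[6];
    std::copy_n(in, 6, out);               // "reshaped" to 3x2: (1 2) (3 4) (5 6)
    std::cout << out[2] << ' ' << out[3] << '\n';  // 3 4 : second row of the 3x2 view
}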
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_SIGMOIDIMPL_H_ +#define AIDGE_CPU_OPERATOR_SIGMOIDIMPL_H_ + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/Sigmoid.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include "aidge/backend/cpu/data/GetCPUPtr.h" +#include <memory> +#include <vector> + +namespace Aidge { +// class Sigmoid_Op; + +// compute kernel registry for forward and backward +class SigmoidImplForward_cpu + : public Registrable<SigmoidImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> { +}; +class SigmoidImplBackward_cpu + : public Registrable<SigmoidImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> { +}; + +class SigmoidImpl_cpu : public OperatorImpl { +public: + SigmoidImpl_cpu(const Sigmoid_Op& op) : OperatorImpl(op, "cpu") {} + + static std::unique_ptr<SigmoidImpl_cpu> create(const Sigmoid_Op& op) { + return std::make_unique<SigmoidImpl_cpu>(op); + } + + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; + +namespace { +static Registrar<Sigmoid_Op> registrarSigmoidImpl_cpu("cpu", Aidge::SigmoidImpl_cpu::create); +} +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_SIGMOIDIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a53650942540e6368855ffe19e2f7f651ab5b6bc --- /dev/null +++ b/include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp @@ -0,0 +1,42 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
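// The forward kernel defined just below computes the logistic function
// 1 / (1 + e^-x). Unlike most kernels in this backend it is only registered
// for Float32 and Float64: an integer sigmoid would collapse every value to
// 0 or 1. A quick numeric check of the formula:
#include <cmath>
#include <iostream>

int main() {
    for (const double x : {-2.0, 0.0, 2.0})
        std::cout << 1.0 / (1.0 + std::exp(-x)) << ' ';
    std::cout << '\n';  // 0.119203 0.5 0.880797 : symmetric about (0, 0.5)
}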
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_SIGMOIDIMPL_FORWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_SIGMOIDIMPL_FORWARD_KERNEL_H_ + +#include <cmath> // std::exp + +#include "aidge/utils/Registrar.hpp" + +#include "aidge/backend/cpu/operator/SigmoidImpl.hpp" + +namespace Aidge { +template <class I, class O> +void SigmoidImpl_cpu_forward_kernel(std::size_t inputLength, + const void* input_, + void* output_) { + + const I* input = static_cast<const I*>(input_); + O* output = static_cast<O*>(output_); + +//#pragma omp parallel for if (inputLength > 1024) + for (std::size_t i = 0; i < inputLength; ++i) { + output[i] = static_cast<O>(1.0) / (static_cast<O>(1.0) + std::exp(-input[i])); + } +} + +namespace { +static Registrar<SigmoidImplForward_cpu> registrarSigmoidImplForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::SigmoidImpl_cpu_forward_kernel<float, float>); +static Registrar<SigmoidImplForward_cpu> registrarSigmoidImplForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::SigmoidImpl_cpu_forward_kernel<double, double>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_SIGMOIDIMPL_FORWARD_KERNEL_H_ */
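The registrar pair above is the lookup mechanism used by every CPU kernel in this patch. A minimal sketch of that lookup path, outside any graph (assumes only the headers above are available; the buffers are illustrative):

    auto fn = Aidge::Registrar<Aidge::SigmoidImplForward_cpu>::create(
        {Aidge::DataType::Float32, Aidge::DataType::Float32});
    float in[3] = {-1.0f, 0.0f, 1.0f};
    float out[3] = {};
    fn(3, in, out); // out[1] == 0.5f, since sigmoid(0) = 0.5

diff --git a/include/aidge/backend/cpu/operator/SliceImpl.hpp b/include/aidge/backend/cpu/operator/SliceImpl.hpp index 1cba5906064c51a4f0da2f1f3682b0828a080d43..1583435c12a243ef5861299434a7fc1409307538 100644 --- a/include/aidge/backend/cpu/operator/SliceImpl.hpp +++ b/include/aidge/backend/cpu/operator/SliceImpl.hpp @@ -40,20 +40,12 @@ class SliceImplBackward_cpu class SliceImpl_cpu : public OperatorImpl { public: - SliceImpl_cpu(const Slice_Op& op) : OperatorImpl(op) {} + SliceImpl_cpu(const Slice_Op& op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<SliceImpl_cpu> create(const Slice_Op& op) { return std::make_unique<SliceImpl_cpu>(op); } - NbElts_t getNbRequiredData(const IOIndex_t /*inputIdx*/) const override final; - NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final; - NbElts_t getRequiredMemory(const IOIndex_t outputIdx, - const std::vector<DimSize_t>& inputsSize) const override final; - NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final; - NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final; - void updateConsummerProducer() override final; - void forward() override; void backward() override; }; diff --git a/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp index 9f08fab758a1d8c717ccb5f0a0357f94fd86e5e4..d92e9008aff2a4e3c9e392fcc51871001020ce5a 100644 --- a/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp @@ -35,7 +35,7 @@ void SliceImpl_cpu_forward_kernel(const typename Slice_Op::Attrs& attrs, const std::int64_t axis_ = std::get<2>(attrs)[i]; const std::int64_t start_ = std::get<0>(attrs)[i]; const std::int64_t end_ = std::get<1>(attrs)[i]; - const std::size_t axis = axis_ >= 0 ? axis_ : static_cast<std::size_t>(axis_ + static_cast<std::int32_t>(inputDims.size())); + const std::size_t axis = axis_ >= 0 ? axis_ : static_cast<std::size_t>(axis_) + inputDims.size(); const std::size_t start = start_ >= 0 ? start_ : start_ + inputDims[axis]; const std::size_t end = end_ >= 0 ?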
end_ : end_ + inputDims[axis]; std::size_t stride = 1; diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp index 15fb2b5d30e32febca7c8028c8b5212e5b96775f..2b2fab485656efdc37ee134cb4ae574b6b403405 100644 --- a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp +++ b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp @@ -25,21 +25,21 @@ namespace Aidge { // compute kernel registry for forward and backward class SoftmaxImplForward_cpu - : public Registrable<SoftmaxImplForward_cpu, std::tuple<DataType, DataType>, void(const DimSize_t, const DimSize_t, const DimSize_t, const void*, void*)> { + : public Registrable<SoftmaxImplForward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)> { }; class SoftmaxImplBackward_cpu - : public Registrable<SoftmaxImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> { + : public Registrable<SoftmaxImplBackward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)> { }; class SoftmaxImpl_cpu : public OperatorImpl { public: - SoftmaxImpl_cpu(const Softmax_Op& op) : OperatorImpl(op) {} + SoftmaxImpl_cpu(const Softmax_Op& op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<SoftmaxImpl_cpu> create(const Softmax_Op& op) { return std::make_unique<SoftmaxImpl_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; void forward() override; }; diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp index a5a168a08cf85e952cffd556e0cc34d29d35fffa..cc384c38e34d01887fc328d11de383aeef39fb8e 100644 --- a/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp @@ -23,30 +23,33 @@ namespace Aidge { template <class I, class O> -void SoftmaxImpl_cpu_forward_kernel(const DimSize_t batchSize, - const DimSize_t channelSize, - const DimSize_t featureSize, - const void* input_, - void* output_) { - +void SoftmaxImpl_cpu_forward_kernel(std::size_t axisIdx, const std::vector<DimSize_t>& inputDims, const void* input_, void* output_) +{ const I* input = static_cast<const I*>(input_); O* output = static_cast<O*>(output_); - for (std::size_t batch = 0; batch < batchSize; ++batch) { - for (std::size_t feature = 0; feature < featureSize; ++feature) { - std::size_t ioIndex = batch*channelSize*featureSize + feature; + std::size_t postAxisElems = 1; + for (std::size_t i = axisIdx + 1; i < inputDims.size(); ++i) { + postAxisElems *= inputDims[i]; + } + std::size_t preAxisElems = 1; + for (std::size_t i = 0; i < axisIdx; ++i) { + preAxisElems *= inputDims[i]; + } - I sum(0.0); - for (std::size_t ch = 0; ch < channelSize; ++ch) { - output[ioIndex] = std::exp(input[ioIndex]); - sum += output[ioIndex]; - ioIndex+=featureSize; + for (std::size_t i = 0; i < preAxisElems; ++i) { + for (std::size_t j = 0; j < postAxisElems; ++j) { + // Calculate sum of exponentials within the axis + I sumExp = 0; + for (std::size_t k = 0; k < inputDims[axisIdx]; ++k) { + std::size_t inIdx = i * inputDims[axisIdx] * postAxisElems + k * postAxisElems + j; + sumExp += std::exp(input[inIdx]); } - ioIndex = batch*channelSize*featureSize + feature; - for (std::size_t ch = 0; ch < channelSize; ++ch) { - output[ioIndex] /= 
sum; - ioIndex += featureSize; + // Calculate softmax for the current slice along the axis + for (std::size_t k = 0; k < inputDims[axisIdx]; ++k) { + std::size_t inIdx = i * inputDims[axisIdx] * postAxisElems + k * postAxisElems + j; + output[inIdx] = std::exp(input[inIdx]) / sumExp; } } } diff --git a/include/aidge/backend/cpu/operator/SqrtImpl.hpp b/include/aidge/backend/cpu/operator/SqrtImpl.hpp index b3723f27b077b9d5ea7e69fd33bd012d02654ffe..1691d951678509274736d558360c8110958820a9 100644 --- a/include/aidge/backend/cpu/operator/SqrtImpl.hpp +++ b/include/aidge/backend/cpu/operator/SqrtImpl.hpp @@ -12,16 +12,17 @@ #ifndef AIDGE_CPU_OPERATOR_SQRTIMPL_H_ #define AIDGE_CPU_OPERATOR_SQRTIMPL_H_ +#include <cstddef> // std::size_t +#include <memory> +#include <tuple> +#include <vector> + #include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/Sqrt.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" -#include "aidge/backend/cpu/data/GetCPUPtr.h" -#include <memory> -#include <vector> namespace Aidge { -// class Sqrt_Op; // compute kernel registry for forward and backward class SqrtImplForward_cpu @@ -33,14 +34,17 @@ class SqrtImplBackward_cpu class SqrtImpl_cpu : public OperatorImpl { public: - SqrtImpl_cpu(const Sqrt_Op& op) : OperatorImpl(op) {} + SqrtImpl_cpu(const Sqrt_Op& op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<SqrtImpl_cpu> create(const Sqrt_Op& op) { return std::make_unique<SqrtImpl_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; - void forward() override; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + + void forward() override final; + + void backward() override final; }; namespace { diff --git a/include/aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9cf5118a5ac81520d7a180b6aba22417ca512890 --- /dev/null +++ b/include/aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp @@ -0,0 +1,46 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_SQRTIMPL_BACKWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_SQRTIMPL_BACKWARD_KERNEL_H_ + +#include <cmath> // std::sqrt +#include <cstddef> // std::size_t + +#include "aidge/utils/Registrar.hpp" + +#include "aidge/backend/cpu/operator/SqrtImpl.hpp" + +namespace Aidge { +template <class I, class O> +void SqrtImpl_cpu_backward_kernel(const std::size_t inputLength, + const void* input_, + void* output_) { + + const I* input = static_cast<const I*>(input_); + O* output = static_cast<O*>(output_); + + for (std::size_t i = 0; i < inputLength; ++i) { + output[i] = static_cast<O>(0.5/(std::sqrt(static_cast<float>(input[i])))); + } +} + +namespace { +static Registrar<SqrtImplBackward_cpu> registrarSqrtImplBackward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::SqrtImpl_cpu_backward_kernel<float, float>); +static Registrar<SqrtImplBackward_cpu> registrarSqrtImplBackward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::SqrtImpl_cpu_backward_kernel<int, int>); +static Registrar<SqrtImplBackward_cpu> registrarSqrtImplBackward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::SqrtImpl_cpu_backward_kernel<double, double>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_SQRTIMPL_BACKWARD_KERNEL_H_ */ diff --git a/include/aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp index a180fc2cc206ef27b52d506a981f9f50f7bf8a3e..886b978c2345ce555d229d684ba83f952be9e00e 100644 --- a/include/aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp @@ -12,14 +12,16 @@ #ifndef AIDGE_CPU_OPERATOR_SQRTIMPL_FORWARD_KERNEL_H_ #define AIDGE_CPU_OPERATOR_SQRTIMPL_FORWARD_KERNEL_H_ +#include <cmath> // std::sqrt +#include <cstddef> // std::size_t + #include "aidge/utils/Registrar.hpp" -#include <cmath> #include "aidge/backend/cpu/operator/SqrtImpl.hpp" namespace Aidge { template <class I, class O> -void SqrtImpl_cpu_forward_kernel(std::size_t inputLenght, +void SqrtImpl_cpu_forward_kernel(const std::size_t inputLenght, const void* input_, void* output_) { @@ -27,7 +29,7 @@ void SqrtImpl_cpu_forward_kernel(std::size_t inputLenght, O* output = static_cast<O*>(output_); for (std::size_t i = 0; i < inputLenght; ++i) { - output[i] = std::sqrt(input[i]); + output[i] = static_cast<O>(std::sqrt(static_cast<float>(input[i]))); } } diff --git a/include/aidge/backend/cpu/operator/SubImpl.hpp b/include/aidge/backend/cpu/operator/SubImpl.hpp index 2d4c22f0d7f5e850ce805e0c78fb3e64bfa8f42b..15c028ae6289f39e0b6e6fd74e51e138b1f2675c 100644 --- a/include/aidge/backend/cpu/operator/SubImpl.hpp +++ b/include/aidge/backend/cpu/operator/SubImpl.hpp @@ -25,21 +25,21 @@ namespace Aidge { // compute kernel registry for forward and backward class SubImplForward_cpu - : public Registrable<SubImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> { + : public Registrable<SubImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> { }; class SubImplBackward_cpu - : public Registrable<SubImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const
void*, const void*, void*)> { + : public Registrable<SubImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> { }; class SubImpl_cpu : public OperatorImpl { public: - SubImpl_cpu(const Sub_Op& op) : OperatorImpl(op) {} + SubImpl_cpu(const Sub_Op& op) : OperatorImpl(op, "cpu") {} static std::unique_ptr<SubImpl_cpu> create(const Sub_Op& op) { return std::make_unique<SubImpl_cpu>(op); } - NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; void forward() override; }; diff --git a/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp index 08f2e24fa38d2739943279666187a55d7076a89b..19b0bd21de129ed303151987323234364ce5f6f2 100644 --- a/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp @@ -14,39 +14,35 @@ #include "aidge/utils/Registrar.hpp" +#include "aidge/backend/cpu/data/Broadcasting.hpp" #include "aidge/backend/cpu/operator/SubImpl.hpp" + namespace Aidge { template <class I1, class I2, class O> -void SubImpl_cpu_forward_kernel(std::size_t input1Length, - std::size_t input2Length, - const void* input1_, - const void* input2_, - void* output_) { +void SubImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims, + const std::vector<std::size_t>& input2Dims, + const std::vector<std::size_t>& outputDims, + const void* input1_, + const void* input2_, + void* output_) { const I1* input_1 = static_cast<const I1*>(input1_); const I2* input_2 = static_cast<const I2*>(input2_); O* output = static_cast<O*>(output_); - if (input2Length == input1Length) - { - for (std::size_t i = 0; i < input1Length; ++i) { - output[i] = input_1[i] - input_2[i]; - } - } - else if (input2Length == 1) - { - for (std::size_t i = 0; i < input1Length; ++i) { - output[i] = input_1[i] - input_2[0]; - } - } - else // input_2 is 1d and of size the number of channels of input_1 - { - for (std::size_t i = 0; i < input1Length; ++i) { - std::size_t channelIdx = i % input2Length; - output[i] = input_1[i] - input_2[channelIdx]; - } + size_t totalElements = 1; + for (size_t dimSize : outputDims) { + totalElements *= dimSize; } + + for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex) + { + std::vector<size_t> indexes = getMultiDimIndices(outputDims, oIndex); + std::size_t idx1 = getFlattenedIndex(input1Dims, indexes); + std::size_t idx2 = getFlattenedIndex(input2Dims, indexes); + output[oIndex] = input_1[idx1] - input_2[idx2]; + } } namespace { diff --git a/include/aidge/backend/cpu/operator/TanhImpl.hpp b/include/aidge/backend/cpu/operator/TanhImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9e44f7bcd2b2392c634421478a096258b3e39795 --- /dev/null +++ b/include/aidge/backend/cpu/operator/TanhImpl.hpp @@ -0,0 +1,51 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_TANHIMPL_H_ +#define AIDGE_CPU_OPERATOR_TANHIMPL_H_ + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/Tanh.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include "aidge/backend/cpu/data/GetCPUPtr.h" +#include <memory> +#include <vector> + +namespace Aidge { +// class Tanh_Op; + +// compute kernel registry for forward and backward +class TanhImplForward_cpu + : public Registrable<TanhImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> { +}; +class TanhImplBackward_cpu + : public Registrable<TanhImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> { +}; + +class TanhImpl_cpu : public OperatorImpl { +public: + TanhImpl_cpu(const Tanh_Op& op) : OperatorImpl(op, "cpu") {} + + static std::unique_ptr<TanhImpl_cpu> create(const Tanh_Op& op) { + return std::make_unique<TanhImpl_cpu>(op); + } + + Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final; + void forward() override; +}; + +namespace { +static Registrar<Tanh_Op> registrarTanhImpl_cpu("cpu", Aidge::TanhImpl_cpu::create); +} +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_TANHIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9e57b6dfcb0da322f5b21944fb10ec7a10cd0ab8 --- /dev/null +++ b/include/aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp @@ -0,0 +1,42 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_TANHIMPL_FORWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_TANHIMPL_FORWARD_KERNEL_H_ + +#include <cmath> // std::tanh + +#include "aidge/utils/Registrar.hpp" + +#include "aidge/backend/cpu/operator/TanhImpl.hpp" + +namespace Aidge { +template <class I, class O> +void TanhImpl_cpu_forward_kernel(std::size_t inputLength, + const void* input_, + void* output_) { + + const I* input = static_cast<const I*>(input_); + O* output = static_cast<O*>(output_); + +//#pragma omp parallel for if (inputLength > 1024) + for (std::size_t i = 0; i < inputLength; ++i) { + output[i] = std::tanh(input[i]); + } +} + +namespace { +static Registrar<TanhImplForward_cpu> registrarTanhImplForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::TanhImpl_cpu_forward_kernel<float, float>); +static Registrar<TanhImplForward_cpu> registrarTanhImplForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::TanhImpl_cpu_forward_kernel<double, double>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_TANHIMPL_FORWARD_KERNEL_H_ */ diff --git a/include/aidge/backend/cpu/operator/TransposeImpl.hpp b/include/aidge/backend/cpu/operator/TransposeImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..8bdcc612ea434e266a97724d45aaeefc8e033bf0 --- /dev/null +++ b/include/aidge/backend/cpu/operator/TransposeImpl.hpp @@ -0,0 +1,118 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0.
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_TransposeIMPL_H_ +#define AIDGE_CPU_OPERATOR_TransposeIMPL_H_ + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/Transpose.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include <memory> +#include <vector> + +namespace Aidge { +// class Transpose_Op; + +// compute kernel registry for forward and backward +class TransposeImpl2DForward_cpu + : public Registrable<TransposeImpl2DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<2>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl3DForward_cpu + : public Registrable<TransposeImpl3DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<3>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl4DForward_cpu + : public Registrable<TransposeImpl4DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<4>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl5DForward_cpu + : public Registrable<TransposeImpl5DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<5>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl6DForward_cpu + : public Registrable<TransposeImpl6DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<6>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl2DBackward_cpu + : public Registrable<TransposeImpl2DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<2>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl3DBackward_cpu + : public Registrable<TransposeImpl3DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<3>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl4DBackward_cpu + : public Registrable<TransposeImpl4DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<4>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl5DBackward_cpu + : public Registrable<TransposeImpl5DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<5>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; +class TransposeImpl6DBackward_cpu + : public Registrable<TransposeImpl6DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<6>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> { +}; + + +class TransposeImpl2D_cpu : public OperatorImpl { +public: + TransposeImpl2D_cpu(const Transpose_Op<2>& op) : OperatorImpl(op, "cpu") {} + + static std::unique_ptr<TransposeImpl2D_cpu> create(const Transpose_Op<2>& op) { + return std::make_unique<TransposeImpl2D_cpu>(op); + } + + void forward() override; +}; +class TransposeImpl3D_cpu : public OperatorImpl { +public: + TransposeImpl3D_cpu(const Transpose_Op<3>& op) : OperatorImpl(op, "cpu") 
{} + + static std::unique_ptr<TransposeImpl3D_cpu> create(const Transpose_Op<3>& op) { + return std::make_unique<TransposeImpl3D_cpu>(op); + } + + void forward() override; +}; +class TransposeImpl4D_cpu : public OperatorImpl { +public: + TransposeImpl4D_cpu(const Transpose_Op<4>& op) : OperatorImpl(op, "cpu") {} + + static std::unique_ptr<TransposeImpl4D_cpu> create(const Transpose_Op<4>& op) { + return std::make_unique<TransposeImpl4D_cpu>(op); + } + + void forward() override; +}; +class TransposeImpl5D_cpu : public OperatorImpl { +public: + TransposeImpl5D_cpu(const Transpose_Op<5>& op) : OperatorImpl(op, "cpu") {} + + static std::unique_ptr<TransposeImpl5D_cpu> create(const Transpose_Op<5>& op) { + return std::make_unique<TransposeImpl5D_cpu>(op); + } + + void forward() override; +}; +class TransposeImpl6D_cpu : public OperatorImpl { +public: + TransposeImpl6D_cpu(const Transpose_Op<6>& op) : OperatorImpl(op, "cpu") {} + + static std::unique_ptr<TransposeImpl6D_cpu> create(const Transpose_Op<6>& op) { + return std::make_unique<TransposeImpl6D_cpu>(op); + } + + void forward() override; +}; + +namespace { +static Registrar<Transpose_Op<2>> registrarTransposeImpl2D_cpu("cpu", Aidge::TransposeImpl2D_cpu::create); +static Registrar<Transpose_Op<3>> registrarTransposeImpl3D_cpu("cpu", Aidge::TransposeImpl3D_cpu::create); +static Registrar<Transpose_Op<4>> registrarTransposeImpl4D_cpu("cpu", Aidge::TransposeImpl4D_cpu::create); +static Registrar<Transpose_Op<5>> registrarTransposeImpl5D_cpu("cpu", Aidge::TransposeImpl5D_cpu::create); +static Registrar<Transpose_Op<6>> registrarTransposeImpl6D_cpu("cpu", Aidge::TransposeImpl6D_cpu::create); +} +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_TransposeIMPL_H_ */ diff --git a/include/aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9fd5e5b58ed8e850c0a902e2de93b65cc75d274a --- /dev/null +++ b/include/aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp @@ -0,0 +1,110 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_OPERATOR_TRANSPOSEIMPL_FORWARD_KERNEL_H_ +#define AIDGE_CPU_OPERATOR_TRANSPOSEIMPL_FORWARD_KERNEL_H_ + +#include "aidge/utils/Registrar.hpp" +#include <cstddef> +#include "aidge/data/Data.hpp" +#include "aidge/utils/Types.h" + +#include "aidge/backend/cpu/operator/TransposeImpl.hpp" + +namespace Aidge { +template <class I, class O, DimSize_t DIM> +void TransposeImpl_cpu_forward_kernel( const typename Transpose_Op<DIM>::Attrs& attrs, const std::vector<DimSize_t>& inputDims, const std::vector<DimSize_t>& outputDims, const void* input_, void* output_) +{ + O* output = static_cast<O*>(output_); + const I* input = static_cast<const I*>(input_); + + // Compute total number of elements in the input array + size_t totalElements = 1; + for (size_t dimSize : inputDims) { + totalElements *= dimSize; + } + + std::vector<std::size_t> outStrides(DIM, 1); + for (size_t i = 0; i < DIM; ++i) { + for (size_t j = i+1; j < DIM; ++j) + { + outStrides[i] *= outputDims[j]; + } + } + + std::vector<size_t> indices(outputDims.size(), 0); + for (size_t i = 0; i < totalElements; ++i) { + size_t idx = 0; + // Permute indices based on OutputDimsOrder attr + std::vector<size_t> permutedIndices(DIM); + for (size_t j = 0; j < DIM; ++j) { + permutedIndices[j] = indices[std::get<0>(attrs)[j]]; + } + + for (int j = DIM -1; j >=0; --j) { + idx += permutedIndices[j] * outStrides[j]; + } + // Copy the value in output + output[idx] = input[i]; + + // Update indices for the next iteration + for (int j = DIM - 1; j >= 0; --j) { + if (indices[j] < inputDims[j] - 1) { + indices[j]++; + break; + } else { + indices[j] = 0; + } + } + } + +}
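To see the permutation logic concretely: with the permutation attribute {1, 0} and inputDims {2, 3}, outputDims is {3, 2} and outStrides is {2, 1}. A short worked trace of the loop above (nothing new is added to the patch):

    // input linear index i = 1 has input multi-index {0, 1}
    // permutedIndices = { indices[perm[0]], indices[perm[1]] } = { indices[1], indices[0] } = {1, 0}
    // idx = 1*outStrides[0] + 0*outStrides[1] = 1*2 + 0*1 = 2
    // so output[2] = input[1], i.e. out(1, 0) = in(0, 1): exactly the transpose.

namespace { +// DIM = 2 +static Registrar<TransposeImpl2DForward_cpu> registrarTransposeImpl2DForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 2>); +static Registrar<TransposeImpl2DForward_cpu> registrarTransposeImpl2DForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 2>); +static Registrar<TransposeImpl2DForward_cpu> registrarTransposeImpl2DForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 2>); +// DIM = 3 +static Registrar<TransposeImpl3DForward_cpu> registrarTransposeImpl3DForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 3>); +static Registrar<TransposeImpl3DForward_cpu> registrarTransposeImpl3DForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 3>); +static Registrar<TransposeImpl3DForward_cpu> registrarTransposeImpl3DForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 3>); +// DIM = 4 +static Registrar<TransposeImpl4DForward_cpu> registrarTransposeImpl4DForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 4>); +static Registrar<TransposeImpl4DForward_cpu> registrarTransposeImpl4DForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 4>); +static Registrar<TransposeImpl4DForward_cpu> registrarTransposeImpl4DForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 4>); +// DIM = 5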
+static Registrar<TransposeImpl5DForward_cpu> registrarTransposeImpl5DForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 5>); +static Registrar<TransposeImpl5DForward_cpu> registrarTransposeImpl5DForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 5>); +static Registrar<TransposeImpl5DForward_cpu> registrarTransposeImpl5DForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 5>); +// DIM = 6 +static Registrar<TransposeImpl6DForward_cpu> registrarTransposeImpl6DForward_cpu_Float32( + {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 6>); +static Registrar<TransposeImpl6DForward_cpu> registrarTransposeImpl6DForward_cpu_Int32( + {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 6>); +static Registrar<TransposeImpl6DForward_cpu> registrarTransposeImpl6DForward_cpu_Float64( + {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 6>); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_OPERATOR_TRANSPOSEIMPL_FORWARD_KERNEL_H_ */ diff --git a/src/data/Broadcasting.cpp b/src/data/Broadcasting.cpp new file mode 100644 index 0000000000000000000000000000000000000000..22977aa772e3f3f4810a59ff1fc024cc21c66bd1 --- /dev/null +++ b/src/data/Broadcasting.cpp @@ -0,0 +1,46 @@ +/******************************************************************************** + * Copyright (c) 2024 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include "aidge/backend/cpu/data/Broadcasting.hpp" + +std::vector<std::size_t> Aidge::getBroadcastedDims(const std::vector<std::size_t>& outputDims, const std::vector<std::size_t>& dimsToBroadcast){ + std::vector<std::size_t> broadcastedDims(outputDims.size(), 1); + for(int j=dimsToBroadcast.size()-1; j>=0; --j) + { + std::size_t idx = outputDims.size() - (dimsToBroadcast.size()-j); + broadcastedDims[idx] = dimsToBroadcast[j]; + } + return broadcastedDims; +} + +std::vector<std::size_t> Aidge::getMultiDimIndices(const std::vector<std::size_t>& dimensions, std::size_t idx){ + std::vector<std::size_t> indices(dimensions.size(), 0); + + for (int i = dimensions.size() - 1; i >= 0; --i) { + indices[i] = idx % dimensions[i]; + idx /= dimensions[i]; + } + + return indices; +} + +std::size_t Aidge::getFlattenedIndex(const std::vector<std::size_t>& dimensions, const std::vector<std::size_t>& indices){ + std::size_t flattenedIdx = 0; + std::size_t stride = 1; + + for (int i = dimensions.size() - 1; i >= 0; --i) { + std::size_t idx = dimensions[i]>1 ? 
indices[i] : 0; + flattenedIdx += idx * stride; + stride *= dimensions[i]; + } + return flattenedIdx; +} + diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp index 3b53eaf3b88fb418746ab5a7a2297a15606974d3..d6d75a608e4da7d8b9ed8a28912ff2eb1751e042 100644 --- a/src/operator/AddImpl.cpp +++ b/src/operator/AddImpl.cpp @@ -9,33 +9,37 @@ * ********************************************************************************/ +#include "aidge/backend/cpu/operator/AddImpl.hpp" + #include <cassert> #include <numeric> // std::accumulate #include <vector> -#include "aidge/utils/Types.h" #include "aidge/backend/cpu/data/GetCPUPtr.h" +#include "aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp" #include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" +#include "aidge/utils/Types.h" +#include "aidge/utils/ErrorHandling.hpp" -#include "aidge/backend/cpu/operator/AddImpl.hpp" -#include "aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp" - -Aidge::NbElts_t Aidge::AddImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { +Aidge::Elts_t Aidge::AddImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { // this implementation can be in-place - return 0; + return Elts_t::DataElts(0); } void Aidge::AddImpl_cpu::forward() { - assert(mOp.getRawInput(0) && "missing input in Add operator"); - DataType datatypeFirstInput = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(); - for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) { - assert(mOp.getRawInput(i) && "missing input in Add operator"); - assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dataType() == datatypeFirstInput); + const auto& opTensor = static_cast<const OperatorTensor&>(mOp); + AIDGE_ASSERT(opTensor.getInput(0)->hasImpl(), "cannot run Add forward because the 0-th input has no implementation."); + assert(opTensor.getInput(0) && "missing input in Add operator"); + DataType datatypeFirstInput = opTensor.getInput(0)->dataType(); + for (IOIndex_t i = 1; i < opTensor.nbInputs(); ++i) { + AIDGE_ASSERT(opTensor.getInput(i)->hasImpl(), "cannot run Add forward because the {}-th input has no implementation.", i); + assert(opTensor.getInput(i) && "missing input in Add operator"); + assert(opTensor.getInput(i)->dataType() == datatypeFirstInput); } // Find the correct kernel type - const auto outputDataType = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType(); + const auto outputDataType = opTensor.getOutput(0)->dataType(); const Registrar<AddImplForward_cpu>::registrar_key registrarKey = { datatypeFirstInput, outputDataType}; @@ -55,15 +59,26 @@ void Aidge::AddImpl_cpu::forward() { // TODO: right now, if needed, memory will be allocated/deallocated at each // call to forward(). We might put the following shared_ptr as members of // this class to avoid that. 
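The Add rewrite below, like the new Sub and Div kernels, leans on the two Broadcasting.cpp helpers just added above. A minimal standalone sketch of how they compose into a broadcast lookup (the shapes are illustrative):

    #include "aidge/backend/cpu/data/Broadcasting.hpp"
    // output dims {2, 3}; second operand already right-aligned to dims {1, 3}
    const std::vector<std::size_t> outDims{2, 3}, inDims{1, 3};
    for (std::size_t o = 0; o < 6; ++o) {
        const auto multi = Aidge::getMultiDimIndices(outDims, o);   // e.g. o = 4 -> {1, 1}
        const auto flat  = Aidge::getFlattenedIndex(inDims, multi); // size-1 dims pinned to 0 -> 1
        // input element `flat` is the broadcast partner of output element `o`
    }
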
+ const std::size_t nbDims = opTensor.getOutput(0)->nbDims(); + std::vector<std::vector<std::size_t>> inputsDims; std::vector<const void*> opInputs; - std::vector<std::shared_ptr<Tensor>> inputsFallback(mOp.nbInputs()); - for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) { - const auto& input = std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->refCastFrom(inputsFallback[i], *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))); + std::vector<std::shared_ptr<Tensor>> inputsFallback(opTensor.nbInputs()); + for (IOIndex_t i = 0; i < opTensor.nbInputs(); ++i) { + std::vector<std::size_t> inputDims(nbDims, 1); + auto dims = opTensor.getInput(i)->dims(); + for(std::size_t j=dims.size()-1; j+1>0; --j) + { + std::size_t idx = nbDims - (dims.size()-j); + inputDims[idx] = dims[j]; + } + inputsDims.push_back(inputDims); + const auto& input = opTensor.getInput(i)->refCastFrom(inputsFallback[i], *opTensor.getOutput(0)); opInputs.push_back(input.getImpl()->rawPtr()); } - // Call kernel - kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(), - opInputs, - getCPUPtr(mOp.getRawOutput(0))); + kernelFunc(opInputs, + inputsDims, + opTensor.getOutput(0)->size(), + opTensor.getOutput(0)->dims(), + getCPUPtr(opTensor.getRawOutput(0))); } diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp index 9e0a77e3285c1e3701142828c74898cb9da5b405..8ba6751bf4068a69ed07e362924f59d0f4aca6c5 100644 --- a/src/operator/AvgPoolingImpl.cpp +++ b/src/operator/AvgPoolingImpl.cpp @@ -21,9 +21,9 @@ #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp" #include "aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp" -Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { +Aidge::Elts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { // this implementation can be in-place - return 0; + return Elts_t::DataElts(0); } void Aidge::AvgPoolingImpl2D_cpu::forward() { diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp index c84f2cb6b09c707f68ed83cc7554624fc6489b84..96179d11850624f831333c9a4badaddf2221ecff 100644 --- a/src/operator/BatchNormImpl.cpp +++ b/src/operator/BatchNormImpl.cpp @@ -20,9 +20,9 @@ #include "aidge/backend/cpu/operator/BatchNormImpl.hpp" #include "aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp" -Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { +Aidge::Elts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { // this implementation can be in-place - return 0; + return Elts_t::DataElts(0); } void Aidge::BatchNormImpl2D_cpu::forward() { diff --git a/src/operator/ConcatImpl.cpp b/src/operator/ConcatImpl.cpp index ceefb9031f279be417a8ab0485567a56edea7824..605f4a19ff3856924593b0e6d7815d5de1579c01 100644 --- a/src/operator/ConcatImpl.cpp +++ b/src/operator/ConcatImpl.cpp @@ -21,46 +21,6 @@ #include "aidge/backend/cpu/operator/ConcatImpl.hpp" #include "aidge/backend/cpu/operator/ConcatImpl_forward_kernels.hpp" -Aidge::NbElts_t Aidge::ConcatImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const { - assert(mOp.getRawInput(inputIdx) && "requires valid input"); - - // Requires the whole tensors - const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->dims(); - return std::accumulate(inputDims.begin(), inputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>()); -} - -Aidge::NbElts_t Aidge::ConcatImpl_cpu::getNbRequiredProtected(const 
Aidge::IOIndex_t /*inputIdx*/) const { - // for the direct convolution algorithm, convolutions can be in-place, if there is no padding! - return 0; -} - -Aidge::NbElts_t Aidge::ConcatImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const { - // Requires the whole tensors, regardless of available data on inputs - assert(outputIdx == 0 && "operator has only one output"); - (void) outputIdx; - - const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(); - return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>()); -} - -Aidge::NbElts_t Aidge::ConcatImpl_cpu::getNbConsumedData(const Aidge::IOIndex_t inputIdx) const { - assert(inputIdx < mNbConsumedData.size()); - return mNbConsumedData[inputIdx]; -} - -Aidge::NbElts_t Aidge::ConcatImpl_cpu::getNbProducedData(const Aidge::IOIndex_t outputIdx) const { - assert(outputIdx < mNbProducedData.size()); - return mNbProducedData[outputIdx]; -} - -void Aidge::ConcatImpl_cpu::updateConsummerProducer() { - for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx) - mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass - - mNbProducedData[0]+= getRequiredMemory(0, {}); - -} - void Aidge::ConcatImpl_cpu::forward() { assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input in Concat operator"); DataType datatypeFirstInput = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(); @@ -87,4 +47,4 @@ void Aidge::ConcatImpl_cpu::forward() { getCPUPtr(mOp.getRawOutput(0))); } -void Aidge::ConcatImpl_cpu::backward() { printf("Not implemented yet.\n"); } \ No newline at end of file +void Aidge::ConcatImpl_cpu::backward() { fmt::print("Not implemented yet.\n"); } \ No newline at end of file diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp index 1b4262e394f78ab0bda4a36440ac7b9cb15c164c..5c8d2fe307c70bd7ee3f64e14735417f7ffb0c67 100644 --- a/src/operator/ConvDepthWiseImpl.cpp +++ b/src/operator/ConvDepthWiseImpl.cpp @@ -22,9 +22,9 @@ #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp" #include "aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp" -Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { +Aidge::Elts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { // this implementation can be in-place - return 0; + return Elts_t::DataElts(0); } void Aidge::ConvDepthWiseImpl2D_cpu::forward() { diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp index b849142dd3abe0131fb0c6c448530a7669ce27dc..7457a1a0b75af1f922c5a65ac88aabc813d00069 100644 --- a/src/operator/ConvImpl.cpp +++ b/src/operator/ConvImpl.cpp @@ -22,23 +22,25 @@ #include "aidge/backend/cpu/operator/ConvImpl.hpp" #include "aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp" -Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { +Aidge::Elts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { // this implementation can be in-place - return 0; + return Elts_t::DataElts(0); } void Aidge::ConvImpl2D_cpu::forward() { + const auto& opTensor = static_cast<const OperatorTensor&>(mOp); + // FIXME: uncomment the following code once memory handling will work assert(mOp.getRawInput(0) && "missing input #0"); 
assert(mOp.getRawInput(1) && "missing input #1"); assert(mOp.getRawInput(2) && "missing input #2"); // Find the correct kernel type - const auto outputDataType = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType(); + const auto outputDataType = opTensor.getOutput(0)->dataType(); const Registrar<ConvImpl2DForward_cpu>::registrar_key registrarKey = { - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->dataType(), + opTensor.getInput(0)->dataType(), + opTensor.getInput(1)->dataType(), + opTensor.getInput(2)->dataType(), outputDataType}; Registrar<ConvImpl2DForward_cpu>::registrar_type kernelFunc; @@ -57,12 +59,12 @@ void Aidge::ConvImpl2D_cpu::forward() { // call to forward(). We might put the following shared_ptr as members of // this class to avoid that. std::shared_ptr<Tensor> input0Fallback, input1Fallback, input2Fallback; - const auto& input0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->refCastFrom(input0Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))); - const auto& input1 = std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->refCastFrom(input1Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))); - const auto& input2 = std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->refCastFrom(input2Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))); + const auto& input0 = opTensor.getInput(0)->refCastFrom(input0Fallback, *opTensor.getOutput(0)); + const auto& input1 = opTensor.getInput(1)->refCastFrom(input1Fallback, *opTensor.getOutput(0)); + const auto& input2 = opTensor.getInput(2)->refCastFrom(input2Fallback, *opTensor.getOutput(0)); // Call kernel - kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(), + kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), opTensor.getInput(0)->template dims<4>(), input0.getImpl()->rawPtr(), input1.getImpl()->rawPtr(), input2.getImpl()->rawPtr(), getCPUPtr(mOp.getRawOutput(0))); } diff --git a/src/operator/DivImpl.cpp b/src/operator/DivImpl.cpp index f5cde077bd5a414d8b9add8b8b8715952a27ad01..098b20776888c6d72110e4bc4c0c3e191febd41c 100644 --- a/src/operator/DivImpl.cpp +++ b/src/operator/DivImpl.cpp @@ -9,35 +9,156 @@ * ********************************************************************************/ -#include <cassert> -#include <chrono> // std::chrono::milliseconds -#include <thread> // std::this_thread::sleep_for +#include <cstdint> // std::int32_t +#include <memory> #include <numeric> // std::accumulate #include <vector> -#include "aidge/operator/Div.hpp" -#include "aidge/utils/Types.h" +#include "aidge/backend/cpu/data/Broadcasting.hpp" #include "aidge/backend/cpu/data/GetCPUPtr.h" - #include "aidge/backend/cpu/operator/DivImpl.hpp" #include "aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Types.h" -Aidge::NbElts_t Aidge::DivImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { +Aidge::Elts_t Aidge::DivImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { // this implementation can be in-place - return 0; + return Elts_t::DataElts(0); } void Aidge::DivImpl_cpu::forward() { + // Find the correct kernel type + // auto kernelFunc = Registrar<DivImplForward_cpu>::create({ + // std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + //
std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(), + // std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), + // std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()); + // const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), + // std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims()); + + + // auto a = std::static_pointer_cast<Tensor>(mOp.getRawInput(0)); + // auto b = std::static_pointer_cast<Tensor>(mOp.getRawInput(1)); + + // // Call kernel + // kernelFunc(inputDims0, + // inputDims1, + // std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), + // getCPUPtr(mOp.getRawInput(0)), + // getCPUPtr(mOp.getRawInput(1)), + // getCPUPtr(mOp.getRawOutput(0))); + +///////////////////////////////////////////////////////////////// + + // [5,2,1,7] & [2,6,7] + // 1. Same number of dimensions -> [5,2,1,7] & [1,2,6,7] + // 2. Find the highest equal dimension -> 3 + // Exception: if the first diverging dimension is the last one, then -> 4 (dims.size()) + // 3. Compute the highest number of contiguous data -> 7 + // 4. Compute stride and offset step for the broadcast mechanism + // 5. Call a simple kernel
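Tracing the recipe above on its own example, as a worked check of the code that follows:

    // [5,2,1,7] / [2,6,7]: pad -> dims0 = [5,2,1,7], dims1 = [1,2,6,7]
    // scanning from the last axis: 7 == 7, then 1 != 6 -> contiguousIdx = 3
    // contiguous block sizes: input0 = 7, input1 = 7, output ([5,2,6,7]) = 7
    // nbStacks = 5*2*6 = 60 seven-element kernel calls
    // stride_post1 over [1,2,6] = [12,6,1]; stride_step1 = [1-12, 1, 1] = [-11, 1, 1]
    // the negative step (-11) rewinds input1 whenever the output's leading axis
    // advances: because dims1[0] == 1, the same data is re-read without copying.

+ const auto& opTensor = static_cast<const Div_Op&>(mOp); + // Find the correct kernel type auto kernelFunc = Registrar<DivImplForward_cpu>::create({ - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); - - // Call kernel - kernelFunc(std::static_pointer_cast<Tensor>(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)))->size(), - std::static_pointer_cast<Tensor>(std::static_pointer_cast<Tensor>(mOp.getRawInput(1)))->size(), - getCPUPtr(mOp.getRawInput(0)), - getCPUPtr(mOp.getRawInput(1)), - getCPUPtr(mOp.getRawOutput(0))); + opTensor.getInput(0)->dataType(), + opTensor.getInput(1)->dataType(), + opTensor.getOutput(0)->dataType()}); + + // Compute compatible input dimensions + std::vector<std::size_t> dims0 = opTensor.getInput(0)->dims(); + std::vector<std::size_t> dims1 = opTensor.getInput(1)->dims(); + const std::vector<std::size_t>& outDims = opTensor.getOutput(0)->dims(); + + // if (dims0 == dims1) { + // const std::size_t input0_contiguous_size = std::accumulate(dims0.cbegin(), dims0.cend(), std::size_t(1), std::multiplies<std::size_t>()); + // kernelFunc(input0_contiguous_size, input0_contiguous_size, input0_contiguous_size, + // getCPUPtr(mOp.getRawInput(0)), + // getCPUPtr(mOp.getRawInput(1)), + // getCPUPtr(mOp.getRawOutput(0))); + // return; + // } + + if (dims0.size() > dims1.size()) { + dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1)); + } + else if (dims1.size() > dims0.size()) { + dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1)); + } + + const std::size_t nbDims = dims0.size(); + + // Find the highest equal dimension + std::size_t contiguousIdx = nbDims - 1; + for (; contiguousIdx+1 > 0; --contiguousIdx) { + if (dims0[contiguousIdx] != dims1[contiguousIdx]) { + if (contiguousIdx == (nbDims -1)) { // last dimensions of one of the input Tensor are of size 1 + const std::vector<std::size_t>& dims = (dims0[contiguousIdx] == 1) ?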
dims0 : dims1; + while ((contiguousIdx+1 > 0) && (dims[contiguousIdx] == 1)) { + --contiguousIdx; + } + } + break; + } + } + ++contiguousIdx; + + // Compute the highest number of contiguous data for each Tensor + const std::size_t input0_contiguous_size = std::accumulate(dims0.cbegin()+contiguousIdx, dims0.cend(), std::size_t(1), std::multiplies<std::size_t>()); + const std::size_t input1_contiguous_size = std::accumulate(dims1.cbegin()+contiguousIdx, dims1.cend(), std::size_t(1), std::multiplies<std::size_t>()); + const std::size_t output_contiguous_size = std::accumulate(outDims.cbegin()+contiguousIdx, outDims.cend(), std::size_t(1), std::multiplies<std::size_t>()); + + // initialize strides to iterate through data because of broadcasting + std::int32_t *stride_post0; + std::int32_t *stride_post1; + std::int32_t *stride_step0; + std::int32_t *stride_step1; + if (contiguousIdx > 0) { + stride_post0 = new std::int32_t[contiguousIdx]; + stride_post0[contiguousIdx - 1] = 1; + stride_post1 = new std::int32_t[contiguousIdx]; + stride_post1[contiguousIdx - 1] = 1; + for (std::size_t i = contiguousIdx - 2; i != static_cast<std::size_t>(-1); --i) { + stride_post0[i] = stride_post0[i+1]*static_cast<std::int32_t>(dims0[i+1]); + stride_post1[i] = stride_post1[i+1]*static_cast<std::int32_t>(dims1[i+1]); + } + stride_step0 = new std::int32_t[contiguousIdx]; + stride_step1 = new std::int32_t[contiguousIdx]; + for (std::size_t i = 0; i != contiguousIdx; ++i) { + stride_step0[i] = (dims0[i] == 1) ? 1 - stride_post0[i] : 1; + stride_step1[i] = (dims1[i] == 1) ? 1 - stride_post1[i] : 1; + } + } + + // variables for arrays offsets + std::size_t offsetIn0 = 0; + std::size_t offsetIn1 = 0; + std::size_t offsetOut = 0; + + + std::size_t dim = contiguousIdx - 1; + const std::size_t nbStacks = std::accumulate(outDims.cbegin(), outDims.cbegin() + contiguousIdx, std::size_t(1), std::multiplies<std::size_t>()); + for (std::size_t stack = 0; stack < nbStacks;) { + kernelFunc(input0_contiguous_size, input1_contiguous_size, output_contiguous_size, + getCPUPtr(mOp.getRawInput(0), offsetIn0*input0_contiguous_size), + getCPUPtr(mOp.getRawInput(1), offsetIn1*input1_contiguous_size), + getCPUPtr(mOp.getRawOutput(0), offsetOut*output_contiguous_size)); + if (++stack < nbStacks) { + std::size_t tmp_stack = stack; + while(tmp_stack % outDims[dim] == 0) { + tmp_stack /= outDims[dim]; + dim--; + } + offsetIn0 += stride_step0[dim]; + offsetIn1 += stride_step1[dim]; + ++offsetOut; + dim = contiguousIdx - 1; + } + } + if (contiguousIdx > 0) { + delete[] stride_post0; + delete[] stride_post1; + delete[] stride_step0; + delete[] stride_step1; + } } diff --git a/src/operator/ErfImpl.cpp b/src/operator/ErfImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ace098468c05b80c4116e6f85d00b5fabaf754cd --- /dev/null +++ b/src/operator/ErfImpl.cpp @@ -0,0 +1,42 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include "aidge/backend/cpu/operator/ErfImpl.hpp" + +#include <memory> +#include <vector> + +#include "aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Erf.hpp" +#include "aidge/utils/Types.h" + +Aidge::Elts_t Aidge::ErfImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return Elts_t::DataElts(0); +} + +void Aidge::ErfImpl_cpu::forward() { + const Erf_Op& op = static_cast<const Erf_Op&>(mOp); + + // Find the correct kernel type + auto kernelFunc = Registrar<ErfImplForward_cpu>::create({ + op.getInput(0)->dataType(), + op.getOutput(0)->dataType() + }); + + // Call kernel + kernelFunc( + op.getInput(0)->size(), + op.getInput(0)->getImpl()->rawPtr(), + op.getOutput(0)->getImpl()->rawPtr() + ); +} diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp index bc4a7a7cab91049c623e9a9e95ee63367da00722..eecff38afd4d4487d51a070d6c0f4c2507a2b478 100644 --- a/src/operator/FCImpl.cpp +++ b/src/operator/FCImpl.cpp @@ -9,31 +9,34 @@ * ********************************************************************************/ -#include <cassert> -#include <chrono> // std::chrono::milliseconds -#include <numeric> // std::accumulate -#include <thread> // std::this_thread::sleep_for -#include <vector> +#include "aidge/backend/cpu/operator/FCImpl.hpp" + +#include <cstddef> // std::size_t +#include <functional> +#include <memory> +#include <tuple> +#include "aidge/backend/cpu/data/GetCPUPtr.h" +#include "aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp" +#include "aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp" #include "aidge/operator/FC.hpp" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/Types.h" -#include "aidge/backend/cpu/data/GetCPUPtr.h" -#include "aidge/backend/cpu/operator/FCImpl.hpp" -#include "aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp" void Aidge::FCImpl_cpu::forward() { - assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0"); - assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(1)) && "missing input #1"); - assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(2)) && "missing input #2"); + const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp); + AIDGE_ASSERT(op_.getInput(0), "missing input #0"); + AIDGE_ASSERT(op_.getInput(1), "missing input #1"); + AIDGE_ASSERT(op_.getInput(2), "missing input #2"); // Find the correct kernel type - const auto outputDataType = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType(); + const auto outputDataType = op_.getOutput(0)->dataType(); const Registrar<FCImplForward_cpu>::registrar_key registrarKey = { - std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->dataType(), + op_.getInput(0)->dataType(), + op_.getInput(1)->dataType(), + op_.getInput(2)->dataType(), outputDataType}; Registrar<FCImplForward_cpu>::registrar_type kernelFunc; @@ -52,14 +55,61 @@ void Aidge::FCImpl_cpu::forward() // call to forward(). We might put the following shared_ptr as members of // this class to avoid that. 
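For orientation, the forward kernel fetched above computes a plain dense layer. A reference sketch of its inner loop, assuming the usual {outChannels, inSize} weight layout (the real kernels live in FCImpl_forward_kernels.hpp and may be organized differently):

    // out[b, o] = bias[o] + sum_i in[b, i] * w[o, i]
    for (std::size_t b = 0; b < batchSize; ++b) {
        for (std::size_t o = 0; o < outChannels; ++o) {
            O acc = bias ? bias[o] : O(0);
            for (std::size_t i = 0; i < inSize; ++i) {
                acc += input[b * inSize + i] * weights[o * inSize + i];
            }
            output[b * outChannels + o] = acc;
        }
    }
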
std::shared_ptr<Tensor> input0Fallback, input1Fallback, input2Fallback; - const auto& input0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->refCastFrom(input0Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))); - const auto& input1 = std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->refCastFrom(input1Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))); - const auto& input2 = std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->refCastFrom(input2Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))); + const auto& input0 = op_.getInput(0)->refCastFrom(input0Fallback, *(op_.getOutput(0))); + const auto& input1 = op_.getInput(1)->refCastFrom(input1Fallback, *(op_.getOutput(0))); + const auto& input2 = op_.getInput(2)->refCastFrom(input2Fallback, *(op_.getOutput(0))); // Call kernel + const auto batchSize = (input0.dims().size() > 1) ? input0.dims()[0] : 1; kernelFunc(dynamic_cast<const FC_Op&>(mOp).getStaticAttributes(), - input0.dims()[0], - input0.size() / input0.dims()[0], + batchSize, + input0.size() / batchSize, input0.getImpl()->rawPtr(), input1.getImpl()->rawPtr(), input2.getImpl()->rawPtr(), getCPUPtr(mOp.getRawOutput(0))); } + +void Aidge::FCImpl_cpu::backward() +{ + const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp); + const auto& fc_grad = op_.getOutput(0)->grad(); + assert(fc_grad && "missing output #0 gradient"); + + // Find the correct kernel type + const Registrar<FCImplBackward_cpu>::registrar_key registrarKey = { + fc_grad->dataType(), + op_.getInput(0)->grad()->dataType(), + op_.getInput(1)->grad()->dataType(), + op_.getInput(2)->grad()->dataType()}; + + Registrar<FCImplBackward_cpu>::registrar_type kernelFunc; + if (Registrar<FCImplBackward_cpu>::exists(registrarKey)) { + // One exists with the right inputs/output types + kernelFunc = Registrar<FCImplBackward_cpu>::create(registrarKey); + } + else { + // Otherwise, fallback to the kernel with all types matching output type + kernelFunc = Registrar<FCImplBackward_cpu>::create({ + fc_grad->dataType(), fc_grad->dataType(), fc_grad->dataType(), fc_grad->dataType()}); + } + + // Convert input data (no overhead if not needed!) + // TODO: right now, if needed, memory will be allocated/deallocated at each + // call to forward(). We might put the following shared_ptr as members of + // this class to avoid that. + std::shared_ptr<Tensor> input0gradFallback, input1gradFallback, input2gradFallback; + const auto& input0grad = op_.getInput(0)->grad()->refCastFrom(input0gradFallback, *(op_.getOutput(0))); + const auto& input1grad = op_.getInput(1)->grad()->refCastFrom(input1gradFallback, *(op_.getOutput(0))); + const auto& input2grad = op_.getInput(2)->grad()->refCastFrom(input2gradFallback, *(op_.getOutput(0))); + + // Call kernel + const auto batchSize = (input0grad.dims().size() > 1) ?
input0grad.dims()[0] : 1;
+    kernelFunc(dynamic_cast<const FC_Op&>(mOp).getStaticAttributes(),
+        batchSize,
+        input0grad.size() / batchSize,
+        getCPUPtr(fc_grad),
+        getCPUPtr(op_.getInput(0)),
+        getCPUPtr(op_.getInput(1)),
+        input0grad.getImpl()->rawPtr(),
+        input1grad.getImpl()->rawPtr(),
+        input2grad.getImpl()->rawPtr());
+}
diff --git a/src/operator/GatherImpl.cpp b/src/operator/GatherImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5384f64536955b7cb2ed85af81e52697e9b84a2a
--- /dev/null
+++ b/src/operator/GatherImpl.cpp
@@ -0,0 +1,37 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/cpu/operator/GatherImpl.hpp"
+
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Gather.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::GatherImpl_cpu::forward() {
+    const Gather_Op& op = static_cast<const Gather_Op&>(mOp);
+
+    auto kernelFunc = Registrar<GatherImplForward_cpu>::create({
+        op.getInput(0)->dataType(),
+        op.getOutput(0)->dataType()
+    });
+
+    // Call kernel
+    kernelFunc(op.getStaticAttributes(),
+        op.getInput(0)->dims(),
+        op.getInput(0)->getImpl()->rawPtr(),
+        op.getOutput(0)->getImpl()->rawPtr()
+    );
+}
diff --git a/src/operator/GlobalAveragePoolingImpl.cpp b/src/operator/GlobalAveragePoolingImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f7280360a4486fe5db6c4dfdd4c492bbe6ba302b
--- /dev/null
+++ b/src/operator/GlobalAveragePoolingImpl.cpp
@@ -0,0 +1,41 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp"
+
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl_forward_kernels.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/GlobalAveragePooling.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+
+void Aidge::GlobalAveragePoolingImpl_cpu::forward()
+{
+    const GlobalAveragePooling_Op& op_ = static_cast<const GlobalAveragePooling_Op&>(mOp);
+    // Check if input is provided
+    AIDGE_ASSERT(op_.getInput(0), "missing input 0");
+
+    // Create the forward kernel with the wanted types
+    auto kernelFunc = Registrar<GlobalAveragePoolingImplForward_cpu>::create({op_.getInput(0)->dataType(),
+                                                                              op_.getOutput(0)->dataType()});
+
+    // Call kernel
+    kernelFunc(op_.getInput(0)->dims(),
+               op_.getInput(0)->getImpl()->rawPtr(),
+               op_.getOutput(0)->getImpl()->rawPtr());
+}
\ No newline at end of file
diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp
index 17912eb1dc75930eaf7595eb189af39df4d4fa2e..340af3eeaf370988f9b12d8535812c938e47078a 100644
--- a/src/operator/LeakyReLUImpl.cpp
+++ b/src/operator/LeakyReLUImpl.cpp
@@ -10,34 +10,56 @@
 ********************************************************************************/

 #include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
 #include <vector>

+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp"

-Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::Elts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
-    return 0;
+    return Elts_t::DataElts(0);
 }

 void Aidge::LeakyReLUImpl_cpu::forward() {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+    const LeakyReLU_Op& op_ = dynamic_cast<const LeakyReLU_Op&>(mOp);
+    std::shared_ptr<Tensor> in0 = op_.getInput(0);
+    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
+    AIDGE_ASSERT(in0, "missing input #0");

     // Find the correct kernel type
     auto kernelFunc = Registrar<LeakyReLUImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+        in0->dataType(),
+        out0->dataType()});

     // Call kernel
     kernelFunc(dynamic_cast<const LeakyReLU_Op&>(mOp).getStaticAttributes(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        in0->size(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawOutput(0)));
 }
+
+void Aidge::LeakyReLUImpl_cpu::backward() {
+    // reversing in and out Data for backprop
+    const LeakyReLU_Op& op_ = dynamic_cast<const LeakyReLU_Op&>(mOp);
+    std::shared_ptr<Tensor> in0 = op_.getOutput(0)->grad();
+    std::shared_ptr<Tensor> out0 = op_.getInput(0)->grad();
+    AIDGE_ASSERT(in0, "missing input #0");
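+    // (in0 is the gradient w.r.t. the output and out0 the gradient w.r.t.
+    // the input: the names are swapped because data flows in the opposite
+    // direction during backpropagation)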
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<LeakyReLUImplBackward_cpu>::create({
+        in0->dataType(),
+        out0->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const LeakyReLU_Op&>(mOp).getStaticAttributes(),
+        in0->size(),
+        getCPUPtr(in0),
+        getCPUPtr(out0));
+}
\ No newline at end of file
diff --git a/src/operator/MatMulImpl.cpp b/src/operator/MatMulImpl.cpp
index f02effb3172e2c0624c6c7532513a2b794ee3a89..488af17617d556ad7a9d9b73909324d67a672459 100644
--- a/src/operator/MatMulImpl.cpp
+++ b/src/operator/MatMulImpl.cpp
@@ -9,15 +9,14 @@
  *
  ********************************************************************************/

-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include <cstddef>  // std::size_t
+#include <cstdint>  // std::int32_t
+#include <numeric>  // std::accumulate
 #include <vector>

+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp"

@@ -30,27 +29,110 @@ void Aidge::MatMulImpl_cpu::forward()
     // Find the correct kernel type
     auto kernelFunc = Registrar<MatMulImplForward_cpu>::create(
         {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
          std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});

-    // Call kernel
-    // if (mOp.getInput(0)->nbDims() == 4) {
-    //     kernelFunc(
-    //         mOp.getStaticAttributes(),
-    //         std::static_pointer_cast<Tensor>(mOp.getInput(0))->template dims<4>(),
-    //         mOp.getInput(0))->getImpl()->rawPtr(),
-    //         mOp.mInputs[1]->getImpl()->rawPtr(),
-    //         mOp.mInputs[2]->getImpl()->rawPtr(),
-    //         getCPUPtr(mOp.getRawOutput(0));
-    // }
-    // else
-    kernelFunc(
-        dynamic_cast<const MatMul_Op&>(mOp).getStaticAttributes(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0],
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size() / std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0],
-        getCPUPtr(mOp.getRawInput(0)),
-        getCPUPtr(mOp.getRawInput(1)),
-        getCPUPtr(mOp.getRawOutput(0)));
+    // Compute compatible input dimensions
+    std::vector<std::size_t> dims0 = static_cast<const MatMul_Op&>(mOp).getInput(0)->dims();
+    std::vector<std::size_t> dims1 = static_cast<const MatMul_Op&>(mOp).getInput(1)->dims();
+
+    // keep second-to-last dimension of dims0
+    const std::size_t keepDim0 = (dims0.size() > 1) ? 1 : 0;
+    // keep last dimension of dims1
+    const std::size_t keepDim1 = (dims1.size() > 1) ?
1 : 0; + + if (dims0.size() == 1) { + dims0.insert(dims0.cbegin(), 1); + } + if (dims1.size() == 1) { + dims1.push_back(1); + } + + if (dims0.size() > dims1.size()) { + dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1)); + } + else if (dims1.size() > dims0.size()) { + dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1)); + } + // const std::size_t dims_size = std::max(dims0.size(), dims1.size()); + // at this point, dims0.size() == dims1.size() + const std::size_t nbDims = dims0.size(); + // initialize strides to iterate through data because of broadcasting + std::size_t *stride_post0; + std::size_t *stride_post1; + std::int32_t *stride_step0; + std::int32_t *stride_step1; + if (nbDims > 2) { + stride_post0 = new std::size_t[nbDims-2]; + stride_post0[nbDims - 3] = 1; + stride_post1 = new std::size_t[nbDims-2]; + stride_post1[nbDims - 3] = 1; + for (std::size_t i = nbDims-4; i != static_cast<std::size_t>(-1); --i) { + stride_post0[i] = stride_post0[i+1]*dims0[i+1]; + stride_post1[i] = stride_post1[i+1]*dims1[i+1]; + } + stride_step0 = new std::int32_t[nbDims-2]; + stride_step1 = new std::int32_t[nbDims-2]; + for (std::size_t i = 0; i != nbDims-2; ++i) { + stride_step0[i] = (dims0[i] == 1) ? 1 - static_cast<std::int32_t>(stride_post0[i]) : 1; + stride_step1[i] = (dims1[i] == 1) ? 1 - static_cast<std::int32_t>(stride_post1[i]) : 1; + } + } + + const std::vector<std::size_t>& outDims = static_cast<const MatMul_Op&>(mOp).getOutput(0)->dims(); + const std::size_t nbMatrices = std::accumulate(outDims.cbegin(), outDims.cend() - keepDim0 - keepDim1, 1, std::multiplies<std::size_t>()); + std::size_t dim = outDims.size() - 1 - keepDim0 - keepDim1; + + // variables for arrays offsets + std::size_t offsetIn0 = 0; + std::size_t offsetIn1 = 0; + std::size_t offsetOut = 0; + const std::size_t n = dims0[nbDims - 2]; + const std::size_t k = dims0[nbDims - 1]; + const std::size_t m = dims1[nbDims - 1]; + const std::size_t matrix0Size = n*k; + const std::size_t matrix1Size = k*m; + const std::size_t matrixOutSize = n*m; + for (std::size_t stack = 0; stack < nbMatrices;) { + kernelFunc(n, k, m, + getCPUPtr(mOp.getRawInput(0), offsetIn0*matrix0Size), + getCPUPtr(mOp.getRawInput(1), offsetIn1*matrix1Size), + getCPUPtr(mOp.getRawOutput(0), offsetOut*matrixOutSize)); + if (++stack < nbMatrices) { + std::size_t tmp_stack = stack; + while(tmp_stack % outDims[dim] == 0) { + tmp_stack /= outDims[dim]; + dim--; + } + offsetIn0 += stride_step0[dim]; + offsetIn1 += stride_step1[dim]; + ++offsetOut; + dim = outDims.size() - 1 - keepDim0 - keepDim1; + } + } + if (nbDims > 2) { + delete[] stride_post0; + delete[] stride_post1; + delete[] stride_step0; + delete[] stride_step1; + } } + +// void Aidge::MatMulImpl_cpu::forward() +// { +// assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0"); +// assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(1)) && "missing input #1"); + +// // Find the correct kernel type +// auto kernelFunc = Registrar<MatMulImplForward_cpu>::create( +// {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + +// kernelFunc( +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims(), +// getCPUPtr(mOp.getRawInput(0)), +// getCPUPtr(mOp.getRawInput(1)), +// getCPUPtr(mOp.getRawOutput(0))); +// } diff --git a/src/operator/MaxPoolingImpl.cpp b/src/operator/MaxPoolingImpl.cpp 
index e21dab07df4c20eb7253e680146042f205bc210b..94591eaa9848b24aeb7afa1e8b6b87a3e6e2b45f 100644 --- a/src/operator/MaxPoolingImpl.cpp +++ b/src/operator/MaxPoolingImpl.cpp @@ -21,9 +21,9 @@ #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp" #include "aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp" -Aidge::NbElts_t Aidge::MaxPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { +Aidge::Elts_t Aidge::MaxPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const { // this implementation can be in-place - return 0; + return Elts_t::DataElts(0); } void Aidge::MaxPoolingImpl2D_cpu::forward() { diff --git a/src/operator/MemorizeImpl.cpp b/src/operator/MemorizeImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8a23bd35585c03c91567c0da5b0727fe1323b754 --- /dev/null +++ b/src/operator/MemorizeImpl.cpp @@ -0,0 +1,81 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <cassert> +#include <chrono> // std::chrono::milliseconds +#include <numeric> // std::accumulate +#include <thread> // std::this_thread::sleep_for +#include <vector> + +#include "aidge/operator/Memorize.hpp" +#include "aidge/utils/Types.h" +#include "aidge/backend/cpu/data/GetCPUPtr.h" + +#include "aidge/backend/cpu/operator/MemorizeImpl.hpp" + +Aidge::Elts_t Aidge::MemorizeImpl_cpu::getNbRequiredData( + Aidge::IOIndex_t inputIdx) const +{ + const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp); + const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>(); + + if (scheduleStep == 0 && inputIdx == 0) { + // No data input is required for the initial step. + // Initialization data is required however. + return Elts_t::NoneElts(); + } + else if (scheduleStep > 0 && inputIdx == 1) { + // No initialization data is required after the initial step. 
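+        // (after the initial step, the operator is fed through input #0,
+        // which loops back its own previous output; see forward() below)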
+ return Elts_t::NoneElts(); + } + else { + return OperatorImpl::getNbRequiredData(inputIdx); + } +} + +Aidge::Elts_t Aidge::MemorizeImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, + const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const { + assert(mOp.getRawOutput(outputIdx) && "requires valid output"); + + const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp); + const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>(); + const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>(); + + if (endStep > 0 && outputIdx == 1 && scheduleStep >= endStep) { + return Elts_t::NoneElts(); + } + else { + return Elts_t::DataElts(std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx))->size()); + } +} + +void Aidge::MemorizeImpl_cpu::updateConsummerProducer() { + OperatorImpl::updateConsummerProducer(); + + const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp); + const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>(); + const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>(); + AIDGE_ASSERT(endStep == 0 || scheduleStep <= endStep, "cannot update consumer producer anymore, number of cycles exceeded"); +} + +void Aidge::MemorizeImpl_cpu::forward() { + const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp); + const unsigned int forwardStep = op.template getAttr<MemorizeAttr::ForwardStep>(); + const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>(); + AIDGE_ASSERT(endStep == 0 || forwardStep <= endStep, "cannot forward anymore, number of cycles exceeded"); + + if (forwardStep == 0) { + op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size()); + } + else { + op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size()); + } +} diff --git a/src/operator/MulImpl.cpp b/src/operator/MulImpl.cpp index fda49c3f20ed5cbe519d729a0bf759f0964a99fd..d7feb9b76e25a0e874b3682cdc5b3e53bf8e9228 100644 --- a/src/operator/MulImpl.cpp +++ b/src/operator/MulImpl.cpp @@ -17,14 +17,15 @@ #include "aidge/operator/Mul.hpp" #include "aidge/utils/Types.h" +#include "aidge/backend/cpu/data/Broadcasting.hpp" #include "aidge/backend/cpu/data/GetCPUPtr.h" #include "aidge/backend/cpu/operator/MulImpl.hpp" #include "aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp" -Aidge::NbElts_t Aidge::MulImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { +Aidge::Elts_t Aidge::MulImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { // this implementation can be in-place - return 0; + return Elts_t::DataElts(0); } void Aidge::MulImpl_cpu::forward() { @@ -34,9 +35,15 @@ void Aidge::MulImpl_cpu::forward() { std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(), std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()); + const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims()); + // Call kernel - kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(), + kernelFunc(inputDims0, + inputDims1, + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), 
getCPUPtr(mOp.getRawInput(0)), getCPUPtr(mOp.getRawInput(1)), getCPUPtr(mOp.getRawOutput(0))); diff --git a/src/operator/PadImpl.cpp b/src/operator/PadImpl.cpp index 219bf425fa34cdaaa378c49dd7c9837f9d94d97e..cd420a6241723c5d3fa5836838f84ce6bfe965d1 100644 --- a/src/operator/PadImpl.cpp +++ b/src/operator/PadImpl.cpp @@ -22,7 +22,7 @@ #include "aidge/backend/cpu/operator/PadImpl.hpp" #include "aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp" -Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbRequiredProtected(IOIndex_t inputIdx) const { +Aidge::Elts_t Aidge::PadImpl2D_cpu::getNbRequiredProtected(IOIndex_t inputIdx) const { assert(inputIdx == 0 && "operator has only one input"); (void) inputIdx; @@ -30,7 +30,7 @@ Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbRequiredProtected(IOIndex_t inputIdx) // We must ensure that we do not override data that has not been consummed yet. const auto inputSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(); const auto outputSize = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size(); - return (outputSize - inputSize); + return Elts_t::DataElts(outputSize - inputSize); } void Aidge::PadImpl2D_cpu::forward() { diff --git a/src/operator/PopImpl.cpp b/src/operator/PopImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..02bbddbaed6d9d89e729d6c778a1765fcbab4b4f --- /dev/null +++ b/src/operator/PopImpl.cpp @@ -0,0 +1,39 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <cassert> +#include <chrono> // std::chrono::milliseconds +#include <numeric> // std::accumulate +#include <thread> // std::this_thread::sleep_for +#include <vector> + +#include "aidge/operator/Pop.hpp" +#include "aidge/utils/Types.h" +#include "aidge/backend/cpu/data/GetCPUPtr.h" + +#include "aidge/backend/cpu/operator/PopImpl.hpp" + +Aidge::Elts_t Aidge::PopImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const { + assert(mOp.getRawInput(inputIdx) && "requires valid input"); + + return Elts_t::DataElts(std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->size() + / std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->dims()[0]); +} + +void Aidge::PopImpl_cpu::forward() { + assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0"); + + const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp); + const unsigned int forwardStep = op.template getAttr<PopAttr::ForwardStep>(); + + *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)) + = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->extract({forwardStep}); +} diff --git a/src/operator/PowImpl.cpp b/src/operator/PowImpl.cpp index 496646402e33869cfcbe7dae96e1fc81b875d0dd..811d13804cffdd2477fc830f1779b0fb6271eb0b 100644 --- a/src/operator/PowImpl.cpp +++ b/src/operator/PowImpl.cpp @@ -17,14 +17,15 @@ #include "aidge/operator/Pow.hpp" #include "aidge/utils/Types.h" +#include "aidge/backend/cpu/data/Broadcasting.hpp" #include "aidge/backend/cpu/data/GetCPUPtr.h" #include "aidge/backend/cpu/operator/PowImpl.hpp" #include "aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp" -Aidge::NbElts_t Aidge::PowImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t 
/*inputIdx*/) const { +Aidge::Elts_t Aidge::PowImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { // this implementation can be in-place - return 0; + return Elts_t::DataElts(0); } void Aidge::PowImpl_cpu::forward() { @@ -34,10 +35,38 @@ void Aidge::PowImpl_cpu::forward() { std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(), std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()); + const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims()); + // Call kernel - kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(), - std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(), + kernelFunc(inputDims0, + inputDims1, + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), getCPUPtr(mOp.getRawInput(0)), getCPUPtr(mOp.getRawInput(1)), getCPUPtr(mOp.getRawOutput(0))); } + +void Aidge::PowImpl_cpu::backward() { + // Find the correct kernel type + const Pow_Op& op_ = dynamic_cast<const Pow_Op&>(mOp); + auto kernelFunc = Registrar<PowImplForward_cpu>::create({ + op_.getOutput(0)->grad()->dataType(), + op_.getInput(0)->grad()->dataType(), + op_.getInput(1)->grad()->dataType()}); + + const std::vector<std::size_t> input0gradDims = getBroadcastedDims(op_.getInput(0)->grad()->dims(), + op_.getOutput(0)->grad()->dims()); + const std::vector<std::size_t> input1gradDims = getBroadcastedDims(op_.getInput(1)->grad()->dims(), + op_.getOutput(0)->grad()->dims()); + + // Call kernel + kernelFunc(op_.getOutput(0)->grad()->dims(), + input0gradDims, + input1gradDims, + getCPUPtr(mOp.getRawOutput(0)), + getCPUPtr(mOp.getRawInput(0)), + getCPUPtr(mOp.getRawInput(1))); +} \ No newline at end of file diff --git a/src/operator/ProducerImpl.cpp b/src/operator/ProducerImpl.cpp deleted file mode 100644 index 4c5883a9b0155e7bb6e16cbac1b8de1a3a9e9e16..0000000000000000000000000000000000000000 --- a/src/operator/ProducerImpl.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/******************************************************************************** - * Copyright (c) 2023 CEA-List - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License 2.0 which is available at - * http://www.eclipse.org/legal/epl-2.0. 
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <cassert>
-#include <numeric> // std::accumulate
-#include <vector>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
-
-#include "aidge/backend/cpu/operator/ProducerImpl.hpp"
-
-Aidge::DimSize_t Aidge::ProducerImpl_cpu::getNbProducedData(
-    Aidge::IOIndex_t outputIdx) const
-{
-    // Requires the whole tensors, regardless of available data on inputs
-    assert(outputIdx == 0 && "operator has only one output");
-    (void) outputIdx;
-
-    return std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size();
-}
-
-void Aidge::ProducerImpl_cpu::forward()
-{
-}
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index 8863be282ce0c7b7bfbfb938372cf304bc4cc4bd..4bba09b6fbeea1552bf5b7cc7e491291345fca45 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -9,34 +9,52 @@
 *
 ********************************************************************************/

-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include <memory>
 #include <vector>

+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp"

-Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::Elts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
    // this implementation can be in-place
-    return 0;
+    return Elts_t::DataElts(0);
 }

 void Aidge::ReLUImpl_cpu::forward() {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+    std::shared_ptr<Tensor> in0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0));
+    AIDGE_ASSERT(in0, "missing input #0");

     // Find the correct kernel type
     auto kernelFunc = Registrar<ReLUImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        in0->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});

     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+    kernelFunc(in0->size(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawOutput(0)));
 }
+
+void Aidge::ReLUImpl_cpu::backward() {
+    // reversing in and out Tensors
+    const ReLU_Op& op_ = dynamic_cast<const ReLU_Op&>(mOp);
+    std::shared_ptr<Tensor> in0 = op_.getOutput(0)->grad();
+    std::shared_ptr<Tensor> out0 = op_.getInput(0)->grad();
+    AIDGE_ASSERT(out0, "current {} operator output#0 has no gradient Tensor.", op_.type());
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<ReLUImplBackward_cpu>::create({
+        in0->dataType(),
+        out0->dataType()
+    });
+
+    // Call kernel
+    kernelFunc(in0->size(), getCPUPtr(in0), getCPUPtr(out0));
+}
diff --git a/src/operator/ReduceMeanImpl.cpp b/src/operator/ReduceMeanImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a9f17a28a2a47ec7bc50820d587e8d0f359d2bb3
--- /dev/null
+++ b/src/operator/ReduceMeanImpl.cpp
@@ -0,0 +1,78 @@
+/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp" + +#include <memory> +#include <vector> + +#include "aidge/utils/Types.h" +#include "aidge/operator/ReduceMean.hpp" +#include "aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp" + +void Aidge::ReduceMeanImpl_cpu::forward() { + const ReduceMean_Op& op_ = dynamic_cast<const ReduceMean_Op&>(mOp); + // Find the correct kernel type + auto kernelFunc = Registrar<ReduceMeanImplForward_cpu>::create({ + op_.getInput(0)->dataType(), + op_.getOutput(0)->dataType()}); + + // Call kernel + kernelFunc(op_.getStaticAttributes(), + op_.getInput(0)->dims(), + op_.getInput(0)->getImpl()->rawPtr(), + op_.getOutput(0)->getImpl()->rawPtr()); +} + +// void Aidge::ReduceMeanImpl1D_cpu::forward() { + +// // Find the correct kernel type +// auto kernelFunc = +// Registrar<ReduceMeanImpl1DForward_cpu>::create({ +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + +// // Call kernel +// kernelFunc(dynamic_cast<const ReduceMean_Op<1>&>(mOp).getStaticAttributes(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +// } + +// void Aidge::ReduceMeanImpl2D_cpu::forward() { + +// // Find the correct kernel type +// auto kernelFunc = +// Registrar<ReduceMeanImpl2DForward_cpu>::create({ +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + +// // Call kernel +// kernelFunc(dynamic_cast<const ReduceMean_Op<2>&>(mOp).getStaticAttributes(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +// } + +// void Aidge::ReduceMeanImpl3D_cpu::forward() { + +// // Find the correct kernel type +// auto kernelFunc = +// Registrar<ReduceMeanImpl3DForward_cpu>::create({ +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + +// // Call kernel +// kernelFunc(dynamic_cast<const ReduceMean_Op<3>&>(mOp).getStaticAttributes(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), +// std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), +// std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +// } \ No newline at end of file diff --git a/src/operator/ReshapeImpl.cpp b/src/operator/ReshapeImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..69c1c3135ce9f32d536bfd2c41b90eb55f7d8986 --- /dev/null +++ b/src/operator/ReshapeImpl.cpp @@ -0,0 +1,39 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the 
Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
+
+#include "aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Reshape.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+Aidge::Elts_t Aidge::ReshapeImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return Elts_t::DataElts(0);
+}
+
+void Aidge::ReshapeImpl_cpu::forward() {
+    const Reshape_Op& op_ = static_cast<const Reshape_Op&>(mOp);
+    AIDGE_ASSERT(op_.getInput(0)->size() == op_.getOutput(0)->size(),
+                 "input must have the same overall size as the requested shape");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<ReshapeImplForward_cpu>::create({
+        op_.getInput(0)->dataType(),
+        op_.getOutput(0)->dataType()});
+
+    // Call kernel
+    kernelFunc(op_.getInput(0)->size(),
+               op_.getInput(0)->getImpl()->rawPtr(),
+               op_.getOutput(0)->getImpl()->rawPtr());
+}
diff --git a/src/operator/ScalingImpl.cpp b/src/operator/ScalingImpl.cpp
index 6b9aab31a9d61d2d7a5ff89961de3fa6a2b5ebd2..d0b58702c73f01fb62114d335f5c2342908542ea 100644
--- a/src/operator/ScalingImpl.cpp
+++ b/src/operator/ScalingImpl.cpp
@@ -21,9 +21,9 @@
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <vector>

-Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::Elts_t Aidge::ScalingImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
-    return 0;
+    return Elts_t::DataElts(0);
 }

 void Aidge::ScalingImpl_cpu::forward() {
diff --git a/src/operator/SigmoidImpl.cpp b/src/operator/SigmoidImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..dd7ec26cb36777f79d382c815b60d2381544a0bd
--- /dev/null
+++ b/src/operator/SigmoidImpl.cpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <cassert> +#include <chrono> // std::chrono::milliseconds +#include <numeric> // std::accumulate +#include <thread> // std::this_thread::sleep_for +#include <vector> + +#include "aidge/operator/Sigmoid.hpp" +#include "aidge/utils/Types.h" +#include "aidge/backend/cpu/data/GetCPUPtr.h" + +#include "aidge/backend/cpu/operator/SigmoidImpl.hpp" +#include "aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp" + +Aidge::Elts_t Aidge::SigmoidImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return Elts_t::DataElts(0); +} + +void Aidge::SigmoidImpl_cpu::forward() { + assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0"); + + // Find the correct kernel type + auto kernelFunc = Registrar<SigmoidImplForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // Call kernel + kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(), + getCPUPtr(mOp.getRawInput(0)), + getCPUPtr(mOp.getRawOutput(0))); +} diff --git a/src/operator/SliceImpl.cpp b/src/operator/SliceImpl.cpp index b60bbe60188f416f28ff2562875dce6e5ee15bd5..47b13c4694cea22421811c889b5627e9f1362ac0 100644 --- a/src/operator/SliceImpl.cpp +++ b/src/operator/SliceImpl.cpp @@ -22,42 +22,6 @@ #include <cassert> #include <tuple> -Aidge::NbElts_t Aidge::SliceImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t /*inputIdx*/) const { - assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "requires valid input"); - - // Requires the whole tensors - const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(); - - return std::accumulate(inputDims.begin(), inputDims.end(), static_cast<NbElts_t>(1), - std::multiplies<NbElts_t>()); -} - -Aidge::NbElts_t Aidge::SliceImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { return 0; } - -Aidge::NbElts_t Aidge::SliceImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, - const std::vector<Aidge::DimSize_t>& inputsSize) const { - (void)outputIdx; - (void)inputsSize; - const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(); - return std::accumulate(outputDims.begin(), outputDims.end(), static_cast<NbElts_t>(1), - std::multiplies<NbElts_t>()); -} - -Aidge::NbElts_t Aidge::SliceImpl_cpu::getNbConsumedData(const Aidge::IOIndex_t /*inputIdx*/) const { - return mNbConsumedData[0]; -} - -Aidge::NbElts_t Aidge::SliceImpl_cpu::getNbProducedData(const Aidge::IOIndex_t /*outputIdx*/) const { - return mNbProducedData[0]; -} - -void Aidge::SliceImpl_cpu::updateConsummerProducer() { - // each input is consumed by the minimum amount for a forward pass - mNbConsumedData[0] += getNbRequiredData(0); - - mNbProducedData[0] += getRequiredMemory(0, {}); -} - void Aidge::SliceImpl_cpu::forward() { // FIXME: uncomment the following code once memory handling will work assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0"); @@ -79,4 +43,4 @@ void Aidge::SliceImpl_cpu::forward() { mNbProducedData[0] += getRequiredMemory(0, {}); } -void Aidge::SliceImpl_cpu::backward() { printf("Not implemented yet.\n"); } \ No newline at end of file +void Aidge::SliceImpl_cpu::backward() { fmt::print("Not implemented yet.\n"); } diff --git a/src/operator/SoftmaxImpl.cpp 
b/src/operator/SoftmaxImpl.cpp
index c3086d8f9067996b9b0a8546b6deb3e281c777b4..240267613e557c20edcc00e81f4bf20d17d9962f 100644
--- a/src/operator/SoftmaxImpl.cpp
+++ b/src/operator/SoftmaxImpl.cpp
@@ -22,9 +22,9 @@
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp"

-Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::Elts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
-    return 0;
+    return Elts_t::DataElts(0);
 }

 void Aidge::SoftmaxImpl_cpu::forward() {
@@ -36,13 +36,12 @@ void Aidge::SoftmaxImpl_cpu::forward() {
         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});

-    DimSize_t batchSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0];
-    DimSize_t channelSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[1];
-    DimSize_t featureSize = (std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size()/batchSize)/channelSize;
+    Softmax_Op::Attrs attr = dynamic_cast<const Softmax_Op&>(mOp).getStaticAttributes();
+    const int axisIdx = std::get<0>(attr);
+
     // Call kernel
-    kernelFunc(batchSize,
-               channelSize,
-               featureSize,
-               getCPUPtr(mOp.getRawInput(0)),
-               getCPUPtr(mOp.getRawOutput(0)));
+    kernelFunc(axisIdx,
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
 }
diff --git a/src/operator/SqrtImpl.cpp b/src/operator/SqrtImpl.cpp
index 2766e8ae21738775aadad86629a99d0a180e537e..edb8858fc4ac07fa5725d24688b22d64134afb0e 100644
--- a/src/operator/SqrtImpl.cpp
+++ b/src/operator/SqrtImpl.cpp
@@ -9,34 +9,54 @@
 *
 ********************************************************************************/

-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include <memory>
 #include <vector>

+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Sqrt.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/backend/cpu/operator/SqrtImpl.hpp"
 #include "aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp"

-Aidge::NbElts_t Aidge::SqrtImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::Elts_t Aidge::SqrtImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
-    return 0;
+    return Elts_t::DataElts(0);
 }

 void Aidge::SqrtImpl_cpu::forward() {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+    std::shared_ptr<Tensor> in0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0));
+    std::shared_ptr<Tensor> out0 = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0));
+    AIDGE_ASSERT(in0, "missing input #0");

     // Find the correct kernel type
     auto kernelFunc = Registrar<SqrtImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+        in0->dataType(),
+        out0->dataType()});
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+    kernelFunc(in0->size(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawOutput(0)));
+}
+
+void Aidge::SqrtImpl_cpu::backward() {
+    // reversing in and out Data for backprop
+    const Sqrt_Op& op_ = dynamic_cast<const Sqrt_Op&>(mOp);
+    std::shared_ptr<Tensor> out0grad = op_.getOutput(0)->grad();
+    std::shared_ptr<Tensor> in0grad = op_.getInput(0)->grad();
+    AIDGE_ASSERT(out0grad, "missing output #0 gradient");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<SqrtImplBackward_cpu>::create({
+        out0grad->dataType(),
+        in0grad->dataType()});
+
+    // Call kernel
+    kernelFunc(out0grad->size(),
+        getCPUPtr(out0grad),
+        getCPUPtr(in0grad));
 }
\ No newline at end of file
diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
index 038a1154182ea8f359cf1b485c3de251ffbbaed5..ffddb59ee3373c4a0a6c2653747744a43fd471d9 100644
--- a/src/operator/SubImpl.cpp
+++ b/src/operator/SubImpl.cpp
@@ -17,14 +17,15 @@
 #include "aidge/operator/Sub.hpp"
 #include "aidge/utils/Types.h"

+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp"

-Aidge::NbElts_t Aidge::SubImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::Elts_t Aidge::SubImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
-    return 0;
+    return Elts_t::DataElts(0);
 }

 void Aidge::SubImpl_cpu::forward() {
@@ -35,9 +36,15 @@ void Aidge::SubImpl_cpu::forward() {
         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});

+    const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims());
+    const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims());
+
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(),
+    kernelFunc(inputDims0,
+        inputDims1,
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawInput(1)),
         getCPUPtr(mOp.getRawOutput(0)));
diff --git a/src/operator/TanhImpl.cpp b/src/operator/TanhImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..44e180739ed86e25d4be6d0beb693f73bdadbf35
--- /dev/null
+++ b/src/operator/TanhImpl.cpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <cassert> +#include <chrono> // std::chrono::milliseconds +#include <numeric> // std::accumulate +#include <thread> // std::this_thread::sleep_for +#include <vector> + +#include "aidge/operator/Tanh.hpp" +#include "aidge/utils/Types.h" +#include "aidge/backend/cpu/data/GetCPUPtr.h" + +#include "aidge/backend/cpu/operator/TanhImpl.hpp" +#include "aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp" + +Aidge::Elts_t Aidge::TanhImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { + // this implementation can be in-place + return Elts_t::DataElts(0); +} + +void Aidge::TanhImpl_cpu::forward() { + assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0"); + + // Find the correct kernel type + auto kernelFunc = Registrar<TanhImplForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // Call kernel + kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(), + getCPUPtr(mOp.getRawInput(0)), + getCPUPtr(mOp.getRawOutput(0))); +} diff --git a/src/operator/TransposeImpl.cpp b/src/operator/TransposeImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..710e67b4f5aaa5261a111a8e131a0dd740694a4b --- /dev/null +++ b/src/operator/TransposeImpl.cpp @@ -0,0 +1,102 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <cassert> +#include <chrono> // std::chrono::milliseconds +#include <numeric> // std::accumulate +#include <thread> // std::this_thread::sleep_for +#include <vector> + +#include "aidge/utils/Types.h" +#include "aidge/operator/Transpose.hpp" + +#include "aidge/backend/cpu/operator/TransposeImpl.hpp" +#include "aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp" + +void Aidge::TransposeImpl2D_cpu::forward() { + // Find the correct kernel type + auto kernelFunc = + Registrar<TransposeImpl2DForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // auto attr = dynamic_cast<const Transpose_Op<2>&>(mOp).getStaticAttributes(); + // std::vector<DimIdx_t> outDimsOrder; + // outDimsOrder.reserve(std::get<0>(attr).size()); // Reserve space for the new vector + + // std::transform(std::get<0>(attr).begin(), std::get<0>(attr).end(), std::back_inserter(outDimsOrder), + // [](int intValue) { return static_cast<DimIdx_t>(intValue); }); + + // Call kernel + kernelFunc(dynamic_cast<const Transpose_Op<2>&>(mOp).getStaticAttributes(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +} + +void Aidge::TransposeImpl3D_cpu::forward() { + // Find the correct kernel type + auto kernelFunc = + Registrar<TransposeImpl3DForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // Call kernel + kernelFunc(dynamic_cast<const Transpose_Op<3>&>(mOp).getStaticAttributes(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +} + +void Aidge::TransposeImpl4D_cpu::forward() { + // Find the correct kernel type + auto kernelFunc = + Registrar<TransposeImpl4DForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // Call kernel + kernelFunc(dynamic_cast<const Transpose_Op<4>&>(mOp).getStaticAttributes(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()); +} +void Aidge::TransposeImpl5D_cpu::forward() { + // Find the correct kernel type + auto kernelFunc = + Registrar<TransposeImpl5DForward_cpu>::create({ + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()}); + + // Call kernel + kernelFunc(dynamic_cast<const Transpose_Op<5>&>(mOp).getStaticAttributes(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(), + std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(), + 
               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
+void Aidge::TransposeImpl6D_cpu::forward() {
+    // Find the correct kernel type
+    auto kernelFunc =
+        Registrar<TransposeImpl6DForward_cpu>::create({
+            std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+            std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Transpose_Op<6>&>(mOp).getStaticAttributes(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
\ No newline at end of file
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
index b75c49077f190ed61486fea8eaa18152423a73ed..31fbed4c090f5e4848df12f2bc2ccd36e3aedf9d 100644
--- a/unit_tests/data/Test_TensorImpl.cpp
+++ b/unit_tests/data/Test_TensorImpl.cpp
@@ -9,51 +9,184 @@
 *
 ********************************************************************************/

-#include <array>
-
 #include <catch2/catch_test_macros.hpp>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::uint16_t
+#include <chrono>
+#include <iostream>
+#include <memory>
+#include <numeric>   // std::accumulate
+#include <random>    // std::random_device, std::mt19937, std::uniform_real_distribution

 #include "aidge/data/Tensor.hpp"
 #include "aidge/backend/cpu/data/TensorImpl.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/backend/cpu/operator/AddImpl.hpp"

-using namespace Aidge;
+namespace Aidge {

-TEST_CASE("Tensor creation") {
-  SECTION("from const array") {
-    Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+TEST_CASE("Test addition of Tensors","[TensorImpl][Add]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0.1 and 1.1
+    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
+    std::uniform_int_distribution<int> boolDist(0,1);

-    Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    // Create Add Operator
+    std::shared_ptr<Node> myAdd = Add(2);
+    auto op = std::static_pointer_cast<OperatorTensor>(myAdd->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");

-    Tensor xFloat =
-        Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
+    // Create 2 input Tensors
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0,T0);
+    T0->setDataType(DataType::Float32);
+    T0->setBackend("cpu");
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+    T1->setDataType(DataType::Float32);
+    T1->setBackend("cpu");

-    SECTION("Tensor features") {
-      REQUIRE(x.nbDims() == 3);
-      REQUIRE(x.dims()[0] == 2);
-      REQUIRE(x.dims()[1] == 2);
-      REQUIRE(x.dims()[2] == 2);
-      REQUIRE(x.size() == 8);
-    }
+    // Create result Tensor
+    Tensor Tres{};
+    Tres.setDataType(DataType::Float32);
+    Tres.setBackend("cpu");

-    SECTION("Access to array") {
-      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
-      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
-    }
+    // To measure execution time of the Tensor addition
+    std::chrono::time_point<std::chrono::system_clock> start;
+
std::chrono::time_point<std::chrono::system_clock> end; + std::chrono::duration<double, std::micro> duration{}; - SECTION("get function") { - REQUIRE(x.get<int>({0, 0, 0}) == 1); - REQUIRE(x.get<int>({0, 0, 1}) == 2); - REQUIRE(x.get<int>({0, 1, 1}) == 4); - REQUIRE(x.get<int>({1, 1, 0}) == 7); - x.set<int>({1, 1, 1}, 36); - REQUIRE(x.get<int>({1, 1, 1}) == 36); - } + std::size_t number_of_operation = 0; + + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + // handle dimensions, replace some dimensions with '1' to get broadcasting + constexpr std::size_t nbDims = 4; + std::vector<std::size_t> dims; + for (std::size_t i = 0; i < nbDims; ++i) { + dims.push_back(dimSizeDist(gen)); + } + std::vector<std::size_t> dims0 = dims; + std::vector<std::size_t> dims1 = dims; + std::vector<std::size_t> dimsOut = dims; + for (std::size_t i = 0; i < nbDims; ++i) { + if (boolDist(gen)) { + dims0[i] = 1; + } + if (boolDist(gen)) { + dims1[i] = 1; + } + dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i]; + } + + // create arrays and fill them with random values + float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]]; + float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]]; + float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]]; + + for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) { + array0[i] = valueDist(gen); + } + for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) { + array1[i] = valueDist(gen); + } + + // compute true result + const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1}; + const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1}; + for (std::size_t a = 0; a < dimsOut[0]; ++a) { + for (std::size_t b = 0; b < dimsOut[1]; ++b) { + const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0) + + strides0[1] * ((dims0[1] > 1) ? b : 0); + const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0) + + strides1[1] * ((dims1[1] > 1) ? b : 0); + for (std::size_t c = 0; c < dimsOut[2]; ++c) { + const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a)); + for (std::size_t d = 0; d < dimsOut[3]; ++d) { + std::size_t idx0 = idx0_0 + + strides0[2] * ((dims0[2] > 1) ? c : 0) + + ((dims0[3] > 1) ? d : 0); + std::size_t idx1 = idx1_0 + + strides1[2] * ((dims1[2] > 1) ? c : 0) + + ((dims1[3] > 1) ? 
d : 0);
+                        result[idx_out + d] = array0[idx0] + array1[idx1];
+                        // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " - " << array1[idx1] << " -> " << idx_out + d << std::endl;
+                    }
+                }
+            }
+        }
+
+        // conversion to Aidge::Tensors
+        // input0
+        T0->resize(dims0);
+        T0->getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+        // input1
+        T1->resize(dims1);
+        T1->getImpl() -> setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]);
+
+        // results
+        Tres.resize(dimsOut);
+        Tres.getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);

-    SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); }
+        Tensor T2 = *T0 + *T1;
+        REQUIRE(T2 == Tres);

-    SECTION("Tensor (in)equality") {
-      REQUIRE(x == xCopy);
-      REQUIRE_FALSE(x == xFloat);
+        // no implementation
+        Tensor T3(T1->dims());
+        REQUIRE_THROWS(*T0 + T3);
+
+        // // wrong backend
+        // static Registrar<Add_Op> registrarAddImpl_custom("custom", [](const Add_Op& op) { return std::make_unique<AddImpl_cpu>(op); } );
+        // static Registrar<Tensor> registrarTensorImpl_custom_Int32({"custom", DataType::Int32},
+        //             [] (DeviceIdx_t device, std::vector<DimSize_t> dims) {
+        //                 return std::make_shared<TensorImpl_cpu<int>>(device, dims);
+        //             }
+        //         );
+        // T1.setBackend("custom");
+        // REQUIRE_THROWS(T0 + T1);
+
+        // wrong datatype
+        Tensor T4(T1->dims());
+        T4.setDataType(DataType::Float64);
+        REQUIRE_THROWS(*T0 + T4);
     }
-  }
 }
+
+TEST_CASE("Test subtraction of Tensors","[TensorImpl][Sub]") {
+    Tensor T0 = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    Tensor T1 = Array3D<int, 2, 2, 2>{{{{7, 1}, {3, 7}}, {{54, 0}, {7, 12}}}};
+    Tensor T2 = T0 - T1;
+    T2.print();
+    REQUIRE(T2 == Tensor(Array3D<int, 2, 2, 2>{{{{-6,1},{0,-3}},{{-49,6},{0,-4}}}}));
+
+    Tensor T3(T1.dims());
+    REQUIRE_THROWS(T0 - T3);
+}
+
+TEST_CASE("Test multiplication of Tensors","[TensorImpl][Mul]") {
+    Tensor T0 = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    Tensor T1 = Array3D<int, 2, 2, 2>{{{{7, 2}, {3, 7}}, {{5, 6}, {7, 8}}}};
+    Tensor T2 = T0 * T1;
+    T2.print();
+    REQUIRE(T2 == Tensor(Array3D<int, 2, 2, 2>{{{{7,4},{9,28}},{{25,36},{49,64}}}}));
+
+    Tensor T3(T1.dims());
+    REQUIRE_THROWS(T0 * T3);
+}
+
+TEST_CASE("Test division of Tensors","[TensorImpl][Div]") {
+    Tensor T0 = Array3D<int, 2, 2, 2>{{{{7,4},{9,28}},{{25,36},{49,64}}}};
+    Tensor T1 = Array3D<int, 2, 2, 2>{{{{7, 2}, {3, 7}}, {{5, 6}, {7, 8}}}};
+    Tensor T2 = T0 / T1;
+    T2.print();
+    REQUIRE(T2 == Tensor(Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}));
+
+    Tensor T3(T1.dims());
+    REQUIRE_THROWS(T0 / T3);
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_AddImpl.cpp b/unit_tests/operator/Test_AddImpl.cpp
index 740b1a5322b55e2347d93ed2e515358080a108a5..e2e7051afda5e7f72c3142987587179bc759f1e8 100644
--- a/unit_tests/operator/Test_AddImpl.cpp
+++ b/unit_tests/operator/Test_AddImpl.cpp
@@ -117,4 +117,63 @@ TEST_CASE("[cpu/operator] Add(forward)", "[Add][CPU]") {

         REQUIRE(*op->getOutput(0) == *expectedOutput);
     }
+
+    SECTION("Broadcasting") {
+        std::shared_ptr<Tensor> input_0 = std::make_shared<Tensor>(Array4D<int,3,1,3,2> {
+        {                                       //
+            {                                   //
+                {{0, 1},{2, 3},{4, 5}}          //
+            },                                  //
+            {                                   //
+                {{6, 7},{8, 9},{10, 11}}        //
+            },                                  //
+            {                                   //
+                {{12, 13},{14, 15},{16, 17}}    //
+            }                                   //
+        }                                       //
+        });                                     //
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
+        {                                       //
+            {                                   //
+                {{20, 21},{22, 23},{24, 25}},   //
+                {{26, 27},{28, 29},{30, 31}},   //
+                {{32, 33},{34, 35},{36, 37}}    //
+            }                                   //
+        }                                       //
+        });                                     //
+
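+        // Shapes (3,1,3,2), (1,3,3,2) and (2) broadcast together to the
+        // common output shape (3,3,3,2) checked below.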
std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<int,2> {{100,200}}); + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> { + { // + { // + {{ 120, 222},{ 124, 226},{ 128, 230}}, // + {{ 126, 228},{ 130, 232},{ 134, 236}}, // + {{ 132, 234},{ 136, 238},{ 140, 242}} // + }, // + { // + {{ 126, 228},{ 130, 232},{ 134, 236}}, // + {{ 132, 234},{ 136, 238},{ 140, 242}}, // + {{ 138, 240},{ 142, 244},{ 146, 248}} // + }, // + { // + {{ 132, 234},{ 136, 238},{140, 242}}, // + {{ 138, 240},{ 142, 244},{146, 248}}, // + {{ 144, 246},{ 148, 250},{152, 254}} // + } // + } // + }); // + + std::shared_ptr<Node> myAdd = Add(3); + auto op = std::static_pointer_cast<OperatorTensor>(myAdd -> getOperator()); + op->associateInput(0, input_0); + op->associateInput(1, input_1); + op->associateInput(2, input_2); + op->setDataType(DataType::Int32); + op->setBackend("cpu"); + op->computeOutputDims(); + myAdd->forward(); + op->getOutput(0)->print(); + expectedOutput->print(); + REQUIRE(*op->getOutput(0) == *expectedOutput); + } } \ No newline at end of file diff --git a/unit_tests/operator/Test_BatchNormImpl.cpp b/unit_tests/operator/Test_BatchNormImpl.cpp index a1a749d805a45361c671544f5c94aed3421e557d..8c8c1dff3d74c2fce97abd8c3d88bf9840706ee4 100644 --- a/unit_tests/operator/Test_BatchNormImpl.cpp +++ b/unit_tests/operator/Test_BatchNormImpl.cpp @@ -14,6 +14,7 @@ #include "aidge/data/Tensor.hpp" #include "aidge/operator/BatchNorm.hpp" +#include "aidge/scheduler/SequentialScheduler.hpp" #include "aidge/backend/cpu.hpp" diff --git a/unit_tests/operator/Test_DivImpl.cpp b/unit_tests/operator/Test_DivImpl.cpp index 16f69db964a092f6be87e5d983ba00694e8006f8..a0ed261fe9622f36a9bb2e46c4796ae7f6f8f5e6 100644 --- a/unit_tests/operator/Test_DivImpl.cpp +++ b/unit_tests/operator/Test_DivImpl.cpp @@ -10,202 +10,307 @@ ********************************************************************************/ #include <catch2/catch_test_macros.hpp> +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t +#include <chrono> +#include <iostream> +#include <memory> +#include <numeric> // std::accumulate +#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution #include "aidge/data/Tensor.hpp" #include "aidge/operator/Div.hpp" +#include "aidge/utils/TensorUtils.hpp" -#include "aidge/backend/cpu.hpp" +namespace Aidge { -#include <memory> +TEST_CASE("[cpu/operator] Div", "[Div][CPU]") { + constexpr std::uint16_t NBTRIALS = 10; + // Create a random number generator + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0 and 1 + std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10)); + std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5)); + std::uniform_int_distribution<int> boolDist(0,1); -using namespace Aidge; + // Create MatMul Operator + std::shared_ptr<Node> myDiv = Div(); + auto op = std::static_pointer_cast<OperatorTensor>(myDiv-> getOperator()); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + + // Create 2 input Tensors + std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + op->associateInput(0,T0); + T0->setDataType(DataType::Float32); + T0->setBackend("cpu"); + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(); + op -> associateInput(1,T1); + T1->setDataType(DataType::Float32); + T1->setBackend("cpu"); + + // Create results Tensor + 
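+    // (Tres is reused by every section below: each trial resizes it and points
+    // it at a freshly computed reference buffer through setRawPtr)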
std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(); + Tres->setDataType(DataType::Float32); + Tres->setBackend("cpu"); + + // To measure execution time of 'MatMul_Op::forward()' member function call + std::chrono::time_point<std::chrono::system_clock> start; + std::chrono::time_point<std::chrono::system_clock> end; + std::chrono::duration<double, std::micro> duration{}; + + SECTION("DivImpl_cpu::forward()") { + SECTION("Scalar / Scalar") { -TEST_CASE("[cpu/operator] Div(forward)", "[Div][CPU]") { - SECTION("2D Tensor by Singleton") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {0.07607108, 0.44075000}, - {0.19494885, 0.20071143} - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array2D<float,1,1>{{0.5}}); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {0.15214217, 0.88150001}, - {0.38989770, 0.40142286} - } - }); - - std::shared_ptr<Node> myDiv = Div(); - auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator()); - op -> associateInput(0, input_1); - op -> associateInput(1, input_2); - op -> setDataType(DataType::Float32); - op -> setBackend("cpu"); - op -> computeOutputDims(); - myDiv -> forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 4; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); } + SECTION("Scalar / +1-D Tensor") { - } + } + SECTION("+1-D Tensor / +1-D Tensor - same dimensions") { + std::size_t number_of_operation = 0; - SECTION("2D Tensors") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {0.79780143, 0.49322051}, - {0.84239346, 0.83737719} - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array2D<float,2,2>{ - { - {0.59088874, 0.78858775}, - {0.42879432, 0.17615074} - } - }); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {1.35017204, 0.62544787}, - {1.96456301, 4.75375366} + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + const std::size_t nbDims = nbDimsDist(gen); + std::vector<std::size_t> dims; + for (std::size_t i = 0; i < nbDims; ++i) { + dims.push_back(dimSizeDist(gen)); + } + const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>()); + number_of_operation += nb_elements; + + // without broadcasting + float* array0 = new float[nb_elements]; + float* array1 = new float[nb_elements]; + float* result = new float[nb_elements]; + + for (std::size_t i = 0; i < nb_elements; ++i) { + array0[i] = valueDist(gen); + array1[i] = valueDist(gen); + result[i] = array0[i] / array1[i]; + } + + // input0 + T0->resize(dims); + T0 -> getImpl() -> setRawPtr(array0, nb_elements); + + // input1 + T1->resize(dims); + T1 -> getImpl() -> setRawPtr(array1, nb_elements); + + // results + Tres->resize(dims); + Tres -> getImpl() -> setRawPtr(result, nb_elements); + + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myDiv->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] array1; + delete[] result; + + // with broadcasting } - }); - - std::shared_ptr<Node> myDiv = Div(); - auto op = 
std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator()); - op -> associateInput(0, input_1); - op -> associateInput(1, input_2); - op -> setDataType(DataType::Float32); - op -> setBackend("cpu"); - op -> computeOutputDims(); - myDiv->forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 4; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; + std::cout << "total time: " << duration.count() << "μs" << std::endl; } - } + SECTION("+1-D Tensor / +1-D Tensor - broadcasting") { + std::size_t number_of_operation = 0; - SECTION("3D Tensor by 1D Tensor") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> { - { - {{0.24180168, 0.44319558, 0.06437260}, - {0.21270001, 0.34570599, 0.44151264}}, + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + // handle dimensions, replace some dimensions with '1' to get broadcasting + constexpr std::size_t nbDims = 4; + std::vector<std::size_t> dims; + for (std::size_t i = 0; i < nbDims; ++i) { + dims.push_back(dimSizeDist(gen)); + } + std::vector<std::size_t> dims0 = dims; + std::vector<std::size_t> dims1 = dims; + std::vector<std::size_t> dimsOut = dims; + for (std::size_t i = 0; i < nbDims; ++i) { + if (boolDist(gen)) { + dims0[i] = 1; + } + if (boolDist(gen)) { + dims1[i] = 1; + } + dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i]; + } - {{0.62294692, 0.98043168, 0.18628585}, - {0.33591706, 0.03432965, 0.32130069}} - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<float,3>{ - {0.63475525, 0.58620811, 0.69340748} - }); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> { - { - {{0.38093686, 0.75603795, 0.09283517}, - {0.33508980, 0.58973253, 0.63672900}}, - - {{0.98139703, 1.67249763, 0.26865280}, - {0.52920723, 0.05856223, 0.46336490}} + // create arrays and fill them with random values + float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]]; + float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]]; + float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]]; + + for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) { + array0[i] = valueDist(gen); + } + for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) { + array1[i] = valueDist(gen); + } + + // compute true result + const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1}; + const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1}; + for (std::size_t a = 0; a < dimsOut[0]; ++a) { + for (std::size_t b = 0; b < dimsOut[1]; ++b) { + const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0) + + strides0[1] * ((dims0[1] > 1) ? b : 0); + const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0) + + strides1[1] * ((dims1[1] > 1) ? b : 0); + for (std::size_t c = 0; c < dimsOut[2]; ++c) { + const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a)); + for (std::size_t d = 0; d < dimsOut[3]; ++d) { + std::size_t idx0 = idx0_0 + + strides0[2] * ((dims0[2] > 1) ? c : 0) + + ((dims0[3] > 1) ? d : 0); + std::size_t idx1 = idx1_0 + + strides1[2] * ((dims1[2] > 1) ? c : 0) + + ((dims1[3] > 1) ? 
d : 0); + result[idx_out + d] = array0[idx0] / array1[idx1]; + // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " / " << array1[idx1] << " -> " << idx_out + d << std::endl; + } + } + } + } + + // conversion to Aidge::Tensors + // input0 + T0->resize(dims0); + T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]); + + // input1 + T1->resize(dims1); + T1 -> getImpl() -> setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]); + + // results + Tres->resize(dimsOut); + Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]); + + // compute result + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myDiv->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + // comparison between truth and computed result + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] array1; + delete[] result; + + const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>()); + number_of_operation += nb_elements; } - }); - - std::shared_ptr<Node> myDiv = Div(); - auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator()); - op -> associateInput(0, input_1); - op -> associateInput(1, input_2); - op -> setDataType(DataType::Float32); - op -> setBackend("cpu"); - op -> computeOutputDims(); - myDiv->forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 12; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; + std::cout << "total time: " << duration.count() << "μs" << std::endl; } + SECTION("+1-D Tensor / 1-D Tensor") { + std::size_t number_of_operation = 0; + std::uniform_int_distribution<std::size_t> nbRemovedDimsDist(std::size_t(1), std::size_t(3)); - } + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + // handle dimensions + constexpr std::size_t nbDims = 4; + std::vector<std::size_t> dims0(4); + for (std::size_t i = 0; i < nbDims; ++i) { + dims0[i] = dimSizeDist(gen); + } + std::vector<std::size_t> dimsOut = dims0; + std::vector<std::size_t> dims1 = dims0; + for (std::size_t i = 0; i < nbDims; ++i) { + if (boolDist(gen)) { + dims1[i] = 1; + } + } + dims1.erase(dims1.cbegin(), dims1.cbegin() + nbRemovedDimsDist(gen)); - SECTION("4D Tensor") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> { - { - { - {{0.25675946, 0.36265653, 0.22386390}, - {0.30483031, 0.97449398, 0.73871714}, - {0.36169255, 0.04510212, 0.27525920}}, - - {{0.73255682, 0.03885978, 0.24181491}, - {0.14465559, 0.86070061, 0.88848090}, - {0.74408931, 0.87412918, 0.19800508}}, - - {{0.43551809, 0.73437816, 0.37513995}, - {0.25414777, 0.06396711, 0.98708153}, - {0.02140611, 0.84974837, 0.62108254}} - }, - { - {{0.86227137, 0.69357753, 0.41814715}, - {0.76048166, 0.46306920, 0.05907208}, - {0.76625377, 0.91793799, 0.92988223}}, - - {{0.34362513, 0.85009813, 0.21107805}, - {0.65575773, 0.38140792, 0.48540717}, - {0.10045588, 0.85803932, 0.23778951}}, - - {{0.30316389, 0.04176688, 0.17290735}, - {0.07942408, 0.48647392, 0.39440966}, - {0.26543915, 0.92589515, 0.83948994}} + // create arrays and fill them with random values + 
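+                // (each input buffer is sized from its own dims0/dims1, whose
+                // axes may have been masked to 1 just above; only `result` uses
+                // the broadcast output shape dimsOut)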
float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]]; + std::size_t array1_size = std::accumulate(dims1.cbegin(), dims1.cend(), std::size_t(1), std::multiplies<std::size_t>()); + float* array1 = new float[array1_size]; + float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]]; + + for (std::size_t i = 0; i < (dims0[0]*dims0[1]*dims0[2]*dims0[3]); ++i) { + array0[i] = valueDist(gen); } - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array2D<float,1,1>{{3.0}}); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<float,2,3,3,3> { - { - { - {{0.08558649, 0.12088551, 0.07462130}, - {0.10161010, 0.32483134, 0.24623905}, - {0.12056419, 0.01503404, 0.09175307}}, - - {{0.24418561, 0.01295326, 0.08060497}, - {0.04821853, 0.28690019, 0.29616031}, - {0.24802977, 0.29137638, 0.06600169}}, - - {{0.14517270, 0.24479271, 0.12504666}, - {0.08471593, 0.02132237, 0.32902718}, - {0.00713537, 0.28324947, 0.20702751}} - }, - { - {{0.28742379, 0.23119251, 0.13938238}, - {0.25349388, 0.15435641, 0.01969069}, - {0.25541791, 0.30597934, 0.30996075}}, - - {{0.11454171, 0.28336605, 0.07035935}, - {0.21858591, 0.12713598, 0.16180240}, - {0.03348529, 0.28601310, 0.07926317}}, - - {{0.10105463, 0.01392229, 0.05763578}, - {0.02647469, 0.16215797, 0.13146989}, - {0.08847972, 0.30863172, 0.27982998}} + for (std::size_t i = 0; i < array1_size; ++i) { + array1[i] = valueDist(gen); } + + // compute true result + auto dims1_tmp = dims1; + dims1_tmp.insert(dims1_tmp.cbegin(), 4 - dims1_tmp.size(), std::size_t(1)); + + const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1}; + const std::size_t strides1[nbDims] = {dims1_tmp[1]*dims1_tmp[2]*dims1_tmp[3], dims1_tmp[2]*dims1_tmp[3], dims1_tmp[3], 1}; + for (std::size_t a = 0; a < dimsOut[0]; ++a) { + for (std::size_t b = 0; b < dimsOut[1]; ++b) { + const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0) + + strides0[1] * ((dims0[1] > 1) ? b : 0); + const std::size_t idx1_0 = strides1[0] * ((dims1_tmp[0] > 1) ? a : 0) + + strides1[1] * ((dims1_tmp[1] > 1) ? b : 0); + for (std::size_t c = 0; c < dimsOut[2]; ++c) { + const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a)); + for (std::size_t d = 0; d < dimsOut[3]; ++d) { + std::size_t idx0 = idx0_0 + + strides0[2] * ((dims0[2] > 1) ? c : 0) + + ((dims0[3] > 1) ? d : 0); + std::size_t idx1 = idx1_0 + + strides1[2] * ((dims1_tmp[2] > 1) ? c : 0) + + ((dims1_tmp[3] > 1) ? 
d : 0); + result[idx_out + d] = array0[idx0] / array1[idx1]; + // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " / " << array1[idx1] << " -> " << idx_out + d << std::endl; + } + } + } + } + + // conversion to Aidge::Tensors + // input0 + T0->resize(dims0); + T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]); + + // input1 + T1->resize(dims1); + T1 -> getImpl() -> setRawPtr(array1, array1_size); + + // results + Tres->resize(dimsOut); + Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]); + + // compute result + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myDiv->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + // comparison between truth and computed result + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] array1; + delete[] result; + + const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>()); + number_of_operation += nb_elements; } - }); - - std::shared_ptr<Node> myDiv = Div(); - auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator()); - op -> associateInput(0, input_1); - op -> associateInput(1, input_2); - op -> setDataType(DataType::Float32); - op -> setBackend("cpu"); - op -> computeOutputDims(); - myDiv->forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 54; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + + std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; + std::cout << "total time: " << duration.count() << "μs" << std::endl; } } -} \ No newline at end of file +} +} // namespace Aidge diff --git a/unit_tests/operator/Test_ErfImpl.cpp b/unit_tests/operator/Test_ErfImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..db2ae0437742d1cd1b298d62f5bdd7241b755ec4 --- /dev/null +++ b/unit_tests/operator/Test_ErfImpl.cpp @@ -0,0 +1,90 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Erf.hpp" + +#include "aidge/backend/cpu.hpp" + +#include <memory> + + +using namespace Aidge; + +TEST_CASE("[cpu/operator] Erf(forward)") { + SECTION("1D Tensor") { + std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<float,10> { + {0.41384590, 0.43120754, 0.93762982, 0.31049860, 0.77547199, 0.09514862, + 0.16145366, 0.42776686, 0.43487436, 0.41170865} + }); + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<float,10> { + {0.44163144, 0.45801866, 0.81516320, 0.33941913, 0.72722000, 0.10704061, + 0.18061027, 0.45479023, 0.46144873, 0.43959764} + }); + + std::shared_ptr<Node> myErf = Erf(); + auto op = std::static_pointer_cast<OperatorTensor>(myErf -> getOperator()); + op->associateInput(0,input0); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myErf->forward(); + + float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); + float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); + for (std::size_t i = 0; i< expectedOutput->size(); ++i) { + REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + } + } + + SECTION("3D Tensor") { + std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<float,2,2,3> { + { + { + {0.97037154, 0.86208081, 0.77767169}, + {0.38160080, 0.11422747, 0.77284443}, + }, + { + {0.51592529, 0.72543722, 0.54641193}, + {0.93866944, 0.97767913, 0.34172094} + } + } + }); + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> { + { + { + {0.83003384, 0.77721894, 0.72857803}, + {0.41057193, 0.12833349, 0.72559172}, + }, + { + {0.53438270, 0.69507217, 0.56032562}, + {0.81564975, 0.83322692, 0.37109339} + } + } + }); + + std::shared_ptr<Node> myErf = Erf(); + auto op = std::static_pointer_cast<OperatorTensor>(myErf -> getOperator()); + op->associateInput(0,input0); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myErf->forward(); + + float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); + float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); + for (std::size_t i = 0; i< expectedOutput->size(); ++i) { + REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + } + } +} \ No newline at end of file diff --git a/unit_tests/operator/Test_GatherImpl.cpp b/unit_tests/operator/Test_GatherImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a8345917ab0a141065e86638c09b2689902679ec --- /dev/null +++ b/unit_tests/operator/Test_GatherImpl.cpp @@ -0,0 +1,100 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Gather.hpp" + +#include "aidge/backend/cpu.hpp" + +#include <memory> + + +using namespace Aidge; + +TEST_CASE("[cpu/operator] Gather(forward)") { + SECTION("2D Tensor axis 0") { + std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> { + { + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9} + } + }); + std::shared_ptr<Tensor> indexes = std::make_shared<Tensor>(Array2D<int,1,2> { + { + {1, 2} + } + }); + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,1,2,3> { + { + { + {4, 5, 6}, + {7, 8, 9} + } + } + }); + + std::shared_ptr<Node> myGather = Gather({1, 2}, {1, 2}, 0); + auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator()); + op->associateInput(0,input); + // op->associateInput(1,indexes); + op->setDataType(DataType::Int32); + op->setBackend("cpu"); + op->computeOutputDims(); + myGather->forward(); + op->getOutput(0)->print(); + expectedOutput->print(); + + REQUIRE(*(op->getOutput(0)) == *expectedOutput); + + } + SECTION("2D Tensor axis 1") { + std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> { + { + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9} + } + }); + std::shared_ptr<Tensor> indexes = std::make_shared<Tensor>(Array2D<int,1,2> { + { + {0, 2} + } + }); + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,3,1,2> { + { + { + {1, 3} + }, + { + {4, 6} + }, + { + {7, 9} + } + } + }); + + std::shared_ptr<Node> myGather = Gather({0, 2}, {1, 2}, 1); + auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator()); + op->associateInput(0,input); + // op->associateInput(1,indexes); + op->setDataType(DataType::Int32); + op->setBackend("cpu"); + op->computeOutputDims(); + myGather->forward(); + + REQUIRE(*(op->getOutput(0)) == *expectedOutput); + + } +} \ No newline at end of file diff --git a/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp b/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c1db6c5eebcef13df970ec7e9fc415b5cba187a2 --- /dev/null +++ b/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp @@ -0,0 +1,565 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <aidge/utils/Types.h>
+#include <catch2/catch_test_macros.hpp>
+#include <chrono>
+#include <cmath>
+#include <cstddef>  // std::size_t
+#include <cstdint>  // std::uint16_t
+#include <iostream>
+#include <memory>
+#include <numeric>  // std::accumulate
+#include <ostream>
+#include <random>   // std::random_device, std::mt19937, std::uniform_real_distribution
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/GlobalAveragePooling.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+
+// debug print function
+void print_tensor(Aidge::Tensor &T) {
+  // Print tensors
+  std::cout << "Tensor : size = [";
+  for (auto &dim : T.dims()) {
+    std::cout << dim << " , ";
+  }
+  std::cout << "]" << std::endl;
+  T.print();
+}
+
+namespace Aidge {
+TEST_CASE("[cpu/operator] GlobalAveragePooling",
+          "[GlobalAveragePooling][CPU]") {
+  constexpr std::uint16_t NBTRIALS = 10;
+  // Create a random number generator
+  std::random_device rd;
+  std::mt19937 gen(rd());
+  std::uniform_real_distribution<float> valueDist(
+      0.1f, 1.1f); // random floats in [0.1, 1.1)
+  std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2),
+                                                         std::size_t(10));
+
+  std::uniform_int_distribution<std::size_t> nbLowDimsDist(std::size_t(1),
+                                                           std::size_t(2));
+  std::uniform_int_distribution<std::size_t> nbHighDimsDist(std::size_t(3),
+                                                            std::size_t(7));
+
+  // Create GlobalAveragePooling Operator
+  std::shared_ptr<Node> globAvgPool = GlobalAveragePooling();
+  auto op =
+      std::static_pointer_cast<OperatorTensor>(globAvgPool->getOperator());
+  op->setDataType(DataType::Float32);
+  op->setBackend("cpu");
+
+  // Create the input Tensor
+  std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+  op->associateInput(0, T0);
+  T0->setDataType(DataType::Float32);
+  T0->setBackend("cpu");
+
+  // Create results Tensor
+  std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>();
+  Tres->setDataType(DataType::Float32);
+  Tres->setBackend("cpu");
+
+  // To measure execution time of 'GlobalAveragePooling_Op::forward()' member
+  // function call
+  std::chrono::time_point<std::chrono::system_clock> start;
+  std::chrono::time_point<std::chrono::system_clock> end;
+  std::chrono::duration<double, std::micro> duration{};
+  int number_of_operation{0};
+
+  SECTION("GlobalAveragePoolingImpl_cpu::forward()") {
+    SECTION(
+        "1-2Dim > not enough dimensions leads to function throwing an error") {
+      // generate a random tensor
+      const std::size_t nbDims = nbLowDimsDist(gen);
+      std::vector<std::size_t> dims;
+      for (std::size_t i = 0; i < nbDims; ++i) {
+        dims.push_back(dimSizeDist(gen));
+      }
+      const std::size_t nb_elements =
+          std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1),
+                          std::multiplies<std::size_t>());
+
+      float *array0 = new float[nb_elements];
+      for (std::size_t i = 0; i < nb_elements; ++i) {
+        array0[i] = valueDist(gen);
+      }
+      // input0
+      T0->resize(dims);
+      T0->getImpl()->setRawPtr(array0, nb_elements);
+
+      REQUIRE_THROWS(globAvgPool->forward());
+      delete[] array0;
+    }
+
+    SECTION("3+Dim") {
+      SECTION("Fill a tensor with all values set to N will result in every "
+              "output being N") {
+        // generate the tensor
+        const std::size_t nbDims = nbHighDimsDist(gen);
+        std::vector<std::size_t> dims_in;
+        for (std::size_t i = 0; i < nbDims; ++i) {
+          dims_in.push_back(dimSizeDist(gen));
+        }
+        // create in nb_elems
+        const std::size_t in_nb_elems =
+            std::accumulate(dims_in.cbegin(), 
dims_in.cend(), std::size_t(1), + std::multiplies<std::size_t>()); + const DimSize_t in_batch_nb_elems = in_nb_elems / dims_in[0]; + const DimSize_t in_channel_nb_elems = in_batch_nb_elems / dims_in[1]; + + number_of_operation += + in_nb_elems + + dims_in[1]; // averaging per channel : 1 addition per element in + // the channel + 1 division this for every batch + // create out nb_elems + std::vector<std::size_t> dims_out{dims_in[0], dims_in[1]}; + const std::size_t out_nb_elems = + std::accumulate(dims_out.cbegin(), dims_out.cend(), std::size_t(1), + std::multiplies<std::size_t>()); + const DimSize_t out_batch_nb_elems = out_nb_elems / dims_out[0]; + + // iterate over each batch/channel + float *array0 = new float[in_nb_elems]; + float *result = new float[out_nb_elems]; + float val = valueDist(gen); + for (std::size_t batch = 0; batch < dims_in[0]; ++batch) { + for (std::size_t channel = 0; channel < dims_in[1]; ++channel) { + for (std::size_t i = 0; i < in_channel_nb_elems; ++i) + + { + array0[batch * in_batch_nb_elems + channel * in_channel_nb_elems + + i] = val; + } + result[batch * out_batch_nb_elems + channel] = val; + } + } + + // input0 + T0->resize(dims_in); + T0->getImpl()->setRawPtr(array0, in_nb_elems); + + // results + Tres->resize(dims_out); + Tres->getImpl()->setRawPtr(result, out_nb_elems); + + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + REQUIRE_NOTHROW(globAvgPool->forward()); + end = std::chrono::system_clock::now(); + duration += + std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + REQUIRE(Tres->nbDims() == op->getOutput(0)->nbDims()); + for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) { + REQUIRE(Tres->dims().at(i) == op->getOutput(0)->dims().at(i)); + } + + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] result; + } + + SECTION("random testing") { + for (int trial = 0; trial < NBTRIALS; ++trial) { + // generate the tensor + const std::size_t nbDims = nbHighDimsDist(gen); + std::vector<std::size_t> dims_in; + for (std::size_t i = 0; i < nbDims; ++i) { + dims_in.push_back(dimSizeDist(gen)); + } + // create in nb_elems + const std::size_t in_nb_elems = + std::accumulate(dims_in.cbegin(), dims_in.cend(), std::size_t(1), + std::multiplies<std::size_t>()); + const DimSize_t in_batch_nb_elems = in_nb_elems / dims_in[0]; + const DimSize_t in_channel_nb_elems = in_batch_nb_elems / dims_in[1]; + number_of_operation += + in_nb_elems + + dims_in[1]; // averaging per channel : 1 addition per element in + // the channel + 1 division this for every batch + + // create out nb_elems + std::vector<std::size_t> dims_out{dims_in[0], dims_in[1]}; + const std::size_t out_nb_elems = + std::accumulate(dims_out.cbegin(), dims_out.cend(), + std::size_t(1), std::multiplies<std::size_t>()); + const DimSize_t out_batch_nb_elems = out_nb_elems / dims_out[0]; + + // iterate over each batch/channel + float *array0 = new float[in_nb_elems]; + float *result = new float[out_nb_elems]; + for (std::size_t batch = 0; batch < dims_in[0]; ++batch) { + for (std::size_t channel = 0; channel < dims_in[1]; ++channel) { + float channel_sum = 0; + for (std::size_t i = 0; i < in_channel_nb_elems; ++i) + + { + float val = valueDist(gen); + array0[batch * in_batch_nb_elems + + channel * in_channel_nb_elems + i] = val; + channel_sum += val; + } + result[batch * out_batch_nb_elems + channel] = + channel_sum / in_channel_nb_elems; + } + } + + // input0 + T0->resize(dims_in); + T0->getImpl()->setRawPtr(array0, in_nb_elems); + + 
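+          // the reference tensor has shape (batch, channels): global average
+          // pooling collapses every spatial axis into one per-channel mean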
// results + Tres->resize(dims_out); + Tres->getImpl()->setRawPtr(result, out_nb_elems); + + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + REQUIRE_NOTHROW(globAvgPool->forward()); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>( + end - start); + + REQUIRE(Tres->nbDims() == op->getOutput(0)->nbDims()); + for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) { + REQUIRE(Tres->dims().at(i) == op->getOutput(0)->dims().at(i)); + } + + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] result; + } + } + SECTION("Using result from a pytorch function as groundtruth") { + DimSize_t batch_size = 2; + DimSize_t channels = 3; + DimSize_t height = 4; + DimSize_t width = 3; + DimSize_t depth = 2; + + SECTION("2D_img") { + const std::vector<DimSize_t> in_dims{batch_size, channels, height, + width}; + const std::vector<DimSize_t> out_dims{batch_size, channels}; + DimSize_t in_nb_elems = batch_size * channels * height * width; + DimSize_t out_nb_elems = batch_size * channels; + number_of_operation += + in_nb_elems + + channels; // averaging per channel : 1 addition per element in + // the channel + 1 division this for every batch + auto input = new float[in_nb_elems]; + auto result = new float[out_nb_elems]; + input[0] = 0.1807716; + input[1] = -0.0699881; + input[2] = -0.3596235; + input[3] = -0.9152045; + input[4] = 0.6257653; + input[5] = 0.0255099; + input[6] = 0.9545137; + input[7] = 0.0643485; + input[8] = 0.3611506; + input[9] = 1.1678782; + input[10] = -1.3498932; + input[11] = -0.5101767; + input[12] = 0.2359577; + input[13] = -0.2397784; + input[14] = -0.9211147; + input[15] = 1.5432971; + input[16] = 1.3488258; + input[17] = -0.1396417; + input[18] = 0.2857972; + input[19] = 0.9651205; + input[20] = -2.0371499; + input[21] = 0.4931363; + input[22] = 1.4869986; + input[23] = 0.5910330; + input[24] = 0.1260297; + input[25] = -1.5626874; + input[26] = -1.1601028; + input[27] = -0.3348408; + input[28] = 0.4477722; + input[29] = -0.8016447; + input[30] = 1.5236114; + input[31] = 2.5085869; + input[32] = -0.6630959; + input[33] = -0.2512752; + input[34] = 1.0101448; + input[35] = 0.1215468; + input[36] = 0.1583993; + input[37] = 1.1340188; + input[38] = -1.1538976; + input[39] = -0.2983968; + input[40] = -0.5075365; + input[41] = -0.9239212; + input[42] = 0.5467061; + input[43] = -1.4947776; + input[44] = -1.2057148; + input[45] = 0.5718198; + input[46] = -0.5973545; + input[47] = -0.6936757; + input[48] = 1.6455388; + input[49] = -0.8029931; + input[50] = 1.3514109; + input[51] = -0.2759193; + input[52] = -1.5108346; + input[53] = 2.1047730; + input[54] = 2.7629590; + input[55] = -1.7465292; + input[56] = 0.8353187; + input[57] = -1.9560477; + input[58] = -0.8002653; + input[59] = -0.5044988; + input[60] = -0.0711742; + input[61] = -0.5130699; + input[62] = -1.0307810; + input[63] = 0.9154347; + input[64] = -0.2282317; + input[65] = -0.6884708; + input[66] = 0.1832259; + input[67] = 0.6003584; + input[68] = -1.5429375; + input[69] = -0.3465560; + input[70] = -0.1476223; + input[71] = 0.6469797; + + result[0] = 0.0145876; + result[1] = 0.3010401; + result[2] = 0.0803371; + + result[3] = -0.3720275; + result[4] = 0.0919094; + result[5] = -0.1852371; + + // input0 + T0->resize(in_dims); + T0->getImpl()->setRawPtr(input, in_nb_elems); + + // results + Tres->resize(out_dims); + Tres->getImpl()->setRawPtr(result, out_nb_elems); + op->computeOutputDims(); + start = 
std::chrono::system_clock::now(); + REQUIRE_NOTHROW(globAvgPool->forward()); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>( + end - start); + + REQUIRE(Tres->nbDims() == op->getOutput(0)->nbDims()); + for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) { + REQUIRE(Tres->dims().at(i) == op->getOutput(0)->dims().at(i)); + } + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + delete[] input; + delete[] result; + } + SECTION("3D_img") { + const std::vector<DimSize_t> in_dims{batch_size, channels, height, + width, depth}; + const std::vector<DimSize_t> out_dims{batch_size, channels}; + DimSize_t in_nb_elems = + batch_size * channels * height * width * depth; + number_of_operation += + in_nb_elems + + channels; // averaging per channel : 1 addition per element in + // the channel + 1 division this for every batch + DimSize_t out_nb_elems = batch_size * channels; + auto input = new float[in_nb_elems]; + auto result = new float[out_nb_elems]; + input[0] = 0.0061403; + input[1] = -0.9665052; + input[2] = 0.3582928; + input[3] = 0.1072854; + input[4] = 1.2463317; + input[5] = 1.2460036; + input[6] = 0.3534451; + input[7] = 0.9425349; + input[8] = -0.2103887; + input[9] = -0.7959853; + input[10] = 0.1297970; + input[11] = -1.9445597; + input[12] = 0.0609514; + input[13] = -0.2379328; + input[14] = 1.9020044; + input[15] = -1.1762751; + input[16] = 0.3404147; + input[17] = 1.1685153; + input[18] = -0.6526139; + input[19] = 0.3767620; + input[20] = 0.1887376; + input[21] = 0.5154487; + input[22] = 0.6371427; + input[23] = -0.3948864; + input[24] = -1.1571540; + input[25] = 0.2896117; + input[26] = 0.6163548; + input[27] = -0.4370409; + input[28] = 0.6589766; + input[29] = 0.6587803; + input[30] = -1.3702172; + input[31] = -1.6210355; + input[32] = 0.5872851; + input[33] = 0.2860694; + input[34] = 0.0082870; + input[35] = -0.2523253; + input[36] = -1.3247224; + input[37] = 0.1891782; + input[38] = 0.0211001; + input[39] = 0.9404197; + input[40] = -0.5576900; + input[41] = -0.6939272; + input[42] = -0.3252473; + input[43] = 1.2439330; + input[44] = -1.1671864; + input[45] = -0.4091243; + input[46] = 1.2600617; + input[47] = -1.5630058; + input[48] = 1.1346143; + input[49] = -0.0823837; + input[50] = 0.2893163; + input[51] = 0.8357732; + input[52] = -0.2449911; + input[53] = 0.2712233; + input[54] = 0.0936364; + input[55] = -0.8834321; + input[56] = -0.3274170; + input[57] = 0.0783938; + input[58] = -0.3807656; + input[59] = 0.3775077; + input[60] = 0.1119123; + input[61] = 2.3142793; + input[62] = -0.7989057; + input[63] = -0.5643027; + input[64] = -1.1346605; + input[65] = 0.1705271; + input[66] = 0.9946650; + input[67] = 1.2625724; + input[68] = 1.6218156; + input[69] = 1.0774711; + input[70] = 0.5947813; + input[71] = -1.5290873; + input[72] = 2.0437069; + input[73] = -0.1656267; + input[74] = 0.0870704; + input[75] = -0.5276564; + input[76] = -0.1002882; + input[77] = 1.0539219; + input[78] = -0.6230739; + input[79] = -1.5905718; + input[80] = -0.9741858; + input[81] = -0.1869211; + input[82] = 0.5816050; + input[83] = -2.6339815; + input[84] = -1.0764544; + input[85] = 2.5903966; + input[86] = 0.4940658; + input[87] = 0.4671729; + input[88] = 0.6588292; + input[89] = -0.7257792; + input[90] = 1.4280071; + input[91] = -1.2187740; + input[92] = 0.7380729; + input[93] = -1.1599953; + input[94] = -1.4355115; + input[95] = -1.5304037; + input[96] = 0.8474578; + input[97] = 0.0774260; + input[98] = 0.5433396; + input[99] = 
-0.8438400; + input[100] = -0.1089903; + input[101] = -0.6354192; + input[102] = 0.8772392; + input[103] = 0.2844733; + input[104] = 0.0975270; + input[105] = -0.9785872; + input[106] = -0.4320499; + input[107] = -1.4937501; + input[108] = -2.0644901; + input[109] = 0.0851217; + input[110] = 0.6644159; + input[111] = 0.4168026; + input[112] = 0.0958830; + input[113] = -1.5699565; + input[114] = 0.3739572; + input[115] = -0.1420672; + input[116] = -0.7864021; + input[117] = 0.2443752; + input[118] = -0.9811850; + input[119] = -0.0698569; + input[120] = 0.1463890; + input[121] = 0.2536245; + input[122] = 0.2136150; + input[123] = 0.3113698; + input[124] = 1.8353856; + input[125] = 1.4473228; + input[126] = -0.7373698; + input[127] = 0.2485314; + input[128] = -0.4789796; + input[129] = -0.3396149; + input[130] = 0.6438198; + input[131] = 0.7287521; + input[132] = -1.5119252; + input[133] = -0.1006494; + input[134] = 1.8955028; + input[135] = 1.0871323; + input[136] = 0.3620502; + input[137] = -0.8826663; + input[138] = 1.2220223; + input[139] = -1.2817260; + input[140] = 1.4153577; + input[141] = 0.4148015; + input[142] = 1.3458617; + input[143] = 1.9718349; + + result[0] = 0.1333608; + result[1] = -0.1716091; + result[2] = 0.2201060; + result[3] = -0.1585989; + result[4] = -0.2291074; + result[5] = 0.4254351; + + // input0 + T0->resize(in_dims); + T0->getImpl()->setRawPtr(input, in_nb_elems); + + // results + Tres->resize(out_dims); + Tres->getImpl()->setRawPtr(result, out_nb_elems); + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + REQUIRE_NOTHROW(globAvgPool->forward()); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>( + end - start); + + REQUIRE(Tres->nbDims() == op->getOutput(0)->nbDims()); + for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) { + REQUIRE(Tres->dims().at(i) == op->getOutput(0)->dims().at(i)); + } + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + delete[] input; + delete[] result; + } + } + std::cout << "GlobalAveragePooling total execution time : " + << duration.count() << "µs" << std::endl; + std::cout << "Number of operations : " << number_of_operation + << std::endl; + std::cout << "Operation / µs = " << number_of_operation / duration.count() + << std::endl; + } + } +} +} // namespace Aidge diff --git a/unit_tests/operator/Test_MatMulImpl.cpp b/unit_tests/operator/Test_MatMulImpl.cpp index 1edb915fb78e3e056f455ddecb8e704eee068cd9..168418372d94a7de2aee7ed2e6a41d90c68531af 100644 --- a/unit_tests/operator/Test_MatMulImpl.cpp +++ b/unit_tests/operator/Test_MatMulImpl.cpp @@ -10,102 +10,257 @@ ********************************************************************************/ #include <catch2/catch_test_macros.hpp> +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t +#include <chrono> +#include <iostream> #include <memory> +#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution #include "aidge/data/Tensor.hpp" #include "aidge/operator/MatMul.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/TensorUtils.hpp" #include "aidge/backend/cpu/operator/MatMulImpl.hpp" -using namespace Aidge; +namespace Aidge { TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") { - // Test MatMul forward with batch size = 2 and feature size = 75 - std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array2D<int, 5, 75>{ - {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}}); - std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<int, 2, 5>{ - {{23600, 23600, 23600, 23600, 23600}, {68600, 68600, 68600, 68600, 68600}}}); - - std::shared_ptr<Node> myMatMul = MatMul(75, 5, "mymatmul"); + const std::uint16_t NBTRIALS = 10; + // Create a random number generator + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<float> dis(0.0, 1.0); // Random float distribution between 0 and 1 + std::uniform_int_distribution<std::size_t> distDims(10, 100); + std::uniform_int_distribution<std::size_t> distNbMatrix(1, 5); + + // Create MatMul Operator + std::shared_ptr<Node> myMatMul = MatMul(); auto op = std::static_pointer_cast<OperatorTensor>(myMatMul -> getOperator()); - op->associateInput(1, myWeights); - - SECTION("2D input") { - std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<int, 2, 75>{ - {{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74}, - {75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, - 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, - 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}}); - op->associateInput(0, myInput); - op->setDataType(DataType::Int32); - op->setBackend("cpu"); - op->computeOutputDims(); - myMatMul->forward(); - REQUIRE(*(op->getOutput(0)) == *myOutput); + + // To measure execution time of 'MatMul_Op::forward()' member function call + std::chrono::time_point<std::chrono::system_clock> start; + std::chrono::time_point<std::chrono::system_clock> end; + std::chrono::duration<double, std::micro> duration; + + SECTION("2-D Tensors") { + std::size_t totalComputation = 0; + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate Tensors dimensions + const std::size_t dim0 = distDims(gen); + const std::size_t dim1 = distDims(gen); + const std::size_t dim2 = distDims(gen); + totalComputation += dim0*dim1*dim2; + + // 
Create and populate the array with random float values + float* bigArray1 = new float[dim0*dim1]; + for (int i = 0; i < dim0*dim1; ++i) { + bigArray1[i] = dis(gen); // Generate random float value + } + float* bigArray2 = new float[dim1*dim2]; + for (int i = 0; i < dim1*dim2; ++i) { + bigArray2[i] = dis(gen); // Generate random float value + } + float* res = new float[dim0*dim2]; + for (int i = 0; i < dim0; ++i) { + for (int j = 0; j < dim2; ++j) { + float sum = 0.0; + for (int k = 0; k < dim1; ++k) { + sum += bigArray1[i*dim1+k] * bigArray2[k*dim2+j]; + } + res[i*dim2+j] = sum; + } + } + + + // Convert bigArray1 to Tensor + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(DataType::Float32); + T1 -> resize({dim0,dim1}); + T1 -> setBackend("cpu"); + T1 -> getImpl() -> setRawPtr(bigArray1, dim0*dim1); + // Convert bigArray2 to Tensor + std::shared_ptr<Tensor> T2 = std::make_shared<Tensor>(DataType::Float32); + T2 -> resize({dim1,dim2}); + T2 -> setBackend("cpu"); + T2 -> getImpl() -> setRawPtr(bigArray2, dim1*dim2); + // convert res to Tensor + std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32); + Tres -> resize({dim0,dim2}); + Tres -> setBackend("cpu"); + Tres -> getImpl() -> setRawPtr(res, dim0*dim2); + + op->associateInput(0, T1); + op->associateInput(1, T2); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myMatMul->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + } + std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl; + std::cout << "total time: " << duration.count() << std::endl; } - SECTION("4D input") { - std::shared_ptr<Tensor> myInput = - std::make_shared<Tensor>(Array4D<int, 2, 3, 5, 5>{{{{{0, 1, 2, 3, 4}, - {5, 6, 7, 8, 9}, - {10, 11, 12, 13, 14}, - {15, 16, 17, 18, 19}, - {20, 21, 22, 23, 24}}, - {{25, 26, 27, 28, 29}, - {30, 31, 32, 33, 34}, - {35, 36, 37, 38, 39}, - {40, 41, 42, 43, 44}, - {45, 46, 47, 48, 49}}, - {{50, 51, 52, 53, 54}, - {55, 56, 57, 58, 59}, - {60, 61, 62, 63, 64}, - {65, 66, 67, 68, 69}, - {70, 71, 72, 73, 74}}}, - {{{75, 76, 77, 78, 79}, - {80, 81, 82, 83, 84}, - {85, 86, 87, 88, 89}, - {90, 91, 92, 93, 94}, - {95, 96, 97, 98, 99}}, - {{100, 101, 102, 103, 104}, - {105, 106, 107, 108, 109}, - {110, 111, 112, 113, 114}, - {115, 116, 117, 118, 119}, - {120, 121, 122, 123, 124}}, - {{125, 126, 127, 128, 129}, - {130, 131, 132, 133, 134}, - {135, 136, 137, 138, 139}, - {140, 141, 142, 143, 144}, - {145, 146, 147, 148, 149}}}}}); - op->associateInput(0, myInput); - op->setDataType(DataType::Int32); + + SECTION("3-D Tensors") { + std::size_t totalComputation = 0; + duration = std::chrono::duration<double, std::micro>::zero(); + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate Tensors dimensions + const std::size_t dimNb = distNbMatrix(gen); + const std::size_t dim0 = distDims(gen); + const std::size_t dim1 = distDims(gen); + const std::size_t dim2 = distDims(gen); + totalComputation += dim0*dim1*dim2*dimNb; + + // Create and populate the array with random float values + float* bigArray1 = new float[dimNb*dim0*dim1]; + for (std::size_t i = 0; i < dimNb*dim0*dim1; ++i) { + bigArray1[i] = dis(gen); // Generate random float value + } + float* bigArray2 = new float[dimNb*dim1*dim2]; + for (int i = 0; i < dimNb*dim1*dim2; ++i) { + 
bigArray2[i] = dis(gen); // Generate random float value + } + float* res = new float[dimNb*dim0*dim2]; + for (std::size_t n = 0; n < dimNb; ++n) { + for (int i = 0; i < dim0; ++i) { + for (int j = 0; j < dim2; ++j) { + float sum = 0.0; + for (int k = 0; k < dim1; ++k) { + sum += bigArray1[n*dim0*dim1 + i*dim1 + k] * bigArray2[n*dim2*dim1+k*dim2+j]; + } + res[n*dim0*dim2+i*dim2+j] = sum; + } + } + } + // Convert bigArray1 to Tensor + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(DataType::Float32); + T1 -> resize({dimNb,dim0,dim1}); + T1 -> setBackend("cpu"); + T1 -> getImpl() -> setRawPtr(bigArray1, dimNb*dim0*dim1); + // Convert bigArray2 to Tensor + std::shared_ptr<Tensor> T2 = std::make_shared<Tensor>(DataType::Float32); + T2 -> resize({dimNb,dim1,dim2}); + T2 -> setBackend("cpu"); + T2 -> getImpl() -> setRawPtr(bigArray2, dimNb*dim1*dim2); + // convert res to Tensor + std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32); + Tres -> resize({dimNb,dim0,dim2}); + Tres -> setBackend("cpu"); + Tres -> getImpl() -> setRawPtr(res, dimNb*dim0*dim2); + + op->associateInput(0, T1); + op->associateInput(1, T2); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myMatMul->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + } + std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl; + std::cout << "total time: " << duration.count() << std::endl; + } + + SECTION("4-D Tensors") { + std::size_t totalComputation = 0; + duration = std::chrono::duration<double, std::micro>::zero(); + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate Tensors dimensions + const std::size_t dimNb1 = distNbMatrix(gen); + const std::size_t dimNb2 = distNbMatrix(gen); + const std::size_t dim0 = distDims(gen); + const std::size_t dim1 = distDims(gen); + const std::size_t dim2 = distDims(gen); + totalComputation += dim0*dim1*dim2*dimNb1*dimNb2; + + // Create and populate the array with random float values + float* bigArray1 = new float[dimNb1*dimNb2*dim0*dim1]; + for (std::size_t i = 0; i < dimNb1*dimNb2*dim0*dim1; ++i) { + bigArray1[i] = dis(gen); // Generate random float value + } + float* bigArray2 = new float[dimNb1*dimNb2*dim1*dim2]; + for (std::size_t i = 0; i < dimNb1*dimNb2*dim1*dim2; ++i) { + bigArray2[i] = dis(gen); // Generate random float value + } + float* res = new float[dimNb1*dimNb2*dim0*dim2]; + for (std::size_t n1 = 0; n1 < dimNb1; ++n1) { + for (std::size_t n2 = 0; n2 < dimNb2; ++n2) { + for (int i = 0; i < dim0; ++i) { + for (int j = 0; j < dim2; ++j) { + float sum = 0.0; + for (int k = 0; k < dim1; ++k) { + sum += bigArray1[n1*dimNb2*dim0*dim1+n2*dim0*dim1+i*dim1+k] * bigArray2[n1*dimNb2*dim1*dim2+n2*dim1*dim2+k*dim2+j]; + } + res[n1*dimNb2*dim0*dim2+n2*dim0*dim2+i*dim2+j] = sum; + } + } + } + } + // Convert bigArray1 to Tensor + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(DataType::Float32); + T1 -> resize({dimNb1,dimNb2,dim0,dim1}); + T1 -> setBackend("cpu"); + T1 -> getImpl() -> setRawPtr(bigArray1, dimNb1*dimNb2*dim0*dim1); + // Convert bigArray2 to Tensor + std::shared_ptr<Tensor> T2 = std::make_shared<Tensor>(DataType::Float32); + T2 -> resize({dimNb1,dimNb2,dim1,dim2}); + T2 -> setBackend("cpu"); + T2 -> getImpl() -> setRawPtr(bigArray2, dimNb1*dimNb2*dim1*dim2); + // 
convert res to Tensor + std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32); + Tres -> resize({dimNb1,dimNb2,dim0,dim2}); + Tres -> setBackend("cpu"); + Tres -> getImpl() -> setRawPtr(res, dimNb1*dimNb2*dim0*dim2); + + op->associateInput(0, T1); + op->associateInput(1, T2); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myMatMul->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + } + std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl; + std::cout << "total time: " << duration.count() << std::endl; + } + + SECTION("+2-D / 1-D") { + // allows to test both computation with a 1-D Tensor and broadcasting + // input_0 + std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + op->associateInput(0,T0); + const std::size_t dim0 = distNbMatrix(gen); + const std::size_t dim1 = distNbMatrix(gen) + 1; + const std::size_t dim2 = distNbMatrix(gen); + const std::size_t dim3 = distNbMatrix(gen); + T0->resize({dim0,dim1,dim2,dim3}); + T0->setDataType(DataType::Float32); + T0->setBackend("cpu"); + + // input_1 + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(); + op -> associateInput(1,T1); + T1->resize({dim3}); + T1->setDataType(DataType::Float32); + T1->setBackend("cpu"); + + op->setDataType(DataType::Float32); op->setBackend("cpu"); op->computeOutputDims(); myMatMul->forward(); - REQUIRE(*(op->getOutput(0)) == *myOutput); - } - // std::cout << static_cast<Tensor>((*myMatMul->getOperator())["weight"])[0][0][0][0] << std::endl; -} \ No newline at end of file + } +} +} // namespace Aidge \ No newline at end of file diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp index 71646c92fa7f041d695a89858cf21ab0d0336f2c..63a11d19a025b5560075c4b85123d645522da09e 100644 --- a/unit_tests/operator/Test_MetaOperator.cpp +++ b/unit_tests/operator/Test_MetaOperator.cpp @@ -14,6 +14,7 @@ #include <cstdlib> #include <memory> +#include "aidge/utils/TensorUtils.hpp" #include "aidge/backend/cpu/operator/ConvImpl.hpp" #include "aidge/backend/cpu/operator/PadImpl.hpp" #include "aidge/data/Tensor.hpp" @@ -21,10 +22,14 @@ #include "aidge/operator/MetaOperator.hpp" #include "aidge/operator/MetaOperatorDefs.hpp" #include "aidge/operator/Pad.hpp" +#include "aidge/operator/Pop.hpp" +#include "aidge/scheduler/SequentialScheduler.hpp" +#include "aidge/scheduler/ParallelScheduler.hpp" using namespace Aidge; -TEST_CASE("[cpu/operator] MetaOperator/PaddedConv(forward)", "[MetaOperator][PaddedConv][CPU]") { +TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") { + SECTION("PaddedConv(forward)") { std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>( Array4D<double, 4, 3, 3, 3>{{{{{6.20986394e-01, 1.19775136e-03, 7.22876095e-02}, {1.16492919e-01, 8.21634093e-02, 1.17413265e-01}, @@ -187,4 +192,304 @@ TEST_CASE("[cpu/operator] MetaOperator/PaddedConv(forward)", "[MetaOperator][Pad std::shared_ptr<Node> myPaddedConv = PaddedConv(3, 4, {3, 3}, "myPaddedConv", {1, 1}, {1, 1, 1, 1}); + } + SECTION("LSTM(forward)") { + auto pop = Pop(); + auto myLSTM = LSTM(32, 64, 0, true, "ltsm"); + auto op = std::static_pointer_cast<OperatorTensor>(myLSTM->getOperator()); + + auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraph(); + microGraph->save("lstm", false, 
false); + + REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8); + REQUIRE(myLSTM->nbData() == 1); + REQUIRE(myLSTM->nbOutputs() == 2); + + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>( + Array2D<float, 16, 32>{}); + std::shared_ptr<Tensor> myInit = std::make_shared<Tensor>( + Array2D<float, 32, 64>{}); + std::shared_ptr<Tensor> myInitW = std::make_shared<Tensor>( + Array2D<float, 64, 32>{}); + std::shared_ptr<Tensor> myInitR = std::make_shared<Tensor>( + Array2D<float, 64, 64>{}); + + pop->addChild(myLSTM, 0, 0); + pop->getOperator()->associateInput(0, myInput); + op->associateInput(17, myInit); + op->associateInput(18, myInit); + + // Weights X + myLSTM->input(1).first->getOperator()->setOutput(0, myInitW); + myLSTM->input(2).first->getOperator()->setOutput(0, myInitW); + myLSTM->input(3).first->getOperator()->setOutput(0, myInitW); + myLSTM->input(4).first->getOperator()->setOutput(0, myInitW); + // Weights H + myLSTM->input(5).first->getOperator()->setOutput(0, myInitR); + myLSTM->input(6).first->getOperator()->setOutput(0, myInitR); + myLSTM->input(7).first->getOperator()->setOutput(0, myInitR); + myLSTM->input(8).first->getOperator()->setOutput(0, myInitR); + + auto g = getConnectedGraphView(myLSTM); + g->setDataType(DataType::Float32); + g->setBackend("cpu"); + + auto scheduler = SequentialScheduler(g); + scheduler.forward(true); + + g->save("lstm_outside_dims", true, true); + + microGraph->save("lstm_dims", true, true); + REQUIRE(op->outputDimsForwarded()); + + auto microGraphScheduler = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraphScheduler(); + microGraphScheduler->saveSchedulingDiagram("lstm_scheduling"); + + REQUIRE(op->getNbConsumedData(0).data == 512); + REQUIRE(op->getNbConsumedData(1).data == 32768); + REQUIRE(op->getNbProducedData(0).data == 34816); + REQUIRE(op->getNbProducedData(1).data == 34816); + REQUIRE(microGraphScheduler->getStaticScheduling(0).size() == 26); + REQUIRE(microGraphScheduler->getStaticScheduling(1).size() == 24); + REQUIRE(microGraphScheduler->getStaticScheduling(15).size() == 24); + } + SECTION("LSTM(forward_values)") { + auto myLSTM = LSTM(2, 3, 0, true, "ltsm"); + auto op = std::static_pointer_cast<OperatorTensor>(myLSTM->getOperator()); + + auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraph(); + microGraph->save("lstm", false, false); + + REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8); + REQUIRE(myLSTM->nbData() == 1); + REQUIRE(myLSTM->nbOutputs() == 2); + + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>( + Array2D<float, 3, 2>{{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}}); + std::shared_ptr<Tensor> myInit = std::make_shared<Tensor>( + Array2D<float, 3, 3>{{{0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}}}); + std::shared_ptr<Tensor> myInitW = std::make_shared<Tensor>( + Array2D<float, 3, 2>{{{0.1, 0.1}, {0.1, 0.1}, {0.1, 0.1}}}); + std::shared_ptr<Tensor> myInitR = std::make_shared<Tensor>( + Array2D<float, 3, 3>{{{0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}}}); + + op->associateInput(0, myInput); + op->associateInput(17, myInit); + op->associateInput(18, myInit); + + // Weights X + myLSTM->input(1).first->getOperator()->setOutput(0, myInitW); + myLSTM->input(2).first->getOperator()->setOutput(0, myInitW); + myLSTM->input(3).first->getOperator()->setOutput(0, myInitW); + myLSTM->input(4).first->getOperator()->setOutput(0, myInitW); + // Weights H + myLSTM->input(5).first->getOperator()->setOutput(0, myInitR); + myLSTM->input(6).first->getOperator()->setOutput(0, myInitR); + 
myLSTM->input(7).first->getOperator()->setOutput(0, myInitR);
+ myLSTM->input(8).first->getOperator()->setOutput(0, myInitR);
+
+ auto g = getConnectedGraphView(myLSTM);
+ g->setDataType(DataType::Float32);
+ g->setBackend("cpu");
+
+ auto scheduler = SequentialScheduler(g);
+ scheduler.forward();
+
+ microGraph->save("lstm_values_dims", false, true);
+
+ std::shared_ptr<Tensor> myHiddenState = std::make_shared<Tensor>(
+ Array2D<float, 3, 3>{{{0.0952412, 0.0952412, 0.0952412},
+ {0.25606447, 0.25606447, 0.25606447},
+ {0.40323776, 0.40323776, 0.40323776}}});
+
+
+ auto microGraphScheduler = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraphScheduler();
+ microGraphScheduler->saveSchedulingDiagram("lstm_values_scheduling");
+
+ op->getOutput(0)->print();
+ myHiddenState->print();
+
+ REQUIRE(approxEq<float>(*(op->getOutput(0)), *myHiddenState));
+ }
+ SECTION("LSTM(forward_values_seq)") {
+ auto pop = Pop();
+ auto myLSTM = LSTM(2, 3, 2, true, "ltsm");
+ auto myGraph = Sequential({pop, myLSTM});
+ auto op = std::static_pointer_cast<OperatorTensor>(myLSTM->getOperator());
+
+ REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
+ REQUIRE(myLSTM->nbData() == 1);
+ REQUIRE(myLSTM->nbOutputs() == 2);
+
+ std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+ Array3D<float, 2, 3, 2>{{{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}, {{2.0, 3.0}, {4.0, 5.0}, {6.0, 7.0}}}});
+ std::shared_ptr<Tensor> myInit = std::make_shared<Tensor>(
+ Array2D<float, 3, 3>{{{0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}}});
+ std::shared_ptr<Tensor> myInitW = std::make_shared<Tensor>(
+ Array2D<float, 3, 2>{{{0.1, 0.1}, {0.1, 0.1}, {0.1, 0.1}}});
+ std::shared_ptr<Tensor> myInitR = std::make_shared<Tensor>(
+ Array2D<float, 3, 3>{{{0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}}});
+
+ pop->getOperator()->associateInput(0, myInput);
+ op->associateInput(17, myInit);
+ op->associateInput(18, myInit);
+
+ // Weights X
+ myLSTM->input(1).first->getOperator()->setOutput(0, myInitW);
+ myLSTM->input(2).first->getOperator()->setOutput(0, myInitW);
+ myLSTM->input(3).first->getOperator()->setOutput(0, myInitW);
+ myLSTM->input(4).first->getOperator()->setOutput(0, myInitW);
+ // Weights H
+ myLSTM->input(5).first->getOperator()->setOutput(0, myInitR);
+ myLSTM->input(6).first->getOperator()->setOutput(0, myInitR);
+ myLSTM->input(7).first->getOperator()->setOutput(0, myInitR);
+ myLSTM->input(8).first->getOperator()->setOutput(0, myInitR);
+
+ auto g = getConnectedGraphView(myLSTM);
+ g->setDataType(DataType::Float32);
+ g->setBackend("cpu");
+
+ g->save("lstm_seq", true, true);
+
+ auto scheduler = SequentialScheduler(g);
+ scheduler.forward(true);
+ scheduler.saveSchedulingDiagram("lstm_seq_schedule");
+
+ std::shared_ptr<Tensor> myHiddenState = std::make_shared<Tensor>(
+ Array2D<float, 3, 3>{{{0.24439372, 0.24439372, 0.24439372},
+ {0.49801484, 0.49801484, 0.49801484},
+ {0.67162132, 0.67162132, 0.67162132}}});
+
+ myGraph->save("lstm_seq_mygraph", true, true);
+
+ op->getOutput(0)->print();
+ myHiddenState->print();
+
+ REQUIRE(approxEq<float>(*(op->getOutput(0)), *myHiddenState));
+ }
+ SECTION("LSTM(forward_values_seq_flatten)(sequential)") {
+ auto pop = Pop();
+ auto myLSTM = LSTM(2, 3, 2, true, "ltsm");
+ auto op = std::static_pointer_cast<MetaOperator_Op>(myLSTM->getOperator());
+
+ // Here we test the LSTM as if it were flattened into the graph:
+ // we simply borrow its micro-graph into our larger myGraph.
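+ // (Assumption behind this wiring: Pop emits one time step of its input per
+ // scheduling iteration into the micro-graph's first ordered input, while LSTM
+ // inputs 17 and 18 seed the initial hidden and cell states.)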
+ auto myGraph = std::make_shared<GraphView>();
+ pop->addChild(op->getMicroGraph()->getOrderedInputs()[0].first, 0, 0);
+ myGraph->add(op->getMicroGraph());
+ myGraph->add(pop);
+
+ REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
+ REQUIRE(myLSTM->nbData() == 1);
+ REQUIRE(myLSTM->nbOutputs() == 2);
+
+ std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+ Array3D<float, 2, 3, 2>{{{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}, {{2.0, 3.0}, {4.0, 5.0}, {6.0, 7.0}}}});
+ std::shared_ptr<Tensor> myInit = std::make_shared<Tensor>(
+ Array2D<float, 3, 3>{{{0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}}});
+ std::shared_ptr<Tensor> myInitW = std::make_shared<Tensor>(
+ Array2D<float, 3, 2>{{{0.1, 0.1}, {0.1, 0.1}, {0.1, 0.1}}});
+ std::shared_ptr<Tensor> myInitR = std::make_shared<Tensor>(
+ Array2D<float, 3, 3>{{{0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}}});
+
+ pop->getOperator()->associateInput(0, myInput);
+ op->associateInput(17, myInit);
+ op->associateInput(18, myInit);
+
+ // Weights X
+ auto prodX = Producer(myInitW);
+ prodX->addChild(op->getMicroGraph()->getOrderedInputs()[1].first, 0, 1);
+ prodX->addChild(op->getMicroGraph()->getOrderedInputs()[2].first, 0, 1);
+ prodX->addChild(op->getMicroGraph()->getOrderedInputs()[3].first, 0, 1);
+ prodX->addChild(op->getMicroGraph()->getOrderedInputs()[4].first, 0, 1);
+ // Weights H
+ auto prodH = Producer(myInitR);
+ prodH->addChild(op->getMicroGraph()->getOrderedInputs()[5].first, 0, 1);
+ prodH->addChild(op->getMicroGraph()->getOrderedInputs()[6].first, 0, 1);
+ prodH->addChild(op->getMicroGraph()->getOrderedInputs()[7].first, 0, 1);
+ prodH->addChild(op->getMicroGraph()->getOrderedInputs()[8].first, 0, 1);
+ myGraph->add({prodX, prodH});
+
+ myGraph->setDataType(DataType::Float32);
+ myGraph->setBackend("cpu");
+ myGraph->save("lstm_seq_flatten", true, true);
+
+ std::shared_ptr<Tensor> myHiddenState = std::make_shared<Tensor>(
+ Array2D<float, 3, 3>{{{0.24439372, 0.24439372, 0.24439372},
+ {0.49801484, 0.49801484, 0.49801484},
+ {0.67162132, 0.67162132, 0.67162132}}});
+
+ auto scheduler = SequentialScheduler(myGraph);
+ scheduler.generateScheduling();
+ scheduler.saveStaticSchedulingDiagram("lstm_static_schedule");
+ scheduler.forward(true);
+ scheduler.saveSchedulingDiagram("lstm_seq_flatten_schedule_seq");
+
+ op->getOutput(0)->print();
+ myHiddenState->print();
+
+ REQUIRE(approxEq<float>(*(op->getOutput(0)), *myHiddenState));
+ }
+ SECTION("LSTM(forward_values_seq_flatten)(parallel)") {
+ auto pop = Pop();
+ auto myLSTM = LSTM(2, 3, 2, true, "ltsm");
+ auto op = std::static_pointer_cast<MetaOperator_Op>(myLSTM->getOperator());
+
+ // Here we test the LSTM as if it were flattened into the graph:
+ // we simply borrow its micro-graph into our larger myGraph.
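+ // Same flattened wiring as the sequential section above; only the scheduler type
+ // changes, and the ParallelScheduler is expected to reproduce the same hidden state.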
+ auto myGraph = std::make_shared<GraphView>(); + pop->addChild(op->getMicroGraph()->getOrderedInputs()[0].first, 0, 0); + myGraph->add(op->getMicroGraph()); + myGraph->add(pop); + + REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8); + REQUIRE(myLSTM->nbData() == 1); + REQUIRE(myLSTM->nbOutputs() == 2); + + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>( + Array3D<float, 2, 3, 2>{{{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}, {{2.0, 3.0}, {4.0, 5.0}, {6.0, 7.0}}}}); + std::shared_ptr<Tensor> myInit = std::make_shared<Tensor>( + Array2D<float, 3, 3>{{{0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}}}); + std::shared_ptr<Tensor> myInitW = std::make_shared<Tensor>( + Array2D<float, 3, 2>{{{0.1, 0.1}, {0.1, 0.1}, {0.1, 0.1}}}); + std::shared_ptr<Tensor> myInitR = std::make_shared<Tensor>( + Array2D<float, 3, 3>{{{0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}}}); + + pop->getOperator()->associateInput(0, myInput); + op->associateInput(17, myInit); + op->associateInput(18, myInit); + + // Weights X + auto prodX = Producer(myInitW); + prodX->addChild(op->getMicroGraph()->getOrderedInputs()[1].first, 0, 1); + prodX->addChild(op->getMicroGraph()->getOrderedInputs()[2].first, 0, 1); + prodX->addChild(op->getMicroGraph()->getOrderedInputs()[3].first, 0, 1); + prodX->addChild(op->getMicroGraph()->getOrderedInputs()[4].first, 0, 1); + // Weights H + auto prodH = Producer(myInitR); + prodH->addChild(op->getMicroGraph()->getOrderedInputs()[5].first, 0, 1); + prodH->addChild(op->getMicroGraph()->getOrderedInputs()[6].first, 0, 1); + prodH->addChild(op->getMicroGraph()->getOrderedInputs()[7].first, 0, 1); + prodH->addChild(op->getMicroGraph()->getOrderedInputs()[8].first, 0, 1); + myGraph->add({prodX, prodH}); + + myGraph->setDataType(DataType::Float32); + myGraph->setBackend("cpu"); + myGraph->save("lstm_seq_flatten", true, true); + + std::shared_ptr<Tensor> myHiddenState = std::make_shared<Tensor>( + Array2D<float, 3, 3>{{{0.24439372, 0.24439372, 0.24439372}, + {0.49801484, 0.49801484, 0.49801484}, + {0.67162132, 0.67162132, 0.67162132}}}); + + auto scheduler = ParallelScheduler(myGraph); + scheduler.generateScheduling(); + scheduler.forward(true); + scheduler.saveSchedulingDiagram("lstm_seq_flatten_schedule_par"); + + op->getOutput(0)->print(); + myHiddenState->print(); + + REQUIRE(approxEq<float>(*(op->getOutput(0)), *myHiddenState)); + } } \ No newline at end of file diff --git a/unit_tests/operator/Test_MulImpl.cpp b/unit_tests/operator/Test_MulImpl.cpp index 1707bc81e0bb549bfe90078242f8a4eae77db3c3..5b5a05764ecb0298a08c3e9ceece448d46e63044 100644 --- a/unit_tests/operator/Test_MulImpl.cpp +++ b/unit_tests/operator/Test_MulImpl.cpp @@ -10,123 +10,307 @@ ********************************************************************************/ #include <catch2/catch_test_macros.hpp> +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t +#include <chrono> +#include <iostream> +#include <memory> +#include <numeric> // std::accumulate +#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution #include "aidge/data/Tensor.hpp" #include "aidge/operator/Mul.hpp" +#include "aidge/utils/TensorUtils.hpp" -#include "aidge/backend/cpu.hpp" +namespace Aidge { -#include <memory> +TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") { + constexpr std::uint16_t NBTRIALS = 10; + // Create a random number generator + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0 and 1 + 
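// Every SECTION below follows the same harness pattern: draw random shapes and + // values, compute the expected result with a plain host-side reference loop, hand + // the raw buffers to the backend via setRawPtr(), then compare with approxEq(). +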
std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10)); + std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5)); + std::uniform_int_distribution<int> boolDist(0,1); -using namespace Aidge; + // Create MatMul Operator + std::shared_ptr<Node> myMul = Mul(); + auto op = std::static_pointer_cast<OperatorTensor>(myMul-> getOperator()); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + + // Create 2 input Tensors + std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + op->associateInput(0,T0); + T0->setDataType(DataType::Float32); + T0->setBackend("cpu"); + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(); + op -> associateInput(1,T1); + T1->setDataType(DataType::Float32); + T1->setBackend("cpu"); + + // Create results Tensor + std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(); + Tres->setDataType(DataType::Float32); + Tres->setBackend("cpu"); + + // To measure execution time of 'MatMul_Op::forward()' member function call + std::chrono::time_point<std::chrono::system_clock> start; + std::chrono::time_point<std::chrono::system_clock> end; + std::chrono::duration<double, std::micro> duration{}; + + SECTION("MulImpl_cpu::forward()") { + SECTION("Scalar / Scalar") { -TEST_CASE("[cpu/operator] Mul(forward)", "[Mul][CPU]") { - SECTION("2D Tensor by Singleton") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {0.38977361, 0.34064174}, - {0.00427264, 0.90872520} - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array2D<float,1,1>{{3.0}}); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {1.16932082, 1.02192521}, - {0.01281792, 2.72617555} - } - }); - - std::shared_ptr<Node> myMul = Mul(); - auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator()); - myMul->getOperator()->associateInput(0, input_1); - myMul->getOperator()->associateInput(1, input_2); - myMul->getOperator()->setDataType(DataType::Float32); - myMul->getOperator()->setBackend("cpu"); - op->computeOutputDims(); - myMul->forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 4; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); } + SECTION("Scalar / +1-D Tensor") { - } + } + SECTION("+1-D Tensor / +1-D Tensor - same dimensions") { + std::size_t number_of_operation = 0; - SECTION("2D Tensors") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {0.38977361, 0.34064174}, - {0.00427264, 0.90872520} - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array2D<float,2,2>{ - { - {0.02362096, 0.24084556}, - {0.94690859, 0.13512510} - } - }); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {0.00920683, 0.08204205}, - {0.00404580, 0.12279158} + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + const std::size_t nbDims = nbDimsDist(gen); + std::vector<std::size_t> dims; + for (std::size_t i = 0; i < nbDims; ++i) { + dims.push_back(dimSizeDist(gen)); + } + const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>()); + number_of_operation += nb_elements; + + // without broadcasting + float* array0 = new float[nb_elements]; + float* array1 = new float[nb_elements]; + 
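// 'result' holds the reference output: the expected element-wise product is + // computed on the host before the buffers are wrapped into Tensors below. +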
float* result = new float[nb_elements]; + + for (std::size_t i = 0; i < nb_elements; ++i) { + array0[i] = valueDist(gen); + array1[i] = valueDist(gen); + result[i] = array0[i] * array1[i]; + } + + // input0 + T0->resize(dims); + T0 -> getImpl() -> setRawPtr(array0, nb_elements); + + // input1 + T1->resize(dims); + T1 -> getImpl() -> setRawPtr(array1, nb_elements); + + // results + Tres->resize(dims); + Tres -> getImpl() -> setRawPtr(result, nb_elements); + + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myMul->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] array1; + delete[] result; + + // with broadcasting } - }); - - std::shared_ptr<Node> myMul = Mul(); - auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator()); - myMul->getOperator()->associateInput(0, input_1); - myMul->getOperator()->associateInput(1, input_2); - myMul->getOperator()->setDataType(DataType::Float32); - myMul->getOperator()->setBackend("cpu"); - op->computeOutputDims(); - myMul->forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 4; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; + std::cout << "total time: " << duration.count() << "μs" << std::endl; } - } + SECTION("+1-D Tensor / +1-D Tensor - broadcasting") { + std::size_t number_of_operation = 0; - SECTION("3D Tensor by 1D Tensor") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> { - { - {{0.33647752, 0.89360154, 0.46586215}, - {0.71518236, 0.71481097, 0.97991812}}, + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + // handle dimensions, replace some dimensions with '1' to get broadcasting + constexpr std::size_t nbDims = 4; + std::vector<std::size_t> dims; + for (std::size_t i = 0; i < nbDims; ++i) { + dims.push_back(dimSizeDist(gen)); + } + std::vector<std::size_t> dims0 = dims; + std::vector<std::size_t> dims1 = dims; + std::vector<std::size_t> dimsOut = dims; + for (std::size_t i = 0; i < nbDims; ++i) { + if (boolDist(gen)) { + dims0[i] = 1; + } + if (boolDist(gen)) { + dims1[i] = 1; + } + dimsOut[i] = (dims0[i] == 1) ? 
dims1[i] : dims0[i]; + } - {{0.17393428, 0.56849813, 0.18489265}, - {0.78397650, 0.00348300, 0.65758008}} - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<float,3>{ - {0.15380561, 0.51063120, 0.93031412} - }); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> { - { - {{0.05175213, 0.45630082, 0.43339813}, - {0.10999906, 0.36500478, 0.91163164}}, - - {{0.02675207, 0.29029289, 0.17200825}, - {0.12057999, 0.00177853, 0.61175603}} + // create arrays and fill them with random values + float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]]; + float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]]; + float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]]; + + for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) { + array0[i] = valueDist(gen); + } + for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) { + array1[i] = valueDist(gen); + } + + // compute true result + const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1}; + const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1}; + for (std::size_t a = 0; a < dimsOut[0]; ++a) { + for (std::size_t b = 0; b < dimsOut[1]; ++b) { + const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0) + + strides0[1] * ((dims0[1] > 1) ? b : 0); + const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0) + + strides1[1] * ((dims1[1] > 1) ? b : 0); + for (std::size_t c = 0; c < dimsOut[2]; ++c) { + const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a)); + for (std::size_t d = 0; d < dimsOut[3]; ++d) { + std::size_t idx0 = idx0_0 + + strides0[2] * ((dims0[2] > 1) ? c : 0) + + ((dims0[3] > 1) ? d : 0); + std::size_t idx1 = idx1_0 + + strides1[2] * ((dims1[2] > 1) ? c : 0) + + ((dims1[3] > 1) ? 
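/* broadcast indexing: a size-1 axis is pinned to index 0, a full-size axis follows d */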
d : 0); + result[idx_out + d] = array0[idx0] * array1[idx1]; + // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " * " << array1[idx1] << " -> " << idx_out + d << std::endl; + } + } + } + } + + // conversion to Aidge::Tensors + // input0 + T0->resize(dims0); + T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]); + + // input1 + T1->resize(dims1); + T1 -> getImpl() -> setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]); + + // results + Tres->resize(dimsOut); + Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]); + + // compute result + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myMul->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + // comparison between truth and computed result + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] array1; + delete[] result; + + const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>()); + number_of_operation += nb_elements; } - }); - - std::shared_ptr<Node> myMul = Mul(); - auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator()); - myMul->getOperator()->associateInput(0, input_1); - myMul->getOperator()->associateInput(1, input_2); - myMul->getOperator()->setDataType(DataType::Float32); - myMul->getOperator()->setBackend("cpu"); - op->computeOutputDims(); - myMul->forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 12; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; + std::cout << "total time: " << duration.count() << "μs" << std::endl; } + SECTION("+1-D Tensor / 1-D Tensor") { + std::size_t number_of_operation = 0; + std::uniform_int_distribution<std::size_t> nbRemovedDimsDist(std::size_t(1), std::size_t(3)); + + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + // handle dimensions + constexpr std::size_t nbDims = 4; + std::vector<std::size_t> dims0(4); + for (std::size_t i = 0; i < nbDims; ++i) { + dims0[i] = dimSizeDist(gen); + } + std::vector<std::size_t> dimsOut = dims0; + std::vector<std::size_t> dims1 = dims0; + for (std::size_t i = 0; i < nbDims; ++i) { + if (boolDist(gen)) { + dims1[i] = 1; + } + } + dims1.erase(dims1.cbegin(), dims1.cbegin() + nbRemovedDimsDist(gen)); + + // create arrays and fill them with random values + float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]]; + std::size_t array1_size = std::accumulate(dims1.cbegin(), dims1.cend(), std::size_t(1), std::multiplies<std::size_t>()); + float* array1 = new float[array1_size]; + float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]]; + + for (std::size_t i = 0; i < (dims0[0]*dims0[1]*dims0[2]*dims0[3]); ++i) { + array0[i] = valueDist(gen); + } + for (std::size_t i = 0; i < array1_size; ++i) { + array1[i] = valueDist(gen); + } + // compute true result + auto dims1_tmp = dims1; + dims1_tmp.insert(dims1_tmp.cbegin(), 4 - dims1_tmp.size(), std::size_t(1)); + + const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1}; + const std::size_t strides1[nbDims] = 
{dims1_tmp[1]*dims1_tmp[2]*dims1_tmp[3], dims1_tmp[2]*dims1_tmp[3], dims1_tmp[3], 1}; + for (std::size_t a = 0; a < dimsOut[0]; ++a) { + for (std::size_t b = 0; b < dimsOut[1]; ++b) { + const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0) + + strides0[1] * ((dims0[1] > 1) ? b : 0); + const std::size_t idx1_0 = strides1[0] * ((dims1_tmp[0] > 1) ? a : 0) + + strides1[1] * ((dims1_tmp[1] > 1) ? b : 0); + for (std::size_t c = 0; c < dimsOut[2]; ++c) { + const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a)); + for (std::size_t d = 0; d < dimsOut[3]; ++d) { + std::size_t idx0 = idx0_0 + + strides0[2] * ((dims0[2] > 1) ? c : 0) + + ((dims0[3] > 1) ? d : 0); + std::size_t idx1 = idx1_0 + + strides1[2] * ((dims1_tmp[2] > 1) ? c : 0) + + ((dims1_tmp[3] > 1) ? d : 0); + result[idx_out + d] = array0[idx0] * array1[idx1]; + // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " * " << array1[idx1] << " -> " << idx_out + d << std::endl; + } + } + } + } + + // conversion to Aidge::Tensors + // input0 + T0->resize(dims0); + T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]); + + // input1 + T1->resize(dims1); + T1 -> getImpl() -> setRawPtr(array1, array1_size); + + // results + Tres->resize(dimsOut); + Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]); + + // compute result + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myMul->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + // comparison between truth and computed result + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] array1; + delete[] result; + + const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>()); + number_of_operation += nb_elements; + } + + std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; + std::cout << "total time: " << duration.count() << "μs" << std::endl; + } } -} \ No newline at end of file +} +} // namespace Aidge diff --git a/unit_tests/operator/Test_PaddedConv.cpp b/unit_tests/operator/Test_PaddedConv.cpp index 3baf0a7aa0f366a8f0dd4e3e9df6700a5cdb0cea..b7584ad069336a270ed07c32d4c07552888b6587 100644 --- a/unit_tests/operator/Test_PaddedConv.cpp +++ b/unit_tests/operator/Test_PaddedConv.cpp @@ -16,6 +16,7 @@ #include "aidge/data/Tensor.hpp" #include "aidge/operator/MetaOperator.hpp" #include "aidge/operator/MetaOperatorDefs.hpp" +#include "aidge/scheduler/SequentialScheduler.hpp" #include "aidge/backend/cpu.hpp" @@ -150,12 +151,15 @@ TEST_CASE("[cpu/operator] PaddedConv(forward)", "[PaddedConv][CPU]") { }); myConv->getOperator()->associateInput(0,myInput); - myConv->getOperator()->associateInput(1,myWeights); - myConv->getOperator()->associateInput(2,myBias); - myConv->getOperator()->setDataType(DataType::Int32); - myConv->getOperator()->setBackend("cpu"); - op->computeOutputDims(); - myConv->forward(); + myConv->input(1).first->getOperator()->setOutput(0, myWeights); + myConv->input(2).first->getOperator()->setOutput(0, myBias); + + auto g = getConnectedGraphView(myConv); + g->setDataType(DataType::Int32); + g->setBackend("cpu"); + + auto scheduler = SequentialScheduler(g); + scheduler.forward(); REQUIRE(*(op->getOutput(0)) == *myOutput); } @@ -309,12 +313,15 @@ TEST_CASE("[cpu/operator] PaddedConv(forward)", "[PaddedConv][CPU]") { }); 
myConv->getOperator()->associateInput(0,myInput); - myConv->getOperator()->associateInput(1,myWeights); - myConv->getOperator()->associateInput(2,myBias); - myConv->getOperator()->setDataType(DataType::Int32); - myConv->getOperator()->setBackend("cpu"); - op->computeOutputDims(); - myConv->forward(); + myConv->input(1).first->getOperator()->setOutput(0, myWeights); + myConv->input(2).first->getOperator()->setOutput(0, myBias); + + auto g = getConnectedGraphView(myConv); + g->setDataType(DataType::Int32); + g->setBackend("cpu"); + + auto scheduler = SequentialScheduler(g); + scheduler.forward(); REQUIRE(*(op->getOutput(0)) == *myOutput); } diff --git a/unit_tests/operator/Test_PowImpl.cpp b/unit_tests/operator/Test_PowImpl.cpp index 0c95e785958aca72b5ae1f5727134552310e5bef..01f9760275923b2249e5b6098b83b4ae27d5fb30 100644 --- a/unit_tests/operator/Test_PowImpl.cpp +++ b/unit_tests/operator/Test_PowImpl.cpp @@ -10,198 +10,308 @@ ********************************************************************************/ #include <catch2/catch_test_macros.hpp> +#include <cmath> +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t +#include <chrono> +#include <iostream> +#include <memory> +#include <numeric> // std::accumulate +#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution #include "aidge/data/Tensor.hpp" #include "aidge/operator/Pow.hpp" +#include "aidge/utils/TensorUtils.hpp" -#include "aidge/backend/cpu.hpp" +namespace Aidge { -#include <memory> +TEST_CASE("[cpu/operator] Pow", "[Pow][CPU]") { + constexpr std::uint16_t NBTRIALS = 10; + // Create a random number generator + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0 and 1 + std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10)); + std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5)); + std::uniform_int_distribution<int> boolDist(0,1); -using namespace Aidge; + // Create MatPow Operator + std::shared_ptr<Node> myPow = Pow(); + auto op = std::static_pointer_cast<OperatorTensor>(myPow-> getOperator()); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + + // Create 2 input Tensors + std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + op->associateInput(0,T0); + T0->setDataType(DataType::Float32); + T0->setBackend("cpu"); + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(); + op -> associateInput(1,T1); + T1->setDataType(DataType::Float32); + T1->setBackend("cpu"); + + // Create results Tensor + std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(); + Tres->setDataType(DataType::Float32); + Tres->setBackend("cpu"); + + // To measure execution time of 'MatPow_Op::forward()' member function call + std::chrono::time_point<std::chrono::system_clock> start; + std::chrono::time_point<std::chrono::system_clock> end; + std::chrono::duration<double, std::micro> duration{}; + + SECTION("PowImpl_cpu::forward()") { + SECTION("Scalar / Scalar") { -TEST_CASE("[cpu/operator] Pow(forward)", "[Pow][CPU]") { - SECTION("2D Tensor by Singleton") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {0.42139274, 0.51524192}, - {0.85247433, 0.13432795} - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array2D<float,1,1>{{2.0}}); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {0.17757183, 0.26547423}, - {0.72671247, 
0.01804400} - } - }); - - std::shared_ptr<Node> myPow = Pow(); - auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator()); - op->associateInput(0, input_1); - op->associateInput(1, input_2); - op->setDataType(DataType::Float32); - op->setBackend("cpu"); - op->computeOutputDims(); - myPow->forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 4; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); } + SECTION("Scalar / +1-D Tensor") { - } + } + SECTION("+1-D Tensor / +1-D Tensor - same dimensions") { + std::size_t number_of_operation = 0; - SECTION("3D Tensor by 1D Tensor") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> { - { - {{0.87519985, 0.10536593, 0.20268351}, - {0.75532353, 0.95977652, 0.03897029}}, + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + const std::size_t nbDims = nbDimsDist(gen); + std::vector<std::size_t> dims; + for (std::size_t i = 0; i < nbDims; ++i) { + dims.push_back(dimSizeDist(gen)); + } + const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>()); + number_of_operation += nb_elements; - {{0.67554104, 0.35499334, 0.27741563}, - {0.94270861, 0.48397779, 0.35532343}} - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<float,3>{ - {0.39333701, 0.08719915, 0.16713941} - }); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> { - { - {{0.94891787, 0.82182676, 0.76584703}, - {0.89549923, 0.99642646, 0.58137459}}, - - {{0.85702944, 0.91364944, 0.80709606}, - {0.97706109, 0.93867886, 0.84118503}} + // without broadcasting + float* array0 = new float[nb_elements]; + float* array1 = new float[nb_elements]; + float* result = new float[nb_elements]; + + for (std::size_t i = 0; i < nb_elements; ++i) { + array0[i] = valueDist(gen); + array1[i] = valueDist(gen); + result[i] = std::pow(array0[i], array1[i]); + } + + // input0 + T0->resize(dims); + T0 -> getImpl() -> setRawPtr(array0, nb_elements); + + // input1 + T1->resize(dims); + T1 -> getImpl() -> setRawPtr(array1, nb_elements); + + // results + Tres->resize(dims); + Tres -> getImpl() -> setRawPtr(result, nb_elements); + + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myPow->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] array1; + delete[] result; + + // with broadcasting } - }); - - std::shared_ptr<Node> myPow = Pow(); - auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator()); - op->associateInput(0, input_1); - op->associateInput(1, input_2); - op->setDataType(DataType::Float32); - op->setBackend("cpu"); - op->computeOutputDims(); - myPow->forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 12; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; + std::cout << "total time: " << duration.count() << "μs" << std::endl; } - } + SECTION("+1-D Tensor / +1-D Tensor - 
broadcasting") { + std::size_t number_of_operation = 0; - SECTION("2D Tensors") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {0.79780143, 0.49322051}, - {0.84239346, 0.83737719} - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array2D<float,2,2>{ - { - {0.59088874, 0.78858775}, - {0.42879432, 0.17615074} - } - }); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {0.87504572, 0.57271165}, - {0.92909741, 0.96922028} + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + // handle dimensions, replace some dimensions with '1' to get broadcasting + constexpr std::size_t nbDims = 4; + std::vector<std::size_t> dims; + for (std::size_t i = 0; i < nbDims; ++i) { + dims.push_back(dimSizeDist(gen)); + } + std::vector<std::size_t> dims0 = dims; + std::vector<std::size_t> dims1 = dims; + std::vector<std::size_t> dimsOut = dims; + for (std::size_t i = 0; i < nbDims; ++i) { + if (boolDist(gen)) { + dims0[i] = 1; + } + if (boolDist(gen)) { + dims1[i] = 1; + } + dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i]; + } + + // create arrays and fill them with random values + float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]]; + float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]]; + float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]]; + + for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) { + array0[i] = valueDist(gen); + } + for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) { + array1[i] = valueDist(gen); + } + + // compute true result + const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1}; + const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1}; + for (std::size_t a = 0; a < dimsOut[0]; ++a) { + for (std::size_t b = 0; b < dimsOut[1]; ++b) { + const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0) + + strides0[1] * ((dims0[1] > 1) ? b : 0); + const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0) + + strides1[1] * ((dims1[1] > 1) ? b : 0); + for (std::size_t c = 0; c < dimsOut[2]; ++c) { + const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a)); + for (std::size_t d = 0; d < dimsOut[3]; ++d) { + std::size_t idx0 = idx0_0 + + strides0[2] * ((dims0[2] > 1) ? c : 0) + + ((dims0[3] > 1) ? d : 0); + std::size_t idx1 = idx1_0 + + strides1[2] * ((dims1[2] > 1) ? c : 0) + + ((dims1[3] > 1) ? 
d : 0); + result[idx_out + d] = std::pow(array0[idx0], array1[idx1]); + // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " ** " << array1[idx1] << " -> " << idx_out + d << std::endl; + } + } + } + } + + // conversion to Aidge::Tensors + // input0 + T0->resize(dims0); + T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]); + + // input1 + T1->resize(dims1); + T1 -> getImpl() -> setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]); + + // results + Tres->resize(dimsOut); + Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]); + + // compute result + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myPow->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + // comparison between truth and computed result + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] array1; + delete[] result; + + const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>()); + number_of_operation += nb_elements; } - }); - - std::shared_ptr<Node> myPow = Pow(); - auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator()); - op->associateInput(0, input_1); - op->associateInput(1, input_2); - op->setDataType(DataType::Float32); - op->setBackend("cpu"); - op->computeOutputDims(); - myPow->forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 4; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; + std::cout << "total time: " << duration.count() << "μs" << std::endl; } + SECTION("+1-D Tensor / 1-D Tensor") { + std::size_t number_of_operation = 0; + std::uniform_int_distribution<std::size_t> nbRemovedDimsDist(std::size_t(1), std::size_t(3)); - } + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + // handle dimensions + constexpr std::size_t nbDims = 4; + std::vector<std::size_t> dims0(4); + for (std::size_t i = 0; i < nbDims; ++i) { + dims0[i] = dimSizeDist(gen); + } + std::vector<std::size_t> dimsOut = dims0; + std::vector<std::size_t> dims1 = dims0; + for (std::size_t i = 0; i < nbDims; ++i) { + if (boolDist(gen)) { + dims1[i] = 1; + } + } + dims1.erase(dims1.cbegin(), dims1.cbegin() + nbRemovedDimsDist(gen)); - SECTION("4D Tensor") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> { - { - { - {{0.80191749, 0.45388508, 0.86550850}, - {0.47226250, 0.55809456, 0.59451854}, - {0.45497441, 0.02653158, 0.44041735}}, - {{0.30726379, 0.73146582, 0.46462774}, - {0.30268502, 0.78075552, 0.65154958}, - {0.91332406, 0.62448132, 0.53238851}}, - {{0.13917381, 0.43061519, 0.30198061}, - {0.12880909, 0.08995515, 0.29609048}, - {0.46449280, 0.47559714, 0.24193990}} - }, - { - {{0.87349969, 0.51625526, 0.16921073}, - {0.95035923, 0.10066575, 0.56729180}, - {0.84686232, 0.05965143, 0.03635806}}, - {{0.61107808, 0.59954077, 0.45627308}, - {0.84114522, 0.77186388, 0.37427086}, - {0.13415480, 0.00617349, 0.84260136}}, - {{0.55090177, 0.57292056, 0.29158932}, - {0.67131883, 0.96988875, 0.69545972}, - {0.80979776, 0.18238151, 0.19527155}} + // create arrays and fill them with random values + float* 
array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]]; + std::size_t array1_size = std::accumulate(dims1.cbegin(), dims1.cend(), std::size_t(1), std::multiplies<std::size_t>()); + float* array1 = new float[array1_size]; + float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]]; + + for (std::size_t i = 0; i < (dims0[0]*dims0[1]*dims0[2]*dims0[3]); ++i) { + array0[i] = valueDist(gen); } - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array2D<float,1,1>{{2.0}}); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<float,2,3,3,3> { - { - { - {{6.43071651e-01, 2.06011668e-01, 7.49104977e-01}, - {2.23031864e-01, 3.11469525e-01, 3.53452295e-01}, - {2.07001716e-01, 7.03924568e-04, 1.93967447e-01}}, - - {{9.44110379e-02, 5.35042226e-01, 2.15878934e-01}, - {9.16182250e-02, 6.09579206e-01, 4.24516857e-01}, - {8.34160864e-01, 3.89976919e-01, 2.83437520e-01}}, - - {{1.93693489e-02, 1.85429439e-01, 9.11922902e-02}, - {1.65917836e-02, 8.09192937e-03, 8.76695737e-02}, - {2.15753555e-01, 2.26192638e-01, 5.85349165e-02}} - }, - { - {{7.63001740e-01, 2.66519487e-01, 2.86322720e-02}, - {9.03182685e-01, 1.01335924e-02, 3.21819991e-01}, - {7.17175782e-01, 3.55829368e-03, 1.32190844e-03}}, - - {{3.73416424e-01, 3.59449148e-01, 2.08185121e-01}, - {7.07525253e-01, 5.95773816e-01, 1.40078679e-01}, - {1.79975089e-02, 3.81119971e-05, 7.09977031e-01}}, - - {{3.03492755e-01, 3.28237981e-01, 8.50243345e-02}, - {4.50668961e-01, 9.40684199e-01, 4.83664215e-01}, - {6.55772448e-01, 3.32630165e-02, 3.81309800e-02}} + for (std::size_t i = 0; i < array1_size; ++i) { + array1[i] = valueDist(gen); } + + // compute true result + auto dims1_tmp = dims1; + dims1_tmp.insert(dims1_tmp.cbegin(), 4 - dims1_tmp.size(), std::size_t(1)); + + const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1}; + const std::size_t strides1[nbDims] = {dims1_tmp[1]*dims1_tmp[2]*dims1_tmp[3], dims1_tmp[2]*dims1_tmp[3], dims1_tmp[3], 1}; + for (std::size_t a = 0; a < dimsOut[0]; ++a) { + for (std::size_t b = 0; b < dimsOut[1]; ++b) { + const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0) + + strides0[1] * ((dims0[1] > 1) ? b : 0); + const std::size_t idx1_0 = strides1[0] * ((dims1_tmp[0] > 1) ? a : 0) + + strides1[1] * ((dims1_tmp[1] > 1) ? b : 0); + for (std::size_t c = 0; c < dimsOut[2]; ++c) { + const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a)); + for (std::size_t d = 0; d < dimsOut[3]; ++d) { + std::size_t idx0 = idx0_0 + + strides0[2] * ((dims0[2] > 1) ? c : 0) + + ((dims0[3] > 1) ? d : 0); + std::size_t idx1 = idx1_0 + + strides1[2] * ((dims1_tmp[2] > 1) ? c : 0) + + ((dims1_tmp[3] > 1) ? 
d : 0); + result[idx_out + d] = std::pow(array0[idx0], array1[idx1]); + // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " ** " << array1[idx1] << " -> " << idx_out + d << std::endl; + } + } + } + } + + // conversion to Aidge::Tensors + // input0 + T0->resize(dims0); + T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]); + + // input1 + T1->resize(dims1); + T1 -> getImpl() -> setRawPtr(array1, array1_size); + + // results + Tres->resize(dimsOut); + Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]); + + // compute result + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + myPow->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + // comparison between truth and computed result + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] array1; + delete[] result; + + const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>()); + number_of_operation += nb_elements; } - }); - - std::shared_ptr<Node> myPow = Pow(); - auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator()); - op->associateInput(0, input_1); - op->associateInput(1, input_2); - op->setDataType(DataType::Float32); - op->setBackend("cpu"); - op->computeOutputDims(); - myPow->forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 54; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + + std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; + std::cout << "total time: " << duration.count() << "μs" << std::endl; } } -} \ No newline at end of file +} +} // namespace Aidge diff --git a/unit_tests/operator/Test_ReduceMeanImpl.cpp b/unit_tests/operator/Test_ReduceMeanImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d9bf68b78d1ece371cbfb5cda3c502f82eaf97de --- /dev/null +++ b/unit_tests/operator/Test_ReduceMeanImpl.cpp @@ -0,0 +1,198 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> +#include <memory> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/ReduceMean.hpp" +#include "aidge/operator/Conv.hpp" + +#include "aidge/backend/cpu.hpp" +#include "aidge/utils/TensorUtils.hpp" + +using namespace Aidge; + +TEST_CASE("[cpu/operator] ReduceMean(forward)", "[ReduceMean][CPU]") { + SECTION("KeepDims") { + SECTION("test 1") { + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> { + { + { + { 5.0, 1.0 }, + { 20.0, 2.0 } + }, + { + { 30.0, 1.0 }, + { 40.0, 2.0 } + }, + { + { 55.0, 1.0 }, + { 60.0, 2.0 } + } + } + }); + Tensor myOutput = Tensor(Array3D<float,3,1,2> { + { + + {{ 12.5, 1.5 }}, + {{ 35.0, 1.5 }}, + {{ 57.5, 1.5 }} + } + }); + + std::shared_ptr<Node> myReduceMean = ReduceMean({1}, 1); + auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); + op->associateInput(0,myInput); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReduceMean->forward(); + op->getOutput(0)->print(); + + REQUIRE(*(op->getOutput(0)) == myOutput); + } + SECTION("test 2") { + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,3,2> { + { + { + { 0.0, 0.0 }, + { 1.0, 1.0 }, + { 2.0, 2.0 } + }, + { + { 3.0, 3.0 }, + { 4.0, 4.0 }, + { 5.0, 5.0 } + }, + { + { 6.0, 6.0 }, + { 7.0, 7.0 }, + { 8.0, 8.0 } + } + } + }); + Tensor myOutput = Tensor(Array3D<float,3,1,1> { + { + + {{ 1.0 }}, + {{ 4.0 }}, + {{ 7.0 }} + } + }); + + std::shared_ptr<Node> myReduceMean = ReduceMean({1, 2}, 1); + auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); + op->associateInput(0,myInput); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReduceMean->forward(); + myOutput.print(); + op->getOutput(0)->print(); + REQUIRE(*(op->getOutput(0)) == myOutput); + } + } + SECTION("not_KeepDims") { + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> { + { + { + { 5.0, 1.0 }, + { 20.0, 2.0 } + }, + { + { 30.0, 1.0 }, + { 40.0, 2.0 } + }, + { + { 55.0, 1.0 }, + { 60.0, 2.0 } + } + } + }); + std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<float,3,2> { + { + { 12.5, 1.5 }, + { 35.0, 1.5 }, + { 57.5, 1.5 } + } + }); + + std::shared_ptr<Node> myReduceMean = ReduceMean({1}, 0); + auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); + op->associateInput(0,myInput); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReduceMean->forward(); + op->getOutput(0)->print(); + + REQUIRE(*(op->getOutput(0)) == *myOutput); + + } + SECTION("all_axes") { + SECTION("1") { + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> { + { + { + { 5.0, 1.0 }, + { 20.0, 2.0 } + }, + { + { 30.0, 1.0 }, + { 40.0, 2.0 } + }, + { + { 55.0, 1.0 }, + { 60.0, 2.0 } + } + } + }); + std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> { + {18.25} + }); + + std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1, 2}, 0); + auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); + op->associateInput(0,myInput); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReduceMean->forward(); + op->getOutput(0)->print(); + + REQUIRE(*(op->getOutput(0)) == *myOutput); + } 
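+ // SECTION("2") below averages all 20 entries of a 5x4 tensor; float accumulation
+ // makes the result inexact, hence approxEq rather than exact equality.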
+ SECTION("2") { + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<float,5,4> { + {{ 0.004232f, 0.105120f, 0.045124f, 0.009205f}, + { 0.000766f, 0.272162f, 0.503560f, 0.044163f}, + { 0.049755f, 0.000305f, 0.143634f, 0.013253f}, + { 0.096258f, 0.311231f, 0.358143f, 0.000452f}, + { 0.468617f, 0.015693f, 0.145316f, 0.000105f}} + }); + std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> { + {0.1293547f} + }); + + std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1}, 0); + auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); + op->associateInput(0,myInput); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReduceMean->forward(); + op->getOutput(0)->print(); + // approxEq<float>(*(op->getOutput(0)), *myOutput); + REQUIRE(approxEq<float>(*(op->getOutput(0)), *myOutput)); + } + } +} \ No newline at end of file diff --git a/unit_tests/operator/Test_ReshapeImpl.cpp b/unit_tests/operator/Test_ReshapeImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1fee1f4cd132acf9ee39a86759f2e628317fce19 --- /dev/null +++ b/unit_tests/operator/Test_ReshapeImpl.cpp @@ -0,0 +1,71 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Reshape.hpp" + +#include "aidge/backend/cpu.hpp" + +#include <memory> + +using namespace Aidge; + +TEST_CASE("[cpu/operator] Reshape(forward)") { + SECTION("1D Tensor") { + std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array1D<float,6> { + {1.0, 2.0, 3.0, 4.0, 5.0, 6.0} + }); + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,3> { + { + {1.0, 2.0, 3.0}, + {4.0, 5.0, 6.0} + } + }); + + std::shared_ptr<Node> myReshape = Reshape({2, 3}); + auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator()); + op->associateInput(0, input); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReshape->forward(); + + REQUIRE(*(op->getOutput(0)) == *expectedOutput); + } + SECTION("2D Tensor") { + std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,3> { + { + {1.0, 2.0, 3.0}, + {4.0, 5.0, 6.0} + } + + }); + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,3,2> { + { + {1.0, 2.0}, + {3.0, 4.0}, + {5.0, 6.0} + } + }); + + std::shared_ptr<Node> myReshape = Reshape({3, 2}); + auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator()); + op->associateInput(0, input); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myReshape->forward(); + + REQUIRE(*(op->getOutput(0)) == *expectedOutput); + } +} \ No newline at end of file diff --git a/unit_tests/operator/Test_SliceImpl.cpp b/unit_tests/operator/Test_SliceImpl.cpp index 7a71f31e9850852cadd659c91683c30ddcbe9849..0b5ae682c659bf5a0f8d50448733b9ec18a4c36e 100644 --- a/unit_tests/operator/Test_SliceImpl.cpp +++ b/unit_tests/operator/Test_SliceImpl.cpp @@ -163,4 +163,4 @@ TEST_CASE("[cpu/operator] Slice(forward)", 
"[Slice][CPU]") { REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims()); REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType()); } -} \ No newline at end of file +} diff --git a/unit_tests/operator/Test_SoftmaxImpl.cpp b/unit_tests/operator/Test_SoftmaxImpl.cpp index 360b7440599030dbd93954e345f0d5986eb83b15..7459a45e48cad74e722dc881e4653d34b7f549d0 100644 --- a/unit_tests/operator/Test_SoftmaxImpl.cpp +++ b/unit_tests/operator/Test_SoftmaxImpl.cpp @@ -41,15 +41,15 @@ TEST_CASE("[cpu/operator] Softmax(forward)", "[Softmax][CPU]") { std::shared_ptr<Node> mySoftmax = Softmax(1); auto op = std::static_pointer_cast<OperatorTensor>(mySoftmax -> getOperator()); - mySoftmax->getOperator()->associateInput(0,input); - mySoftmax->getOperator()->setDataType(DataType::Float32); - mySoftmax->getOperator()->setBackend("cpu"); + op->associateInput(0,input); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); op->computeOutputDims(); mySoftmax->forward(); float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 20; ++i) { + for (std::size_t i = 0; i< expectedOutput->size(); ++i) { REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); } @@ -110,17 +110,16 @@ TEST_CASE("[cpu/operator] Softmax(forward)", "[Softmax][CPU]") { std::shared_ptr<Node> mySoftmax = Softmax(1); auto op = std::static_pointer_cast<OperatorTensor>(mySoftmax -> getOperator()); - mySoftmax->getOperator()->associateInput(0,input); - mySoftmax->getOperator()->setDataType(DataType::Float32); - mySoftmax->getOperator()->setBackend("cpu"); + op->associateInput(0,input); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); op->computeOutputDims(); mySoftmax->forward(); float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 54; ++i) { + for (std::size_t i = 0; i< expectedOutput->size(); ++i) { REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); } - // REQUIRE(*mySoftmax->getOperator()->getOutput(0) == *expectedOutput); } } \ No newline at end of file diff --git a/unit_tests/operator/Test_SubImpl.cpp b/unit_tests/operator/Test_SubImpl.cpp index dfd64078b77a557e07eb11cb958ac24eeb1f9aa3..f9ba894f081b76b3abd0f0909636a38eaee3601a 100644 --- a/unit_tests/operator/Test_SubImpl.cpp +++ b/unit_tests/operator/Test_SubImpl.cpp @@ -10,123 +10,307 @@ ********************************************************************************/ #include <catch2/catch_test_macros.hpp> +#include <cstddef> // std::size_t +#include <cstdint> // std::uint16_t +#include <chrono> +#include <iostream> +#include <memory> +#include <numeric> // std::accumulate +#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution #include "aidge/data/Tensor.hpp" #include "aidge/operator/Sub.hpp" +#include "aidge/utils/TensorUtils.hpp" -#include "aidge/backend/cpu.hpp" +namespace Aidge { -#include <memory> +TEST_CASE("[cpu/operator] Sub", "[Sub][CPU]") { + constexpr std::uint16_t NBTRIALS = 10; + // Create a random number generator + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0 and 1 + std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10)); + std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5)); + 
std::uniform_int_distribution<int> boolDist(0,1); -using namespace Aidge; + // Create MatMul Operator + std::shared_ptr<Node> mySub = Sub(); + auto op = std::static_pointer_cast<OperatorTensor>(mySub-> getOperator()); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + + // Create 2 input Tensors + std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + op->associateInput(0,T0); + T0->setDataType(DataType::Float32); + T0->setBackend("cpu"); + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(); + op -> associateInput(1,T1); + T1->setDataType(DataType::Float32); + T1->setBackend("cpu"); + + // Create results Tensor + std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(); + Tres->setDataType(DataType::Float32); + Tres->setBackend("cpu"); + + // To measure execution time of 'MatMul_Op::forward()' member function call + std::chrono::time_point<std::chrono::system_clock> start; + std::chrono::time_point<std::chrono::system_clock> end; + std::chrono::duration<double, std::micro> duration{}; + + SECTION("SubImpl_cpu::forward()") { + SECTION("Scalar / Scalar") { -TEST_CASE("[cpu/operator] Sub(forward)", "[Sub][CPU]") { - SECTION("2D Tensor by Singleton") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {0.34234560, 0.92812711}, - {0.73706615, 0.69953883} - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array2D<float,1,1>{{2.5}}); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {-2.15765429, -1.57187295}, - {-1.76293385, -1.80046117} - } - }); - - std::shared_ptr<Node> mySub = Sub(); - auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator()); - mySub->getOperator()->associateInput(0, input_1); - mySub->getOperator()->associateInput(1, input_2); - mySub->getOperator()->setDataType(DataType::Float32); - mySub->getOperator()->setBackend("cpu"); - op->computeOutputDims(); - mySub->forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 4; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); } + SECTION("Scalar / +1-D Tensor") { - } + } + SECTION("+1-D Tensor / +1-D Tensor - same dimensions") { + std::size_t number_of_operation = 0; - SECTION("2D Tensors") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {0.34234560, 0.92812711}, - {0.73706615, 0.69953883} - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array2D<float,2,2>{ - { - {0.61729127, 0.83004373}, - {0.72002089, 0.52473849} - } - }); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> { - { - {-0.27494568, 0.09808338}, - {0.01704526, 0.17480034} + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + const std::size_t nbDims = nbDimsDist(gen); + std::vector<std::size_t> dims; + for (std::size_t i = 0; i < nbDims; ++i) { + dims.push_back(dimSizeDist(gen)); + } + const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>()); + number_of_operation += nb_elements; + + // without broadcasting + float* array0 = new float[nb_elements]; + float* array1 = new float[nb_elements]; + float* result = new float[nb_elements]; + + for (std::size_t i = 0; i < nb_elements; ++i) { + array0[i] = valueDist(gen); + array1[i] = valueDist(gen); + result[i] = array0[i] - 
array1[i]; + } + + // input0 + T0->resize(dims); + T0 -> getImpl() -> setRawPtr(array0, nb_elements); + + // input1 + T1->resize(dims); + T1 -> getImpl() -> setRawPtr(array1, nb_elements); + + // results + Tres->resize(dims); + Tres -> getImpl() -> setRawPtr(result, nb_elements); + + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + mySub->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] array1; + delete[] result; + + // with broadcasting } - }); - - std::shared_ptr<Node> mySub = Sub(); - auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator()); - mySub->getOperator()->associateInput(0, input_1); - mySub->getOperator()->associateInput(1, input_2); - mySub->getOperator()->setDataType(DataType::Float32); - mySub->getOperator()->setBackend("cpu"); - op->computeOutputDims(); - mySub->forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 4; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; + std::cout << "total time: " << duration.count() << "μs" << std::endl; } - } + SECTION("+1-D Tensor / +1-D Tensor - broadcasting") { + std::size_t number_of_operation = 0; - SECTION("3D Tensor by 1D Tensor") { - std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> { - { - {{0.84181279, 0.20655948, 0.09750116}, - {0.37723488, 0.73120135, 0.04666907}}, + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + // handle dimensions, replace some dimensions with '1' to get broadcasting + constexpr std::size_t nbDims = 4; + std::vector<std::size_t> dims; + for (std::size_t i = 0; i < nbDims; ++i) { + dims.push_back(dimSizeDist(gen)); + } + std::vector<std::size_t> dims0 = dims; + std::vector<std::size_t> dims1 = dims; + std::vector<std::size_t> dimsOut = dims; + for (std::size_t i = 0; i < nbDims; ++i) { + if (boolDist(gen)) { + dims0[i] = 1; + } + if (boolDist(gen)) { + dims1[i] = 1; + } + dimsOut[i] = (dims0[i] == 1) ? 
dims1[i] : dims0[i]; + } - {{0.91483921, 0.93985939, 0.58823180}, - {0.39963132, 0.67879969, 0.33209187}} - } - }); - std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<float,3>{ - {0.04784805, 0.91903114, 0.38606840} - }); - std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> { - { - {{0.79396474, -0.71247166, -0.28856725}, - {0.32938683, -0.18782979, -0.33939934}}, - - {{0.86699116, 0.02082825, 0.20216340}, - {0.35178328, -0.24023145, -0.05397654}} + // create arrays and fill them with random values + float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]]; + float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]]; + float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]]; + + for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) { + array0[i] = valueDist(gen); + } + for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) { + array1[i] = valueDist(gen); + } + + // compute true result + const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1}; + const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1}; + for (std::size_t a = 0; a < dimsOut[0]; ++a) { + for (std::size_t b = 0; b < dimsOut[1]; ++b) { + const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0) + + strides0[1] * ((dims0[1] > 1) ? b : 0); + const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0) + + strides1[1] * ((dims1[1] > 1) ? b : 0); + for (std::size_t c = 0; c < dimsOut[2]; ++c) { + const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a)); + for (std::size_t d = 0; d < dimsOut[3]; ++d) { + std::size_t idx0 = idx0_0 + + strides0[2] * ((dims0[2] > 1) ? c : 0) + + ((dims0[3] > 1) ? d : 0); + std::size_t idx1 = idx1_0 + + strides1[2] * ((dims1[2] > 1) ? c : 0) + + ((dims1[3] > 1) ? 
d : 0); + result[idx_out + d] = array0[idx0] - array1[idx1]; + // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " - " << array1[idx1] << " -> " << idx_out + d << std::endl; + } + } + } + } + + // conversion to Aidge::Tensors + // input0 + T0->resize(dims0); + T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]); + + // input1 + T1->resize(dims1); + T1 -> getImpl() -> setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]); + + // results + Tres->resize(dimsOut); + Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]); + + // compute result + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + mySub->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + // comparison between truth and computed result + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] array1; + delete[] result; + + const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>()); + number_of_operation += nb_elements; } - }); - - std::shared_ptr<Node> mySub = Sub(); - auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator()); - mySub->getOperator()->associateInput(0, input_1); - mySub->getOperator()->associateInput(1, input_2); - mySub->getOperator()->setDataType(DataType::Float32); - mySub->getOperator()->setBackend("cpu"); - op->computeOutputDims(); - mySub->forward(); - - float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr()); - float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr()); - for (std::size_t i = 0; i< 12; ++i) { - REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001); + std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; + std::cout << "total time: " << duration.count() << "μs" << std::endl; } + SECTION("+1-D Tensor / 1-D Tensor") { + std::size_t number_of_operation = 0; + std::uniform_int_distribution<std::size_t> nbRemovedDimsDist(std::size_t(1), std::size_t(3)); + + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // generate 2 random Tensors + // handle dimensions + constexpr std::size_t nbDims = 4; + std::vector<std::size_t> dims0(4); + for (std::size_t i = 0; i < nbDims; ++i) { + dims0[i] = dimSizeDist(gen); + } + std::vector<std::size_t> dimsOut = dims0; + std::vector<std::size_t> dims1 = dims0; + for (std::size_t i = 0; i < nbDims; ++i) { + if (boolDist(gen)) { + dims1[i] = 1; + } + } + dims1.erase(dims1.cbegin(), dims1.cbegin() + nbRemovedDimsDist(gen)); + + // create arrays and fill them with random values + float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]]; + std::size_t array1_size = std::accumulate(dims1.cbegin(), dims1.cend(), std::size_t(1), std::multiplies<std::size_t>()); + float* array1 = new float[array1_size]; + float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]]; + + for (std::size_t i = 0; i < (dims0[0]*dims0[1]*dims0[2]*dims0[3]); ++i) { + array0[i] = valueDist(gen); + } + for (std::size_t i = 0; i < array1_size; ++i) { + array1[i] = valueDist(gen); + } + // compute true result + auto dims1_tmp = dims1; + dims1_tmp.insert(dims1_tmp.cbegin(), 4 - dims1_tmp.size(), std::size_t(1)); + + const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1}; + const std::size_t strides1[nbDims] = 
{dims1_tmp[1]*dims1_tmp[2]*dims1_tmp[3], dims1_tmp[2]*dims1_tmp[3], dims1_tmp[3], 1}; + for (std::size_t a = 0; a < dimsOut[0]; ++a) { + for (std::size_t b = 0; b < dimsOut[1]; ++b) { + const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0) + + strides0[1] * ((dims0[1] > 1) ? b : 0); + const std::size_t idx1_0 = strides1[0] * ((dims1_tmp[0] > 1) ? a : 0) + + strides1[1] * ((dims1_tmp[1] > 1) ? b : 0); + for (std::size_t c = 0; c < dimsOut[2]; ++c) { + const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a)); + for (std::size_t d = 0; d < dimsOut[3]; ++d) { + std::size_t idx0 = idx0_0 + + strides0[2] * ((dims0[2] > 1) ? c : 0) + + ((dims0[3] > 1) ? d : 0); + std::size_t idx1 = idx1_0 + + strides1[2] * ((dims1_tmp[2] > 1) ? c : 0) + + ((dims1_tmp[3] > 1) ? d : 0); + result[idx_out + d] = array0[idx0] - array1[idx1]; + // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " - " << array1[idx1] << " -> " << idx_out + d << std::endl; + } + } + } + } + + // conversion to Aidge::Tensors + // input0 + T0->resize(dims0); + T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]); + + // input1 + T1->resize(dims1); + T1 -> getImpl() -> setRawPtr(array1, array1_size); + + // results + Tres->resize(dimsOut); + Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]); + + // compute result + op->computeOutputDims(); + start = std::chrono::system_clock::now(); + mySub->forward(); + end = std::chrono::system_clock::now(); + duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start); + + // comparison between truth and computed result + REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres)); + + delete[] array0; + delete[] array1; + delete[] result; + + const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>()); + number_of_operation += nb_elements; + } + + std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl; + std::cout << "total time: " << duration.count() << "μs" << std::endl; + } } -} \ No newline at end of file +} +} // namespace Aidge diff --git a/unit_tests/operator/Test_TransposeImpl.cpp b/unit_tests/operator/Test_TransposeImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d381faadd7750f6a9a48fe9371f98e813b94a310 --- /dev/null +++ b/unit_tests/operator/Test_TransposeImpl.cpp @@ -0,0 +1,127 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> +#include <memory> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Transpose.hpp" + +#include "aidge/backend/cpu.hpp" + +using namespace Aidge; + +TEST_CASE("[cpu/operator] Transpose(forward)") { + SECTION("3D Tensor") { + std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> { + { + {{0.42507452, 0.11244237, 0.43243718, 0.62354952}, + {0.90250170, 0.48719984, 0.45781207, 0.92536664}, + {0.06348717, 0.91678733, 0.64452291, 0.00484818}}, + + {{0.66873497, 0.99508536, 0.55714869, 0.84887981}, + {0.41666120, 0.92365038, 0.80034822, 0.38721532}, + {0.52037925, 0.53937608, 0.66380072, 0.36330253}} + } + }); + std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array3D<float,2,4,3> { + { + {{0.42507452, 0.90250170, 0.06348717}, + {0.11244237, 0.48719984, 0.91678733}, + {0.43243718, 0.45781207, 0.64452291}, + {0.62354952, 0.92536664, 0.00484818}}, + + {{0.66873497, 0.41666120, 0.52037925}, + {0.99508536, 0.92365038, 0.53937608}, + {0.55714869, 0.80034822, 0.66380072}, + {0.84887981, 0.38721532, 0.36330253}} + } + }); + std::shared_ptr<Node> myTranspose = Transpose<3>(std::array<DimSize_t,3>{{0,2,1}}); + auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator()); + op->associateInput(0,input); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + op->computeOutputDims(); + myTranspose->forward(); + + REQUIRE(*(op->getOutput(0)) == *output); + } + SECTION("4D Tensor") { + std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,2,3,1,4> { + { + { + { + {1, 2, 3, 4} + }, + { + {5, 6, 7, 8} + }, + { + {9, 10, 11, 12} + } + }, + { + { + {13, 14, 15, 16} + }, + { + {17, 18, 19, 20} + }, + { + {21, 22, 23, 24} + } + } + } + }); + std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array4D<int,2,4,1,3> { + { + { + { + {1, 5, 9} + }, + { + {2, 6, 10} + }, + { + {3, 7, 11} + }, + { + {4, 8, 12} + } + }, + { + { + {13, 17, 21} + }, + { + {14, 18, 22} + }, + { + {15, 19, 23} + }, + { + {16, 20, 24} + } + } + } + }); + std::shared_ptr<Node> myTranspose = Transpose<4>(std::array<DimSize_t,4>{{0,3,2,1}}); + auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator()); + op->associateInput(0,input); + op->setDataType(DataType::Int32); + op->setBackend("cpu"); + op->computeOutputDims(); + myTranspose->forward(); + + REQUIRE(*(op->getOutput(0)) == *output); + } +} \ No newline at end of file diff --git a/unit_tests/recipies/Test_ConstantFolding.cpp b/unit_tests/recipies/Test_ConstantFolding.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c4866b1258702b93a1bce80501d9acd094a65741 --- /dev/null +++ b/unit_tests/recipies/Test_ConstantFolding.cpp @@ -0,0 +1,85 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> + +#include "aidge/recipes/Recipes.hpp" +#include "aidge/operator/Add.hpp" +#include "aidge/operator/MatMul.hpp" +#include "aidge/operator/Producer.hpp" +#include "aidge/graph/OpArgs.hpp" +#include "aidge/scheduler/SequentialScheduler.hpp" +#include "aidge/utils/TensorUtils.hpp" +#include <cstddef> + +using namespace Aidge; + +TEST_CASE("[ConstantFolding] test") { + // generate the original GraphView + auto matmul0 = MatMul("matmul0"); + auto add0 = Add(2, "add0"); + auto matmul1 = MatMul("matmul1"); + auto add1 = Add(2, "add1"); + + auto b0 = Producer(std::make_shared<Tensor>(Array1D<float,5>{{1, 2, 3, 4, 5}}), "B0", true); + auto w0 = Producer(std::make_shared<Tensor>(Array2D<float,5,5>{{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}, {1, 2, 3, 4, 5}}}), "W0", true); + auto b1 = Producer(std::make_shared<Tensor>(Array1D<float,5>{{1, 2, 3, 4, 5}}), "B1", true); + auto w1 = Producer(std::make_shared<Tensor>(Array2D<float,5,5>{{{6, 7, 8, 9, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}}}),"W1", true); + auto input = Producer(std::make_shared<Tensor>(Array2D<float,2,5>{{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}}}), "input", true); + + input->addChild(matmul0, 0, 0); + w0->addChild(matmul0, 0, 1); + + matmul0->addChild(add0, 0, 0); + b0->addChild(add0, 0, 1); + + add0->addChild(matmul1, 0, 0); + w1->addChild(matmul1, 0, 1); + + matmul1->addChild(add1, 0, 0); + b1->addChild(add1, 0, 1); + + auto g = std::make_shared<GraphView>(); + g->add({input, w0, matmul0, b0, add0, w1, matmul1, b1, add1}); + g->setBackend("cpu"); + g->forwardDims(); + + // Check original graph + REQUIRE(g->getNodes() == + std::set<std::shared_ptr<Node>>({input, w0, matmul0, b0, add0, w1, matmul1, b1, add1})); + REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0))); + REQUIRE(((add0->getParent(0) == matmul0) && (add0->getParent(1) == b0))); + REQUIRE(((matmul1->getParent(0) == add0) && (matmul1->getParent(1) == w1))); + REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1))); + + auto scheduler = SequentialScheduler(g); + scheduler.forward(); + + const std::shared_ptr<Tensor> result = std::make_shared<Tensor>(Array2D<float,2,5>{{ + { 1201.000000, 1532.000000, 1863.000000, 2194.000000, 785.000000}, + { 2501.000000, 3207.000000, 3913.000000, 4619.000000, 1735.000000} + }}); + + auto add1Op = std::static_pointer_cast<Add_Op>(add1->getOperator()); + REQUIRE(approxEq<float>(*(add1Op->getOutput(0)), *result)); + + // Transform GraphView inplace + constantFolding(g); + + // Check new GraphView + std::set<std::shared_ptr<Node>> newNodes = g->getNodes(); + REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({input, w0, matmul0, b0, add0, w1, matmul1, b1, add1})); + REQUIRE(newNodes.size() == 1); + REQUIRE((*newNodes.cbegin())->type() == "Producer"); + + auto prodOp = std::static_pointer_cast<Producer_Op>((*newNodes.cbegin())->getOperator()); + REQUIRE(approxEq<float>(*(prodOp->getOutput(0)), *result)); +} diff --git a/unit_tests/recipies/Test_ExplicitCastMove.cpp b/unit_tests/recipies/Test_ExplicitCastMove.cpp index 7d169ba9ba949ead0bf96f80e53a47e1ca6c24d9..27c788961b787c6f5248254f19ef7ac7a4366206 100644 --- a/unit_tests/recipies/Test_ExplicitCastMove.cpp +++ b/unit_tests/recipies/Test_ExplicitCastMove.cpp @@ -11,7 +11,7 @@ #include <catch2/catch_test_macros.hpp> -#include 
"aidge/recipies/Recipies.hpp" +#include "aidge/recipes/Recipes.hpp" #include "aidge/operator/Conv.hpp" #include "aidge/operator/Producer.hpp" #include "aidge/graph/OpArgs.hpp" diff --git a/unit_tests/recipies/Test_FuseBatchNorm.cpp b/unit_tests/recipies/Test_FuseBatchNorm.cpp index c4b3bf18a5f5b68d0e41b9cd40966790a0cf7ff6..68a01541894ba25a8841343d2b3943ccc08c7a9d 100644 --- a/unit_tests/recipies/Test_FuseBatchNorm.cpp +++ b/unit_tests/recipies/Test_FuseBatchNorm.cpp @@ -18,14 +18,14 @@ #include "aidge/operator/Conv.hpp" #include "aidge/operator/BatchNorm.hpp" #include "aidge/operator/Producer.hpp" -#include "aidge/recipies/Recipies.hpp" -#include "aidge/scheduler/Scheduler.hpp" +#include "aidge/recipes/Recipes.hpp" +#include "aidge/scheduler/SequentialScheduler.hpp" #include "aidge/data/Tensor.hpp" namespace Aidge { -TEST_CASE("[core/recipies] FuseBatchNorm", "[recipies][FuseBatchNorm]") { +TEST_CASE("[core/recipes] FuseBatchNorm", "[recipes][FuseBatchNorm]") { auto myProd = Producer({2, 3, 3, 3}, "dataProvider"); auto myConv = Conv(3, 3, {1, 1}, "conv1"); auto myBN = BatchNorm<2>(32, 1.0e-5F, 0.1F, "batchnorm1"); @@ -86,14 +86,11 @@ TEST_CASE("[core/recipies] FuseBatchNorm", "[recipies][FuseBatchNorm]") { myBNOp -> setInput(4, std::make_shared<Tensor>(Array1D<float,3> {{0.4470, 0.3064, 0.7061}})); auto g1 = Sequential({ + myProd, myConv, myBN }); g1 -> setName("fuseBNGraph"); - myProd -> addChild(myConv); // set graph input - - myProdOp -> setDataType(DataType::Float32); - myProdOp -> setBackend("cpu"); g1 -> compile("cpu", DataType::Float32); auto s = SequentialScheduler(g1); @@ -107,7 +104,7 @@ TEST_CASE("[core/recipies] FuseBatchNorm", "[recipies][FuseBatchNorm]") { std::shared_ptr<Tensor> res2 = std::make_shared<Tensor>(*(myConvOp -> getOutput(0))); REQUIRE(g1 -> outputNodes().size() == 1); - REQUIRE(g1 -> inputNodes().size() == 1); + REQUIRE(g1 -> inputNodes().size() == 0); bool eq = true; for (std::size_t i = 0; i < res1->size(); ++i) { eq &= std::abs(res1->get<float>(i) - res2->get<float>(i)) < 1.0e-06; diff --git a/unit_tests/recipies/Test_HorizontalTiling.cpp b/unit_tests/recipies/Test_HorizontalTiling.cpp index 268d94cc55821c41f9c3d4a8451b5730ecaf1bd0..a8a384f611a8cf99a0aa94c58e9bcd5955f698c4 100644 --- a/unit_tests/recipies/Test_HorizontalTiling.cpp +++ b/unit_tests/recipies/Test_HorizontalTiling.cpp @@ -16,14 +16,14 @@ #include "aidge/graph/OpArgs.hpp" #include "aidge/operator/Conv.hpp" #include "aidge/operator/ReLU.hpp" -#include "aidge/recipies/Recipies.hpp" -#include "aidge/scheduler/Scheduler.hpp" +#include "aidge/recipes/Recipes.hpp" +#include "aidge/scheduler/SequentialScheduler.hpp" #include "aidge/operator/Concat.hpp" namespace Aidge { -TEST_CASE("[core/recipies] Tiling(transformation)", "[Tiling][Recipies]") { +TEST_CASE("[core/recipes] Tiling(transformation)", "[Tiling][Recipes]") { SECTION("Transform a pre-generated GraphView") { diff --git a/unit_tests/scheduler/Test_CastMove.cpp b/unit_tests/scheduler/Test_CastMove.cpp index a52b2b06901818f01117273d181d5d5388348f95..5ca2cd9de4dcc9dab2c78f7ae1e1bf3090db8f2b 100644 --- a/unit_tests/scheduler/Test_CastMove.cpp +++ b/unit_tests/scheduler/Test_CastMove.cpp @@ -18,8 +18,8 @@ #include "aidge/graph/Node.hpp" #include "aidge/graph/GraphView.hpp" #include "aidge/graph/OpArgs.hpp" -#include "aidge/scheduler/Scheduler.hpp" -#include "aidge/recipies/Recipies.hpp" +#include "aidge/scheduler/SequentialScheduler.hpp" +#include "aidge/recipes/Recipes.hpp" #include "aidge/backend/cpu.hpp" diff --git 
a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp index 8ea8e726f286035a1059a317471b893ce4639251..953f291d107e8ea99c25b9aa1f06def6b3e381b2 100644 --- a/unit_tests/scheduler/Test_Scheduler.cpp +++ b/unit_tests/scheduler/Test_Scheduler.cpp @@ -17,12 +17,14 @@ #include "aidge/graph/Node.hpp" #include "aidge/graph/GraphView.hpp" #include "aidge/graph/OpArgs.hpp" -#include "aidge/scheduler/Scheduler.hpp" +#include "aidge/scheduler/SequentialScheduler.hpp" +#include "aidge/scheduler/ParallelScheduler.hpp" #include "aidge/backend/cpu.hpp" +#include "aidge/recipes/GraphViewHelper.hpp" -using namespace Aidge; +namespace Aidge { TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") { std::shared_ptr<Tensor> inputTensor = @@ -205,5 +207,231 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") { SECTION("Test Residual graph") { } - SECTION("Test Recurrent graph") {} -} \ No newline at end of file + SECTION("Test Recurrent graph (sequential)") { + std::shared_ptr<Tensor> in = std::make_shared<Tensor>( + Array2D<int, 2, 3>{{{1, 2, 3}, {4, 5, 6}}}); + std::shared_ptr<Tensor> initTensor = std::make_shared<Tensor>( + Array2D<int, 2, 3>{{{0, 0, 0}, {1, 1, 1}}}); + std::shared_ptr<Tensor> biasTensor = std::make_shared<Tensor>( + Array2D<int, 2, 3>{{{2, 0, 0}, {1, 0, 0}}}); + + auto add1 = Add(2, "add1"); + auto mem = Memorize(3, "mem1"); + auto add2 = Add(2, "add2"); + auto bias = Producer(biasTensor, "bias"); + auto init = Producer(initTensor, "init"); + auto input = Producer(in, "input"); + + std::shared_ptr<GraphView> g = Sequential({add1, mem, add2}); + init->addChild(mem, 0, 1); + mem->addChild(add1, 1, 1); + bias->addChild(add2, 0, 1); + input->addChild(add1, 0, 0); + // Update GraphView inputs/outputs following previous connections: + g->add({mem, add1, add2, init, bias, input}); + + g->setBackend("cpu"); + g->setDataType(Aidge::DataType::Int32); + g->save("graphRecurrent"); + g->forwardDims(); + + SequentialScheduler scheduler(g); + REQUIRE_NOTHROW(scheduler.forward(true)); + scheduler.saveStaticSchedulingDiagram("static_schedule"); + scheduler.saveSchedulingDiagram("schedulingRecurrent_seq"); + + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>( + Array2D<int, 2, 3>{{{5, 6, 9}, {14, 16, 19}}}); + std::shared_ptr<Tensor> result = + std::static_pointer_cast<Tensor>(g->getNode("add2")->getOperator()->getRawOutput(0)); + result->print(); + expectedOutput->print(); + bool equal = (*result == *expectedOutput); + REQUIRE(equal); + } + + + SECTION("Test Recurrent graph (parallel)") { + std::shared_ptr<Tensor> in = std::make_shared<Tensor>( + Array2D<int, 2, 3>{{{1, 2, 3}, {4, 5, 6}}}); + std::shared_ptr<Tensor> initTensor = std::make_shared<Tensor>( + Array2D<int, 2, 3>{{{0, 0, 0}, {1, 1, 1}}}); + std::shared_ptr<Tensor> biasTensor = std::make_shared<Tensor>( + Array2D<int, 2, 3>{{{2, 0, 0}, {1, 0, 0}}}); + + auto add1 = Add(2, "add1"); + auto mem = Memorize(3, "mem1"); + auto add2 = Add(2, "add2"); + auto bias = Producer(biasTensor, "bias"); + auto init = Producer(initTensor, "init"); + auto input = Producer(in, "input"); + + std::shared_ptr<GraphView> g = Sequential({add1, mem, add2}); + init->addChild(mem, 0, 1); + mem->addChild(add1, 1, 1); + bias->addChild(add2, 0, 1); + input->addChild(add1, 0, 0); + // Update GraphView inputs/outputs following previous connections: + g->add({mem, add1, add2, init, bias, input}); + + g->setBackend("cpu"); + g->setDataType(Aidge::DataType::Int32); + g->save("graphRecurrent"); + g->forwardDims(); + + 
ParallelScheduler scheduler(g); + REQUIRE_NOTHROW(scheduler.forward(true)); + scheduler.saveSchedulingDiagram("schedulingRecurrent_par"); + + std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>( + Array2D<int, 2, 3>{{{5, 6, 9}, {14, 16, 19}}}); + std::shared_ptr<Tensor> result = + std::static_pointer_cast<Tensor>(g->getNode("add2")->getOperator()->getRawOutput(0)); + result->print(); + expectedOutput->print(); + bool equal = (*result == *expectedOutput); + REQUIRE(equal); + } + + SECTION("Test ConnectInput graph") { + std::shared_ptr<GraphView> g = + Sequential({ + Conv(1, 3, {3, 3}, "conv1"), + Conv(3, 4, {1, 1}, "conv2"), + Conv(4, 3, {1, 1}, "conv3"), + FC(27, 5, false, "fc")}); + + // g->getNode("conv1")->getOperator()->setInput(0, inputTensor); + g->getNode("conv1")->getOperator()->setInput(1, weight1); + g->getNode("conv1")->getOperator()->setInput(2, bias1); + + std::shared_ptr<Tensor> weight2 = + std::make_shared<Tensor>(Array4D<int, 4, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}}, + {{{4}}, {{5}}, {{6}}}, + {{{7}}, {{8}}, {{9}}}, + {{{10}}, {{11}}, {{12}}}}}); + std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int, 4>{{1, 2, 3, 4}}); + g->getNode("conv2")->getOperator()->setInput(1, weight2); + g->getNode("conv2")->getOperator()->setInput(2, bias2); + // *(g->getNode("conv2")->getOperator()->input(1, weight2); + + std::shared_ptr<Tensor> weight3 = std::make_shared<Tensor>( + Array4D<int, 3, 4, 1, 1>{{{{{1}}, {{2}}, {{3}}, {{4}}}, + {{{5}}, {{6}}, {{7}}, {{8}}}, + {{{9}}, {{10}}, {{11}}, {{12}}}}}); + std::shared_ptr<Tensor> bias3 = std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}}); + g->getNode("conv3")->getOperator()->setInput(1, weight3); + g->getNode("conv3")->getOperator()->setInput(2, bias3); + + std::shared_ptr<Tensor> weightfc = std::make_shared<Tensor>( + Array2D<int, 5, 27>{{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, + {13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + {10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6}, + {7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3}, + {4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}}); + std::shared_ptr<Tensor> biasfc = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}}); + g->getNode("fc")->getOperator()->setInput(1, weightfc); + g->getNode("fc")->getOperator()->setInput(2, biasfc); + + // input->addChild(g); + g->setDataType(Aidge::DataType::Int32); + g->setBackend("cpu"); + std::vector<std::vector<Aidge::DimSize_t>> dims = {inputTensor->dims()}; + g->forwardDims(dims); + SequentialScheduler scheduler(g); + + std::vector<std::shared_ptr<Aidge::Tensor>> dataIn = {inputTensor}; + REQUIRE_NOTHROW(scheduler.forward(true, dataIn)); + + scheduler.saveSchedulingDiagram("schedulingSequential"); + + std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{ + {{{{367, 412, 457}, {592, 637, 682}, {817, 862, 907}}, + {{854, 980, 1106}, {1484, 1610, 1736}, {2114, 2240, 2366}}, + {{1341, 1548, 1755}, {2376, 2583, 2790}, {3411, 3618, 3825}}}, + {{{1492, 1537, 1582}, {1717, 1762, 1807}, {1942, 1987, 2032}}, + {{4004, 4130, 4256}, {4634, 4760, 4886}, {5264, 5390, 5516}}, + {{6516, 6723, 6930}, {7551, 7758, 7965}, {8586, 8793, 9000}}}}}); + + std::shared_ptr<Tensor> expectedOutput2 = std::make_shared<Tensor>(Array4D<int, 2, 4, 3, 3>{ + {{{{6099, 7017, 
7935}, {10689, 11607, 12525}, {15279, 16197, 17115}}, + {{13786, 15838, 17890}, {24046, 26098, 28150}, {34306, 36358, 38410}}, + {{21473, 24659, 27845}, {37403, 40589, 43775}, {53333, 56519, 59705}}, + {{29160, 33480, 37800}, {50760, 55080, 59400}, {72360, 76680, 81000}}}, + {{{29049, 29967, 30885}, {33639, 34557, 35475}, {38229, 39147, 40065}}, + {{65086, 67138, 69190}, {75346, 77398, 79450}, {85606, 87658, 89710}}, + {{101123, 104309, 107495}, {117053, 120239, 123425}, {132983, 136169, 139355}}, + {{137160, 141480, 145800}, {158760, 163080, 167400}, {180360, 184680, 189000}}}}}); + + std::shared_ptr<Tensor> expectedOutput3 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{ + {{{{214731, 246591, 278451}, {374031, 405891, 437751}, {533331, 565191, 597051}}, + {{496804, 570568, 644332}, {865624, 939388, 1013152}, {1234444, 1308208, 1381972}}, + {{778877, 894545, 1010213}, {1357217, 1472885, 1588553}, {1935557, 2051225, 2166893}}}, + {{{1011231, 1043091, 1074951}, {1170531, 1202391, 1234251}, {1329831, 1361691, 1393551}}, + {{2340904, 2414668, 2488432}, {2709724, 2783488, 2857252}, {3078544, 3152308, 3226072}}, + {{3670577, 3786245, 3901913}, {4248917, 4364585, 4480253}, {4827257, 4942925, 5058593}}}}}); + + Tensor expectedOutput4 = Array2D<int, 2, 5>{ + {{205050376, 198925904, 181355097, 196978090, 238868348}, + {598467376, 561797804, 560823897, 593043790, 698672948}}}; + std::shared_ptr<Tensor> other1 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv1")->getOperator())->getOutput(0); + bool equal1 = (*other1 == *expectedOutput1); + REQUIRE(equal1); + std::shared_ptr<Tensor> other2 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv2")->getOperator())->getOutput(0); + bool equal2 = (*other2 == *expectedOutput2); + REQUIRE(equal2); + std::shared_ptr<Tensor> other3 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv3")->getOperator())->getOutput(0); + bool equal3 = (*other3 == *expectedOutput3); + REQUIRE(equal3); + std::shared_ptr<Tensor> other4 = std::static_pointer_cast<OperatorTensor>(g->getNode("fc")->getOperator())->getOutput(0); + bool equal4 = (*other4 == expectedOutput4); + REQUIRE(equal4); + } +} + +TEST_CASE("[cpu/scheduler] SequentialScheduler(backward)", "[scheduler][backward]") { + + // create GraphView + std::shared_ptr<GraphView> gv = Sequential({ReLU("relu0"), Sqrt("srqt0"), ReLU("relu1")}); + + std::shared_ptr<Tensor> inputTensor = + std::make_shared<Tensor>(Array4D<float, 2, 1, 5, 5>{{{{{0.0f, 1.0f, 2.0f, 3.0f, 4.0f}, + {5.0f, 6.0f, 7.0f, 8.0f, 9.0f}, + {10.0f, 11.0f, 12.0f, 13.0f, 14.0f}, + {15.0f, 16.0f, 17.0f, 18.0f, 19.0f}, + {20.0f, 21.0f, 22.0f, 23.0f, 24.0f}}}, + {{{25.0f, 26.0f, 27.0f, 28.0f, 29.0f}, + {30.0f, 31.0f, 32.0f, 33.0f, 34.0f}, + {35.0f, 36.0f, 37.0f, 38.0f, 39.0f}, + {40.0f, 41.0f, 42.0f, 43.0f, 44.0f}, + {45.0f, 46.0f, 47.0f, 48.0f, 49.0f}}}}}); + auto label = inputTensor; + // implem already set to default + auto myProd = Producer(inputTensor, "prod"); + myProd -> addChild(gv); + gv -> compile("cpu", DataType::Float32); + compile_gradient(gv); + SequentialScheduler scheduler(gv); + scheduler.forward(); + auto predictedOutput = gv->getOrderedOutputs()[0].first; + + std::shared_ptr<Tensor> targetOutput = + std::make_shared<Tensor>(Array4D<float, 2, 1, 5, 5>{{{{{0.0f, 1.0f, 1.0f, 2.0f, 2.0f}, + {2.0f, 2.0f, 3.0f, 3.0f, 3.0f}, + {3.0f, 3.0f, 3.0f, 4.0f, 4.0f}, + {4.0f, 4.0f, 4.0f, 4.0f, 4.0f}, + {4.0f, 5.0f, 5.0f, 5.0f, 5.0f}}}, + {{{5.0f, 5.0f, 5.0f, 5.0f, 5.0f}, + {5.0f, 6.0f, 6.0f, 6.0f, 6.0f}, + {6.0f, 6.0f, 6.0f, 6.0f, 
6.0f}, + {6.0f, 6.0f, 6.0f, 7.0f, 7.0f}, + {7.0f, 7.0f, 7.0f, 7.0f, 7.0f}}}}}); + + REQUIRE_NOTHROW(scheduler.backward({targetOutput})); +} +} // namespace Aidge diff --git a/version.txt b/version.txt index 8a9ecc2ea99d607e92feae1656ddbf6fdd82a2c1..341cf11faf9a29504168de4e54beaad182c5adc5 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.0.1 \ No newline at end of file +0.2.0 \ No newline at end of file