diff --git a/CMakeLists.txt b/CMakeLists.txt
index c8d5d2296fa7d429e256b124d5250d10cee8ec73..243cc75158a9c4b997a63e6a771bc4177eb26080 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,6 +1,9 @@
 # CMake >= 3.18 is required for good support of FindCUDAToolkit
 cmake_minimum_required(VERSION 3.18)
-set(CXX_STANDARD 14)
+
+set(CMAKE_CXX_STANDARD 14)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)

 file(STRINGS "${CMAKE_SOURCE_DIR}/version.txt" version)

diff --git a/pyproject.toml b/pyproject.toml
index 4946af13d7a6956a1d846cb3ebd8f4d91c06a40d..9abb470b62d89ec89e2712c6721428dec87307b2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ description="CUDA implementations of the operators of aidge framework"
 dependencies = [
     "numpy",
 ]
-requires-python = ">= 3.7"
+requires-python = ">= 3.8"
 readme = "README.md"
 license = { file = "LICENSE" }
 classifiers = [
diff --git a/setup.py b/setup.py
index 706fc53ca08319ee487ef789ebc85f0d513ab25b..c72553a4650c7671669fa144ab3baacad77373fd 100644
--- a/setup.py
+++ b/setup.py
@@ -37,6 +37,7 @@ class CMakeBuild(build_ext):
         # This lists the number of processors available on the machine
         # The compilation will use half of them
         max_jobs = str(ceil(multiprocessing.cpu_count() / 2))
+        max_jobs = os.environ.get("AIDGE_NB_PROC", max_jobs)

         cwd = pathlib.Path().absolute()

@@ -50,33 +51,41 @@
         os.chdir(str(build_temp))

-        compile_type = (
-            "Release"
-            if "AIDGE_PYTHON_BUILD_TYPE" not in os.environ
-            else os.environ["AIDGE_PYTHON_BUILD_TYPE"]
-        )
-
         install_path = (
             os.path.join(sys.prefix, "lib", "libAidge")
             if "AIDGE_INSTALL" not in os.environ
             else os.environ["AIDGE_INSTALL"]
         )

+        # Read environment variables for CMake options
+        c_compiler = os.environ.get("AIDGE_C_COMPILER", "gcc")
+        cxx_compiler = os.environ.get("AIDGE_CXX_COMPILER", "g++")
+        build_type = os.environ.get("AIDGE_BUILD_TYPE", "Release")
+        asan = os.environ.get("AIDGE_ASAN", "OFF")
+        with_cuda = os.environ.get("AIDGE_WITH_CUDA", "OFF")
+
         # using ninja as default build system to build faster and with the same compiler as on windows
-        build_gen = (
-            ["-G", os.environ["AIDGE_BUILD_GEN"]]
-            if "AIDGE_BUILD_GEN" in os.environ
+        build_gen = os.environ.get("AIDGE_BUILD_GEN", "")
+        build_gen_opts = (
+            ["-G", build_gen]
+            if build_gen
             else []
         )
-
+
+        test_onoff = os.environ.get("AIDGE_BUILD_TEST", "OFF")
+
         self.spawn(
             [
                 "cmake",
-                *build_gen,
+                *build_gen_opts,
                 str(cwd),
-                "-DTEST=OFF",
+                f"-DTEST={test_onoff}",
                 f"-DCMAKE_INSTALL_PREFIX:PATH={install_path}",
-                f"-DCMAKE_BUILD_TYPE={compile_type}",
+                f"-DCMAKE_BUILD_TYPE={build_type}",
+                f"-DCMAKE_C_COMPILER={c_compiler}",
+                f"-DCMAKE_CXX_COMPILER={cxx_compiler}",
+                f"-DENABLE_ASAN={asan}",
+                f"-DCUDA={with_cuda}",
                 "-DPYBIND=ON",
                 "-DCMAKE_EXPORT_COMPILE_COMMANDS=ON",
                 "-DCOVERAGE=OFF",
@@ -86,9 +95,9 @@ class CMakeBuild(build_ext):

         if not self.dry_run:
             self.spawn(
-                ["cmake", "--build", ".", "--config", compile_type, "-j", max_jobs]
+                ["cmake", "--build", ".", "--config", build_type, "-j", max_jobs]
             )
-            self.spawn(["cmake", "--install", ".", "--config", compile_type])
+            self.spawn(["cmake", "--install", ".", "--config", build_type])
         os.chdir(str(cwd))

         aidge_package = build_lib / (get_project_name())
diff --git a/unit_tests/Test_AvgPoolingImpl.cpp b/unit_tests/Test_AvgPoolingImpl.cpp
index 4ad761a588ec6a9c5d4edaa1511d6d216670a35d..b658ee759d946249c7fc3c56c80dfa367fa0371f 100644
--- a/unit_tests/Test_AvgPoolingImpl.cpp
+++ b/unit_tests/Test_AvgPoolingImpl.cpp
@@ -236,8 +236,8 @@ TEST_CASE("[gpu/operator] AvgPooling(forward)", "[AvgPooling][GPU]")
             delete[] array0;
             cudaFree(array0_d);
         }
-        fmt::print("INFO: Number of elements over time spent: {}\n", number_of_operation / duration.count());
-        fmt::print("INFO: Total time: {}μs\n", duration.count());
+        Log::info("Number of elements over time spent: {}\n", number_of_operation / duration.count());
+        Log::info("Total time: {}μs\n", duration.count());
     }
 }
diff --git a/unit_tests/Test_GlobalAveragePoolingImpl.cpp b/unit_tests/Test_GlobalAveragePoolingImpl.cpp
index fb67fa5b1472ac4c22bf4bdbbd703d1a31024a0e..5c13f45bc40d3aaa874209f2475468e9b59d500e 100644
--- a/unit_tests/Test_GlobalAveragePoolingImpl.cpp
+++ b/unit_tests/Test_GlobalAveragePoolingImpl.cpp
@@ -178,8 +178,8 @@ TEST_CASE("[gpu/operator] GlobalAveragePooling",
             delete[] array0;
             cudaFree(array0_d);
         }
-        fmt::print("INFO: Number of elements over time spent: {}\n", number_of_operation / duration.count());
-        fmt::print("INFO: Total time: {}μs\n", duration.count());
+        Log::info("Number of elements over time spent: {}\n", number_of_operation / duration.count());
+        Log::info("Total time: {}μs\n", duration.count());
     }
 }
 } // namespace Aidge
diff --git a/unit_tests/Test_MaxPoolingImpl.cpp b/unit_tests/Test_MaxPoolingImpl.cpp
index e6fcc80157d921bbd10d5f3e99ee7312e3fa3406..0474a70dee755ccb1830a7ac27761fbf522e1260 100644
--- a/unit_tests/Test_MaxPoolingImpl.cpp
+++ b/unit_tests/Test_MaxPoolingImpl.cpp
@@ -193,7 +193,7 @@ TEST_CASE("[gpu/operator] MaxPooling(forward)", "[MaxPooling][GPU]") {
             delete[] array0;
             cudaFree(array0_d);
         }
-        fmt::print("INFO: Number of elements over time spent: {}\n", number_of_operation / duration.count());
-        fmt::print("INFO: Total time: {}μs\n", duration.count());
+        Log::info("Number of elements over time spent: {}\n", number_of_operation / duration.count());
+        Log::info("Total time: {}μs\n", duration.count());
     }
 }
\ No newline at end of file
diff --git a/unit_tests/Test_PowImpl.cpp b/unit_tests/Test_PowImpl.cpp
index 60629cadd4d3005d5eff8a9b310c092a1d60a68d..78be1ca2c92ccb7fc444dbbfa4ea53e0894c99f1 100644
--- a/unit_tests/Test_PowImpl.cpp
+++ b/unit_tests/Test_PowImpl.cpp
@@ -148,8 +148,8 @@ TEST_CASE("[gpu/operator] Pow", "[Pow][GPU]") {
             cudaFree(array0_d);
             cudaFree(array1_d);
         }
-        fmt::print("INFO: Number of elements over time spent: {}\n", number_of_operation / duration.count());
-        fmt::print("INFO: Total time: {}μs\n", duration.count());
+        Log::info("Number of elements over time spent: {}\n", number_of_operation / duration.count());
+        Log::info("Total time: {}μs\n", duration.count());
     }

     SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
@@ -256,8 +256,8 @@ TEST_CASE("[gpu/operator] Pow", "[Pow][GPU]") {
             const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
             number_of_operation += nb_elements;
         }
-        fmt::print("INFO: Number of elements over time spent: {}\n", number_of_operation / duration.count());
-        fmt::print("INFO: Total time: {}μs\n", duration.count());
+        Log::info("Number of elements over time spent: {}\n", number_of_operation / duration.count());
+        Log::info("Total time: {}μs\n", duration.count());
     }
     SECTION("+1-D Tensor / 1-D Tensor") {
         // Create Pow Operator
@@ -361,8 +361,8 @@ TEST_CASE("[gpu/operator] Pow", "[Pow][GPU]") {
             const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
             number_of_operation += nb_elements;
         }
-        fmt::print("INFO: Number of elements over time spent: {}\n", number_of_operation / duration.count());
-        fmt::print("INFO: Total time: {}μs\n", duration.count());
+        Log::info("Number of elements over time spent: {}\n", number_of_operation / duration.count());
+        Log::info("Total time: {}μs\n", duration.count());
     }
 }
 }
diff --git a/unit_tests/Test_ReLUImpl.cpp b/unit_tests/Test_ReLUImpl.cpp
index 58a395319685b59802d165123bed3b50c27ada20..d55ccd1c1ca17e2992ac2178f556a178914694b8 100644
--- a/unit_tests/Test_ReLUImpl.cpp
+++ b/unit_tests/Test_ReLUImpl.cpp
@@ -174,7 +174,7 @@ TEST_CASE("[gpu/operator] ReLU(forward)", "[ReLU][GPU]") {
             delete[] input_h;
             cudaFree(input_d);
         }
-        fmt::print("INFO: Number of elements over time spent: {}\n", number_of_operation / duration.count());
-        fmt::print("INFO: Total time: {}μs\n", duration.count());
+        Log::info("Number of elements over time spent: {}\n", number_of_operation / duration.count());
+        Log::info("Total time: {}μs\n", duration.count());
     }
 }
diff --git a/unit_tests/Test_ReshapeImpl.cpp b/unit_tests/Test_ReshapeImpl.cpp
index 60828ad53becaa180e50b2f148618a0698de1ed7..d62fc4625c51ff4affb207d8dd7c30d0661ad294 100644
--- a/unit_tests/Test_ReshapeImpl.cpp
+++ b/unit_tests/Test_ReshapeImpl.cpp
@@ -185,8 +185,8 @@ TEST_CASE("[gpu/operator] Reshape(forward)") {
             delete[] array0;
             cudaFree(array0_d);
         }
-        fmt::print("INFO: Number of elements over time spent: {}\n", number_of_operation / duration.count());
-        fmt::print("INFO: Total time: {}μs\n", duration.count());
+        Log::info("Number of elements over time spent: {}\n", number_of_operation / duration.count());
+        Log::info("Total time: {}μs\n", duration.count());
     }
 }
diff --git a/unit_tests/Test_SqrtImpl.cpp b/unit_tests/Test_SqrtImpl.cpp
index c381b69d4ff13edbe84d4fec6d90bb8b911db89a..934cb0949be2ea110758a916846afbcfc85cec70 100644
--- a/unit_tests/Test_SqrtImpl.cpp
+++ b/unit_tests/Test_SqrtImpl.cpp
@@ -116,7 +116,7 @@ constexpr std::uint16_t NBTRIALS = 10;
             delete[] array0;
             cudaFree(array0_d);
         }
-        fmt::print("INFO: Number of elements over time spent: {}\n", number_of_operation / duration.count());
-        fmt::print("INFO: Total time: {}μs\n", duration.count());
+        Log::info("Number of elements over time spent: {}\n", number_of_operation / duration.count());
+        Log::info("Total time: {}μs\n", duration.count());
     }
 } // namespace Aidge