Commit 3e847c1b authored by Maxence Naud

[UPD] version 0.4.0 -> 0.5.0

Merge branch 'dev' into 'main'

See merge request eclipse/aidge/aidge_backend_cuda!67
parents 9ecaa69c 24446f6c
Branches main
Tags v0.5.0
Showing changed files with 385 additions and 74 deletions
......@@ -4,6 +4,7 @@
# C++ Build
build*/
install*/
include/aidge/backend/cuda_version.h
# VSCode
.vscode
......
# Version 0.5.0 (January 31, 2025)
# Version 0.4.0 (December 6, 2024)
# Version 0.1.0 (January 23, 2024)
......
# CMake >= 3.18 is required for good support of FindCUDAToolkit
cmake_minimum_required(VERSION 3.18)
set(CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
file(STRINGS "${CMAKE_SOURCE_DIR}/version.txt" version)
# Parse version.txt to retrieve Major, Minor and Patch
string(REGEX MATCH "([0-9]+)\\.([0-9]+)\\.([0-9]+)" _ MATCHES ${version})
set(PROJECT_VERSION_MAJOR ${CMAKE_MATCH_1})
set(PROJECT_VERSION_MINOR ${CMAKE_MATCH_2})
set(PROJECT_VERSION_PATCH ${CMAKE_MATCH_3})
project(aidge_backend_cuda
VERSION ${version}
DESCRIPTION "CUDA implementations of the operators of aidge framework."
......@@ -21,7 +30,6 @@ execute_process(
)
message(STATUS "Latest git commit: ${GIT_COMMIT_HASH}")
# Define a preprocessor macro with the Git commit version
add_definitions(-DGIT_COMMIT_HASH="${GIT_COMMIT_HASH}")
# Note : project name is ${CMAKE_PROJECT_NAME} and python module name is also ${CMAKE_PROJECT_NAME}
set(module_name _${CMAKE_PROJECT_NAME}) # target name
......@@ -107,6 +115,14 @@ if (PYBIND)
)
endif()
message(STATUS "Creating ${CMAKE_CURRENT_SOURCE_DIR}/include/aidge/backend/cuda_version.h")
# Generate cuda_version.h from the template version.h.in
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/include/aidge/backend/version.h.in"
"${CMAKE_CURRENT_SOURCE_DIR}/include/aidge/backend/cuda_version.h"
)
target_link_libraries(${module_name}
PUBLIC
_aidge_core # _ is added because we link the target not the project
......
from aidge_backend_cuda.aidge_backend_cuda import * # import so generated by PyBind
from ._version import *
......@@ -11,9 +11,13 @@
#ifndef AIDGE_BACKEND_CUDA_IMPORTS_H_
#define AIDGE_BACKEND_CUDA_IMPORTS_H_
#include "aidge/backend/cuda_version.h"
#include "aidge/backend/cuda/data/TensorImpl.hpp"
#include "aidge/backend/cuda/operator/OperatorImpl.hpp"
#include "aidge/backend/cuda/operator/AbsImpl.hpp"
#include "aidge/backend/cuda/operator/AddImpl.hpp"
#include "aidge/backend/cuda/operator/AndImpl.hpp"
#include "aidge/backend/cuda/operator/ArgMaxImpl.hpp"
......@@ -22,11 +26,14 @@
#include "aidge/backend/cuda/operator/ConvImpl.hpp"
#include "aidge/backend/cuda/operator/ClipImpl.hpp"
#include "aidge/backend/cuda/operator/DivImpl.hpp"
#include "aidge/backend/cuda/operator/ErfImpl.hpp"
#include "aidge/backend/cuda/operator/FCImpl.hpp"
#include "aidge/backend/cuda/operator/GlobalAveragePoolingImpl.hpp"
#include "aidge/backend/cuda/operator/ILayerNormImpl.hpp"
#include "aidge/backend/cuda/operator/LRNImpl.hpp"
#include "aidge/backend/cuda/operator/LnImpl.hpp"
#include "aidge/backend/cuda/operator/MaxPoolingImpl.hpp"
#include "aidge/backend/cuda/operator/MatMulImpl.hpp"
#include "aidge/backend/cuda/operator/MulImpl.hpp"
#include "aidge/backend/cuda/operator/PadImpl.hpp"
#include "aidge/backend/cuda/operator/PowImpl.hpp"
......@@ -34,17 +41,12 @@
#include "aidge/backend/cuda/operator/ReduceSumImpl.hpp"
#include "aidge/backend/cuda/operator/ReLUImpl.hpp"
#include "aidge/backend/cuda/operator/RoundImpl.hpp"
#include "aidge/backend/cuda/operator/ShiftMaxImpl.hpp"
#include "aidge/backend/cuda/operator/ShiftGELUImpl.hpp"
#include "aidge/backend/cuda/operator/ReshapeImpl.hpp"
#include "aidge/backend/cuda/operator/ShiftMaxImpl.hpp"
#include "aidge/backend/cuda/operator/SigmoidImpl.hpp"
#include "aidge/backend/cuda/operator/SoftmaxImpl.hpp"
#include "aidge/backend/cuda/operator/SqrtImpl.hpp"
#include "aidge/backend/cuda/operator/SubImpl.hpp"
#include "aidge/backend/cuda/operator/TanhImpl.hpp"
#include "aidge/backend/cuda/operator/ShiftMaxImpl.hpp"
#include "aidge/backend/cuda/operator/ShiftGELUImpl.hpp"
#include "aidge/backend/cuda/operator/ILayerNormImpl.hpp"
#endif /* AIDGE_BACKEND_CUDA_IMPORTS_H_ */
......@@ -4,6 +4,9 @@
#include <cstddef> // std::size_t
#include <memory>
#include <string>
#include <vector>
#include <cuda.h>
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/Tensor.hpp"
......
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_BACKEND_CUDA_OPERATOR_ABSIMPL_H_
#define AIDGE_BACKEND_CUDA_OPERATOR_ABSIMPL_H_
#include <array>
#include <memory>
#include <tuple>
#include <vector>
#include <cudnn.h>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Abs.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cuda/utils/CudaUtils.hpp"
namespace Aidge {
// Operator implementation entry point for the backend
class AbsImpl_cuda : public OperatorImpl {
public:
AbsImpl_cuda(const Abs_Op& op) : OperatorImpl(op, "cuda") {}
static std::unique_ptr<AbsImpl_cuda> create(const Abs_Op& op) {
return std::make_unique<AbsImpl_cuda>(op);
}
virtual std::vector<ImplSpec> getAvailableImplSpecs() const override {
return {
{DataType::Float64},
{DataType::Float32},
{DataType::Float16},
};
}
void forward() override;
void backward() override;
private:
std::shared_ptr<Tensor> mInputFallback;
std::shared_ptr<Tensor> mOutputGradFallback;
template <class T> void forward_(const Tensor& input);
template <class T> void backward_(const Tensor& input, const Tensor& outputGrad);
};
// Implementation entry point registration to Operator
REGISTRAR(Abs_Op, "cuda", Aidge::AbsImpl_cuda::create);
} // namespace Aidge
#endif /* AIDGE_BACKEND_CUDA_OPERATOR_ABSIMPL_H_ */
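For context, a minimal sketch of how such a registered implementation is typically selected at runtime. The node factory and setBackend call below are assumed from the aidge_core API and are not part of this diff:

// Hypothetical call site: once REGISTRAR(Abs_Op, "cuda", AbsImpl_cuda::create)
// has run, requesting the "cuda" backend resolves to the create() factory above.
#include "aidge/backend/cuda/operator/AbsImpl.hpp"
#include "aidge/operator/Abs.hpp"

void selectCudaBackend() {
    auto abs = Aidge::Abs();                 // assumed aidge_core node factory
    abs->getOperator()->setBackend("cuda");  // registrar lookup -> AbsImpl_cuda
    // forward()/backward() now dispatch to the CUDA implementation.
}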
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CUDA_OPERATOR_ABSIMPL_KERNELS_H_
#define AIDGE_CUDA_OPERATOR_ABSIMPL_KERNELS_H_
#include <stdexcept>
#include <cfloat>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_fp16.h>
#include "aidge/data/Data.hpp"
#include "aidge/backend/cuda/utils/CudaUtils.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
template <class T>
void absForward(const T* input, T* output, int size);
}
#endif /* AIDGE_CUDA_OPERATOR_ABSIMPL_KERNELS_H_ */
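The .cu definition of absForward is not shown in this diff; below is a minimal sketch of what such an element-wise launcher could look like, assuming a flat contiguous layout (illustrative, not the actual implementation):

// Hypothetical sketch of an element-wise abs kernel and its host launcher.
template <class T>
__global__ void absKernel(const T* input, T* output, int size) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        output[i] = input[i] < T(0) ? -input[i] : input[i];
    }
}

template <class T>
void absForward(const T* input, T* output, int size) {
    constexpr int blockSize = 256;
    const int numBlocks = (size + blockSize - 1) / blockSize;
    absKernel<<<numBlocks, blockSize>>>(input, output, size);
    CHECK_CUDA_STATUS(cudaPeekAtLastError()); // surface launch errors early
}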
......@@ -9,8 +9,8 @@
*
********************************************************************************/
#ifndef AIDGE_CUDA_OPERATOR_FCIMPL_KERNELS_H_
#define AIDGE_CUDA_OPERATOR_FCIMPL_KERNELS_H_
#ifndef AIDGE_CUDA_OPERATOR_CUBLAS_KERNELS_H_
#define AIDGE_CUDA_OPERATOR_CUBLAS_KERNELS_H_
#include <stdexcept>
#include <cfloat>
......@@ -33,6 +33,17 @@ cublasStatus_t cublasGemm(cublasHandle_t handle,
const T *beta,
T *C, int ldc);
template <class T>
cublasStatus_t cublasGemmStridedBatched(cublasHandle_t handle,
cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k,
const T *alpha,
const T *A, int lda, long long int strideA,
const T *B, int ldb, long long int strideB,
const T *beta,
T *C, int ldc, long long int strideC,
int batchCount);
template <class T>
cublasStatus_t cublasGemv(cublasHandle_t handle, cublasOperation_t trans,
int m, int n,
......@@ -42,4 +53,4 @@ cublasStatus_t cublasGemv(cublasHandle_t handle, cublasOperation_t trans,
const T *beta,
T *y, int incy);
}
#endif /* AIDGE_CUDA_OPERATOR_FCIMPL_KERNELS_H_ */
\ No newline at end of file
#endif /* AIDGE_CUDA_OPERATOR_CUBLAS_KERNELS_H_ */
\ No newline at end of file
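The new cublasGemmStridedBatched wrapper presumably backs the batched case of the MatMulImpl added further down. A minimal usage sketch of the raw cuBLAS call it templates over, for T = float (cuBLAS is column-major; each stride advances from one matrix in the batch to the next):

// Hypothetical sketch: C[b] = A[b] * B[b] for batchCount independent GEMMs.
#include <cublas_v2.h>

void batchedMatMul(cublasHandle_t handle,
                   const float* A, const float* B, float* C,
                   int m, int n, int k, int batchCount) {
    const float alpha = 1.0f, beta = 0.0f;
    cublasSgemmStridedBatched(handle,
                              CUBLAS_OP_N, CUBLAS_OP_N,
                              m, n, k,
                              &alpha,
                              A, m, static_cast<long long int>(m) * k,
                              B, k, static_cast<long long int>(k) * n,
                              &beta,
                              C, m, static_cast<long long int>(m) * n,
                              batchCount);
}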
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_BACKEND_CUDA_OPERATOR_ERFIMPL_H_
#define AIDGE_BACKEND_CUDA_OPERATOR_ERFIMPL_H_
#include <array>
#include <memory>
#include <tuple>
#include <vector>
#include <cudnn.h>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Erf.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cuda/utils/CudaUtils.hpp"
namespace Aidge {
// Operator implementation entry point for the backend
class ErfImpl_cuda : public OperatorImpl {
public:
ErfImpl_cuda(const Erf_Op& op) : OperatorImpl(op, "cuda") {}
static std::unique_ptr<ErfImpl_cuda> create(const Erf_Op& op) {
return std::make_unique<ErfImpl_cuda>(op);
}
virtual std::vector<ImplSpec> getAvailableImplSpecs() const override {
return {
{DataType::Float64},
{DataType::Float32},
{DataType::Float16},
};
}
void forward() override;
void backward() override;
private:
std::shared_ptr<Tensor> mInputFallback;
std::shared_ptr<Tensor> mOutputGradFallback;
template <class T> void forward_(const Tensor& input);
template <class T> void backward_(const Tensor& output_grad);
};
// Implementation entry point registration to Operator
REGISTRAR(Erf_Op, "cuda", Aidge::ErfImpl_cuda::create);
} // namespace Aidge
#endif /* AIDGE_BACKEND_CUDA_OPERATOR_ERFIMPL_H_ */
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CUDA_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_
#define AIDGE_CUDA_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_
#include <stdexcept>
#include <cfloat>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_fp16.h>
#include "aidge/data/Data.hpp"
#include "aidge/backend/cuda/utils/CudaUtils.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
template <class T>
void ErfForward(const T* input, T* output, int size);
}
#endif /* AIDGE_CUDA_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_ */
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_BACKEND_CUDA_OPERATOR_MATMULIMPL_H_
#define AIDGE_BACKEND_CUDA_OPERATOR_MATMULIMPL_H_
#include <array>
#include <memory>
#include <tuple>
#include <vector>
#include <cudnn.h>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/MatMul.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cuda/utils/CudaUtils.hpp"
namespace Aidge {
// Operator implementation entry point for the backend
class MatMulImpl_cuda : public OperatorImpl {
public:
MatMulImpl_cuda(const MatMul_Op& op) : OperatorImpl(op, "cuda") {}
static std::unique_ptr<MatMulImpl_cuda> create(const MatMul_Op& op) {
return std::make_unique<MatMulImpl_cuda>(op);
}
virtual std::vector<ImplSpec> getAvailableImplSpecs() const override {
return {
{DataType::Float64},
{DataType::Float32},
{DataType::Float16},
};
}
void forward() override;
void backward() override;
private:
std::shared_ptr<Tensor> mInput0Fallback;
std::shared_ptr<Tensor> mInput1Fallback;
template <class T> void forward_(const Tensor& input0, const Tensor& input1);
template <class T> void backward_(const Tensor& outGrad);
};
// Implementation entry point registration to Operator
REGISTRAR(MatMul_Op, "cuda", Aidge::MatMulImpl_cuda::create);
} // namespace Aidge
#endif /* AIDGE_BACKEND_CUDA_OPERATOR_MATMULIMPL_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
......@@ -9,8 +9,8 @@
*
********************************************************************************/
#ifndef AIDGE_BACKEND_CUDA_OPERATOR_RESHAPEIMPL_H_
#define AIDGE_BACKEND_CUDA_OPERATOR_RESHAPEIMPL_H_
#ifndef AIDGE_BACKEND_CUDA_OPERATOR_SOFTMAXIMPL_H_
#define AIDGE_BACKEND_CUDA_OPERATOR_SOFTMAXIMPL_H_
#include <array>
#include <memory>
......@@ -20,7 +20,7 @@
#include <cudnn.h>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Reshape.hpp"
#include "aidge/operator/Softmax.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
......@@ -28,12 +28,12 @@
namespace Aidge {
// Operator implementation entry point for the backend
class ReshapeImpl_cuda : public OperatorImpl {
class SoftmaxImpl_cuda : public OperatorImpl {
public:
ReshapeImpl_cuda(const Reshape_Op& op) : OperatorImpl(op, "cuda") {}
SoftmaxImpl_cuda(const Softmax_Op& op) : OperatorImpl(op, "cuda") {}
static std::unique_ptr<ReshapeImpl_cuda> create(const Reshape_Op& op) {
return std::make_unique<ReshapeImpl_cuda>(op);
static std::unique_ptr<SoftmaxImpl_cuda> create(const Softmax_Op& op) {
return std::make_unique<SoftmaxImpl_cuda>(op);
}
virtual std::vector<ImplSpec> getAvailableImplSpecs() const override {
......@@ -48,11 +48,15 @@ public:
void backward() override;
private:
// CuDNN specific variables
std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
template <class T> void forward_(const Tensor& input);
template <class T> void backward_(const Tensor& output_grad, int axis);
};
// Implementation entry point registration to Operator
REGISTRAR(Reshape_Op, "cuda", Aidge::ReshapeImpl_cuda::create);
REGISTRAR(Softmax_Op, "cuda", Aidge::SoftmaxImpl_cuda::create);
} // namespace Aidge
#endif /* AIDGE_BACKEND_CUDA_OPERATOR_RESHAPEIMPL_H_ */
#endif /* AIDGE_BACKEND_CUDA_OPERATOR_SOFTMAXIMPL_H_ */
#ifndef AIDGE_BACKEND_CUDA_CUDA_UTILS_H
#define AIDGE_BACKEND_CUDA_CUDA_UTILS_H
#ifndef AIDGE_BACKEND_CUDA_CUDA_UTILS_H_
#define AIDGE_BACKEND_CUDA_CUDA_UTILS_H_
#include <string>
#include <memory>
#include <sstream>
#include <iostream>
#include <stdexcept>
#include <fmt/core.h>
#include <fmt/format.h>
#include <cublas_v2.h>
#include <cuda.h>
......@@ -18,31 +18,29 @@
do { \
const cudnnStatus_t e = (status); \
if (e != CUDNN_STATUS_SUCCESS) { \
std::stringstream error; \
error << "CUDNN failure: " << cudnnGetErrorString(e) << " (" \
<< static_cast<int>(e) << ") in " << __FILE__ << ':' << __LINE__; \
int status_dev; \
if (cudaGetDevice(&status_dev) == cudaSuccess) \
error << " on device #" << status_dev; \
std::cerr << error.str() << std::endl; \
std::string error = fmt::format("CUDNN failure: {} ({}) in {}:{}", \
cudnnGetErrorString(e), static_cast<int>(e), __FILE__, __LINE__); \
int status_dev; \
if (cudaGetDevice(&status_dev) == cudaSuccess) \
error = fmt::format("{} on device #{}", error, status_dev); \
fmt::print(stderr, "{}\n", error); \
cudaDeviceReset(); \
throw std::runtime_error(error.str()); \
throw std::runtime_error(error); \
} \
} while(0)
#define CHECK_CUDA_STATUS(status) \
do { \
const cudaError_t e = (status); \
if ((e) != cudaSuccess) { \
std::stringstream error; \
error << "Cuda failure: " << cudaGetErrorString(e) << " (" \
<< static_cast<int>(e) << ") in " << __FILE__ << ':' << __LINE__; \
int status_dev; \
if (cudaGetDevice(&status_dev) == cudaSuccess) \
error << " on device #" << status_dev; \
std::cerr << error.str() << std::endl; \
if ((e) != cudaSuccess) { \
std::string error = fmt::format("Cuda failure: {} ({}) in {}:{}", \
cudaGetErrorString(e), static_cast<int>(e), __FILE__, __LINE__); \
int status_dev; \
if (cudaGetDevice(&status_dev) == cudaSuccess) \
error = fmt::format("{} on device #{}", error, status_dev); \
fmt::print(stderr, "{}\n", error); \
cudaDeviceReset(); \
throw std::runtime_error(error.str()); \
throw std::runtime_error(error); \
} \
} while(0)
......@@ -50,16 +48,14 @@
do { \
const cublasStatus_t e = (status); \
if (e != CUBLAS_STATUS_SUCCESS) { \
std::stringstream error; \
error << "Cublas failure: " \
<< Aidge::Cuda::cublasGetErrorString(e) << " (" \
<< static_cast<int>(e) << ") in " << __FILE__ << ':' << __LINE__; \
int status_dev; \
if (cudaGetDevice(&status_dev) == cudaSuccess) \
error << " on device #" << status_dev; \
std::cerr << error.str() << std::endl; \
std::string error = fmt::format("Cublas failure: {} ({}) in {}:{}", \
Aidge::Cuda::cublasGetErrorString(e), static_cast<int>(e), __FILE__, __LINE__); \
int status_dev; \
if (cudaGetDevice(&status_dev) == cudaSuccess) \
error = fmt::format("{} on device #{}", error, status_dev); \
fmt::print(stderr, "{}\n", error); \
cudaDeviceReset(); \
throw std::runtime_error(error.str()); \
throw std::runtime_error(error); \
} \
} while(0)
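For reference, a minimal usage sketch of these macros; the call sites below are hypothetical, and only the message formatting (std::stringstream to fmt) changes in this diff, not the checking pattern itself:

// Each macro evaluates the status once, prints a formatted message to stderr
// and throws std::runtime_error on failure.
#include "aidge/backend/cuda/utils/CudaUtils.hpp"

void checkedAllocation() {
    void* buffer = nullptr;
    CHECK_CUDA_STATUS(cudaMalloc(&buffer, 1024));
    cublasHandle_t handle;
    CHECK_CUBLAS_STATUS(cublasCreate(&handle));
    CHECK_CUBLAS_STATUS(cublasDestroy(handle));
    CHECK_CUDA_STATUS(cudaFree(buffer));
}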
......@@ -96,4 +92,4 @@ namespace Cuda {
}
}
#endif // AIDGE_BACKEND_CUDA_CUDA_UTILS_H
#endif // AIDGE_BACKEND_CUDA_CUDA_UTILS_H_
#ifndef VERSION_H
#define VERSION_H
namespace Aidge {
static constexpr const int PROJECT_VERSION_MAJOR = @PROJECT_VERSION_MAJOR@;
static constexpr const int PROJECT_VERSION_MINOR = @PROJECT_VERSION_MINOR@;
static constexpr const int PROJECT_VERSION_PATCH = @PROJECT_VERSION_PATCH@;
static constexpr const char * PROJECT_VERSION = "@PROJECT_VERSION_MAJOR@.@PROJECT_VERSION_MINOR@.@PROJECT_VERSION_PATCH@";
static constexpr const char * PROJECT_GIT_HASH = "@GIT_COMMIT_HASH@";
}
#endif // VERSION_H
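Once configure_file substitutes the @...@ placeholders, the generated include/aidge/backend/cuda_version.h for this release would read as follows (the commit hash shown is this commit's, for illustration; the build embeds whatever HEAD is at configure time):

#ifndef VERSION_H
#define VERSION_H
namespace Aidge {
static constexpr const int PROJECT_VERSION_MAJOR = 0;
static constexpr const int PROJECT_VERSION_MINOR = 5;
static constexpr const int PROJECT_VERSION_PATCH = 0;
static constexpr const char * PROJECT_VERSION = "0.5.0";
static constexpr const char * PROJECT_GIT_HASH = "3e847c1b";
}
#endif // VERSION_H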
......@@ -3,20 +3,19 @@
#include "aidge/backend/cuda/utils/CudaUtils.hpp" // CHECK_CUDA_STATUS
#include "aidge/utils/Log.hpp"
#include "aidge/backend/cuda_version.h"
namespace Aidge {
#ifndef PROJECT_VERSION // Normally defined in CMakeLists.txt
#define PROJECT_VERSION "Unknown version"
#endif
#ifndef GIT_COMMIT_HASH
#define GIT_COMMIT_HASH ""
#endif
#ifndef CUDA_COMPILER_VERSION
#define CUDA_COMPILER_VERSION "Unknown version"
#endif
void showCudaVersion() {
Log::info("Aidge backend CUDA: {} ({}), {} {}", PROJECT_VERSION, GIT_COMMIT_HASH, __DATE__, __TIME__);
constexpr inline const char * getBackendCudaProjectVersion(){
return PROJECT_VERSION;
}
constexpr inline const char * getBackendCudaGitHash(){
return PROJECT_GIT_HASH;
}
void showBackendCudaProjectVersion() {
Log::info("Aidge backend CUDA: {} ({}), {} {}", getBackendCudaProjectVersion(), getBackendCudaGitHash(), __DATE__, __TIME__);
Log::info("CUDA compiler version: {}", CUDA_COMPILER_VERSION);
Log::info("CuDNN version: {}.{}.{}\n", CUDNN_MAJOR, CUDNN_MINOR,
CUDNN_PATCHLEVEL);
......
aidge_backend_cuda
......@@ -4,14 +4,22 @@ description="CUDA implementations of the operators of aidge framework"
dependencies = [
"numpy",
]
requires-python = ">= 3.7"
requires-python = ">= 3.8"
readme = "README.md"
license = { file = "LICENSE" }
classifiers = [
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 3"
]
dynamic = ["version"]
dynamic = ["version"] # defined by pbr
[project.urls]
Homepage = "https://www.deepgreen.ai/en/platform"
Documentation = "https://eclipse-aidge.readthedocs.io/en/latest/"
Repository = "https://gitlab.eclipse.org/eclipse/aidge/aidge_backend_cuda"
Issues = "https://gitlab.eclipse.org/eclipse/aidge/aidge_backend_cuda/-/issues"
Changelog = "https://gitlab.eclipse.org/eclipse/aidge/aidge_backend_cuda/-/releases"
#####################################################
# SETUPTOOLS
......@@ -21,16 +29,13 @@ where = ["."] # list of folders that contain the packages (["."] by default)
include = ["aidge_backend_cuda*"] # package names should match these glob patterns (["*"] by default)
exclude = ["aidge_backend_cuda.unit_tests*"] # exclude packages matching these glob patterns (empty by default)
namespaces = false # to disable scanning PEP 420 namespaces (true by default)
# SETUPTOOLS_SCM
[tool.setuptools_scm]
write_to = "aidge_backend_cuda/_version.py"
[build-system]
requires = [
"setuptools>=68",
"setuptools-scm",
"cmake>=3.18.0",
"toml"
"toml",
"pbr"
]
build-backend = "setuptools.build_meta"
......
......@@ -4,6 +4,8 @@
namespace py = pybind11;
namespace Aidge {
void init_cuda_sys_info(py::module& m){
m.def("show_cuda_version", &showCudaVersion);
m.def("show_version", &showBackendCudaProjectVersion);
m.def("get_project_version", &getBackendCudaProjectVersion);
m.def("get_git_hash", &getBackendCudaGitHash);
}
}
# pbr file
[metadata]
version = file: version.txt