Commit ccb400be authored by Grégoire Kubler

chore : formatted cpp files

parent dbff9526
1 merge request: !44 Feat formatting
Pipeline #58484 canceled
Showing changed files with 2628 additions and 2119 deletions
@@ -22,20 +22,29 @@
 #include "aidge/utils/Types.h"

 void Aidge::ReshapeImpl_cuda::forward() {
-    const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
+    const OperatorTensor &op = static_cast<const OperatorTensor &>(mOp);

     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getRawInput(0) && "missing input #0");

-    const auto& input = op.getInput(0)->refCastFrom(mInputFallback, *op.getOutput(0));
+    const auto &input =
+        op.getInput(0)->refCastFrom(mInputFallback, *op.getOutput(0));

-    std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))-> getImpl() -> setRawPtr(input.getImpl()->rawPtr(), input.getImpl()->size());
+    std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))
+        ->getImpl()
+        ->setRawPtr(input.getImpl()->rawPtr(), input.getImpl()->size());
 }

 void Aidge::ReshapeImpl_cuda::backward() {
-    const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
+    const OperatorTensor &op = static_cast<const OperatorTensor &>(mOp);

     AIDGE_ASSERT(op.getOutput(0)->grad(), "missing output grad #0");

-    const auto& output_grad = op.getOutput(0)->grad()->refCastFrom(mOutputGradFallback, *op.getOutput(0)->grad());
+    const auto &output_grad =
+        op.getOutput(0)->grad()->refCastFrom(mOutputGradFallback,
+                                             *op.getOutput(0)->grad());

-    std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->grad() -> getImpl() -> setRawPtr(output_grad.getImpl()->rawPtr(), output_grad.getImpl()->size());
+    std::static_pointer_cast<Tensor>(mOp.getRawInput(0))
+        ->grad()
+        ->getImpl()
+        ->setRawPtr(output_grad.getImpl()->rawPtr(),
+                    output_grad.getImpl()->size());
 }
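For context, both paths above make Reshape zero-copy: instead of copying data, the output tensor's implementation is pointed at the input's raw buffer via setRawPtr. A minimal standalone sketch of that aliasing pattern follows; the View type is hypothetical, invented for illustration, and is not the Aidge API.

// Hypothetical illustration of the zero-copy aliasing done by
// ReshapeImpl_cuda::forward(): the "output" view borrows the input's
// raw buffer instead of copying it.
#include <cstddef>
#include <iostream>
#include <vector>

struct View {
    float *ptr = nullptr; // borrowed, not owned
    std::size_t size = 0;
    void setRawPtr(float *p, std::size_t n) { ptr = p; size = n; }
};

int main() {
    std::vector<float> input(6, 1.0f); // "input tensor" storage, shape (2, 3)
    View output;                       // "output tensor", shape (3, 2)

    // Reshape forward: no copy, just alias the same memory.
    output.setRawPtr(input.data(), input.size());

    input[0] = 42.0f;                   // writes through to the output view
    std::cout << output.ptr[0] << "\n"; // prints 42
}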
#include "aidge/backend/cuda/utils/CudaUtils.hpp"
const char* Aidge::Cuda::cublasGetErrorString(cublasStatus_t error)
{
const char *Aidge::Cuda::cublasGetErrorString(cublasStatus_t error) {
switch (error) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
......@@ -28,19 +27,24 @@ const char* Aidge::Cuda::cublasGetErrorString(cublasStatus_t error)
return "<unknown>";
}
void Aidge::Cuda::setMultiDevicePeerAccess(unsigned int size, unsigned int* devices)
{
void Aidge::Cuda::setMultiDevicePeerAccess(unsigned int size,
unsigned int *devices) {
for (unsigned int i = 0; i < size; ++i) {
for (unsigned int j = 0; j < size; ++j) {
if (i != j) {
int canAccessPeer = 0;
CHECK_CUDA_STATUS(cudaDeviceCanAccessPeer(&canAccessPeer,
devices[j], devices[i]));
devices[j],
devices[i]));
if (canAccessPeer) {
CHECK_CUDA_STATUS(cudaSetDevice(devices[j]));
const cudaError_t status = cudaDeviceEnablePeerAccess(devices[i], 0);
const cudaError_t status =
cudaDeviceEnablePeerAccess(devices[i], 0);
if (status == cudaErrorPeerAccessAlreadyEnabled) {
fmt::print("Peer access already enabled between device {} and device {}\n", devices[j], devices[i]);
fmt::print("Peer access already enabled between "
"device {} and device {}\n",
devices[j],
devices[i]);
} else {
CHECK_CUDA_STATUS(status);
}
......
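For context, setMultiDevicePeerAccess walks every ordered pair of the given devices; cudaDeviceEnablePeerAccess is one-directional and must be issued from the accessing device, which is why cudaSetDevice(devices[j]) precedes it. Below is a standalone sketch of the same pattern written directly against the CUDA runtime API, with the Aidge helpers and the CHECK_CUDA_STATUS macro replaced by plain error prints.

// Minimal sketch: enable peer access between all device pairs,
// tolerating cudaErrorPeerAccessAlreadyEnabled like the code above.
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    int count = 0;
    if (cudaGetDeviceCount(&count) != cudaSuccess || count < 2) {
        std::printf("need at least two CUDA devices\n");
        return 0;
    }
    for (int i = 0; i < count; ++i) {
        for (int j = 0; j < count; ++j) {
            if (i == j) continue;
            int canAccessPeer = 0;
            cudaDeviceCanAccessPeer(&canAccessPeer, j, i);
            if (!canAccessPeer) continue;
            cudaSetDevice(j); // peer access is enabled from the accessing device
            const cudaError_t st = cudaDeviceEnablePeerAccess(i, 0);
            if (st == cudaErrorPeerAccessAlreadyEnabled) {
                std::printf("peer access %d -> %d already enabled\n", j, i);
            } else if (st != cudaSuccess) {
                std::printf("enable %d -> %d failed: %s\n", j, i,
                            cudaGetErrorString(st));
            }
        }
    }
    return 0;
}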