From 48657c04586678f03021601a70e61bac18d898e8 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Tue, 25 Jun 2024 13:26:43 +0000
Subject: [PATCH] Fix multiple typos.

---
 src/operator/SubImpl.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
index 5b55735..995689b 100644
--- a/src/operator/SubImpl.cpp
+++ b/src/operator/SubImpl.cpp
@@ -42,13 +42,13 @@ void Aidge::SubImpl_cuda::computeStrides(const Sub_Op& op){
         CHECK_CUDNN_STATUS(cudnnCreateTensorDescriptor(&mTensorDescs[i]));
         switch(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()) {
             case DataType::Float64:
-                CHECK_CUDNN_STATUS(cudnnSetTensorNdDescriptor(mTensorDescs[i], aContext::data_type<double>::value, mDims[i].size(), mDims[i].data(), mStrides[0].data()));
+                CHECK_CUDNN_STATUS(cudnnSetTensorNdDescriptor(mTensorDescs[i], CudaContext::data_type<double>::value, mDims[i].size(), mDims[i].data(), mStrides[0].data()));
                 break;
             case DataType::Float32:
-                CHECK_CUDNN_STATUS(cudnnSetTensorNdDescriptor(mTensorDescs[i], aContext::data_type<float>::value, mDims[i].size(), mDims[i].data(), mStrides[0].data()));
+                CHECK_CUDNN_STATUS(cudnnSetTensorNdDescriptor(mTensorDescs[i], CudaContext::data_type<float>::value, mDims[i].size(), mDims[i].data(), mStrides[0].data()));
                 break;
             case DataType::Float16:
-                CHECK_CUDNN_STATUS(cudnnSetTensorNdDescriptor(mTensorDescs[i], aContext::data_type<half>::value, mDims[i].size(), mDims[i].data(), mStrides[0].data()));
+                CHECK_CUDNN_STATUS(cudnnSetTensorNdDescriptor(mTensorDescs[i], CudaContext::data_type<half>::value, mDims[i].size(), mDims[i].data(), mStrides[0].data()));
                 break;
             default:
                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Data type is not supported by Backend Cuda");
@@ -100,7 +100,7 @@ void Aidge::SubImpl_cuda::forward_(const std::vector<Tensor>& inputs, const std:
     const typename Cuda::cudnn_scaling_type<T>::type alpha = 1.0f;
     const typename Cuda::cudnn_scaling_type<T>::type beta = 0.0f;
     const typename Cuda::cudnn_scaling_type<T>::type gamma = -1.0f;
-    cudnnTensorDescriptor_t outputTensorDesc = std::dynamic_pointer_cast<TensorImpl_cuda_>(op.getOutput(0)etImpl())->getCudnnTensorDesc(*op.getOutput(0));
+    cudnnTensorDescriptor_t outputTensorDesc = std::dynamic_pointer_cast<TensorImpl_cuda_>(op.getOutput(0)->getImpl())->getCudnnTensorDesc(*op.getOutput(0));
     void* outputRawPtr = std::static_pointer_cast<Tensor>(op.getRawOutput(0))->getImpl()->rawPtr();
     // Add first input to the output
     CHECK_CUDNN_STATUS(
-- 
GitLab