diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
index 5b55735592e05ec345aafc9ea2d123bd0cfd9f72..995689b96acbf48932147a3de18a54d9c83355bd 100644
--- a/src/operator/SubImpl.cpp
+++ b/src/operator/SubImpl.cpp
@@ -42,13 +42,15 @@ void Aidge::SubImpl_cuda::computeStrides(const Sub_Op& op){
         CHECK_CUDNN_STATUS(cudnnCreateTensorDescriptor(&mTensorDescs[i]));
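+        // Select the cuDNN element type matching the output tensor's DataType;
+        // CudaContext::data_type<T>::value maps a host type to cudnnDataType_t.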
         switch(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()) {
             case DataType::Float64:
-                CHECK_CUDNN_STATUS(cudnnSetTensorNdDescriptor(mTensorDescs[i], aContext::data_type<double>::value, mDims[i].size(), mDims[i].data(), mStrides[0].data()));
+                CHECK_CUDNN_STATUS(cudnnSetTensorNdDescriptor(mTensorDescs[i], CudaContext::data_type<double>::value, mDims[i].size(), mDims[i].data(), mStrides[i].data()));
                 break;
             case DataType::Float32:
-                CHECK_CUDNN_STATUS(cudnnSetTensorNdDescriptor(mTensorDescs[i], aContext::data_type<float>::value, mDims[i].size(), mDims[i].data(), mStrides[0].data()));
+                CHECK_CUDNN_STATUS(cudnnSetTensorNdDescriptor(mTensorDescs[i], CudaContext::data_type<float>::value, mDims[i].size(), mDims[i].data(), mStrides[i].data()));
                 break;
             case DataType::Float16:
-                CHECK_CUDNN_STATUS(cudnnSetTensorNdDescriptor(mTensorDescs[i], aContext::data_type<half>::value, mDims[i].size(), mDims[i].data(), mStrides[0].data()));
+                CHECK_CUDNN_STATUS(cudnnSetTensorNdDescriptor(mTensorDescs[i], CudaContext::data_type<half>::value, mDims[i].size(), mDims[i].data(), mStrides[i].data()));
                 break;
             default:
                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Data type is not supported by Backend Cuda");
@@ -100,7 +100,12 @@ void Aidge::SubImpl_cuda::forward_(const std::vector<Tensor>& inputs, const std:
     const typename Cuda::cudnn_scaling_type<T>::type alpha = 1.0f;
     const typename Cuda::cudnn_scaling_type<T>::type beta = 0.0f;
     const typename Cuda::cudnn_scaling_type<T>::type gamma = -1.0f;
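+    // Scaling factors for the accumulation below: alpha scales each input,
+    // beta zeroes the destination on the first write, and gamma = -1 negates
+    // the remaining inputs so that accumulating them implements the subtraction.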
-    cudnnTensorDescriptor_t outputTensorDesc = std::dynamic_pointer_cast<TensorImpl_cuda_>(op.getOutput(0)etImpl())->getCudnnTensorDesc(*op.getOutput(0));
+    cudnnTensorDescriptor_t outputTensorDesc = std::dynamic_pointer_cast<TensorImpl_cuda_>(op.getOutput(0)->getImpl())->getCudnnTensorDesc(*op.getOutput(0));
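+    // getOutput(0) is the typed view of getRawOutput(0); rawPtr() exposes the
+    // output's device memory for the cuDNN call below.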
     void* outputRawPtr = std::static_pointer_cast<Tensor>(op.getRawOutput(0))->getImpl()->rawPtr();
     // Add first input to the output
     CHECK_CUDNN_STATUS(