diff --git a/CMakeLists.txt b/CMakeLists.txt
index d940def39a4e092bb01765a0b127b41c6a88914f..01ebb6f258b173aee6df867c5c5c991ec936df57 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,11 +2,24 @@
 cmake_minimum_required(VERSION 3.18)
 
 file(READ "${CMAKE_SOURCE_DIR}/version.txt" version)
+add_compile_definitions(PROJECT_VERSION="${version}")
 file(READ "${CMAKE_SOURCE_DIR}/project_name.txt" project)
 
 message(STATUS "Project name: ${project}")
 message(STATUS "Project version: ${version}")
 
+# Retrieve the short hash of the current git commit (empty if git is unavailable).
+execute_process(
+    COMMAND git rev-parse --short HEAD
+    WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
+    OUTPUT_VARIABLE GIT_COMMIT_HASH
+    OUTPUT_STRIP_TRAILING_WHITESPACE
+    ERROR_QUIET
+)
+message(STATUS "Latest git commit: ${GIT_COMMIT_HASH}")
+
+# Define a preprocessor macro with the git commit hash
+add_compile_definitions(GIT_COMMIT_HASH="${GIT_COMMIT_HASH}")
 # Note : project name is {project} and python module name is also {project}
 set(module_name _${project}) # target name
@@ -33,6 +46,11 @@ endif()
 
 enable_language(CUDA)
 
+message(STATUS "Cuda compiler version = ${CMAKE_CUDA_COMPILER_VERSION}")
+# Define a preprocessor macro with the CUDA compiler version
+add_compile_definitions(CUDA_COMPILER_VERSION="${CMAKE_CUDA_COMPILER_VERSION}")
+
+
 ##############################################
 # Find system dependencies
 find_package(CUDAToolkit REQUIRED)
diff --git a/include/aidge/utils/sys_info/CudaVersionInfo.hpp b/include/aidge/utils/sys_info/CudaVersionInfo.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..17490476b18d62da66671a28f76709349e3ba805
--- /dev/null
+++ b/include/aidge/utils/sys_info/CudaVersionInfo.hpp
@@ -0,0 +1,53 @@
+#ifndef AIDGE_UTILS_SYS_INFO_CUDA_VERSION_INFO_H
+#define AIDGE_UTILS_SYS_INFO_CUDA_VERSION_INFO_H
+
+#include "aidge/backend/cuda/utils/CudaUtils.hpp"  // CHECK_CUDA_STATUS
+#include "aidge/utils/Log.hpp"
+
+namespace Aidge {
+
+#ifndef PROJECT_VERSION // Normally defined in CMakeLists.txt
+#define PROJECT_VERSION "Unknown version"
+#endif
+#ifndef GIT_COMMIT_HASH
+#define GIT_COMMIT_HASH ""
+#endif
+#ifndef CUDA_COMPILER_VERSION
+#define CUDA_COMPILER_VERSION "Unknown version"
+#endif
+inline void showCudaVersion() { // inline: defined in a header, avoid multiple-definition errors
+    Log::info("Aidge backend CUDA: {} ({}), {} {}", PROJECT_VERSION, GIT_COMMIT_HASH, __DATE__, __TIME__);
+    Log::info("CUDA compiler version: {}", CUDA_COMPILER_VERSION);
+    Log::info("CuDNN version: {}.{}.{}\n", CUDNN_MAJOR, CUDNN_MINOR,
+              CUDNN_PATCHLEVEL);
+
+    int deviceCount = 0;
+    CHECK_CUDA_STATUS(cudaGetDeviceCount(&deviceCount));
+
+    if (deviceCount == 0) {
+        Log::warn("There are no available device(s) that support CUDA");
+    } else {
+        Log::info("Detected {} CUDA Capable device(s)", deviceCount);
+    }
+
+    for (int dev = 0; dev < deviceCount; ++dev) {
+        CHECK_CUDA_STATUS(cudaSetDevice(dev));
+        cudaDeviceProp deviceProp;
+        CHECK_CUDA_STATUS(cudaGetDeviceProperties(&deviceProp, dev));
+
+        Log::info("\nDevice #{}: \"{}\"", dev, deviceProp.name);
+
+        int driverVersion = 0;
+        int runtimeVersion = 0;
+        CHECK_CUDA_STATUS(cudaDriverGetVersion(&driverVersion));
+        CHECK_CUDA_STATUS(cudaRuntimeGetVersion(&runtimeVersion));
+        Log::info(
+            "\tCUDA Driver Version / Runtime Version:          {}.{} / {}.{}",
+            (driverVersion / 1000), ((driverVersion % 100) / 10),
+            (runtimeVersion / 1000), ((runtimeVersion % 100) / 10));
+        Log::info("\tCUDA Capability Major/Minor version number:     {}.{}",
+                  deviceProp.major, deviceProp.minor);
+    }
+}
+}  // namespace Aidge
+#endif  // AIDGE_UTILS_SYS_INFO_CUDA_VERSION_INFO_H
diff --git a/python_binding/pybind_backend_cuda.cpp b/python_binding/pybind_backend_cuda.cpp
index 4b68621b14c2260754dd438d74f059b9f9aa256b..abd1997389f3574a24e171f6ab26628dcfe40cfd 100644
--- a/python_binding/pybind_backend_cuda.cpp
+++ b/python_binding/pybind_backend_cuda.cpp
@@ -6,8 +6,10 @@ namespace py = pybind11;
 
 namespace Aidge {
 
-void init_Aidge(py::module& /*m*/){
+void init_cuda_sys_info(py::module& m);
 
+void init_Aidge(py::module& m){
+    init_cuda_sys_info(m);
 }
 
 PYBIND11_MODULE(aidge_backend_cuda, m) {
diff --git a/python_binding/utils/sys_info/pybind_CudaVersionInfo.cpp b/python_binding/utils/sys_info/pybind_CudaVersionInfo.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..64f650903ec75d579ffd58dbd6d7db7bbaf573a2
--- /dev/null
+++ b/python_binding/utils/sys_info/pybind_CudaVersionInfo.cpp
@@ -0,0 +1,9 @@
+#include <pybind11/pybind11.h>
+#include "aidge/utils/sys_info/CudaVersionInfo.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+void init_cuda_sys_info(py::module& m){
+    m.def("show_cuda_version", &showCudaVersion);
+}
+}
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index eb9cc6a1f4412178525a5e6bccd32e94c4413d4d..6692b342c7f745eede689dd79a9a704bbefa9d77 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -45,18 +45,14 @@ void Aidge::AvgPoolingImpl_cuda<DIM>::forward() {
                                         &strides[0]));
     }
 
-    switch(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()) {
-        case DataType::Float64:
-            forward_<double>(input);
-            break;
-        case DataType::Float32:
-            forward_<float>(input);
-            break;
-        case DataType::Float16:
-            forward_<half>(input);
-            break;
-        default:
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Data type is not supported by Backend Cuda");
+    // Do the actual forward computation
+    // Template is only for scaling parameters, which are always in float
+    // except when the pooling is performed in double precision.
+    if (op.getOutput(0)->dataType() == DataType::Float64) {
+        forward_<double>(input);
+    }
+    else {
+        forward_<float>(input);
     }
 }
 
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index 096ee9485a03b736326f46e9a569c6b3c9b5a631..c0c32d3bbb758c9403577c84500bfe951e5e1a96 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -102,18 +102,11 @@ void Aidge::ConvImpl_cuda<DIM>::forward() {
     // Do the actual forward computation
     // Template is only for scaling parameters, which are always in float
     // excepted when the convolution is performed in double precision.
-    switch(op.getOutput(0)->dataType()) {
-        case DataType::Float64:
-            forward_<double>(input0, input1, input2);
-            break;
-        case DataType::Float32:
-            forward_<float>(input0, input1, input2);
-            break;
-        case DataType::Float16:
-            forward_<half>(input0, input1, input2);
-            break;
-        default:
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Data type is not supported by Backend Cuda");
+    if (op.getOutput(0)->dataType() == DataType::Float64) {
+        forward_<double>(input0, input1, input2);
+    }
+    else {
+        forward_<float>(input0, input1, input2);
     }
 }
 
diff --git a/src/operator/MaxPoolingImpl.cpp b/src/operator/MaxPoolingImpl.cpp
index b8d7c81948bd898b7cc4e2f3bead9c498175e2c1..de41915e7506cd121f25a6112252ecea92b047d5 100644
--- a/src/operator/MaxPoolingImpl.cpp
+++ b/src/operator/MaxPoolingImpl.cpp
@@ -45,18 +45,14 @@ void Aidge::MaxPoolingImpl_cuda<DIM>::forward() {
                                         &strides[0]));
     }
 
-    switch(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()) {
-        case DataType::Float64:
-            forward_<double>(input);
-            break;
-        case DataType::Float32:
-            forward_<float>(input);
-            break;
-        case DataType::Float16:
-            forward_<half>(input);
-            break;
-        default:
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Data type is not supported by Backend Cuda");
+    // Do the actual forward computation
+    // Template is only for scaling parameters, which are always in float
+    // except when the pooling is performed in double precision.
+    if (op.getOutput(0)->dataType() == DataType::Float64) {
+        forward_<double>(input);
+    }
+    else {
+        forward_<float>(input);
     }
 }
 
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index 2ebd6b276e02000bdefb22fe8d2373255a1a5c2c..0a4eeeb7d8a2a4be94b5ac6b43dbae69cd8e3869 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -37,18 +37,14 @@ void Aidge::ReLUImpl_cuda::forward() {
 		#endif
     }
 
-    switch(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()) {
-        case DataType::Float64:
-            forward_<double>(input);
-            break;
-        case DataType::Float32:
-            forward_<float>(input);
-            break;
-        case DataType::Float16:
-            forward_<half>(input);
-            break;
-        default:
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Data type is not supported by Backend Cuda");
+    // Do the actual forward computation
+    // Template is only for scaling parameters, which are always in float
+    // except when the operation is performed in double precision.
+    if (op.getOutput(0)->dataType() == DataType::Float64) {
+        forward_<double>(input);
+    }
+    else {
+        forward_<float>(input);
     }
 }
 
diff --git a/unit_tests/Test_AvgPoolingImpl.cpp b/unit_tests/Test_AvgPoolingImpl.cpp
index d4d39db555e9d12c7e5135d1eb3db6ffc8f459c3..dfadebbe07aa38371576cf4006773484494751a0 100644
--- a/unit_tests/Test_AvgPoolingImpl.cpp
+++ b/unit_tests/Test_AvgPoolingImpl.cpp
@@ -80,7 +80,6 @@ TEST_CASE("[gpu/operator] AvgPooling(forward)", "[AvgPooling][GPU]") {
         op->associateInput(0,myInput);
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
-        op->computeOutputDims();
         myAvgPool->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
@@ -113,7 +112,6 @@ TEST_CASE("[gpu/operator] AvgPooling(forward)", "[AvgPooling][GPU]") {
         op->associateInput(0,myInput2);
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
-        op->computeOutputDims();
         myAvgPool->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
@@ -147,7 +145,6 @@ TEST_CASE("[gpu/operator] AvgPooling(forward)", "[AvgPooling][GPU]") {
         op->associateInput(0,myInput2);
         op->setDataType(DataType::Float16);
         op->setBackend("cuda");
-        op->computeOutputDims();
         myAvgPool->forward();
 
         half_float::half* computedOutput   = new half_float::half[myOutput->size()]();
diff --git a/unit_tests/Test_ConvImpl.cpp b/unit_tests/Test_ConvImpl.cpp
index b7faadd677336b9ff72274ea250251f95785b24f..12e40cf8266a86259c5128b425919214f2db6052 100644
--- a/unit_tests/Test_ConvImpl.cpp
+++ b/unit_tests/Test_ConvImpl.cpp
@@ -53,7 +53,6 @@ TEST_CASE("[gpu/operator] Conv(forward)") {
 
         op->associateInput(0,myInput);
         op->associateInput(1,myWeights);
-        op->computeOutputDims();
         myConv->forward();
 
         REQUIRE(op->getOutput(0)->size() == 1);
@@ -210,7 +209,6 @@ TEST_CASE("[gpu/operator] Conv(forward)") {
         op->associateInput(0,myInput);
         op->associateInput(1,myWeights);
         op->associateInput(2,myBias);
-        op->computeOutputDims();
         myConv->forward();
         // op->getOutput(0)->print();
 
diff --git a/unit_tests/Test_FCImpl.cpp b/unit_tests/Test_FCImpl.cpp
index 54e37db15ded5546eb8fc3caacff9bae238b452c..0126755d08727597b00823b2055300e7b15accb3 100644
--- a/unit_tests/Test_FCImpl.cpp
+++ b/unit_tests/Test_FCImpl.cpp
@@ -68,7 +68,6 @@ TEST_CASE("[gpu/operator] FC(forward)", "[FC][GPU]") {
         op->associateInput(0, myInput);
         op -> setDataType(DataType::Float32);
         op -> setBackend("cuda");
-        op->computeOutputDims();
         myFC->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
@@ -117,7 +116,6 @@ TEST_CASE("[gpu/operator] FC(forward)", "[FC][GPU]") {
         op->associateInput(0, myInput);
         op -> setDataType(DataType::Float32);
         op -> setBackend("cuda");
-        op->computeOutputDims();
         myFC->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
diff --git a/unit_tests/Test_MaxPoolingImpl.cpp b/unit_tests/Test_MaxPoolingImpl.cpp
index b2ec0dfe5dc6df072b6be3b20c075190cd3f6fce..bc2efdd447363044dc02fab06964909756a8e2d1 100644
--- a/unit_tests/Test_MaxPoolingImpl.cpp
+++ b/unit_tests/Test_MaxPoolingImpl.cpp
@@ -77,7 +77,6 @@ TEST_CASE("[cpu/operator] MaxPooling(forward)", "[MaxPooling][CPU]") {
         myMaxPool->getOperator()->associateInput(0,myInput);
         myMaxPool->getOperator()->setDataType(DataType::Float32);
         myMaxPool->getOperator()->setBackend("cuda");
-        op->computeOutputDims();
         myMaxPool->forward();
         
         float* computedOutput   = new float[myOutput->size()]();
diff --git a/unit_tests/Test_ReLUImpl.cpp b/unit_tests/Test_ReLUImpl.cpp
index 82da6fae6737ee39fc60d771c10dc69fa2dea5f6..5651496561f3e1d864767ec38addd4d704b8693c 100644
--- a/unit_tests/Test_ReLUImpl.cpp
+++ b/unit_tests/Test_ReLUImpl.cpp
@@ -37,7 +37,6 @@ TEST_CASE("[gpu/operator] ReLU(forward)", "[ReLU][GPU]") {
         op->associateInput(0,input0);
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
-        op->computeOutputDims();
         myReLU->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
@@ -70,7 +69,6 @@ TEST_CASE("[gpu/operator] ReLU(forward)", "[ReLU][GPU]") {
         op->associateInput(0,input0);
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
-        op->computeOutputDims();
         myReLU->forward();
         
         float* computedOutput   = new float[myOutput->size()]();
@@ -115,7 +113,6 @@ TEST_CASE("[gpu/operator] ReLU(forward)", "[ReLU][GPU]") {
         op->associateInput(0,input0);
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
-        op->computeOutputDims();
         myReLU->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
@@ -184,7 +181,6 @@ TEST_CASE("[gpu/operator] ReLU(forward)", "[ReLU][GPU]") {
         op->associateInput(0,input0);
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
-        op->computeOutputDims();
         op->forward();
 
         float* computedOutput   = new float[myOutput->size()]();