diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
index d12c714d34b2d457a5b4a675bbd4b99c1211362b..a8b6595731f3ba8ecc14fffcd306ada52bb52616 100644
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ b/.gitlab/ci/build.gitlab-ci.yml
@@ -187,7 +187,7 @@ build:ubuntu_python:
 #     - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force
 #     - Remove-Item .\build_cpp\ -Recurse
 #     # aidge_backend_cpu
-#     - 'curl "https://gitlab.eclipse.org/api/v4/projects/5140/jobs/artifacts/master/download?job=build:windows_cpp" -o build_artifacts.zip'
+#     - 'curl "https://gitlab.eclipse.org/api/v4/projects/5140/jobs/artifacts/main/download?job=build:windows_cpp" -o build_artifacts.zip'
 #     - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force
 #     - Remove-Item .\build_cpp\ -Recurse
 
@@ -227,7 +227,7 @@ build:ubuntu_python:
 #     - 'curl "https://gitlab.eclipse.org/api/v4/projects/5139/jobs/artifacts/main/download?job=build:windows_python" -o build_artifacts.zip'
 #     - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force
 #     # aidge_backend_cpu (Python)
-#     - 'curl "https://gitlab.eclipse.org/api/v4/projects/5140/jobs/artifacts/master/download?job=build:windows_python" -o build_artifacts.zip'
+#     - 'curl "https://gitlab.eclipse.org/api/v4/projects/5140/jobs/artifacts/main/download?job=build:windows_python" -o build_artifacts.zip'
 #     - Expand-Archive -Path .\build_artifacts.zip -DestinationPath . -Force
 
 #     - python -m pip install virtualenv
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index f1180c1972985411e65dc4076228518cb1485672..6692b342c7f745eede689dd79a9a704bbefa9d77 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -45,6 +45,9 @@ void Aidge::AvgPoolingImpl_cuda<DIM>::forward() {
                                         &strides[0]));
     }
 
+    // Do the actual forward computation.
+    // The template is only for the scaling parameters, which are always float,
+    // except when the operation is performed in double precision.
     if (op.getOutput(0)->dataType() == DataType::Float64) {
         forward_<double>(input);
     }
diff --git a/src/operator/MaxPoolingImpl.cpp b/src/operator/MaxPoolingImpl.cpp
index 3054bb1a39dd55df5ae5f43f09f41a12360bd7de..156c67ff8f571725a5d5df18ada772d8cc8957d3 100644
--- a/src/operator/MaxPoolingImpl.cpp
+++ b/src/operator/MaxPoolingImpl.cpp
@@ -45,6 +45,9 @@ void Aidge::MaxPoolingImpl_cuda<DIM>::forward() {
                                         &strides[0]));
     }
 
+    // Do the actual forward computation.
+    // The template is only for the scaling parameters, which are always float,
+    // except when the operation is performed in double precision.
     if (op.getOutput(0)->dataType() == DataType::Float64) {
         forward_<double>(input);
     }
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index 6dd211e75374986ead03195e3175d84c6d596cc4..055c343ef9997f4b0ea1764b828d5e4c57543a31 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -37,6 +37,9 @@ void Aidge::ReLUImpl_cuda::forward() {
 		#endif
     }
 
+    // Do the actual forward computation.
+    // The template is only for the scaling parameters, which are always float,
+    // except when the operation is performed in double precision.
     if (op.getOutput(0)->dataType() == DataType::Float64) {
         forward_<double>(input);
     }
diff --git a/unit_tests/Test_AvgPoolingImpl.cpp b/unit_tests/Test_AvgPoolingImpl.cpp
index d4d39db555e9d12c7e5135d1eb3db6ffc8f459c3..dfadebbe07aa38371576cf4006773484494751a0 100644
--- a/unit_tests/Test_AvgPoolingImpl.cpp
+++ b/unit_tests/Test_AvgPoolingImpl.cpp
@@ -80,7 +80,6 @@ TEST_CASE("[gpu/operator] AvgPooling(forward)", "[AvgPooling][GPU]") {
         op->associateInput(0,myInput);
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
-        op->computeOutputDims();
         myAvgPool->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
@@ -113,7 +112,6 @@ TEST_CASE("[gpu/operator] AvgPooling(forward)", "[AvgPooling][GPU]") {
         op->associateInput(0,myInput2);
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
-        op->computeOutputDims();
         myAvgPool->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
@@ -147,7 +145,6 @@ TEST_CASE("[gpu/operator] AvgPooling(forward)", "[AvgPooling][GPU]") {
         op->associateInput(0,myInput2);
         op->setDataType(DataType::Float16);
         op->setBackend("cuda");
-        op->computeOutputDims();
         myAvgPool->forward();
 
         half_float::half* computedOutput   = new half_float::half[myOutput->size()]();
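
For context, a minimal sketch of the test flow after dropping the explicit `computeOutputDims()` calls in this and the following test files, assuming `forward()` now triggers output-dimension propagation itself. Names such as `op`, `myInput`, `myOutput`, and `myAvgPool` mirror the tests above, and the device-to-host copy follows the pattern these tests already use.

```cpp
#include <memory>
#include <cuda_runtime.h>

// Hedged sketch of the updated test flow (not runnable on its own; the
// Tensor setup is omitted). The explicit op->computeOutputDims() step is
// gone: output dimensions are assumed to be resolved inside forward().
std::shared_ptr<Node> myAvgPool = AvgPooling({3, 3});
auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool->getOperator());
op->associateInput(0, myInput);     // myInput as defined in the test above
op->setDataType(DataType::Float32);
op->setBackend("cuda");
myAvgPool->forward();               // dims propagate here; no separate call

// Device-to-host copy, following the pattern these tests already use.
float* computedOutput = new float[myOutput->size()]();
cudaMemcpy(computedOutput, op->getOutput(0)->getImpl()->rawPtr(),
           sizeof(float) * myOutput->size(), cudaMemcpyDeviceToHost);
// ... REQUIRE(...) comparisons, then delete[] computedOutput;
```
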
diff --git a/unit_tests/Test_ConvImpl.cpp b/unit_tests/Test_ConvImpl.cpp
index b7faadd677336b9ff72274ea250251f95785b24f..12e40cf8266a86259c5128b425919214f2db6052 100644
--- a/unit_tests/Test_ConvImpl.cpp
+++ b/unit_tests/Test_ConvImpl.cpp
@@ -53,7 +53,6 @@ TEST_CASE("[gpu/operator] Conv(forward)") {
 
         op->associateInput(0,myInput);
         op->associateInput(1,myWeights);
-        op->computeOutputDims();
         myConv->forward();
 
         REQUIRE(op->getOutput(0)->size() == 1);
@@ -210,7 +209,6 @@ TEST_CASE("[gpu/operator] Conv(forward)") {
         op->associateInput(0,myInput);
         op->associateInput(1,myWeights);
         op->associateInput(2,myBias);
-        op->computeOutputDims();
         myConv->forward();
         // op->getOutput(0)->print();
 
diff --git a/unit_tests/Test_FCImpl.cpp b/unit_tests/Test_FCImpl.cpp
index 54e37db15ded5546eb8fc3caacff9bae238b452c..0126755d08727597b00823b2055300e7b15accb3 100644
--- a/unit_tests/Test_FCImpl.cpp
+++ b/unit_tests/Test_FCImpl.cpp
@@ -68,7 +68,6 @@ TEST_CASE("[gpu/operator] FC(forward)", "[FC][GPU]") {
         op->associateInput(0, myInput);
         op -> setDataType(DataType::Float32);
         op -> setBackend("cuda");
-        op->computeOutputDims();
         myFC->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
@@ -117,7 +116,6 @@ TEST_CASE("[gpu/operator] FC(forward)", "[FC][GPU]") {
         op->associateInput(0, myInput);
         op -> setDataType(DataType::Float32);
         op -> setBackend("cuda");
-        op->computeOutputDims();
         myFC->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
diff --git a/unit_tests/Test_MaxPoolingImpl.cpp b/unit_tests/Test_MaxPoolingImpl.cpp
index b2ec0dfe5dc6df072b6be3b20c075190cd3f6fce..bc2efdd447363044dc02fab06964909756a8e2d1 100644
--- a/unit_tests/Test_MaxPoolingImpl.cpp
+++ b/unit_tests/Test_MaxPoolingImpl.cpp
@@ -77,7 +77,6 @@ TEST_CASE("[cpu/operator] MaxPooling(forward)", "[MaxPooling][CPU]") {
         myMaxPool->getOperator()->associateInput(0,myInput);
         myMaxPool->getOperator()->setDataType(DataType::Float32);
         myMaxPool->getOperator()->setBackend("cuda");
-        op->computeOutputDims();
         myMaxPool->forward();
         
         float* computedOutput   = new float[myOutput->size()]();
diff --git a/unit_tests/Test_ReLUImpl.cpp b/unit_tests/Test_ReLUImpl.cpp
index 1ac50c29d4b98cc5311bf270e05206fe64ce3b30..b38ac76de00b3622251e0df2c34c0ae7af1a561a 100644
--- a/unit_tests/Test_ReLUImpl.cpp
+++ b/unit_tests/Test_ReLUImpl.cpp
@@ -78,7 +78,6 @@ TEST_CASE("[gpu/operator] ReLU(forward)", "[ReLU][GPU]") {
         op->associateInput(0,input0);
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
-        op->computeOutputDims();
         op->forward();
 
         float* computedOutput   = new float[myOutput->size()]();