diff --git a/src/PTQ/CLE.cpp b/src/PTQ/CLE.cpp
index 5ffc8eb29a04313d91d94069eedc29697a47e4b0..f70793cd297bdba3806bce68b57704fcdc7c4d3d 100644
--- a/src/PTQ/CLE.cpp
+++ b/src/PTQ/CLE.cpp
@@ -174,7 +174,7 @@ void crossLayerEqualization(std::shared_ptr<GraphView> graphView, double targetD
 
             insertScalingBelowProducer(n1->getParent(1), s1, graphView);
 
-            if (n1->type() != "MatMul") // TODO : enhance this !
+            if (n1->type() != "MatMul") // TODO : exclude every node that we can't call getParent(2) on !
                 if (n1->getParent(2))
                     insertScalingBelowProducer(n1->getParent(2), s1, graphView);
 
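// Context note (not part of the patch): the guard above scales input #2 (the bias)
// only when the operator actually has a third input, and the updated TODO points
// out that querying getParent(2) must be avoided on two-input operators such as
// MatMul. Below is a minimal, framework-agnostic sketch of that guard pattern;
// the Node struct, parentOrNull accessor and scaleProducer callback are
// illustrative stand-ins, not Aidge API.
#include <cstddef>
#include <functional>
#include <vector>

struct Node {
    // parents[i] is the producer of input i, or nullptr when that input is absent.
    std::vector<Node*> parents;

    // Safe accessor: returns nullptr instead of failing when the input index
    // does not exist, which is the case the TODO asks the guard to cover.
    Node* parentOrNull(std::size_t idx) const {
        return idx < parents.size() ? parents[idx] : nullptr;
    }
};

// Scale the optional bias input (index 2) only when it is actually wired.
void scaleOptionalBias(const Node& node,
                       double scaling,
                       const std::function<void(Node*, double)>& scaleProducer) {
    if (Node* bias = node.parentOrNull(2)) {
        scaleProducer(bias, scaling);
    }
}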
diff --git a/src/backend/cuda/operator/LSQImpl.cpp b/src/backend/cuda/operator/LSQImpl.cpp
index 5f9f032fafcf17653450da59e9a3193c2af1f373..bb30cc10b6e87f3d6797918d02874ebca48d47ea 100644
--- a/src/backend/cuda/operator/LSQImpl.cpp
+++ b/src/backend/cuda/operator/LSQImpl.cpp
@@ -56,8 +56,8 @@ void Aidge::LSQImpl_cuda::backward() {
         if (mWorkspace != nullptr) {
             cudaFree(mWorkspace);
         }
-        std::size_t sizeofData = getDataTypeBitWidth(gra_int0->dataType()) / 8;
-        CHECK_CUDA_STATUS(cudaMalloc(&mWorkspace, sizeofData * gra_int0->size()));
+        std::size_t sizeOfData = getDataTypeBitWidth(gra_int0->dataType()) / 8;
+        CHECK_CUDA_STATUS(cudaMalloc(&mWorkspace, sizeOfData * gra_int0->size()));
         mWorkspaceSize = gra_int0->size();
     }
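// Context note (not part of the patch): the hunk above only renames sizeofData to
// sizeOfData; the surrounding code frees and re-allocates a CUDA workspace sized
// as (bit width / 8) * element count (the enclosing condition is outside the
// hunk). A common way to structure that is a grow-only buffer, sketched below
// with illustrative names (Workspace, ensure, bitWidth) that are not Aidge API;
// only standard CUDA runtime calls (cudaMalloc, cudaFree) are used.
#include <cstddef>
#include <cuda_runtime.h>

struct Workspace {
    void*       ptr  = nullptr;
    std::size_t size = 0;   // element count currently backed by ptr

    // Re-allocate only when the requested element count exceeds the current
    // capacity; otherwise keep the existing buffer untouched.
    cudaError_t ensure(std::size_t count, std::size_t bitWidth) {
        if (ptr != nullptr && count <= size) {
            return cudaSuccess;
        }
        if (ptr != nullptr) {
            cudaFree(ptr);
            ptr = nullptr;
        }
        const std::size_t sizeOfData = bitWidth / 8;             // bytes per element
        const cudaError_t status = cudaMalloc(&ptr, sizeOfData * count);
        if (status == cudaSuccess) {
            size = count;
        }
        return status;
    }
};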