From 0d7ea89558f4f9433cd8f5c97178c2a47da7b811 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Tue, 4 Mar 2025 10:40:45 +0100
Subject: [PATCH] Fixed typos: renamed `inputLenght` to `inputLength` in CPU operator kernels

---
 include/aidge/backend/cpu/operator/AbsImpl_kernels.hpp |  4 ++--
 .../aidge/backend/cpu/operator/AtanImpl_kernels.hpp    |  8 ++++----
 include/aidge/backend/cpu/operator/ErfImpl_kernels.hpp |  4 ++--
 .../backend/cpu/operator/HeavisideImpl_kernels.hpp     |  4 ++--
 .../backend/cpu/operator/LeakyReLUImpl_kernels.hpp     |  8 ++++----
 include/aidge/backend/cpu/operator/LnImpl_kernels.hpp  | 10 +++++-----
 .../aidge/backend/cpu/operator/ReLUImpl_kernels.hpp    | 10 +++++-----
 .../aidge/backend/cpu/operator/RoundImpl_kernels.hpp   |  4 ++--
 .../aidge/backend/cpu/operator/ScalingImpl_kernels.hpp |  4 ++--
 .../aidge/backend/cpu/operator/SigmoidImpl_kernels.hpp | 10 +++++-----
 .../aidge/backend/cpu/operator/SqrtImpl_kernels.hpp    |  8 ++++----
 .../aidge/backend/cpu/operator/TanhImpl_kernels.hpp    | 10 +++++-----
 12 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/include/aidge/backend/cpu/operator/AbsImpl_kernels.hpp b/include/aidge/backend/cpu/operator/AbsImpl_kernels.hpp
index 16e5f9de..e6474cf2 100644
--- a/include/aidge/backend/cpu/operator/AbsImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AbsImpl_kernels.hpp
@@ -20,14 +20,14 @@
 
 namespace Aidge {
 template <class I, class O>
-void AbsImpl_cpu_forward_kernel(std::size_t inputLenght,
+void AbsImpl_cpu_forward_kernel(std::size_t inputLength,
                                      const void* input_,
                                      void* output_) {
 
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+    for (std::size_t i = 0; i < inputLength; ++i) {
         output[i] = std::abs(input[i]);
     }
 }
diff --git a/include/aidge/backend/cpu/operator/AtanImpl_kernels.hpp b/include/aidge/backend/cpu/operator/AtanImpl_kernels.hpp
index 2a786339..141e5b60 100644
--- a/include/aidge/backend/cpu/operator/AtanImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AtanImpl_kernels.hpp
@@ -20,20 +20,20 @@
 
 namespace Aidge {
 template <class I, class O>
-void AtanImpl_cpu_forward_kernel(std::size_t inputLenght,
+void AtanImpl_cpu_forward_kernel(std::size_t inputLength,
                                     const void* input_,
                                     void* output_) {
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
-    for (size_t i = 0; i < inputLenght; ++i) {
+    for (size_t i = 0; i < inputLength; ++i) {
         output[i] = static_cast<O>(atan(input[i]));
     }
 
 }
 
 template <class O, class GI, class GO>
-void AtanImpl_cpu_backward_kernel(const std::size_t inputLenght,
+void AtanImpl_cpu_backward_kernel(const std::size_t inputLength,
                                      const void* output_, const void* grad_output_,
 				     void* grad_input_) {
     const O* output = static_cast<const O*>(output_);
@@ -41,7 +41,7 @@ void AtanImpl_cpu_backward_kernel(const std::size_t inputLenght,
     GI* grad_input = static_cast<GI*>(grad_input_);
 
     // Apply the derivative of atan for each element in the input array
-    for (size_t i = 0; i < inputLenght; ++i) {
+    for (size_t i = 0; i < inputLength; ++i) {
         // dx = dy * (1 / (1 + x^2))
         grad_input[i] = grad_output[i] * static_cast<O>(1.0 / (1.0 + output[i] * output[i]));
     }
diff --git a/include/aidge/backend/cpu/operator/ErfImpl_kernels.hpp b/include/aidge/backend/cpu/operator/ErfImpl_kernels.hpp
index 02041f55..709f4a6f 100644
--- a/include/aidge/backend/cpu/operator/ErfImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ErfImpl_kernels.hpp
@@ -20,14 +20,14 @@
 
 namespace Aidge {
 template <class I, class O>
-void ErfImpl_cpu_forward_kernel(std::size_t inputLenght,
+void ErfImpl_cpu_forward_kernel(std::size_t inputLength,
                                      const void* input_,
                                      void* output_) {
 
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+    for (std::size_t i = 0; i < inputLength; ++i) {
         output[i] = std::erf(input[i]);
     }
 }
diff --git a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
index 3fd6ca7d..06d7fff8 100644
--- a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
@@ -23,14 +23,14 @@
 namespace Aidge {
 
 template <class I, class O>
-void HeavisideImplCpuForwardKernel(std::size_t inputLenght,
+void HeavisideImplCpuForwardKernel(std::size_t inputLength,
                                    const void *input_,
                                    void *output_,
                                    const float value) {
     const I *input = static_cast<const I *>(input_);
     O *output = static_cast<O *>(output_);
 
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+    for (std::size_t i = 0; i < inputLength; ++i) {
         output[i] = (input[i] > 0) ? 1 : (input[i] == 0 ? value : 0);
     }
 }
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl_kernels.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl_kernels.hpp
index bc856f70..7afd8298 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl_kernels.hpp
@@ -19,7 +19,7 @@
 namespace Aidge {
 template <class I, class O>
 void LeakyReLUImpl_cpu_forward_kernel(const float negativeSlope_,
-                                     std::size_t inputLenght,
+                                     std::size_t inputLength,
                                      const void* input_,
                                      void* output_) {
 
@@ -27,14 +27,14 @@ void LeakyReLUImpl_cpu_forward_kernel(const float negativeSlope_,
     O* output = static_cast<O*>(output_);
     const I negativeSlope = static_cast<const I>(negativeSlope_);
 
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+    for (std::size_t i = 0; i < inputLength; ++i) {
         output[i] = (input[i] >= 0) ? input[i] : input[i] * negativeSlope;
     }
 }
 
 template <class I, class O>
 void LeakyReLUImpl_cpu_backward_kernel(const float negativeSlope_,
-                                     std::size_t inputLenght,
+                                     std::size_t inputLength,
                                      const void* input_,
                                      void* output_) {
 
@@ -42,7 +42,7 @@ void LeakyReLUImpl_cpu_backward_kernel(const float negativeSlope_,
     O* output = static_cast<O*>(output_);
     const I negativeSlope = static_cast<const I>(negativeSlope_);
 
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+    for (std::size_t i = 0; i < inputLength; ++i) {
         output[i] = (input[i] > 0) ? input[i] : negativeSlope*input[i];
     }
 }
diff --git a/include/aidge/backend/cpu/operator/LnImpl_kernels.hpp b/include/aidge/backend/cpu/operator/LnImpl_kernels.hpp
index b30b05bb..ee2864b6 100755
--- a/include/aidge/backend/cpu/operator/LnImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/LnImpl_kernels.hpp
@@ -18,7 +18,7 @@
 
 namespace Aidge {
 template <class I, class O>
-void LnImpl_cpu_forward_kernel(std::size_t inputLenght,
+void LnImpl_cpu_forward_kernel(std::size_t inputLength,
                                const void* input_,
                                void* output_) {
 
@@ -26,8 +26,8 @@ void LnImpl_cpu_forward_kernel(std::size_t inputLenght,
     O* output = static_cast<O*>(output_);
 	const float eps = 1.0e-20f;
 
-//#pragma omp parallel for if (inputLenght > 1024)
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+//#pragma omp parallel for if (inputLength > 1024)
+    for (std::size_t i = 0; i < inputLength; ++i) {
 		if (input[i] > I(eps)) {
 			output[i] = std::log(input[i]);
 		} else {
@@ -37,7 +37,7 @@ void LnImpl_cpu_forward_kernel(std::size_t inputLenght,
 }
 
 template <class I, class GI, class GO>
-void LnImpl_cpu_backward_kernel(const std::size_t inputLenght,
+void LnImpl_cpu_backward_kernel(const std::size_t inputLength,
                                 const void* input_, const void* grad_output_,
 	                            void* grad_input_) {
 						 
@@ -46,7 +46,7 @@ void LnImpl_cpu_backward_kernel(const std::size_t inputLenght,
     GI* grad_input = static_cast<GI*>(grad_input_);
 	const float eps = 1.0e-20f;
 	
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+    for (std::size_t i = 0; i < inputLength; ++i) {
 		if (input[i] > I(eps)) {
 			grad_input[i] = grad_output[i] / input[i];
 		} else {
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_kernels.hpp
index e39e9b7d..bb5d7cc3 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl_kernels.hpp
@@ -26,27 +26,27 @@
 namespace Aidge {
 // Kernels
 template <class I, class O>
-void ReLUImpl_cpu_forward_kernel(std::size_t inputLenght,
+void ReLUImpl_cpu_forward_kernel(std::size_t inputLength,
                                      const void* input_,
                                      void* output_) {
 
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
-//#pragma omp parallel for if (inputLenght > 1024)
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+//#pragma omp parallel for if (inputLength > 1024)
+    for (std::size_t i = 0; i < inputLength; ++i) {
         output[i] = (input[i] > 0) ? input[i] : 0;
     }
 }
 
 template <class I, class GI, class GO>
-void ReLUImpl_cpu_backward_kernel(const std::size_t inputLenght,
+void ReLUImpl_cpu_backward_kernel(const std::size_t inputLength,
                                   const void* input_, const void* grad_output_,
 				  void* grad_input_) {
     const I* input = static_cast<const I*>(input_);
     const GO* grad_output = static_cast<const GO*>(grad_output_);
     GI* grad_input = static_cast<GI*>(grad_input_);
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+    for (std::size_t i = 0; i < inputLength; ++i) {
         grad_input[i] = (input[i] > 0) ? grad_output[i] : 0;
     }
 }
diff --git a/include/aidge/backend/cpu/operator/RoundImpl_kernels.hpp b/include/aidge/backend/cpu/operator/RoundImpl_kernels.hpp
index ba9c63bc..7ac4319b 100644
--- a/include/aidge/backend/cpu/operator/RoundImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/RoundImpl_kernels.hpp
@@ -21,14 +21,14 @@
 
 namespace Aidge {
 template <class I, class O>
-void RoundImpl_cpu_forward_kernel(const std::size_t inputLenght,
+void RoundImpl_cpu_forward_kernel(const std::size_t inputLength,
                                      const void* input_,
                                      void* output_) {
 
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+    for (std::size_t i = 0; i < inputLength; ++i) {
         //std::round would not work since it doesn't follow the halves rules (See ONNX Round)
         output[i] = static_cast<O>(std::nearbyint(static_cast<float>(input[i])));
     }
diff --git a/include/aidge/backend/cpu/operator/ScalingImpl_kernels.hpp b/include/aidge/backend/cpu/operator/ScalingImpl_kernels.hpp
index c758c9cf..f9ca00b7 100644
--- a/include/aidge/backend/cpu/operator/ScalingImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ScalingImpl_kernels.hpp
@@ -76,14 +76,14 @@ template <class I, class O>
 void ScalingImpl_cpu_forward_kernel(const float scalingFactor,
                                     const std::size_t quantizedNbBits,
                                     const bool isOutputUnsigned,
-                                    std::size_t inputLenght,
+                                    std::size_t inputLength,
                                     const void* input_,
                                     void* output_) {
 
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+    for (std::size_t i = 0; i < inputLength; ++i) {
         output[i] = static_cast<O>(input[i] * static_cast<I>(scalingFactor));
 
         if(quantizedNbBits > 0) {
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl_kernels.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl_kernels.hpp
index dfd71ce0..83ad4575 100644
--- a/include/aidge/backend/cpu/operator/SigmoidImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl_kernels.hpp
@@ -18,15 +18,15 @@
 
 namespace Aidge {
 template <class I, class O>
-void SigmoidImpl_cpu_forward_kernel(std::size_t inputLenght,
+void SigmoidImpl_cpu_forward_kernel(std::size_t inputLength,
                                     const void* input_,
                                     void* output_) {
 
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
-//#pragma omp parallel for if (inputLenght > 1024)
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+//#pragma omp parallel for if (inputLength > 1024)
+    for (std::size_t i = 0; i < inputLength; ++i) {
 		if (input[i] > I(0)) {
 			output[i] = O(1) / (O(1) + std::exp(-input[i]));
 		} else {
@@ -36,13 +36,13 @@ void SigmoidImpl_cpu_forward_kernel(std::size_t inputLenght,
 }
 
 template <class O, class GI, class GO>
-void SigmoidImpl_cpu_backward_kernel(const std::size_t inputLenght,
+void SigmoidImpl_cpu_backward_kernel(const std::size_t inputLength,
                                      const void* output_, const void* grad_output_,
 				     void* grad_input_) {
     const O* output = static_cast<const O*>(output_);
     const GO* grad_output = static_cast<const GO*>(grad_output_);
     GI* grad_input = static_cast<GI*>(grad_input_);
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+    for (std::size_t i = 0; i < inputLength; ++i) {
         grad_input[i] = output[i] * (O(1) - output[i]) * grad_output[i];
     }
 }
diff --git a/include/aidge/backend/cpu/operator/SqrtImpl_kernels.hpp b/include/aidge/backend/cpu/operator/SqrtImpl_kernels.hpp
index 0464119c..1ce1ef9b 100644
--- a/include/aidge/backend/cpu/operator/SqrtImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SqrtImpl_kernels.hpp
@@ -21,27 +21,27 @@
 
 namespace Aidge {
 template <class I, class O>
-void SqrtImpl_cpu_forward_kernel(const std::size_t inputLenght,
+void SqrtImpl_cpu_forward_kernel(const std::size_t inputLength,
                                      const void* input_,
                                      void* output_) {
 
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+    for (std::size_t i = 0; i < inputLength; ++i) {
         output[i] = static_cast<O>(std::sqrt(static_cast<float>(input[i])));
     }
 }
 
 template <class I, class O>
-void SqrtImpl_cpu_backward_kernel(const std::size_t inputLenght,
+void SqrtImpl_cpu_backward_kernel(const std::size_t inputLength,
                                      const void* input_,
                                      void* output_) {
 
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+    for (std::size_t i = 0; i < inputLength; ++i) {
         output[i] = static_cast<O>(0.5/(std::sqrt(static_cast<float>(input[i]))));
     }
 }
diff --git a/include/aidge/backend/cpu/operator/TanhImpl_kernels.hpp b/include/aidge/backend/cpu/operator/TanhImpl_kernels.hpp
index fdcac210..49cfe9cb 100644
--- a/include/aidge/backend/cpu/operator/TanhImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/TanhImpl_kernels.hpp
@@ -18,27 +18,27 @@
 
 namespace Aidge {
 template <class I, class O>
-void TanhImpl_cpu_forward_kernel(std::size_t inputLenght,
+void TanhImpl_cpu_forward_kernel(std::size_t inputLength,
                                      const void* input_,
                                      void* output_) {
 
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
-//#pragma omp parallel for if (inputLenght > 1024)
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+//#pragma omp parallel for if (inputLength > 1024)
+    for (std::size_t i = 0; i < inputLength; ++i) {
         output[i] = std::tanh(input[i]);
     }
 }
 
 template <class O, class GI, class GO>
-void TanhImpl_cpu_backward_kernel(const std::size_t inputLenght,
+void TanhImpl_cpu_backward_kernel(const std::size_t inputLength,
                                   const void* output_, const void* grad_output_,
 			          void* grad_input_) {
     const O* output = static_cast<const O*>(output_);
     const GO* grad_output = static_cast<const GO*>(grad_output_);
     GI* grad_input = static_cast<GI*>(grad_input_);
-    for (std::size_t i = 0; i < inputLenght; ++i) {
+    for (std::size_t i = 0; i < inputLength; ++i) {
         grad_input[i] = (O(1) - output[i] * output[i]) * grad_output[i];
     }
 }
-- 
GitLab