From 28ca848f333c2d3c5c231b487fba639e6e1be9fb Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Fri, 14 Feb 2025 15:35:32 +0100
Subject: [PATCH 01/13] feat: Add surrogate backward function for Heaviside
 operator

---
 .../cpu/operator/HeavisideImpl_kernels.hpp    | 19 ++++++++++++++++++-
 src/operator/HeavisideImpl.cpp                | 19 ++++++++++++++++++-
 unit_tests/operator/Test_HeavisideImpl.cpp    | 11 +++++++++++
 3 files changed, 47 insertions(+), 2 deletions(-)

diff --git a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
index 06d7fff8..0bbbddee 100644
--- a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
@@ -19,7 +19,6 @@
 #include "aidge/backend/cpu/operator/HeavisideImpl.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
-
 namespace Aidge {
 
 template <class I, class O>
@@ -35,6 +34,24 @@ void HeavisideImplCpuForwardKernel(std::size_t inputLength,
     }
 }
 
+
+// Surrogate Gradient
+template <class O, class GO, class GI>
+void HeavisideImplCpuBackwardKernel(std::size_t inputLength, 
+                                    const void* output_,
+                                    const void* grad_output_,
+                                    void* grad_input_) {
+
+    const O* output = static_cast<const O*>(output_);
+    const GO* grad_output = static_cast<const GO*>(grad_output_);
+    GI* grad_input = static_cast<GI*>(grad_input_);
+
+    for (size_t i = 0; i < inputLength; ++i) {
+        // dx = dy * (1/PI) * (1 / (1 + (PI * x)^2))
+        grad_input[i] = (1 / M_PI) * grad_output[i] * static_cast<O>(1.0 / (1.0 + output[i] * output[i]));
+    }
+}
+
 // Kernels registration to implementation entry point
 REGISTRAR(HeavisideImplCpu,
           {DataType::Float32},
diff --git a/src/operator/HeavisideImpl.cpp b/src/operator/HeavisideImpl.cpp
index 56ceb9b0..2ead2978 100644
--- a/src/operator/HeavisideImpl.cpp
+++ b/src/operator/HeavisideImpl.cpp
@@ -32,6 +32,23 @@ template <> void Aidge::HeavisideImplCpu::forward() {
                  op_.value());
 }
 
-template <> void Aidge::HeavisideImplCpu::backward() {
+template <> 
+void Aidge::HeavisideImplCpu::backward() {
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Heaviside backward not implemented yet");
+
+    // TODO: The following lines are assuming that the surrogate gradient is Atan
+    // remove that assumption by providing an attribute to Heaviside, 
+    // allowing to choose between different surrogate gradients.
+    
+    // const Heavisde_Op& op_ = dynamic_cast<const Heavisie_Op &>(mOp);
+
+
+
+    // ! backward of hs = forward of atan
+    //const auto impl = Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
+    // std::shared_ptr<Tensor> in0 = op_.getInput(0);
+    // std::shared_ptr<Tensor> out0 = op_.getOutput(0);
+  
+    //impl.forward()
 }
+
diff --git a/unit_tests/operator/Test_HeavisideImpl.cpp b/unit_tests/operator/Test_HeavisideImpl.cpp
index 4cbdf1a0..a0142513 100644
--- a/unit_tests/operator/Test_HeavisideImpl.cpp
+++ b/unit_tests/operator/Test_HeavisideImpl.cpp
@@ -11,6 +11,7 @@
 
 #include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp"
 
+#include <aidge/operator/Memorize.hpp>
 #include <memory>
 #include <cstdlib>
 #include <random>
@@ -22,6 +23,8 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/TensorUtils.hpp"
 
+#include "aidge/operator/Add.hpp"
+
 namespace Aidge
 {
 
@@ -95,4 +98,12 @@ TEST_CASE("[cpu/operator] Heaviside(forward)", "[Heaviside][CPU]") {
         REQUIRE(approxEq<float>(*(op->getOutput(0)), *T1));
     }
 }
+
+TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
+
+    auto add = Add();
+    auto mem = Memorize(2);
+    auto hs = Heaviside(1);
+}
+
 }
-- 
GitLab


From 2d63028a6fd1416b8081d53bcb59b3f4e02e0cb3 Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Fri, 28 Feb 2025 17:52:53 +0100
Subject: [PATCH 02/13] Add Heaviside backward surrogate and associated tests

---
 .../backend/cpu/operator/HeavisideImpl.hpp    |  2 +-
 .../cpu/operator/HeavisideImpl_kernels.hpp    |  5 ++--
 src/operator/HeavisideImpl.cpp                | 17 +++++------
 unit_tests/operator/Test_HeavisideImpl.cpp    | 28 ++++++++++++++++---
 4 files changed, 37 insertions(+), 15 deletions(-)

diff --git a/include/aidge/backend/cpu/operator/HeavisideImpl.hpp b/include/aidge/backend/cpu/operator/HeavisideImpl.hpp
index 7a3ba9ad..877fa2a9 100644
--- a/include/aidge/backend/cpu/operator/HeavisideImpl.hpp
+++ b/include/aidge/backend/cpu/operator/HeavisideImpl.hpp
@@ -23,7 +23,7 @@ namespace Aidge {
 using HeavisideImplCpu =
     OperatorImpl_cpu<Heaviside_Op,
                      void(std::size_t, const void *, void *, const float),
-                     void(const float, std::size_t, const void *, void *)>;
+                     void(std::size_t, const void *, const void *, void *)>;
 
 // Implementation entry point registration for operator Heaviside
 REGISTRAR(Heaviside_Op, "cpu", HeavisideImplCpu::create);
diff --git a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
index 0bbbddee..7fc0eb0a 100644
--- a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
@@ -48,7 +48,8 @@ void HeavisideImplCpuBackwardKernel(std::size_t inputLength,
 
     for (size_t i = 0; i < inputLength; ++i) {
         // dx = dy * (1/PI) * (1 / (1 + (PI * x)^2))
-        grad_input[i] = (1 / M_PI) * grad_output[i] * static_cast<O>(1.0 / (1.0 + output[i] * output[i]));
+        // grad_input[i] = (1 / M_PI) * grad_output[i] * static_cast<O>(1.0 / (1.0 + (output[i] * output[i]) * (M_PI * M_PI)));
+        grad_input[i] = grad_output[i] * static_cast<O>(1.0 / (1.0 + (output[i] * output[i]) * (M_PI * M_PI)));
     }
 }
 
@@ -57,7 +58,7 @@ REGISTRAR(HeavisideImplCpu,
           {DataType::Float32},
           {ProdConso::inPlaceModel,
            Aidge::HeavisideImplCpuForwardKernel<float, float>,
-           nullptr});
+           Aidge::HeavisideImplCpuBackwardKernel<float,float,float>});
 } // namespace Aidge
 
 #endif // AIDGE_CPU_OPERATOR_HEAVISIDEIMPL_KERNELS_H__H_
diff --git a/src/operator/HeavisideImpl.cpp b/src/operator/HeavisideImpl.cpp
index 2ead2978..5bf77f87 100644
--- a/src/operator/HeavisideImpl.cpp
+++ b/src/operator/HeavisideImpl.cpp
@@ -34,21 +34,22 @@ template <> void Aidge::HeavisideImplCpu::forward() {
 
 template <> 
 void Aidge::HeavisideImplCpu::backward() {
-    AIDGE_THROW_OR_ABORT(std::runtime_error, "Heaviside backward not implemented yet");
 
     // TODO: The following lines are assuming that the surrogate gradient is Atan
     // remove that assumption by providing an attribute to Heaviside, 
     // allowing to choose between different surrogate gradients.
     
-    // const Heavisde_Op& op_ = dynamic_cast<const Heavisie_Op &>(mOp);
+    const Heaviside_Op& op_ = dynamic_cast<const Heaviside_Op &>(mOp);
 
+    const auto impl = Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
 
+    auto gra_int0 = op_.getInput(0)->grad();
+    auto gra_out0 = op_.getOutput(0)->grad();
 
-    // ! backward of hs = forward of atan
-    //const auto impl = Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
-    // std::shared_ptr<Tensor> in0 = op_.getInput(0);
-    // std::shared_ptr<Tensor> out0 = op_.getOutput(0);
-  
-    //impl.forward()
+    std::shared_ptr<Tensor> in0 = op_.getInput(0);
+    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
+    AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
+
+    impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
 }
 
diff --git a/unit_tests/operator/Test_HeavisideImpl.cpp b/unit_tests/operator/Test_HeavisideImpl.cpp
index a0142513..515d6802 100644
--- a/unit_tests/operator/Test_HeavisideImpl.cpp
+++ b/unit_tests/operator/Test_HeavisideImpl.cpp
@@ -12,6 +12,7 @@
 #include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp"
 
 #include <aidge/operator/Memorize.hpp>
+#include <aidge/utils/Types.h>
 #include <memory>
 #include <cstdlib>
 #include <random>
@@ -100,10 +101,29 @@ TEST_CASE("[cpu/operator] Heaviside(forward)", "[Heaviside][CPU]") {
 }
 
 TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
+    auto hs = Heaviside(1.0f);
+    auto op = std::static_pointer_cast<OperatorTensor>(hs->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
 
-    auto add = Add();
-    auto mem = Memorize(2);
-    auto hs = Heaviside(1);
-}
+    auto input = Tensor(Array1D<float, 3>({1.0, -1.0, 1.0}));
+    input.setDataType(DataType::Float32);
+    input.setBackend("cpu");
+
+    auto grad = Tensor(Array1D<float, 3>({1.0, 1.0, 1.0}));
+    grad.setDataType(DataType::Float32);
+    grad.setBackend("cpu");
+
+    op->setInput(IOIndex_t(0), std::make_shared<Tensor>(input));
+    op->forward();
 
+    Log::info("Output : ");
+    op->getOutput(0)->print();
+
+    op->getOutput(0)->setGrad(std::make_shared<Tensor>(grad));
+    op->backward();
+
+    Log::info("Gradient : ");
+    op->getInput(0)->grad()->print();
+}
 }
-- 
GitLab


From 09bec2683d0312f8dc700d1e85f01dec254ceade Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Mon, 3 Mar 2025 15:56:10 +0100
Subject: [PATCH 03/13] Improve test for Heaviside backward

---
 .../backend/cpu/operator/HeavisideImpl_kernels.hpp     | 10 +++++++---
 unit_tests/operator/Test_HeavisideImpl.cpp             |  7 ++-----
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
index 7fc0eb0a..4e2a7db2 100644
--- a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
@@ -42,14 +42,18 @@ void HeavisideImplCpuBackwardKernel(std::size_t inputLength,
                                     const void* grad_output_,
                                     void* grad_input_) {
 
+    /*
+     * Heaviside is approximated by an arctan function for backward :
+     * S ~= \frac{1}{\pi}\text{arctan}(\pi U \frac{\alpha}{2})
+     * \frac{dS}{dU} = \frac{\alpha}{2} \frac{1}{(1+(\frac{\pi U \alpha}{2})^2)}}
+     * */
+
     const O* output = static_cast<const O*>(output_);
     const GO* grad_output = static_cast<const GO*>(grad_output_);
     GI* grad_input = static_cast<GI*>(grad_input_);
 
     for (size_t i = 0; i < inputLength; ++i) {
-        // dx = dy * (1/PI) * (1 / (1 + (PI * x)^2))
-        // grad_input[i] = (1 / M_PI) * grad_output[i] * static_cast<O>(1.0 / (1.0 + (output[i] * output[i]) * (M_PI * M_PI)));
-        grad_input[i] = grad_output[i] * static_cast<O>(1.0 / (1.0 + (output[i] * output[i]) * (M_PI * M_PI)));
+        grad_input[i] = grad_output[i] * static_cast<O>(1.0 / (1.0 + (output[i] * M_PI) * (output[i] * M_PI)));
     }
 }
 
diff --git a/unit_tests/operator/Test_HeavisideImpl.cpp b/unit_tests/operator/Test_HeavisideImpl.cpp
index 515d6802..e6aa38b8 100644
--- a/unit_tests/operator/Test_HeavisideImpl.cpp
+++ b/unit_tests/operator/Test_HeavisideImpl.cpp
@@ -117,13 +117,10 @@ TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
     op->setInput(IOIndex_t(0), std::make_shared<Tensor>(input));
     op->forward();
 
-    Log::info("Output : ");
-    op->getOutput(0)->print();
-
     op->getOutput(0)->setGrad(std::make_shared<Tensor>(grad));
     op->backward();
 
-    Log::info("Gradient : ");
-    op->getInput(0)->grad()->print();
+    auto expectedResult = Tensor(Array1D<float,3>({0.0920, 0.0920, 0.0920}));
+    REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), expectedResult));
 }
 }
-- 
GitLab


From eff8202099609940a906fe752dc50596bebd5c12 Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Mon, 3 Mar 2025 16:44:35 +0100
Subject: [PATCH 04/13] Add randomness in Heaviside backward test

---
 src/operator/HeavisideImpl.cpp             |  1 -
 unit_tests/operator/Test_HeavisideImpl.cpp | 57 +++++++++++++++++-----
 2 files changed, 45 insertions(+), 13 deletions(-)

diff --git a/src/operator/HeavisideImpl.cpp b/src/operator/HeavisideImpl.cpp
index 5bf77f87..8349a0ad 100644
--- a/src/operator/HeavisideImpl.cpp
+++ b/src/operator/HeavisideImpl.cpp
@@ -48,7 +48,6 @@ void Aidge::HeavisideImplCpu::backward() {
 
     std::shared_ptr<Tensor> in0 = op_.getInput(0);
     std::shared_ptr<Tensor> out0 = op_.getOutput(0);
-    AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
 
     impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
 }
diff --git a/unit_tests/operator/Test_HeavisideImpl.cpp b/unit_tests/operator/Test_HeavisideImpl.cpp
index e6aa38b8..2743027c 100644
--- a/unit_tests/operator/Test_HeavisideImpl.cpp
+++ b/unit_tests/operator/Test_HeavisideImpl.cpp
@@ -100,27 +100,60 @@ TEST_CASE("[cpu/operator] Heaviside(forward)", "[Heaviside][CPU]") {
     }
 }
 
+// TODO: Make this work for random intput.
 TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
+
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(-2.0f, 2.0f);
+    std::uniform_int_distribution<std::size_t> sizeDist(5, 100);
+
+    const std::size_t tensorSize = sizeDist(gen);
+    
     auto hs = Heaviside(1.0f);
     auto op = std::static_pointer_cast<OperatorTensor>(hs->getOperator());
     op->setDataType(DataType::Float32);
     op->setBackend("cpu");
 
-    auto input = Tensor(Array1D<float, 3>({1.0, -1.0, 1.0}));
-    input.setDataType(DataType::Float32);
-    input.setBackend("cpu");
 
-    auto grad = Tensor(Array1D<float, 3>({1.0, 1.0, 1.0}));
-    grad.setDataType(DataType::Float32);
-    grad.setBackend("cpu");
+    auto inputTensor = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
+    inputTensor->setDataType(DataType::Float32);
+    inputTensor->setBackend("cpu");
+    auto* inputData = static_cast<float*>(inputTensor->getImpl()->rawPtr());
+    
+    for(std::size_t i = 0; i < tensorSize; ++i) {
+        inputData[i] = valueDist(gen);
+    }
+
+    auto gradTensor = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
+    gradTensor->setDataType(DataType::Float32);
+    gradTensor->setBackend("cpu");
+    auto* gradData = static_cast<float*>(gradTensor->getImpl()->rawPtr());
+    
+    for (std::size_t i = 0; i < tensorSize; ++i) {
+        gradData[i] = valueDist(gen);
+    }
 
-    op->setInput(IOIndex_t(0), std::make_shared<Tensor>(input));
+    op->setInput(IOIndex_t(0), inputTensor);
     op->forward();
-
-    op->getOutput(0)->setGrad(std::make_shared<Tensor>(grad));
+    
+    auto output = op->getOutput(0);
+    output->setGrad(gradTensor);
+    
+    // Backward pass
     op->backward();
-
-    auto expectedResult = Tensor(Array1D<float,3>({0.0920, 0.0920, 0.0920}));
-    REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), expectedResult));
+    
+    // Compute expected gradient manually
+    auto expectedGrad = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
+    expectedGrad->setDataType(DataType::Float32);
+    expectedGrad->setBackend("cpu");
+    auto* expectedGradData = static_cast<float*>(expectedGrad->getImpl()->rawPtr());
+    
+    for (std::size_t i = 0; i < tensorSize; ++i) {
+        expectedGradData[i] = gradData[i] * (1.0f / (1.0f + (inputData[i] * M_PI) * (inputData[i] * M_PI)));
+    }
+    
+    // Compare actual gradient with expected gradient
+    REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad));
 }
 }
-- 
GitLab


From c92b78e4c77c18165578a17caee985ad3a6108ce Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Tue, 4 Mar 2025 10:09:25 +0100
Subject: [PATCH 05/13] Format file and remove unused variable

---
 src/operator/HeavisideImpl.cpp | 51 +++++++++++++++-------------------
 1 file changed, 23 insertions(+), 28 deletions(-)

diff --git a/src/operator/HeavisideImpl.cpp b/src/operator/HeavisideImpl.cpp
index 8349a0ad..3932eb33 100644
--- a/src/operator/HeavisideImpl.cpp
+++ b/src/operator/HeavisideImpl.cpp
@@ -13,42 +13,37 @@
 
 #include <stdexcept>
 
-#include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
 template <> void Aidge::HeavisideImplCpu::forward() {
-    const Heaviside_Op &op_ = dynamic_cast<const Heaviside_Op &>(mOp);
-    std::shared_ptr<Tensor> input0 = op_.getInput(0);
-    std::shared_ptr<Tensor> output0 = op_.getOutput(0);
-    AIDGE_ASSERT(input0, "missing input #0");
-
-    const auto impl =
-        Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
-
-    impl.forward(input0->size(),
-                 getCPUPtr(mOp.getRawInput(0)),
-                 getCPUPtr(mOp.getRawOutput(0)),
-                 op_.value());
-}
+  const Heaviside_Op &op_ = dynamic_cast<const Heaviside_Op &>(mOp);
+  std::shared_ptr<Tensor> input0 = op_.getInput(0);
+  std::shared_ptr<Tensor> output0 = op_.getOutput(0);
+  AIDGE_ASSERT(input0, "missing input #0");
 
-template <> 
-void Aidge::HeavisideImplCpu::backward() {
+  const auto impl =
+      Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
 
-    // TODO: The following lines are assuming that the surrogate gradient is Atan
-    // remove that assumption by providing an attribute to Heaviside, 
-    // allowing to choose between different surrogate gradients.
-    
-    const Heaviside_Op& op_ = dynamic_cast<const Heaviside_Op &>(mOp);
+  impl.forward(input0->size(), getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawOutput(0)), op_.value());
+}
 
-    const auto impl = Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
+template <> void Aidge::HeavisideImplCpu::backward() {
 
-    auto gra_int0 = op_.getInput(0)->grad();
-    auto gra_out0 = op_.getOutput(0)->grad();
+  // TODO: The following lines are assuming that the surrogate gradient is Atan
+  // remove that assumption by providing an attribute to Heaviside,
+  // allowing to choose between different surrogate gradients.
 
-    std::shared_ptr<Tensor> in0 = op_.getInput(0);
-    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
+  const Heaviside_Op &op_ = dynamic_cast<const Heaviside_Op &>(mOp);
+  const auto impl =
+      Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
 
-    impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
-}
+  auto in0 = op_.getInput(0);
+  auto gra_int0 = op_.getInput(0)->grad();
+  auto gra_out0 = op_.getOutput(0)->grad();
 
+  impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0),
+                getCPUPtr(gra_int0));
+}
-- 
GitLab


From 9c83962af9e7bd99b64a5d48dc8b518515e72333 Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Tue, 4 Mar 2025 10:59:06 +0100
Subject: [PATCH 06/13] Improve Heaviside Backward Test

Compare the result of the surrogate gradient to the real Atan operator.
---
 unit_tests/operator/Test_HeavisideImpl.cpp | 36 +++++++++++++++++++++-
 1 file changed, 35 insertions(+), 1 deletion(-)

diff --git a/unit_tests/operator/Test_HeavisideImpl.cpp b/unit_tests/operator/Test_HeavisideImpl.cpp
index 2743027c..9241f0d3 100644
--- a/unit_tests/operator/Test_HeavisideImpl.cpp
+++ b/unit_tests/operator/Test_HeavisideImpl.cpp
@@ -11,7 +11,10 @@
 
 #include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp"
 
+#include <aidge/operator/Atan.hpp>
 #include <aidge/operator/Memorize.hpp>
+#include <aidge/operator/Mul.hpp>
+#include <aidge/operator/Producer.hpp>
 #include <aidge/utils/Types.h>
 #include <memory>
 #include <cstdlib>
@@ -100,7 +103,6 @@ TEST_CASE("[cpu/operator] Heaviside(forward)", "[Heaviside][CPU]") {
     }
 }
 
-// TODO: Make this work for random intput.
 TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
 
     std::random_device rd;
@@ -115,6 +117,7 @@ TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
     op->setDataType(DataType::Float32);
     op->setBackend("cpu");
 
+        
 
     auto inputTensor = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
     inputTensor->setDataType(DataType::Float32);
@@ -125,6 +128,29 @@ TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
         inputData[i] = valueDist(gen);
     }
 
+    // Compare it to the real Atan implementation
+    auto mul = Mul();
+    auto pi = std::make_shared<Tensor>(Array1D<float,1>{M_PI});
+    auto producer = Producer(pi);
+    auto atan = Atan();
+    auto mulOp = std::static_pointer_cast<OperatorTensor>(mul->getOperator());
+    auto piOp = std::static_pointer_cast<OperatorTensor>(producer->getOperator());
+    auto atanOp = std::static_pointer_cast<OperatorTensor>(atan->getOperator());
+    mulOp->setBackend("cpu");
+    piOp->setBackend("cpu");
+    atanOp->setBackend("cpu");
+    mulOp->setDataType(DataType::Float32);
+    piOp->setDataType(DataType::Float32);
+    atanOp->setDataType(DataType::Float32);
+
+
+    producer->addChild(mul,0,0);
+    mulOp->setInput(IOIndex_t(1),  inputTensor);
+    mulOp->forward();
+    auto outmul = mulOp->getOutput(0);
+    atanOp->setInput(0, inputTensor);
+    atanOp->forward();
+
     auto gradTensor = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
     gradTensor->setDataType(DataType::Float32);
     gradTensor->setBackend("cpu");
@@ -142,6 +168,10 @@ TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
     
     // Backward pass
     op->backward();
+
+    atanOp->setOutput(0, outmul);
+    atanOp->getOutput(0)->setGrad(gradTensor);
+    atanOp->backward();
     
     // Compute expected gradient manually
     auto expectedGrad = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
@@ -155,5 +185,9 @@ TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
     
     // Compare actual gradient with expected gradient
     REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad));
+
+    // Compare Atan(pi*input) to expected Gradient
+    REQUIRE(approxEq<float>(*(atanOp->getInput(0)->grad()), *expectedGrad));
 }
+
 }
-- 
GitLab


From e7c9550decce4d66d2c35be106fd9ba222dbe4c8 Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Tue, 4 Mar 2025 11:40:40 +0100
Subject: [PATCH 07/13] Fix includes

---
 unit_tests/operator/Test_HeavisideImpl.cpp | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/unit_tests/operator/Test_HeavisideImpl.cpp b/unit_tests/operator/Test_HeavisideImpl.cpp
index 9241f0d3..d3ed3826 100644
--- a/unit_tests/operator/Test_HeavisideImpl.cpp
+++ b/unit_tests/operator/Test_HeavisideImpl.cpp
@@ -11,23 +11,22 @@
 
 #include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp"
 
-#include <aidge/operator/Atan.hpp>
-#include <aidge/operator/Memorize.hpp>
-#include <aidge/operator/Mul.hpp>
-#include <aidge/operator/Producer.hpp>
-#include <aidge/utils/Types.h>
 #include <memory>
+#include <cmath>
 #include <cstdlib>
 #include <random>
 
 #include <catch2/catch_test_macros.hpp>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/backend/cpu/operator/HeavisideImpl.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/Atan.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/Producer.hpp"
 #include "aidge/utils/TensorUtils.hpp"
+#include "aidge/utils/Types.h"
 
-#include "aidge/operator/Add.hpp"
 
 namespace Aidge
 {
-- 
GitLab


From 3be714857faff91636fc0736b11654ae404c4ee8 Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Mon, 17 Mar 2025 16:06:57 +0100
Subject: [PATCH 08/13] Include <cmath> in HeavisideImpl kernels

---
 include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
index 4e2a7db2..92f12fbe 100644
--- a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
@@ -15,6 +15,7 @@
 #include "aidge/utils/Registrar.hpp"
 
 #include <cstddef> // std::size_t
+#include <cmath>
 
 #include "aidge/backend/cpu/operator/HeavisideImpl.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
@@ -37,7 +38,7 @@ void HeavisideImplCpuForwardKernel(std::size_t inputLength,
 
 // Surrogate Gradient
 template <class O, class GO, class GI>
-void HeavisideImplCpuBackwardKernel(std::size_t inputLength, 
+void HeavisideImplCpuBackwardKernel(std::size_t inputLength,
                                     const void* output_,
                                     const void* grad_output_,
                                     void* grad_input_) {
-- 
GitLab


From e02f7007fe94c5865b38957b1054867df144704b Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Mon, 17 Mar 2025 16:16:04 +0100
Subject: [PATCH 09/13] Define _USE_MATH_DEFINES in kernel header

---
 include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
index 92f12fbe..03815dc2 100644
--- a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
@@ -15,6 +15,7 @@
 #include "aidge/utils/Registrar.hpp"
 
 #include <cstddef> // std::size_t
+#define _USE_MATH_DEFINES
 #include <cmath>
 
 #include "aidge/backend/cpu/operator/HeavisideImpl.hpp"
-- 
GitLab


From 3a44ecdcbb7c368d734b662b9e9a9fcfedd18c97 Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Mon, 17 Mar 2025 16:23:43 +0100
Subject: [PATCH 10/13] Replace cmath with math.h

---
 include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
index 03815dc2..f397927a 100644
--- a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
@@ -15,8 +15,7 @@
 #include "aidge/utils/Registrar.hpp"
 
 #include <cstddef> // std::size_t
-#define _USE_MATH_DEFINES
-#include <cmath>
+#include <math.h>
 
 #include "aidge/backend/cpu/operator/HeavisideImpl.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
-- 
GitLab


From 6db22c4a9e06eb28553ba9038956efe5a8198e1b Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Mon, 17 Mar 2025 16:30:08 +0100
Subject: [PATCH 11/13] Enable Math Constants and Use cmath Header

---
 CMakeLists.txt                                               | 3 +++
 include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 729853ee..21c5c6b9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -86,6 +86,9 @@ target_link_libraries(${module_name}
         _aidge_core # _ is added because we link the exported target and not the project
 )
 
+# Add definition _USE_MATH_DEFINES to enable math constant definitions from math.h/cmath.
+target_compile_definitions(${module_name} PRIVATE _USE_MATH_DEFINES)
+
 #Set target properties
 set_property(TARGET ${module_name} PROPERTY POSITION_INDEPENDENT_CODE ON)
 
diff --git a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
index f397927a..92f12fbe 100644
--- a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
@@ -15,7 +15,7 @@
 #include "aidge/utils/Registrar.hpp"
 
 #include <cstddef> // std::size_t
-#include <math.h>
+#include <cmath>
 
 #include "aidge/backend/cpu/operator/HeavisideImpl.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
-- 
GitLab


From eeded572c5a5dfd97417b78e7743146706fe7e95 Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Mon, 17 Mar 2025 16:38:14 +0100
Subject: [PATCH 12/13] Add _USE_MATH_DEFINES to unit tests

---
 unit_tests/CMakeLists.txt | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt
index e1f261d0..571c96b9 100644
--- a/unit_tests/CMakeLists.txt
+++ b/unit_tests/CMakeLists.txt
@@ -21,6 +21,8 @@ file(GLOB_RECURSE src_files "*.cpp")
 
 add_executable(tests${module_name} ${src_files})
 
+target_compile_definitions(tests${module_name} PRIVATE _USE_MATH_DEFINES)
+
 target_link_libraries(tests${module_name} PRIVATE ${module_name})
 
 target_link_libraries(tests${module_name} PRIVATE Catch2::Catch2WithMain)
-- 
GitLab


From 9765d6e866ca8721eb389b3719c878db662be1f2 Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Fri, 21 Mar 2025 16:02:51 +0100
Subject: [PATCH 13/13] Only define _USE_MATH_DEFINES on Windows platforms

The _USE_MATH_DEFINES macro is only needed on Windows to expose math
constants like M_PI in math.h/cmath.
---
 CMakeLists.txt            | 4 +++-
 unit_tests/CMakeLists.txt | 4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 21c5c6b9..6c87a89b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -87,7 +87,9 @@ target_link_libraries(${module_name}
 )
 
 # Add definition _USE_MATH_DEFINES to enable math constant definitions from math.h/cmath.
-target_compile_definitions(${module_name} PRIVATE _USE_MATH_DEFINES)
+if (WIN32)
+    target_compile_definitions(${module_name} PRIVATE _USE_MATH_DEFINES)
+endif()
 
 #Set target properties
 set_property(TARGET ${module_name} PROPERTY POSITION_INDEPENDENT_CODE ON)
diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt
index 571c96b9..217cf8fb 100644
--- a/unit_tests/CMakeLists.txt
+++ b/unit_tests/CMakeLists.txt
@@ -21,7 +21,9 @@ file(GLOB_RECURSE src_files "*.cpp")
 
 add_executable(tests${module_name} ${src_files})
 
-target_compile_definitions(tests${module_name} PRIVATE _USE_MATH_DEFINES)
+if (WIN32)
+    target_compile_definitions(tests${module_name} PRIVATE _USE_MATH_DEFINES)
+endif()
 
 target_link_libraries(tests${module_name} PRIVATE ${module_name})
 
-- 
GitLab