From 9c83962af9e7bd99b64a5d48dc8b518515e72333 Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Tue, 4 Mar 2025 10:59:06 +0100
Subject: [PATCH] Improve Heaviside Backward Test

Compare the gradient produced by the Heaviside surrogate against the
gradient of an equivalent graph built from the real Atan operator.
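
A note on the quantity being tested (assuming the atan surrogate
commonly used for spiking networks): the backward pass of Heaviside
approximates H(x) by S(x) = (1/pi) * atan(pi * x) + 1/2, so the
expected gradient is S'(x) = 1 / (1 + (pi * x)^2). The test rebuilds
this quantity from real operators as a Producer(pi) -> Mul -> Atan
chain and compares both input gradients.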
---
 unit_tests/operator/Test_HeavisideImpl.cpp | 40 +++++++++++++++++++++++++++++++++++++++-
 1 file changed, 39 insertions(+), 1 deletion(-)

diff --git a/unit_tests/operator/Test_HeavisideImpl.cpp b/unit_tests/operator/Test_HeavisideImpl.cpp
index 2743027c..9241f0d3 100644
--- a/unit_tests/operator/Test_HeavisideImpl.cpp
+++ b/unit_tests/operator/Test_HeavisideImpl.cpp
@@ -11,7 +11,11 @@
 
 #include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp"
 
+#include <aidge/operator/Atan.hpp>
 #include <aidge/operator/Memorize.hpp>
+#include <aidge/operator/Mul.hpp>
+#include <aidge/operator/Producer.hpp>
 #include <aidge/utils/Types.h>
 #include <memory>
 #include <cstdlib>
+#include <cmath> // for M_PI (not guaranteed by the C++ standard; may need _USE_MATH_DEFINES on MSVC)
@@ -100,7 +104,6 @@ TEST_CASE("[cpu/operator] Heaviside(forward)", "[Heaviside][CPU]") {
     }
 }
 
-// TODO: Make this work for random intput.
 TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
 
     std::random_device rd;
@@ -125,6 +128,31 @@ TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
         inputData[i] = valueDist(gen);
     }
 
+    // Build a reference for the surrogate gradient out of the real
+    // Atan operator and a Mul node computing pi * input.
+    auto mul = Mul();
+    auto pi = std::make_shared<Tensor>(Array1D<float, 1>{static_cast<float>(M_PI)});
+    auto producer = Producer(pi);
+    auto atan = Atan();
+    auto mulOp = std::static_pointer_cast<OperatorTensor>(mul->getOperator());
+    auto piOp = std::static_pointer_cast<OperatorTensor>(producer->getOperator());
+    auto atanOp = std::static_pointer_cast<OperatorTensor>(atan->getOperator());
+    mulOp->setBackend("cpu");
+    piOp->setBackend("cpu");
+    atanOp->setBackend("cpu");
+    mulOp->setDataType(DataType::Float32);
+    piOp->setDataType(DataType::Float32);
+    atanOp->setDataType(DataType::Float32);
+
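+    // Wire pi into the first input of Mul and the test input into the
+    // second, so that outmul holds pi * input.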
+    producer->addChild(mul, 0, 0);
+    mulOp->setInput(IOIndex_t(1), inputTensor);
+    mulOp->forward();
+    auto outmul = mulOp->getOutput(0);
+    atanOp->setInput(0, inputTensor);
+    atanOp->forward();
+
     auto gradTensor = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
     gradTensor->setDataType(DataType::Float32);
     gradTensor->setBackend("cpu");
@@ -142,6 +170,13 @@ TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
     
     // Backward pass
     op->backward();
+
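+    // Reference backward: substitute pi * x for Atan's stored output,
+    // attach the same incoming gradient, and backpropagate.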
+    atanOp->setOutput(0, outmul);
+    atanOp->getOutput(0)->setGrad(gradTensor);
+    atanOp->backward();
     
     // Compute expected gradient manually
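+    // With the atan surrogate, grad_in[i] should equal grad_out[i] / (1 + (pi * x[i])^2).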
     auto expectedGrad = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
@@ -155,5 +190,9 @@ TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
     
     // Compare actual gradient with expected gradient
     REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad));
+
+    // The reference Atan graph must yield the same expected gradient.
+    REQUIRE(approxEq<float>(*(atanOp->getInput(0)->grad()), *expectedGrad));
 }
+
 }
-- 
GitLab