diff --git a/CMakeLists.txt b/CMakeLists.txt
index 729853eec605b9ad7baee163557699368f1c9103..6c87a89b8ac1254f8bfb8fb990f8c03f7e593d61 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -86,6 +86,11 @@ target_link_libraries(${module_name}
         _aidge_core # _ is added because we link the exported target and not the project
 )
 
+# Add definition _USE_MATH_DEFINES to enable math constant definitions from math.h/cmath.
+if (WIN32)
+    target_compile_definitions(${module_name} PRIVATE _USE_MATH_DEFINES)
+endif()
+
 #Set target properties
 set_property(TARGET ${module_name} PROPERTY POSITION_INDEPENDENT_CODE ON)
 
diff --git a/include/aidge/backend/cpu/operator/HeavisideImpl.hpp b/include/aidge/backend/cpu/operator/HeavisideImpl.hpp
index 7a3ba9add1e98580c51a8416adc0d1feb5e1317a..877fa2a9c1fbf126fee5d1f3ce4db2db808cbc92 100644
--- a/include/aidge/backend/cpu/operator/HeavisideImpl.hpp
+++ b/include/aidge/backend/cpu/operator/HeavisideImpl.hpp
@@ -23,7 +23,7 @@ namespace Aidge {
 using HeavisideImplCpu =
     OperatorImpl_cpu<Heaviside_Op,
                      void(std::size_t, const void *, void *, const float),
-                     void(const float, std::size_t, const void *, void *)>;
+                     void(std::size_t, const void *, const void *, void *)>;
 
 // Implementation entry point registration for operator Heaviside
 REGISTRAR(Heaviside_Op, "cpu", HeavisideImplCpu::create);
diff --git a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
index 06d7fff8776d342da0467ad7f9d6759f45202151..92f12fbed5c6530a6c5b57e74d2b46446533ca02 100644
--- a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp
@@ -15,11 +15,11 @@
 #include "aidge/utils/Registrar.hpp"
 
 #include <cstddef> // std::size_t
+#include <cmath> // M_PI
 
 #include "aidge/backend/cpu/operator/HeavisideImpl.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
-
 namespace Aidge {
 
 template <class I, class O>
@@ -35,12 +35,35 @@ void HeavisideImplCpuForwardKernel(std::size_t inputLength,
     }
 }
 
+
+// Surrogate gradient (arctan approximation of the Heaviside step)
+template <class I, class GO, class GI>
+void HeavisideImplCpuBackwardKernel(std::size_t inputLength,
+                                    const void* input_,
+                                    const void* grad_output_,
+                                    void* grad_input_) {
+
+    /*
+     * For the backward pass, Heaviside is approximated by an arctan surrogate,
+     * where U denotes the forward input:
+     * S ~= \frac{1}{\pi}\text{arctan}(\frac{\pi U \alpha}{2})
+     * \frac{dS}{dU} = \frac{\alpha}{2} \frac{1}{1+(\frac{\pi U \alpha}{2})^2}
+     */
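+    // Note: the loop below corresponds to alpha = 2, which reduces dS/dU to
+    // 1 / (1 + (pi * U)^2).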
+
+    const I* input = static_cast<const I*>(input_);
+    const GO* grad_output = static_cast<const GO*>(grad_output_);
+    GI* grad_input = static_cast<GI*>(grad_input_);
+
+    for (std::size_t i = 0; i < inputLength; ++i) {
+        grad_input[i] = grad_output[i] * static_cast<GI>(1.0 / (1.0 + (input[i] * M_PI) * (input[i] * M_PI)));
+    }
+}
+
 // Kernels registration to implementation entry point
 REGISTRAR(HeavisideImplCpu,
           {DataType::Float32},
           {ProdConso::inPlaceModel,
            Aidge::HeavisideImplCpuForwardKernel<float, float>,
-           nullptr});
+           Aidge::HeavisideImplCpuBackwardKernel<float, float, float>});
 } // namespace Aidge
 
 #endif // AIDGE_CPU_OPERATOR_HEAVISIDEIMPL_KERNELS_H__H_
diff --git a/src/operator/HeavisideImpl.cpp b/src/operator/HeavisideImpl.cpp
index 56ceb9b0b474d416f25d77b533373d4b193532b8..3932eb3341b5515c3a590d72aa538a5aeda6f423 100644
--- a/src/operator/HeavisideImpl.cpp
+++ b/src/operator/HeavisideImpl.cpp
@@ -13,25 +13,37 @@
 
 #include <stdexcept>
 
-#include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
 template <> void Aidge::HeavisideImplCpu::forward() {
-    const Heaviside_Op &op_ = dynamic_cast<const Heaviside_Op &>(mOp);
-    std::shared_ptr<Tensor> input0 = op_.getInput(0);
-    std::shared_ptr<Tensor> output0 = op_.getOutput(0);
-    AIDGE_ASSERT(input0, "missing input #0");
-
-    const auto impl =
-        Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
-
-    impl.forward(input0->size(),
-                 getCPUPtr(mOp.getRawInput(0)),
-                 getCPUPtr(mOp.getRawOutput(0)),
-                 op_.value());
+  const Heaviside_Op &op_ = dynamic_cast<const Heaviside_Op &>(mOp);
+  std::shared_ptr<Tensor> input0 = op_.getInput(0);
+  std::shared_ptr<Tensor> output0 = op_.getOutput(0);
+  AIDGE_ASSERT(input0, "missing input #0");
+
+  const auto impl =
+      Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
+
+  impl.forward(input0->size(), getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawOutput(0)), op_.value());
 }
 
 template <> void Aidge::HeavisideImplCpu::backward() {
-    AIDGE_THROW_OR_ABORT(std::runtime_error, "Heaviside backward not implemented yet");
+
+  // TODO: The following lines assume that the surrogate gradient is Atan.
+  // Remove that assumption by providing an attribute on Heaviside that
+  // allows choosing between different surrogate gradients.
+
+  const Heaviside_Op &op_ = dynamic_cast<const Heaviside_Op &>(mOp);
+  const auto impl =
+      Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
+
+  auto in0 = op_.getInput(0);
+  auto grad_in0 = in0->grad();
+  auto grad_out0 = op_.getOutput(0)->grad();
+
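+  // The Atan surrogate is evaluated on the forward input (U), so the raw input
+  // pointer is passed to the backward kernel along with the gradients.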
+  impl.backward(grad_in0->size(), getCPUPtr(in0), getCPUPtr(grad_out0),
+                getCPUPtr(grad_in0));
 }
diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt
index e1f261d00894cfd18970526e0fd2cd2225a097e8..217cf8fbcd344968064f3ca3a5ba52c5a4d56ac7 100644
--- a/unit_tests/CMakeLists.txt
+++ b/unit_tests/CMakeLists.txt
@@ -21,6 +21,10 @@ file(GLOB_RECURSE src_files "*.cpp")
 
 add_executable(tests${module_name} ${src_files})
 
+if (WIN32)
+    target_compile_definitions(tests${module_name} PRIVATE _USE_MATH_DEFINES)
+endif()
+
 target_link_libraries(tests${module_name} PRIVATE ${module_name})
 
 target_link_libraries(tests${module_name} PRIVATE Catch2::Catch2WithMain)
diff --git a/unit_tests/operator/Test_HeavisideImpl.cpp b/unit_tests/operator/Test_HeavisideImpl.cpp
index 4cbdf1a0e29f8670e45897236374726dac62bb43..d3ed38263f8fe82e5b5b5fbcc03a8f7dcd761c73 100644
--- a/unit_tests/operator/Test_HeavisideImpl.cpp
+++ b/unit_tests/operator/Test_HeavisideImpl.cpp
@@ -12,15 +12,21 @@
 #include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp"
 
 #include <memory>
+#include <cmath>
 #include <cstdlib>
 #include <random>
 
 #include <catch2/catch_test_macros.hpp>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/backend/cpu/operator/HeavisideImpl.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/Atan.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/Producer.hpp"
 #include "aidge/utils/TensorUtils.hpp"
+#include "aidge/utils/Types.h"
+
 
 namespace Aidge
 {
@@ -95,4 +101,92 @@ TEST_CASE("[cpu/operator] Heaviside(forward)", "[Heaviside][CPU]") {
         REQUIRE(approxEq<float>(*(op->getOutput(0)), *T1));
     }
 }
+
+TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
+
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(-2.0f, 2.0f);
+    std::uniform_int_distribution<std::size_t> sizeDist(5, 100);
+
+    const std::size_t tensorSize = sizeDist(gen);
+    
+    auto hs = Heaviside(1.0f);
+    auto op = std::static_pointer_cast<OperatorTensor>(hs->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    auto inputTensor = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
+    inputTensor->setDataType(DataType::Float32);
+    inputTensor->setBackend("cpu");
+    auto* inputData = static_cast<float*>(inputTensor->getImpl()->rawPtr());
+    
+    for(std::size_t i = 0; i < tensorSize; ++i) {
+        inputData[i] = valueDist(gen);
+    }
+
+    // Compare it to the real Atan implementation
+    auto mul = Mul();
+    auto pi = std::make_shared<Tensor>(Array1D<float,1>{static_cast<float>(M_PI)});
+    auto producer = Producer(pi);
+    auto atan = Atan();
+    auto mulOp = std::static_pointer_cast<OperatorTensor>(mul->getOperator());
+    auto piOp = std::static_pointer_cast<OperatorTensor>(producer->getOperator());
+    auto atanOp = std::static_pointer_cast<OperatorTensor>(atan->getOperator());
+    mulOp->setBackend("cpu");
+    piOp->setBackend("cpu");
+    atanOp->setBackend("cpu");
+    mulOp->setDataType(DataType::Float32);
+    piOp->setDataType(DataType::Float32);
+    atanOp->setDataType(DataType::Float32);
+
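+    // pi (Producer) feeds Mul input #0 and the test tensor feeds input #1,
+    // so outmul below holds pi * input.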
+    producer->addChild(mul, 0, 0);
+    mulOp->setInput(IOIndex_t(1), inputTensor);
+    mulOp->forward();
+    auto outmul = mulOp->getOutput(0);
+    atanOp->setInput(0, inputTensor);
+    atanOp->forward();
+
+    auto gradTensor = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
+    gradTensor->setDataType(DataType::Float32);
+    gradTensor->setBackend("cpu");
+    auto* gradData = static_cast<float*>(gradTensor->getImpl()->rawPtr());
+    
+    for (std::size_t i = 0; i < tensorSize; ++i) {
+        gradData[i] = valueDist(gen);
+    }
+
+    op->setInput(IOIndex_t(0), inputTensor);
+    op->forward();
+    
+    auto output = op->getOutput(0);
+    output->setGrad(gradTensor);
+    
+    // Backward pass
+    op->backward();
+
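+    // The reference Atan backward is run with its output buffer replaced by
+    // pi * input (assumption: the CPU Atan backward evaluates 1 / (1 + x^2) on
+    // its stored output), so its input gradient can be compared to the surrogate.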
+    atanOp->setOutput(0, outmul);
+    atanOp->getOutput(0)->setGrad(gradTensor);
+    atanOp->backward();
+    
+    // Compute expected gradient manually
+    auto expectedGrad = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
+    expectedGrad->setDataType(DataType::Float32);
+    expectedGrad->setBackend("cpu");
+    auto* expectedGradData = static_cast<float*>(expectedGrad->getImpl()->rawPtr());
+    
+    for (std::size_t i = 0; i < tensorSize; ++i) {
+        expectedGradData[i] = gradData[i] * (1.0f / (1.0f + (inputData[i] * M_PI) * (inputData[i] * M_PI)));
+    }
+    
+    // Compare actual gradient with expected gradient
+    REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad));
+
+    // Compare Atan(pi*input) to expected Gradient
+    REQUIRE(approxEq<float>(*(atanOp->getInput(0)->grad()), *expectedGrad));
+}
+
 }