diff --git a/src/operator/HeavisideImpl.cpp b/src/operator/HeavisideImpl.cpp
index 8349a0ad32075fc1c72a83b412bea5402ced960a..3932eb3341b5515c3a590d72aa538a5aeda6f423 100644
--- a/src/operator/HeavisideImpl.cpp
+++ b/src/operator/HeavisideImpl.cpp
@@ -13,42 +13,37 @@
 
 #include <stdexcept>
 
-#include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
 template <> void Aidge::HeavisideImplCpu::forward() {
-    const Heaviside_Op &op_ = dynamic_cast<const Heaviside_Op &>(mOp);
-    std::shared_ptr<Tensor> input0 = op_.getInput(0);
-    std::shared_ptr<Tensor> output0 = op_.getOutput(0);
-    AIDGE_ASSERT(input0, "missing input #0");
-
-    const auto impl =
-        Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
-
-    impl.forward(input0->size(),
-                 getCPUPtr(mOp.getRawInput(0)),
-                 getCPUPtr(mOp.getRawOutput(0)),
-                 op_.value());
-}
+  const Heaviside_Op &op_ = dynamic_cast<const Heaviside_Op &>(mOp);
+  std::shared_ptr<Tensor> input0 = op_.getInput(0);
+  std::shared_ptr<Tensor> output0 = op_.getOutput(0);
+  AIDGE_ASSERT(input0, "missing input #0");
 
-template <> 
-void Aidge::HeavisideImplCpu::backward() {
+  const auto impl =
+      Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
 
-    // TODO: The following lines are assuming that the surrogate gradient is Atan
-    // remove that assumption by providing an attribute to Heaviside, 
-    // allowing to choose between different surrogate gradients.
-    
-    const Heaviside_Op& op_ = dynamic_cast<const Heaviside_Op &>(mOp);
+  impl.forward(input0->size(), getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawOutput(0)), op_.value());
+}
 
-    const auto impl = Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
+template <> void Aidge::HeavisideImplCpu::backward() {
 
-    auto gra_int0 = op_.getInput(0)->grad();
-    auto gra_out0 = op_.getOutput(0)->grad();
+  // TODO: The following lines assume that the surrogate gradient is Atan;
+  // remove that assumption by providing an attribute to Heaviside,
+  // allowing a choice between different surrogate gradients.
 
-    std::shared_ptr<Tensor> in0 = op_.getInput(0);
-    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
+  const Heaviside_Op &op_ = dynamic_cast<const Heaviside_Op &>(mOp);
+  const auto impl =
+      Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec()));
 
-    impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
-}
+  auto in0 = op_.getInput(0);
+  auto gra_int0 = op_.getInput(0)->grad();
+  auto gra_out0 = op_.getOutput(0)->grad();
 
+  impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0),
+                getCPUPtr(gra_int0));
+}