diff --git a/include/aidge/backend/cpu/operator/HeavisideImpl.hpp b/include/aidge/backend/cpu/operator/HeavisideImpl.hpp index 7a3ba9add1e98580c51a8416adc0d1feb5e1317a..877fa2a9c1fbf126fee5d1f3ce4db2db808cbc92 100644 --- a/include/aidge/backend/cpu/operator/HeavisideImpl.hpp +++ b/include/aidge/backend/cpu/operator/HeavisideImpl.hpp @@ -23,7 +23,7 @@ namespace Aidge { using HeavisideImplCpu = OperatorImpl_cpu<Heaviside_Op, void(std::size_t, const void *, void *, const float), - void(const float, std::size_t, const void *, void *)>; + void(std::size_t, const void *, const void *, void *)>; // Implementation entry point registration for operator Heaviside REGISTRAR(Heaviside_Op, "cpu", HeavisideImplCpu::create); diff --git a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp index 0bbbddee1040224fd9d0c2658f97c57e3956ca48..7fc0eb0a86210d896ad0ccec9b88150670f4e328 100644 --- a/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp +++ b/include/aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp @@ -48,7 +48,8 @@ void HeavisideImplCpuBackwardKernel(std::size_t inputLength, for (size_t i = 0; i < inputLength; ++i) { // dx = dy * (1/PI) * (1 / (1 + (PI * x)^2)) - grad_input[i] = (1 / M_PI) * grad_output[i] * static_cast<O>(1.0 / (1.0 + output[i] * output[i])); + // NOTE: no 1/PI prefactor: for the ATan surrogate S(x) = (1/PI)*atan(PI*x) + 1/2, the chain rule gives S'(x) = 1/(1 + (PI*x)^2) -- the PI cancels the 1/PI. + grad_input[i] = grad_output[i] * static_cast<O>(1.0 / (1.0 + (output[i] * output[i]) * (M_PI * M_PI))); } } @@ -57,7 +58,7 @@ REGISTRAR(HeavisideImplCpu, {DataType::Float32}, {ProdConso::inPlaceModel, Aidge::HeavisideImplCpuForwardKernel<float, float>, - nullptr}); + Aidge::HeavisideImplCpuBackwardKernel<float,float,float>}); } // namespace Aidge #endif // AIDGE_CPU_OPERATOR_HEAVISIDEIMPL_KERNELS_H__H_ diff --git a/src/operator/HeavisideImpl.cpp b/src/operator/HeavisideImpl.cpp index 
2ead2978ef1c17ec38e2d12df6b1c78f6c894964..5bf77f87072005896b87f009ebe78711606f78ff 100644 --- a/src/operator/HeavisideImpl.cpp +++ b/src/operator/HeavisideImpl.cpp @@ -34,21 +34,22 @@ template <> void Aidge::HeavisideImplCpu::forward() { template <> void Aidge::HeavisideImplCpu::backward() { - AIDGE_THROW_OR_ABORT(std::runtime_error, "Heaviside backward not implemented yet"); // TODO: The following lines are assuming that the surrogate gradient is Atan // remove that assumption by providing an attribute to Heaviside, // allowing to choose between different surrogate gradients. - // const Heavisde_Op& op_ = dynamic_cast<const Heavisie_Op &>(mOp); + const Heaviside_Op& op_ = dynamic_cast<const Heaviside_Op &>(mOp); + const auto impl = Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec())); + auto gra_int0 = op_.getInput(0)->grad(); + auto gra_out0 = op_.getOutput(0)->grad(); - // ! backward of hs = forward of atan - //const auto impl = Registrar<HeavisideImplCpu>::create(getBestMatch(getRequiredSpec())); - // std::shared_ptr<Tensor> in0 = op_.getInput(0); - // std::shared_ptr<Tensor> out0 = op_.getOutput(0); - - //impl.forward() + std::shared_ptr<Tensor> in0 = op_.getInput(0); + std::shared_ptr<Tensor> out0 = op_.getOutput(0); + AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type()); + + impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0)); } diff --git a/unit_tests/operator/Test_HeavisideImpl.cpp b/unit_tests/operator/Test_HeavisideImpl.cpp index a0142513d20d4df474d69582e89c8648a06fa340..515d6802d56f2c4a90ecf0bc2d86f2e9a727aeed 100644 --- a/unit_tests/operator/Test_HeavisideImpl.cpp +++ b/unit_tests/operator/Test_HeavisideImpl.cpp @@ -12,6 +12,7 @@ #include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp" #include <aidge/operator/Memorize.hpp> +#include <aidge/utils/Types.h> #include <memory> #include <cstdlib> #include <random> @@ -100,10 +101,29 @@ TEST_CASE("[cpu/operator] 
Heaviside(forward)", "[Heaviside][CPU]") { } TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") { + auto hs = Heaviside(1.0f); + auto op = std::static_pointer_cast<OperatorTensor>(hs->getOperator()); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); - auto add = Add(); - auto mem = Memorize(2); - auto hs = Heaviside(1); -} + auto input = Tensor(Array1D<float, 3>({1.0, -1.0, 1.0})); + input.setDataType(DataType::Float32); + input.setBackend("cpu"); + + auto grad = Tensor(Array1D<float, 3>({1.0, 1.0, 1.0})); + grad.setDataType(DataType::Float32); + grad.setBackend("cpu"); + + op->setInput(IOIndex_t(0), std::make_shared<Tensor>(input)); + op->forward(); + Log::info("Output : "); + op->getOutput(0)->print(); + + op->getOutput(0)->setGrad(std::make_shared<Tensor>(grad)); + op->backward(); + + Log::info("Gradient : "); + op->getInput(0)->grad()->print(); +} }