diff --git a/src/operator/HeavisideImpl.cpp b/src/operator/HeavisideImpl.cpp
index 5bf77f87072005896b87f009ebe78711606f78ff..8349a0ad32075fc1c72a83b412bea5402ced960a 100644
--- a/src/operator/HeavisideImpl.cpp
+++ b/src/operator/HeavisideImpl.cpp
@@ -48,7 +48,6 @@ void Aidge::HeavisideImplCpu::backward() {
 
     std::shared_ptr<Tensor> in0 = op_.getInput(0);
     std::shared_ptr<Tensor> out0 = op_.getOutput(0);
-    AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
 
     impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
 }
diff --git a/unit_tests/operator/Test_HeavisideImpl.cpp b/unit_tests/operator/Test_HeavisideImpl.cpp
index e6aa38b88a37191cc7765ad9d3037bc733687481..2743027cb4b6afbe71303a81ae19410839ced3c0 100644
--- a/unit_tests/operator/Test_HeavisideImpl.cpp
+++ b/unit_tests/operator/Test_HeavisideImpl.cpp
@@ -100,27 +100,60 @@ TEST_CASE("[cpu/operator] Heaviside(forward)", "[Heaviside][CPU]") {
     }
 }
 
+// TODO: Make this work for random input.
 TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
+
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(-2.0f, 2.0f);
+    std::uniform_int_distribution<std::size_t> sizeDist(5, 100);
+
+    const std::size_t tensorSize = sizeDist(gen);
+
     auto hs = Heaviside(1.0f);
     auto op = std::static_pointer_cast<OperatorTensor>(hs->getOperator());
     op->setDataType(DataType::Float32);
     op->setBackend("cpu");
 
-    auto input = Tensor(Array1D<float, 3>({1.0, -1.0, 1.0}));
-    input.setDataType(DataType::Float32);
-    input.setBackend("cpu");
-    auto grad = Tensor(Array1D<float, 3>({1.0, 1.0, 1.0}));
-    grad.setDataType(DataType::Float32);
-    grad.setBackend("cpu");
+    auto inputTensor = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
+    inputTensor->setDataType(DataType::Float32);
+    inputTensor->setBackend("cpu");
+    auto* inputData = static_cast<float*>(inputTensor->getImpl()->rawPtr());
+
+    for (std::size_t i = 0; i < tensorSize; ++i) {
+        inputData[i] = valueDist(gen);
+    }
+
+    auto gradTensor = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
+    gradTensor->setDataType(DataType::Float32);
+    gradTensor->setBackend("cpu");
+    auto* gradData = static_cast<float*>(gradTensor->getImpl()->rawPtr());
+
+    for (std::size_t i = 0; i < tensorSize; ++i) {
+        gradData[i] = valueDist(gen);
+    }
 
-    op->setInput(IOIndex_t(0), std::make_shared<Tensor>(input));
+    op->setInput(IOIndex_t(0), inputTensor);
     op->forward();
-
-    op->getOutput(0)->setGrad(std::make_shared<Tensor>(grad));
+
+    auto output = op->getOutput(0);
+    output->setGrad(gradTensor);
+
+    // Backward pass
     op->backward();
-
-    auto expectedResult = Tensor(Array1D<float,3>({0.0920, 0.0920, 0.0920}));
-    REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), expectedResult));
+
+    // Compute expected gradient manually
+    auto expectedGrad = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
+    expectedGrad->setDataType(DataType::Float32);
+    expectedGrad->setBackend("cpu");
+    auto* expectedGradData = static_cast<float*>(expectedGrad->getImpl()->rawPtr());
+
+    for (std::size_t i = 0; i < tensorSize; ++i) {
+        expectedGradData[i] = gradData[i] * (1.0f / (1.0f + (inputData[i] * M_PI) * (inputData[i] * M_PI)));
+    }
+
+    // Compare actual gradient with expected gradient
+    REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad));
 }
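
Note on the expected-gradient formula: the loop in the test appears to assume the CPU kernel
backpropagates through the atan surrogate S(x) = atan(pi*x)/pi + 1/2, whose derivative is
S'(x) = 1 / (1 + (pi*x)^2), applied via the chain rule. A minimal standalone sketch of that
computation is below; the helper name heavisideSurrogateBackward is hypothetical and not part
of the Aidge API, and a local pi constant is used to avoid relying on the non-standard M_PI.

#include <cstddef>
#include <vector>

// Sketch: backward pass for Heaviside under the assumed atan surrogate,
// S'(x) = 1 / (1 + (pi*x)^2). Equivalent to the test's expected-gradient loop.
std::vector<float> heavisideSurrogateBackward(const std::vector<float>& input,
                                              const std::vector<float>& gradOutput) {
    constexpr float kPi = 3.14159265358979323846f;
    std::vector<float> gradInput(input.size());
    for (std::size_t i = 0; i < input.size(); ++i) {
        const float px = kPi * input[i];
        // Chain rule: dL/dx = dL/dy * S'(x); dividing by (1 + px^2) is the
        // same as multiplying by the reciprocal used in the test.
        gradInput[i] = gradOutput[i] / (1.0f + px * px);
    }
    return gradInput;
}

One caveat worth noting: since the test derives its expected values from the same formula the
kernel is assumed to implement, it guards against regressions rather than independently
validating the surrogate-gradient math.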