Skip to content
Snippets Groups Projects
Commit 9c83962a authored by Jerome Hue's avatar Jerome Hue Committed by Olivier BICHLER
Browse files

Improve Heaviside Backward Test

Compare the result of surrogate gradient to the real Atan operator.
parent c92b78e4
No related branches found
No related tags found
1 merge request!146Implement a backward for Heaviside
......@@ -11,7 +11,10 @@
#include "aidge/backend/cpu/operator/HeavisideImpl_kernels.hpp"
#include <aidge/operator/Atan.hpp>
#include <aidge/operator/Memorize.hpp>
#include <aidge/operator/Mul.hpp>
#include <aidge/operator/Producer.hpp>
#include <aidge/utils/Types.h>
#include <memory>
#include <cstdlib>
......@@ -100,7 +103,6 @@ TEST_CASE("[cpu/operator] Heaviside(forward)", "[Heaviside][CPU]") {
}
}
// TODO: Make this work for random input.
TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
std::random_device rd;
......@@ -115,6 +117,7 @@ TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
op->setDataType(DataType::Float32);
op->setBackend("cpu");
auto inputTensor = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
inputTensor->setDataType(DataType::Float32);
......@@ -125,6 +128,29 @@ TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
inputData[i] = valueDist(gen);
}
// Compare it to the real Atan implementation
auto mul = Mul();
auto pi = std::make_shared<Tensor>(Array1D<float,1>{M_PI});
auto producer = Producer(pi);
auto atan = Atan();
auto mulOp = std::static_pointer_cast<OperatorTensor>(mul->getOperator());
auto piOp = std::static_pointer_cast<OperatorTensor>(producer->getOperator());
auto atanOp = std::static_pointer_cast<OperatorTensor>(atan->getOperator());
mulOp->setBackend("cpu");
piOp->setBackend("cpu");
atanOp->setBackend("cpu");
mulOp->setDataType(DataType::Float32);
piOp->setDataType(DataType::Float32);
atanOp->setDataType(DataType::Float32);
producer->addChild(mul,0,0);
mulOp->setInput(IOIndex_t(1), inputTensor);
mulOp->forward();
auto outmul = mulOp->getOutput(0);
atanOp->setInput(0, inputTensor);
atanOp->forward();
auto gradTensor = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
gradTensor->setDataType(DataType::Float32);
gradTensor->setBackend("cpu");
......@@ -142,6 +168,10 @@ TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
// Backward pass
op->backward();
atanOp->setOutput(0, outmul);
atanOp->getOutput(0)->setGrad(gradTensor);
atanOp->backward();
// Compute expected gradient manually
auto expectedGrad = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
......@@ -155,5 +185,9 @@ TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
// Compare actual gradient with expected gradient
REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad));
// Compare Atan(pi*input) to expected Gradient
REQUIRE(approxEq<float>(*(atanOp->getInput(0)->grad()), *expectedGrad));
}
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment