Skip to content
Snippets Groups Projects
Commit eff82020 authored by Jerome Hue's avatar Jerome Hue Committed by Olivier BICHLER
Browse files

Add randomness in heaviside backward test

parent 09bec268
No related branches found
No related tags found
1 merge request: !146 — Implement a backward for Heaviside
...@@ -48,7 +48,6 @@ void Aidge::HeavisideImplCpu::backward() { ...@@ -48,7 +48,6 @@ void Aidge::HeavisideImplCpu::backward() {
std::shared_ptr<Tensor> in0 = op_.getInput(0); std::shared_ptr<Tensor> in0 = op_.getInput(0);
std::shared_ptr<Tensor> out0 = op_.getOutput(0); std::shared_ptr<Tensor> out0 = op_.getOutput(0);
AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0)); impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
} }
......
...@@ -100,27 +100,60 @@ TEST_CASE("[cpu/operator] Heaviside(forward)", "[Heaviside][CPU]") { ...@@ -100,27 +100,60 @@ TEST_CASE("[cpu/operator] Heaviside(forward)", "[Heaviside][CPU]") {
} }
} }
// Backward test for the Heaviside operator on the CPU backend.
// Uses a randomly sized tensor with random input values and a random
// upstream gradient, then checks that the gradient produced by
// backward() matches the surrogate gradient computed by hand:
//     dL/dx = dL/dy * 1 / (1 + (pi * x)^2)
// (presumably the derivative of an atan-based smooth approximation of
// the step function — matches the formula the backward kernel applies).
TEST_CASE("[cpu/operator] Heaviside(backward)", "[Heaviside][CPU]") {
    // Fresh randomness each run: size in [5, 100], values in [-2, 2].
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<float> valueDist(-2.0f, 2.0f);
    std::uniform_int_distribution<std::size_t> sizeDist(5, 100);
    const std::size_t tensorSize = sizeDist(gen);

    auto hs = Heaviside(1.0f);
    auto op = std::static_pointer_cast<OperatorTensor>(hs->getOperator());
    op->setDataType(DataType::Float32);
    op->setBackend("cpu");

    // Random input tensor.
    auto inputTensor = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
    inputTensor->setDataType(DataType::Float32);
    inputTensor->setBackend("cpu");
    auto* inputData = static_cast<float*>(inputTensor->getImpl()->rawPtr());
    for (std::size_t i = 0; i < tensorSize; ++i) {
        inputData[i] = valueDist(gen);
    }

    // Random upstream gradient fed into the backward pass.
    auto gradTensor = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
    gradTensor->setDataType(DataType::Float32);
    gradTensor->setBackend("cpu");
    auto* gradData = static_cast<float*>(gradTensor->getImpl()->rawPtr());
    for (std::size_t i = 0; i < tensorSize; ++i) {
        gradData[i] = valueDist(gen);
    }

    op->setInput(IOIndex_t(0), inputTensor);
    op->forward();

    auto output = op->getOutput(0);
    output->setGrad(gradTensor);

    // Backward pass
    op->backward();

    // Compute expected gradient manually.
    // NOTE: a local constant is used instead of M_PI, which is not part
    // of standard C++ (POSIX extension; needs _USE_MATH_DEFINES on MSVC).
    constexpr float kPi = 3.14159265358979323846f;
    auto expectedGrad = std::make_shared<Tensor>(std::vector<std::size_t>{tensorSize});
    expectedGrad->setDataType(DataType::Float32);
    expectedGrad->setBackend("cpu");
    auto* expectedGradData = static_cast<float*>(expectedGrad->getImpl()->rawPtr());
    for (std::size_t i = 0; i < tensorSize; ++i) {
        const float scaled = inputData[i] * kPi; // hoist the repeated product
        expectedGradData[i] = gradData[i] * (1.0f / (1.0f + scaled * scaled));
    }

    // Compare actual gradient with expected gradient
    REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad));
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment