
Fix/PowBackwardKernel

Merged Houssem ROUIS requested to merge hrouis/aidge_backend_cpu:Fix/PowBackwardKernel into dev
1 unresolved thread
1 file changed: +166, -0 lines
@@ -313,5 +313,171 @@ TEST_CASE("[cpu/operator] Pow", "[Pow][CPU]") {
            std::cout << "total time: " << duration.count() << "μs" << std::endl;
        }
    }
    SECTION("PowImpl_cpu::backward()") {
SECTION("3D Tensors") {
const auto input0 = std::make_shared<Tensor>(Array3D<float, 2, 2, 2>(
{
{
{
{2.0, 3.0},
{4.0, 5.0}
},
{
{6.0, 7.0},
{8.0, 9.0}
}
}
}
));
const auto input1 = std::make_shared<Tensor>(Array3D<float, 2, 2, 2>(
{
{
{
{1.0, 2.0},
{3.0, 2.0}
},
{
{2.0, 3.0},
{1.0, 0.5}
}
}
}
));
const auto gradOut = std::make_shared<Tensor>(Array3D<float, 2, 2, 2>(
{
{
{
{0.5, 1.0},
{1.5, 2.0}
},
{
{2.5, 3.0},
{3.5, 4.0}
}
}
}
));
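            // dL/dA = dL/dO * B * A^(B-1); e.g. first element: 0.5 * 1.0 * 2.0^0 = 0.5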
            const auto expectedGrad0 = std::make_shared<Tensor>(Array3D<float, 2, 2, 2>(
                {
                    {
                        {
                            {0.50000000, 6.00000000},
                            {72.00000000, 20.00000000}
                        },
                        {
                            {30.00000000, 441.00000000},
                            {3.50000000, 0.66666669}
                        }
                    }
                }
            ));
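            // dL/dB = dL/dO * A^B * ln(A); e.g. first element: 0.5 * 2.0^1 * ln(2.0) ≈ 0.693147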
            const auto expectedGrad1 = std::make_shared<Tensor>(Array3D<float, 2, 2, 2>(
                {
                    {
                        {
                            {0.693147182, 9.88751030},
                            {1.33084259e+02, 8.04718933e+01}
                        },
                        {
                            {1.61258362e+02, 2.00234143e+03},
                            {5.82243652e+01, 2.63666954e+01}
                        }
                    }
                }
            ));
            for (const auto T : {input0, input1, gradOut, expectedGrad0, expectedGrad1})
            {
                T->setBackend("cpu");
                T->setDataType(DataType::Float32);
            }
            std::shared_ptr<Node> powOp = Pow();
            auto opr = std::static_pointer_cast<OperatorTensor>(powOp->getOperator());
            opr->setDataType(DataType::Float32);
            opr->setBackend("cpu");
            opr->associateInput(0, input0);
            opr->associateInput(1, input1);
            opr->getOutput(0)->setGrad(gradOut);
            opr->forward();
            powOp->backward();
            REQUIRE(approxEq<float>(*(opr->getInput(0)->grad()), *expectedGrad0));
            REQUIRE(approxEq<float>(*(opr->getInput(1)->grad()), *expectedGrad1));
        }
SECTION("Broadcasting") {
const auto input0 = std::make_shared<Tensor>(Array3D<float, 2, 2, 3>(
{
{
{
{1.0, 2.0, 3.0},
{4.0, 5.0, 6.0}
},
{
{1.5, 2.5, 3.5},
{4.5, 5.5, 6.5}
}
}
}
));
const auto input1 = std::make_shared<Tensor>(Array1D<float, 3>(
{
{0.1, 0.2, 0.3}
}
));
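            // input1 (shape {3}) is broadcast along the last axis of input0 (shape {2, 2, 3})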
            const auto gradOut = std::make_shared<Tensor>(Array3D<float, 2, 2, 3>(
                {
                    {
                        {
                            {1.0, 2.0, 3.0},
                            {4.0, 5.0, 6.0}
                        },
                        {
                            {6.0, 5.0, 4.0},
                            {3.0, 2.0, 1.0}
                        }
                    }
                }
            ));
            const auto expectedGrad0 = std::make_shared<Tensor>(Array3D<float, 2, 2, 3>(
                {
                    {
                        {
                            {0.10000000, 0.22973967, 0.41711676},
                            {0.11486985, 0.27594593, 0.51353097}
                        },
                        {
                            {0.41655189, 0.48044977, 0.49926791},
                            {0.07748720, 0.10227509, 0.08092485}
                        }
                    }
                }
            ));
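            // dL/dB is summed over the broadcast axes; e.g. index 0:
            // 1*1^0.1*ln(1) + 4*4^0.1*ln(4) + 6*1.5^0.1*ln(1.5) + 3*4.5^0.1*ln(4.5) ≈ 14.1478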
            const auto expectedGrad1 = std::make_shared<Tensor>(Array1D<float, 3>(
                {
                    {14.14779854, 22.99299049, 33.56402588}
                }
            ));
            for (const auto T : {input0, input1, gradOut, expectedGrad0, expectedGrad1})
            {
                T->setBackend("cpu");
                T->setDataType(DataType::Float32);
            }
            std::shared_ptr<Node> powOp = Pow();
            auto opr = std::static_pointer_cast<OperatorTensor>(powOp->getOperator());
            opr->setDataType(DataType::Float32);
            opr->setBackend("cpu");
            opr->associateInput(0, input0);
            opr->associateInput(1, input1);
            opr->getOutput(0)->setGrad(gradOut);
            powOp->forward();
            powOp->backward();
            REQUIRE(approxEq<float>(*(opr->getInput(0)->grad()), *expectedGrad0));
            REQUIRE(approxEq<float>(*(opr->getInput(1)->grad()), *expectedGrad1));
        }
    }
}
} // namespace Aidge
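
For reference, below is a minimal standalone sketch, not part of this MR and assuming only the standard <cmath>/<cstdio> headers, that recomputes the reduced exponent gradient expected in the "Broadcasting" section. The 2x2x3 input is flattened to 4 rows of 3 values so that each column shares its broadcast exponent.

// Standalone sketch: recompute dL/dB[j] = sum_i gradOut[i][j] * A[i][j]^B[j] * ln(A[i][j])
// for the "Broadcasting" test case above.
#include <cmath>
#include <cstdio>

int main() {
    // input0 (2x2x3) flattened to 4 rows of 3
    const float a[4][3]  = {{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f},
                            {1.5f, 2.5f, 3.5f}, {4.5f, 5.5f, 6.5f}};
    // input1, broadcast along the last axis
    const float b[3]     = {0.1f, 0.2f, 0.3f};
    // gradOut (2x2x3) flattened the same way
    const float go[4][3] = {{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f},
                            {6.0f, 5.0f, 4.0f}, {3.0f, 2.0f, 1.0f}};
    for (int j = 0; j < 3; ++j) {
        float grad = 0.0f;
        // Sum the per-element exponent gradient over the broadcast rows
        for (int i = 0; i < 4; ++i) {
            grad += go[i][j] * std::pow(a[i][j], b[j]) * std::log(a[i][j]);
        }
        std::printf("dL/dB[%d] = %f\n", j, grad);  // ~14.1478, 22.9930, 33.5640
    }
    return 0;
}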