Commit 7d8a52b4 authored by Olivier Antoni

Fix update of gradient tensor dimensions

parent 914cdda1
2 merge requests: !166 Update 0.5.0 -> 0.6.0, !162 Update the associated grad tensor when tensor is resized
Pipeline #70668 passed
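
Across the four test files below, the change is the same: the output gradient is now attached with setGrad() before forwardDims() is called, so the resize performed by forwardDims() has to carry the associated grad tensor along with it, which is the behavior introduced by MR !162. A minimal sketch of the updated pattern, assuming the Aidge OperatorTensor API used in the diffs (the node construction line is illustrative and not part of this diff; T0 and T1 are input tensors prepared by each test):

    auto addNode = Add();  // illustrative node creation, not from this diff
    auto op = std::static_pointer_cast<OperatorTensor>(addNode->getOperator());
    op->associateInput(0, T0);
    op->associateInput(1, T1);
    // Attach the gradient BEFORE forwardDims(): when forwardDims() resizes
    // the output tensor, the associated grad tensor must be resized too.
    op->getOutput(0)->setGrad(std::make_shared<Tensor>(
        Array2D<float, 2, 3>({{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}})));
    op->forwardDims();
    op->backward();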
@@ -159,10 +159,10 @@ TEST_CASE("[cpu/operator] Add(backward)", "[Add][CPU]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(std::make_shared<Tensor>(
+        Array2D<float, 2, 3>({{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}})));
     op->forwardDims();
-    op->getOutput(0)->setGrad(std::make_shared<Tensor>(
-        Array2D<float, 2, 3>({{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}})));
     op->backward();
     const Tensor expectedGrad0 =
@@ -194,8 +194,9 @@ TEST_CASE("[cpu/operator] Add(backward)", "[Add][CPU]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(newGrad);
     op->forwardDims();
-    op->getOutput(0)->setGrad(newGrad);
     op->backward();
     REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
@@ -236,9 +237,9 @@ TEST_CASE("[cpu/operator] Add(backward)", "[Add][CPU]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(newGrad);
     op->forwardDims();
-    op->getOutput(0)->setGrad(newGrad);
     op->backward();
     REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
@@ -290,8 +291,8 @@ TEST_CASE("[cpu/operator] Add(backward)", "[Add][CPU]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(newGrad);
     op->forwardDims();
-    op->getOutput(0)->setGrad(newGrad);
     op->backward();
@@ -364,8 +365,7 @@ TEST_CASE("[cpu/operator] Add(backward)", "[Add][CPU]") {
         val = dist(gen);
     }
-    op->getOutput(0)->setGrad(std::make_shared<Tensor>());
-    op->getOutput(0)->grad()->resize(outputDims);
+    op->getOutput(0)->setGrad(std::make_shared<Tensor>(outputDims));
     op->getOutput(0)->grad()->getImpl()->setRawPtr(gradOutputData.data(),
                                                    expectedOutput.size());
...
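
The randomized-shape section above also switches how the grad tensor is created: instead of default-constructing a Tensor and resizing it, the new code passes the dimensions straight to the constructor. Side by side (identifiers taken from the diff; the dims-taking Tensor constructor is what the updated tests rely on):

    // Before: default-construct, then resize the grad tensor.
    op->getOutput(0)->setGrad(std::make_shared<Tensor>());
    op->getOutput(0)->grad()->resize(outputDims);

    // After: construct the grad tensor with its dimensions directly.
    op->getOutput(0)->setGrad(std::make_shared<Tensor>(outputDims));

The same two-lines-to-one replacement recurs in the Div, Mul, and Sub diffs below.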
@@ -339,10 +339,10 @@ TEST_CASE("[CPU/Operator] Div(Backward)", "[Div][CPU][Backward]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(std::make_shared<Tensor>(
+        Array2D<float, 2, 3>({{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}})));
     op->forwardDims();
-    op->getOutput(0)->setGrad(std::make_shared<Tensor>(
-        Array2D<float, 2, 3>({{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}})));
     op->backward();
     const Tensor expectedGrad0 =
@@ -373,9 +373,9 @@ TEST_CASE("[CPU/Operator] Div(Backward)", "[Div][CPU][Backward]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(newGrad);
     op->forwardDims();
-    op->getOutput(0)->setGrad(newGrad);
     op->backward();
     REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
@@ -415,9 +415,9 @@ TEST_CASE("[CPU/Operator] Div(Backward)", "[Div][CPU][Backward]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(newGrad);
     op->forwardDims();
-    op->getOutput(0)->setGrad(newGrad);
     op->backward();
     REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
@@ -471,9 +471,9 @@ TEST_CASE("[CPU/Operator] Div(Backward)", "[Div][CPU][Backward]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(newGrad);
     op->forwardDims();
-    op->getOutput(0)->setGrad(newGrad);
     op->backward();
     REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
@@ -545,8 +545,7 @@ TEST_CASE("[CPU/Operator] Div(Backward)", "[Div][CPU][Backward]") {
         val = dist(gen);
     }
-    op->getOutput(0)->setGrad(std::make_shared<Tensor>());
-    op->getOutput(0)->grad()->resize(outputDims);
+    op->getOutput(0)->setGrad(std::make_shared<Tensor>(outputDims));
     op->getOutput(0)->grad()->getImpl()->setRawPtr(gradOutputData.data(),
                                                    expectedOutput.size());
...
@@ -46,10 +46,10 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(std::make_shared<Tensor>(
+        Array2D<float, 2, 3>({{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}})));
     op->forwardDims();
-    op->getOutput(0)->setGrad(std::make_shared<Tensor>(
-        Array2D<float, 2, 3>({{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}})));
     op->backward();
     const Tensor expectedGrad0 =
@@ -80,9 +80,9 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(newGrad);
     op->forwardDims();
-    op->getOutput(0)->setGrad(newGrad);
     op->backward();
     REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
@@ -122,9 +122,9 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(newGrad);
     op->forwardDims();
-    op->getOutput(0)->setGrad(newGrad);
     op->backward();
     REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
@@ -176,9 +176,9 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(newGrad);
     op->forwardDims();
-    op->getOutput(0)->setGrad(newGrad);
     op->backward();
     REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
@@ -250,8 +250,7 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
         val = dist(gen);
     }
-    op->getOutput(0)->setGrad(std::make_shared<Tensor>());
-    op->getOutput(0)->grad()->resize(outputDims);
+    op->getOutput(0)->setGrad(std::make_shared<Tensor>(outputDims));
    op->getOutput(0)->grad()->getImpl()->setRawPtr(gradOutputData.data(),
                                                    expectedOutput.size());
...
This diff is collapsed.
@@ -344,10 +344,10 @@ TEST_CASE("[CPU/Operator] Sub(Backward)", "[Sub][CPU][Backward]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(std::make_shared<Tensor>(
+        Array2D<float, 2, 3>({{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}})));
     op->forwardDims();
-    op->getOutput(0)->setGrad(std::make_shared<Tensor>(
-        Array2D<float, 2, 3>({{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}})));
     mySub->backward();
     // For subtraction: grad_input0 = grad_output
@@ -387,9 +387,9 @@ TEST_CASE("[CPU/Operator] Sub(Backward)", "[Sub][CPU][Backward]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
+    op->getOutput(0)->setGrad(newGrad);
     op->forwardDims();
-    op->getOutput(0)->setGrad(newGrad);
     mySub->backward();
     REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad0));
@@ -434,14 +434,12 @@ TEST_CASE("[CPU/Operator] Sub(Backward)", "[Sub][CPU][Backward]") {
     op->associateInput(0, T0);
     op->associateInput(1, T1);
-    op->forwardDims();
     // Set gradient of output
-    op->getOutput(0)->setGrad(std::make_shared<Tensor>());
-    op->getOutput(0)->grad()->resize(outputDims);
+    op->getOutput(0)->setGrad(std::make_shared<Tensor>(outputDims));
     op->getOutput(0)->grad()->getImpl()->setRawPtr(gradOutputData.data(), outputSize);
+    op->forwardDims();
     // Compute reference gradients
     std::vector<float> expectedGrad0(input0Size, 0.0f);
     std::vector<float> expectedGrad1(input1Size, 0.0f);
...
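
The last Sub hunk is the most complete illustration of the new contract: the grad tensor is created with explicit dimensions, its raw buffer is filled, and only then does forwardDims() run. Condensed (identifiers from the diff; outputSize is the output element count defined earlier in that test):

    op->getOutput(0)->setGrad(std::make_shared<Tensor>(outputDims));
    op->getOutput(0)->grad()->getImpl()->setRawPtr(gradOutputData.data(), outputSize);
    // forwardDims() now runs last and must leave the already-populated
    // grad tensor consistent with the computed output dimensions.
    op->forwardDims();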