Commit dcdee743 authored by Olivier BICHLER
Missing backend

parent 8536d095
Pipeline #35597 failed
@@ -113,16 +113,19 @@ TEST_CASE("[cuda/convert] Convert(forward)") {
     std::shared_ptr<Tensor> other1 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv1")->getOperator())->getOutput(0);
     Tensor hostOther1(other1->dataType());
+    hostOther1.setBackend("cpu");
     hostOther1.copyCastFrom(*other1);
     REQUIRE(approxEq<half_float::half, int>(hostOther1, *expectedOutput1, 0.0, 1.0e-12));
     std::shared_ptr<Tensor> other2 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv2")->getOperator())->getOutput(0);
     Tensor hostOther2(other2->dataType());
+    hostOther2.setBackend("cpu");
     hostOther2.copyCastFrom(*other2);
     REQUIRE(approxEq<float, int>(hostOther2, *expectedOutput2, 0.0, 1.0e-12));
     std::shared_ptr<Tensor> other3 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv3")->getOperator())->getOutput(0);
     Tensor hostOther3(other3->dataType());
+    hostOther3.setBackend("cpu");
     hostOther3.copyCastFrom(*other3);
     REQUIRE(approxEq<double, int>(hostOther3, *expectedOutput3, 0.0, 1.0e-12));
 }
@@ -197,16 +200,19 @@ TEST_CASE("[cuda/convert] Convert(forward)") {
     std::shared_ptr<Tensor> other1 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv1")->getOperator())->getOutput(0);
     Tensor hostOther1(other1->dataType());
+    hostOther1.setBackend("cpu");
     hostOther1.copyCastFrom(*other1);
     REQUIRE(approxEq<half_float::half, int>(hostOther1, *expectedOutput1, 0.0, 1.0e-12));
     std::shared_ptr<Tensor> other2 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv2")->getOperator())->getOutput(0);
     Tensor hostOther2(other2->dataType());
+    hostOther2.setBackend("cpu");
     hostOther2.copyCastFrom(*other2);
     REQUIRE(approxEq<float, int>(hostOther2, *expectedOutput2, 0.0, 1.0e-12));
     std::shared_ptr<Tensor> other3 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv3")->getOperator())->getOutput(0);
     Tensor hostOther3(other3->dataType());
+    hostOther3.setBackend("cpu");
     hostOther3.copyCastFrom(*other3);
     REQUIRE(approxEq<double, int>(hostOther3, *expectedOutput3, 0.0, 1.0e-12));
 }
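The change is the same in both hunks: each host-side comparison tensor now gets an explicit CPU backend before copyCastFrom pulls the CUDA output back, which is what the commit message "Missing backend" refers to. Below is a minimal sketch of that pattern, assuming the Aidge Tensor API used in the diff and the usual aidge_core include paths (the header paths and helper name are assumptions for illustration, not taken from this page):

#include <memory>

#include "aidge/data/Tensor.hpp"              // assumed header location
#include "aidge/operator/OperatorTensor.hpp"  // assumed header location

using namespace Aidge;

// Copy a (possibly CUDA-resident) operator output into a freshly created
// host tensor so it can be compared against expected values on the CPU.
Tensor copyOutputToHost(const std::shared_ptr<Tensor>& deviceOutput) {
    Tensor host(deviceOutput->dataType());  // same data type, no storage yet
    host.setBackend("cpu");                 // without a backend, copyCastFrom has no
                                            // destination implementation to copy into
    host.copyCastFrom(*deviceOutput);       // cast and copy the device data to the host
    return host;
}

In the test the same three calls are written inline for other1, other2 and other3; the hypothetical helper above only illustrates why setBackend("cpu") must precede copyCastFrom.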