Commit 5f4c3ad9 authored by Maxence Naud

Adapt some tests to changes in operators

parent 63ec8e67
1 merge request: !22 Update operators implementation
Pipeline #34722 failed
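
For context, here is the pattern the updated Python tests rely on, reduced to a minimal sketch. It is illustrative only: the graph and values are not taken from the tests, aidge_core.ReLU() and aidge_core.GraphView() are assumed to exist as in other Aidge examples, and the module name aidge_backend_cpu is assumed for the "cpu" kernels.

import numpy as np
import aidge_core
import aidge_backend_cpu  # assumed module name; registers the "cpu" implementations

# Minimal Producer -> ReLU graph, wired the same way as the tests below.
input_tensor = aidge_core.Tensor(np.array([-1, -2, 0, 0, 1, 2]).astype(np.int32))
input_node = aidge_core.Producer(input_tensor, "X")
relu = aidge_core.ReLU()
input_node.add_child(relu)

gv = aidge_core.GraphView()
gv.add(input_node)
gv.add(relu)
gv.set_datatype(aidge_core.DataType.Int32)
gv.set_backend("cpu")

scheduler = aidge_core.SequentialScheduler(gv)
scheduler.forward()

# get_output(index) replaces the former output(index) accessor.
out_tensor = relu.get_operator().get_output(0)
print([out_tensor[i] for i in range(6)])  # expected: [0, 0, 0, 0, 1, 2]
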
@@ -49,24 +49,24 @@ class test_recipies(unittest.TestCase):
np_shift = np.array([0.05]).astype(np.float32)
np_mean = np.array([0.05]).astype(np.float32)
np_var = np.array([0.05]).astype(np.float32)
-conv.input(1)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_weights))
-conv.input(2)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_bias))
-bn.input(1)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_scale))
-bn.input(2)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_shift))
-bn.input(3)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_mean))
-bn.input(4)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_var))
+conv.input(1)[0].get_operator().set_output(0, aidge_core.Tensor(np_weights))
+conv.input(2)[0].get_operator().set_output(0, aidge_core.Tensor(np_bias))
+bn.input(1)[0].get_operator().set_output(0, aidge_core.Tensor(np_scale))
+bn.input(2)[0].get_operator().set_output(0, aidge_core.Tensor(np_shift))
+bn.input(3)[0].get_operator().set_output(0, aidge_core.Tensor(np_mean))
+bn.input(4)[0].get_operator().set_output(0, aidge_core.Tensor(np_var))
scheduler0 = aidge_core.SequentialScheduler(graph_view)
scheduler0.forward()
for outNode in graph_view.get_output_nodes():
-output_aidge0 = outNode.get_operator().output(0)
+output_aidge0 = outNode.get_operator().get_output(0)
aidge_core.fuse_batchnorm(graph_view)
scheduler1 = aidge_core.SequentialScheduler(graph_view)
scheduler1.forward()
for outNode in graph_view.get_output_nodes():
-output_aidge1 = outNode.get_operator().output(0)
+output_aidge1 = outNode.get_operator().get_output(0)
self.assertTrue(aidge_core.approx_eq(output_aidge0, output_aidge1, 0.000001, 0.0001))
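The parameter-injection idiom above, isolated as a hedged sketch: it assumes, as in the test, that the Conv factory attaches weight and bias Producer parents so that conv.input(1)[0] and conv.input(2)[0] resolve to those producers, and that aidge_core.Conv2D takes (input channels, output channels, kernel dims).

import numpy as np
import aidge_core

conv = aidge_core.Conv2D(1, 1, [3, 3], name="conv0")  # assumed factory signature
np_weights = np.ones((1, 1, 3, 3)).astype(np.float32)
np_bias = np.zeros((1,)).astype(np.float32)

# input(i) yields the (parent node, parent output index) pair feeding input i;
# the producer's constant is injected with set_output(<output index>, tensor).
conv.input(1)[0].get_operator().set_output(0, aidge_core.Tensor(np_weights))
conv.input(2)[0].get_operator().set_output(0, aidge_core.Tensor(np_bias))
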
@@ -22,30 +22,30 @@ class test_scheduler(unittest.TestCase):
gv.add(relu)
gv.add(input_node)
input_node.add_child(relu)
gv.set_datatype(aidge_core.DataType.Int32)
gv.set_backend("cpu")
input_node.add_child(relu)
scheduler = aidge_core.SequentialScheduler(gv)
scheduler.forward()
-out_tensor = relu.get_operator().output(0)
+out_tensor = relu.get_operator().get_output(0)
expected_out = [0,0,0,0,1,2]
for i in range(len(expected_out)):
self.assertEqual(expected_out[i], out_tensor[i])
def test_sequential_scheduling(self):
-input_data = np.array([]).astype(np.float32)
+input_data = np.array([0]).astype(np.float32)
input_tensor = aidge_core.Tensor(input_data)
input_node = aidge_core.Producer(input_tensor, "X")
graph_view = aidge_core.sequential([
-aidge_core.FC(50, name='0'),
-aidge_core.FC(50, name='1'),
-aidge_core.FC(10, name='2'),
+aidge_core.FC(1, 50, name='0'),
+aidge_core.FC(50, 50, name='1'),
+aidge_core.FC(50, 10, name='2'),
])
EXPECTED_SCHEDULE = ['0', '1', '2']
@@ -64,14 +64,14 @@ class test_scheduler(unittest.TestCase):
def test_parallel_scheduling(self):
-input_data = np.array([]).astype(np.float32)
+input_data = np.array([0]).astype(np.float32)
input_tensor = aidge_core.Tensor(input_data)
input_node = aidge_core.Producer(input_tensor, "X")
graph_view = aidge_core.sequential([
-aidge_core.FC(50, name='0'),
-aidge_core.parallel([aidge_core.FC(50, name='1'), aidge_core.FC(50, name='3')]),
-aidge_core.Add(name='2'),
+aidge_core.FC(1, 50, name='0'),
+aidge_core.parallel([aidge_core.FC(50, 50, name='1'), aidge_core.FC(50, 50, name='3')]),
+aidge_core.Add(2, name='2'),
])
EXPECTED_SCHEDULE = [['0', '1', '3', '2'], ['0', '3', '1', '2']] # Both schedules are valid!
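
Isolating the constructor changes visible in this hunk (the meaning of the new leading arguments is inferred from the diff and from the matching C++ changes further down: FC now takes the input size before the output size, and Add takes its number of inputs):

import aidge_core

# Old: aidge_core.FC(50, name='0')      -> output size only
# New: aidge_core.FC(1, 50, name='0')   -> (input size, output size)
# Old: aidge_core.Add(name='2')         -> arity implicit
# New: aidge_core.Add(2, name='2')      -> explicit number of inputs
graph_view = aidge_core.sequential([
    aidge_core.FC(1, 50, name='0'),
    aidge_core.parallel([aidge_core.FC(50, 50, name='1'),
                         aidge_core.FC(50, 50, name='3')]),
    aidge_core.Add(2, name='2'),
])
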
@@ -50,13 +50,11 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
Conv(1, 3, {3, 3}, "conv1"),
Conv(3, 4, {1, 1}, "conv2"),
Conv(4, 3, {1, 1}, "conv3"),
-FC(5, false, "fc")});
-g->setDataType(Aidge::DataType::Int32);
-g->setBackend("cpu");
+FC(27, 5, false, "fc")});
-g->getNode("conv1")->getOperator()->input(0) = *inputTensor;
-g->getNode("conv1")->getOperator()->input(1) = *weight1;
-g->getNode("conv1")->getOperator()->input(2) = *bias1;
+g->getNode("conv1")->getOperator()->setInput(0, inputTensor);
+g->getNode("conv1")->getOperator()->setInput(1, weight1);
+g->getNode("conv1")->getOperator()->setInput(2, bias1);
std::shared_ptr<Tensor> weight2 =
std::make_shared<Tensor>(Array4D<int, 4, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}},
@@ -64,8 +62,8 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
{{{7}}, {{8}}, {{9}}},
{{{10}}, {{11}}, {{12}}}}});
std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int, 4>{{1, 2, 3, 4}});
g->getNode("conv2")->getOperator()->input(1) = *weight2;
g->getNode("conv2")->getOperator()->input(2) = *bias2;
g->getNode("conv2")->getOperator()->setInput(1, weight2);
g->getNode("conv2")->getOperator()->setInput(2, bias2);
// *(g->getNode("conv2")->getOperator()->input(1, weight2);
std::shared_ptr<Tensor> weight3 = std::make_shared<Tensor>(
@@ -73,8 +71,8 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
{{{5}}, {{6}}, {{7}}, {{8}}},
{{{9}}, {{10}}, {{11}}, {{12}}}}});
std::shared_ptr<Tensor> bias3 = std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});
g->getNode("conv3")->getOperator()->input(1) = *weight3;
g->getNode("conv3")->getOperator()->input(2) = *bias3;
g->getNode("conv3")->getOperator()->setInput(1, weight3);
g->getNode("conv3")->getOperator()->setInput(2, bias3);
std::shared_ptr<Tensor> weightfc = std::make_shared<Tensor>(
Array2D<int, 5, 27>{{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
@@ -88,10 +86,12 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}});
std::shared_ptr<Tensor> biasfc = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
g->getNode("fc")->getOperator()->input(1) = *weightfc;
g->getNode("fc")->getOperator()->input(2) = *biasfc;
g->getNode("fc")->getOperator()->setInput(1, weightfc);
g->getNode("fc")->getOperator()->setInput(2, biasfc);
// input->addChild(g);
+g->setDataType(Aidge::DataType::Int32);
+g->setBackend("cpu");
+g->forwardDims();
SequentialScheduler scheduler(g);
REQUIRE_NOTHROW(scheduler.forward());
@@ -126,17 +126,17 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
Tensor expectedOutput4 = Array2D<int, 2, 5>{
{{205050376, 198925904, 181355097, 196978090, 238868348},
{598467376, 561797804, 560823897, 593043790, 698672948}}};
Tensor other1 = g->getNode("conv1")->getOperator()->output(0);
bool equal1 = (other1 == *expectedOutput1);
std::shared_ptr<Tensor> other1 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv1")->getOperator())->getOutput(0);
bool equal1 = (*other1 == *expectedOutput1);
REQUIRE(equal1);
Tensor other2 = g->getNode("conv2")->getOperator()->output(0);
bool equal2 = (other2 == *expectedOutput2);
std::shared_ptr<Tensor> other2 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv2")->getOperator())->getOutput(0);
bool equal2 = (*other2 == *expectedOutput2);
REQUIRE(equal2);
Tensor other3 = g->getNode("conv3")->getOperator()->output(0);
bool equal3 = (other3 == *expectedOutput3);
std::shared_ptr<Tensor> other3 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv3")->getOperator())->getOutput(0);
bool equal3 = (*other3 == *expectedOutput3);
REQUIRE(equal3);
Tensor other4 = g->getNode("fc")->getOperator()->output(0);
bool equal4 = (other4 == expectedOutput4);
std::shared_ptr<Tensor> other4 = std::static_pointer_cast<OperatorTensor>(g->getNode("fc")->getOperator())->getOutput(0);
bool equal4 = (*other4 == expectedOutput4);
REQUIRE(equal4);
}
@@ -149,34 +149,32 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
Conv(3, 3, {1, 1}, "conv1.3")}),
Add(3, "add1"),
Conv(3, 2, {1, 1}, "conv2"),
FC(5, false, "out")});
g->setBackend("cpu");
g->setDataType(Aidge::DataType::Int32);
FC(18, 5, false, "out")});
g->getNode("inputConv")->getOperator()->input(0) = *inputTensor;
g->getNode("inputConv")->getOperator()->input(1) = *weight1;
g->getNode("inputConv")->getOperator()->input(2) = *bias1;
g->getNode("inputConv")->getOperator()->setInput(0, inputTensor);
g->getNode("inputConv")->getOperator()->setInput(1, weight1);
g->getNode("inputConv")->getOperator()->setInput(2, bias1);
std::shared_ptr<Tensor> conv11Weight = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
{{{{1}}, {{2}}, {{3}}}, {{{4}}, {{5}}, {{6}}}, {{{7}}, {{8}}, {{9}}}}});
g->getNode("conv1.1")->getOperator()->input(1) = *conv11Weight;
g->getNode("conv1.1")->getOperator()->input(2) = *bias1;
g->getNode("conv1.1")->getOperator()->setInput(1, conv11Weight);
g->getNode("conv1.1")->getOperator()->setInput(2, bias1);
std::shared_ptr<Tensor> conv12Weight = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
{{{{11}}, {{12}}, {{13}}}, {{{14}}, {{15}}, {{16}}}, {{{17}}, {{18}}, {{19}}}}});
g->getNode("conv1.2")->getOperator()->input(1) = *conv12Weight;
g->getNode("conv1.2")->getOperator()->input(2) = *bias1;
g->getNode("conv1.2")->getOperator()->setInput(1, conv12Weight);
g->getNode("conv1.2")->getOperator()->setInput(2, bias1);
std::shared_ptr<Tensor> conv13Weight = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
{{{{21}}, {{22}}, {{23}}}, {{{24}}, {{25}}, {{26}}}, {{{27}}, {{28}}, {{29}}}}});
g->getNode("conv1.3")->getOperator()->input(1) = *conv13Weight;
g->getNode("conv1.3")->getOperator()->input(2) = *bias1;
g->getNode("conv1.3")->getOperator()->setInput(1, conv13Weight);
g->getNode("conv1.3")->getOperator()->setInput(2, bias1);
std::shared_ptr<Tensor> conv2Weight = std::make_shared<Tensor>(
Array4D<int, 2, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}}, {{{4}}, {{5}}, {{6}}}}});
std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int, 2>{{1, 2}});
g->getNode("conv2")->getOperator()->input(1) = *conv2Weight;
g->getNode("conv2")->getOperator()->input(2) = *bias2;
g->getNode("conv2")->getOperator()->setInput(1, conv2Weight);
g->getNode("conv2")->getOperator()->setInput(2, bias2);
std::shared_ptr<Tensor> fcWeight = std::make_shared<Tensor>(
Array2D<int, 5, 18>{{{1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3},
......@@ -185,19 +183,21 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
{5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2},
{3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}}});
std::shared_ptr<Tensor> fcBias = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
g->getNode("out")->getOperator()->input(1) = *fcWeight;
g->getNode("out")->getOperator()->input(2) = *fcBias;
g->getNode("out")->getOperator()->setInput(1, fcWeight);
g->getNode("out")->getOperator()->setInput(2, fcBias);
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(
Array2D<int, 2, 5>{{{124324368, 130692907, 133325056, 125044620, 142843879},
{369195468, 394615207, 382643056, 379441320, 416291779}}});
g->setBackend("cpu");
g->setDataType(Aidge::DataType::Int32);
g->forwardDims();
SequentialScheduler scheduler(g);
REQUIRE_NOTHROW(scheduler.forward());
scheduler.saveSchedulingDiagram("schedulingSequential");
std::shared_ptr<Tensor> result =
std::static_pointer_cast<Tensor>(g->getNode("out")->getOperator()->getOutput(0));
std::static_pointer_cast<Tensor>(g->getNode("out")->getOperator()->getRawOutput(0));
bool equal = (*result == *expectedOutput);
REQUIRE(equal);
}