Skip to content
Snippets Groups Projects
Commit 3b646a93 authored by Maxence Naud's avatar Maxence Naud
Browse files

Remove number-of-input option from Add Operator

parent 1b3bb038
No related branches found
No related tags found
3 merge requests: !118 v0.4.0, !108 v0.4.0, !92 Export refactor
Pipeline #57392 canceled
......@@ -66,7 +66,7 @@ class test_scheduler(unittest.TestCase):
aidge_core.Producer(input_tensor, "X"),
aidge_core.FC(1, 50, name='0'),
aidge_core.parallel([aidge_core.FC(50, 50, name='1'), aidge_core.FC(50, 50, name='3')]),
aidge_core.Add(2, name='2'),
aidge_core.Add(name='2'),
])
EXPECTED_SCHEDULE = [['0', '1', '3', '2'], ['0', '3', '1', '2']] # Both scheduling are valid !
......
......@@ -35,7 +35,7 @@ TEST_CASE("Test addition of Tensors","[TensorImpl][Add]") {
std::uniform_int_distribution<int> boolDist(0,1);
// Create MatMul Operator
std::shared_ptr<Node> mySub = Add(2);
std::shared_ptr<Node> mySub = Add();
auto op = std::static_pointer_cast<OperatorTensor>(mySub-> getOperator());
op->setDataType(DataType::Float32);
op->setBackend("cpu");
......
......@@ -39,17 +39,6 @@ TEST_CASE("[cpu/operator] Add(forward)", "[Add][CPU]") {
} //
}); //
SECTION("One input") {
std::shared_ptr<Node> myAdd = Add(1);
auto op = std::static_pointer_cast<OperatorTensor>(myAdd -> getOperator());
op->associateInput(0, input1);
op->setBackend("cpu");
op->setDataType(DataType::Int32);
myAdd->forward();
REQUIRE(*(op->getOutput(0)) == *input1);
}
SECTION("Two inputs") {
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
{
......@@ -71,7 +60,7 @@ TEST_CASE("[cpu/operator] Add(forward)", "[Add][CPU]") {
}
});
std::shared_ptr<Node> myAdd = Add(2);
std::shared_ptr<Node> myAdd = Add();
auto op = std::static_pointer_cast<OperatorTensor>(myAdd -> getOperator());
op->associateInput(0, input1);
op->associateInput(1, input1);
......@@ -82,39 +71,6 @@ TEST_CASE("[cpu/operator] Add(forward)", "[Add][CPU]") {
REQUIRE(*(op->getOutput(0)) == *expectedOutput);
}
SECTION("Three inputs") {
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
{
{
{{ 60, 141},{ 63, 144},{ 66, 147}},
{{ 69, 150},{ 72, 153},{ 75, 156}},
{{ 78, 159},{ 81, 162},{ 84, 165}}
},
{
{{ 87, 168},{ 90, 171},{ 93, 174}},
{{ 96, 177},{ 99, 180},{102, 183}},
{{105, 186},{108, 189},{111, 192}}
},
{
{{114, 195},{117, 198},{120, 201}},
{{123, 204},{126, 207},{129, 210}},
{{132, 213},{135, 216},{138, 219}}
}
}
});
std::shared_ptr<Node> myAdd = Add(3);
auto op = std::static_pointer_cast<OperatorTensor>(myAdd -> getOperator());
op->associateInput(0, input1);
op->associateInput(1, input1);
op->associateInput(2, input1);
op->setDataType(DataType::Int32);
op->setBackend("cpu");
myAdd->forward();
REQUIRE(*op->getOutput(0) == *expectedOutput);
}
SECTION("Broadcasting") {
std::shared_ptr<Tensor> input_0 = std::make_shared<Tensor>(Array4D<int,3,1,3,2> {
{ //
......@@ -139,7 +95,7 @@ TEST_CASE("[cpu/operator] Add(forward)", "[Add][CPU]") {
} //
}); //
std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<int,2> {{100,200}});
std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<int,2> {{100,200}});
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
{ //
{ //
......@@ -160,16 +116,23 @@ TEST_CASE("[cpu/operator] Add(forward)", "[Add][CPU]") {
} //
}); //
std::shared_ptr<Node> myAdd = Add(3);
auto op = std::static_pointer_cast<OperatorTensor>(myAdd -> getOperator());
op->associateInput(0, input_0);
op->associateInput(1, input_1);
op->associateInput(2, input_2);
op->setDataType(DataType::Int32);
op->setBackend("cpu");
myAdd->forward();
op->getOutput(0)->print();
std::shared_ptr<Node> myAdd_0 = Add();
std::shared_ptr<Node> myAdd_1 = Add();
auto op_0 = std::static_pointer_cast<OperatorTensor>(myAdd_0 -> getOperator());
auto op_1 = std::static_pointer_cast<OperatorTensor>(myAdd_1 -> getOperator());
op_0->associateInput(0, input_0);
op_0->associateInput(1, input_1);
op_1->associateInput(0, input_2);
op_1->associateInput(1, op_0->getOutput(0));
op_0->setDataType(DataType::Int32);
op_1->setDataType(DataType::Int32);
op_0->setBackend("cpu");
op_1->setBackend("cpu");
myAdd_0->forward();
myAdd_1->forward();
op_1->getOutput(0)->print();
expectedOutput->print();
REQUIRE(*op->getOutput(0) == *expectedOutput);
REQUIRE(*op_1->getOutput(0) == *expectedOutput);
}
}
\ No newline at end of file
......@@ -22,12 +22,12 @@
using namespace Aidge;
TEST_CASE("[ConstantFolding] test") {
TEST_CASE("[ConstantFolding] forward", "[ConstantFolding][forward][CPU]") {
// generate the original GraphView
auto matmul0 = MatMul("matmul0");
auto add0 = Add(2, "add0");
auto add0 = Add("add0");
auto matmul1 = MatMul("matmul1");
auto add1 = Add(2, "add1");
auto add1 = Add("add1");
auto b0 = Producer(std::make_shared<Tensor>(Array1D<float,5>{{1, 2, 3, 4, 5}}), "B0", true);
auto w0 = Producer(std::make_shared<Tensor>(Array2D<float,5,5>{{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}, {1, 2, 3, 4, 5}}}), "W0", true);
......
......@@ -147,10 +147,13 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
std::shared_ptr<GraphView> g =
Sequential({Conv(1, 3, {3, 3}, "inputConv"),
Parallel({
Conv(3, 3, {1, 1}, "conv1.1"),
Conv(3, 3, {1, 1}, "conv1.2"),
Sequential({
Parallel({
Conv(3, 3, {1, 1}, "conv1.1"),
Conv(3, 3, {1, 1}, "conv1.2")}),
Add("add1")}),
Conv(3, 3, {1, 1}, "conv1.3")}),
Add(3, "add1"),
Add("add2"),
Conv(3, 2, {1, 1}, "conv2"),
FC(18, 5, false, "out")});
......@@ -216,9 +219,9 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
std::shared_ptr<Tensor> biasTensor = std::make_shared<Tensor>(
Array2D<int, 2, 3>{{{2, 0, 0}, {1, 0, 0}}});
auto add1 = Add(2, "add1");
auto add1 = Add("add1");
auto mem = Memorize(3, "mem1");
auto add2 = Add(2, "add2");
auto add2 = Add("add2");
auto bias = Producer(biasTensor, "bias");
auto init = Producer(initTensor, "init");
auto input = Producer(in, "input");
......@@ -260,9 +263,9 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
std::shared_ptr<Tensor> biasTensor = std::make_shared<Tensor>(
Array2D<int, 2, 3>{{{2, 0, 0}, {1, 0, 0}}});
auto add1 = Add(2, "add1");
auto add1 = Add("add1");
auto mem = Memorize(3, "mem1");
auto add2 = Add(2, "add2");
auto add2 = Add("add2");
auto bias = Producer(biasTensor, "bias");
auto init = Producer(initTensor, "init");
auto input = Producer(in, "input");
......
0% Loading, or try again.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment