Skip to content
Snippets Groups Projects

Improved scheduling

Merged Olivier BICHLER requested to merge scheduling into dev
1 file
+ 85
0
Compare changes
  • Side-by-side
  • Inline
+ 85
0
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include "aidge/recipes/Recipes.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/MatMul.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/graph/OpArgs.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include <cstddef>
using namespace Aidge;
TEST_CASE("[ConstantFolding] test") {
    // Build a two-layer MLP-like graph where every input is a constant
    // Producer: input -> MatMul -> Add -> MatMul -> Add.
    // Constant producers (constant flag = true) make the whole graph foldable.
    auto weight0 = Producer(std::make_shared<Tensor>(Array2D<float,5,5>{{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}, {1, 2, 3, 4, 5}}}), "W0", true);
    auto bias0 = Producer(std::make_shared<Tensor>(Array1D<float,5>{{1, 2, 3, 4, 5}}), "B0", true);
    auto weight1 = Producer(std::make_shared<Tensor>(Array2D<float,5,5>{{{6, 7, 8, 9, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}}}),"W1", true);
    auto bias1 = Producer(std::make_shared<Tensor>(Array1D<float,5>{{1, 2, 3, 4, 5}}), "B1", true);
    auto dataInput = Producer(std::make_shared<Tensor>(Array2D<float,2,5>{{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}}}), "input", true);

    auto mm0 = MatMul("matmul0");
    auto addNode0 = Add(2, "add0");
    auto mm1 = MatMul("matmul1");
    auto addNode1 = Add(2, "add1");

    // Wire layer 0: matmul0(input, W0) followed by add0(matmul0, B0).
    dataInput->addChild(mm0, 0, 0);
    weight0->addChild(mm0, 0, 1);
    mm0->addChild(addNode0, 0, 0);
    bias0->addChild(addNode0, 0, 1);
    // Wire layer 1: matmul1(add0, W1) followed by add1(matmul1, B1).
    addNode0->addChild(mm1, 0, 0);
    weight1->addChild(mm1, 0, 1);
    mm1->addChild(addNode1, 0, 0);
    bias1->addChild(addNode1, 0, 1);

    const std::set<std::shared_ptr<Node>> originalNodes{
        dataInput, weight0, mm0, bias0, addNode0, weight1, mm1, bias1, addNode1};

    auto g = std::make_shared<GraphView>();
    g->add({dataInput, weight0, mm0, bias0, addNode0, weight1, mm1, bias1, addNode1});
    g->setBackend("cpu");
    g->forwardDims();

    // Sanity-check the original topology before any transformation.
    REQUIRE(g->getNodes() == originalNodes);
    REQUIRE(((mm0->getParent(0) == dataInput) && (mm0->getParent(1) == weight0)));
    REQUIRE(((addNode0->getParent(0) == mm0) && (addNode0->getParent(1) == bias0)));
    REQUIRE(((mm1->getParent(0) == addNode0) && (mm1->getParent(1) == weight1)));
    REQUIRE(((addNode1->getParent(0) == mm1) && (addNode1->getParent(1) == bias1)));

    // Run the graph once and check the reference output of the last Add.
    auto scheduler = SequentialScheduler(g);
    scheduler.forward();

    // Expected values: (input.W0 + B0).W1 + B1, computed by hand.
    const std::shared_ptr<Tensor> result = std::make_shared<Tensor>(Array2D<float,2,5>{{
        { 1201.000000, 1532.000000, 1863.000000, 2194.000000, 785.000000},
        { 2501.000000, 3207.000000, 3913.000000, 4619.000000, 1735.000000}
    }});
    auto lastAddOp = std::static_pointer_cast<Add_Op>(addNode1->getOperator());
    REQUIRE(approxEq<float>(*(lastAddOp->getOutput(0)), *result));

    // Fold the GraphView in place: everything is constant, so the whole
    // graph must collapse into a single Producer holding the final tensor.
    constantFolding(g);

    const std::set<std::shared_ptr<Node>> foldedNodes = g->getNodes();
    REQUIRE(foldedNodes != originalNodes);
    REQUIRE(foldedNodes.size() == 1);
    REQUIRE((*foldedNodes.cbegin())->type() == "Producer");

    // The surviving Producer must expose exactly the precomputed output.
    auto prodOp = std::static_pointer_cast<Producer_Op>((*foldedNodes.cbegin())->getOperator());
    REQUIRE(approxEq<float>(*(prodOp->getOutput(0)), *result));
}
Loading