Commit b724c161 authored by Jerome Hue

Save changes

parent 8169920b
@@ -42,8 +8,8 @@ void Aidge::SubImpl_cpu::forward() {
template <>
void Aidge::SubImpl_cpu::backward() {
    Log::info("SubImpl_cpu::backward()");
    const Sub_Op& op_ = dynamic_cast<const Sub_Op&>(mOp);
    //Log::info("SubImpl_cpu::backward() : Node {}", op_.name());
    auto in0 = op_.getInput(0);
    auto in1 = op_.getInput(1);
......
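For context, the backward pass of an element-wise Sub accumulates the output gradient positively into the first input's gradient and negatively into the second. Below is a minimal sketch under the assumption of equally sized, contiguous float tensors and no broadcasting; `subBackwardSketch` is a hypothetical helper for illustration, not the actual Aidge kernel (which also handles broadcasting and type dispatch).

// Minimal sketch, not the actual Aidge kernel: propagate grad(out) of
// out = a - b into both input gradients. Assumes equally sized,
// contiguous float buffers and no broadcasting.
#include <cstddef>

void subBackwardSketch(const float* gradOut,
                       float* gradA, float* gradB, std::size_t size) {
    for (std::size_t i = 0; i < size; ++i) {
        gradA[i] += gradOut[i];   // d(a - b)/da = +1
        gradB[i] -= gradOut[i];   // d(a - b)/db = -1
    }
}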
@@ -8,8 +8,10 @@
*
********************************************************************************/
#include <aidge/operator/Identity.hpp>
#include <aidge/operator/Mul.hpp>
#include <aidge/operator/Pop.hpp>
#include <aidge/operator/Stack.hpp>
#include <memory>
#include <string>
@@ -23,6 +25,7 @@
#include "aidge/graph/OpArgs.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/Memorize.hpp"
#include "aidge/operator/PerMemorize.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/recipes/GraphViewHelper.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp"
@@ -116,7 +119,7 @@ TEST_CASE("[cpu/operator] Memorize(backward)", "[Memorize][CPU]") {
    auto mem = Memorize(/*endStep=*/3);
    auto add = Add();
    auto pop = Pop();
    // Initialization tensor for Memorize
    auto input = Producer(std::make_shared<Tensor>(Array1D<float,2>({1.0f, 1.0f})), "input");
    auto decay = Producer(std::make_shared<Tensor>(Array1D<float,2>({0.9f, 0.9f})), "decay");
@@ -166,4 +169,86 @@ TEST_CASE("[cpu/operator] Memorize(backward)", "[Memorize][CPU]") {
}
}
TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic]") {
auto mul = Mul();
//auto mem = Memorize(/*endStep=*/3);
//auto mem = Identity();
auto mem = PerMemorize(2);
auto add = Add();
auto pop = Pop();
auto stack = Stack(3);
// Initialization tensor for Memorize
auto input = Producer(std::make_shared<Tensor>(Array1D<float,2>({1.0f, 1.0f})), "input");
auto decay = Producer(std::make_shared<Tensor>(Array1D<float,2>({0.9f, 0.9f})), "decay");
auto init = Producer(std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})), "init");
auto back = std::make_shared<Tensor>(Array1D<float,2>({1.0f, 1.0f}));
auto initTensor = std::make_shared<Tensor>(Array2D<float,3,2>({
{
{1,1},
{1,1},
{1,1},
}
}));
std::static_pointer_cast<OperatorTensor>(pop->getOperator())->setInput(0, initTensor);
auto memOp = std::static_pointer_cast<OperatorTensor>(mem->getOperator());
memOp->setOutput(1, std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})));
memOp->setOutput(0, std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})));
//init->addChild(mem,0,0);
mem->addChild(mul,1,0);
decay->addChild(mul,0,1);
mul->addChild(add,0,1);
pop->addChild(add,0,0);
add->addChild(mem,0,0);
mem->addChild(stack,1,0);
auto graphView = getConnectedGraphView(mem);
graphView->compile();
Log::info("GraphView output nodes : {}", graphView->outputNodes().size());
for(auto node : graphView->outputNodes())
{
Log::info("output node type : {}", node->type());
}
// TODO: Set ordered outputs for this node.
auto scheduler = SequentialScheduler(graphView);
scheduler.forward();
//std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
std::static_pointer_cast<OperatorTensor>(stack->getOperator())->getOutput(0)->print();
REQUIRE(true);
}
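Assuming PerMemorize's output 1 exposes the previous step's state the way Memorize does (PerMemorize is introduced by this change, so this is an assumption), the wiring above computes v[t] = pop[t] + decay * v[t-1] with v[0] = 0. A small standalone reference computation of the values the stacked output should then hold:

// Hypothetical reference for the test above; not part of the test file.
// Assumes PerMemorize output 1 delays the state by one step and v[0] = 0.
#include <array>
#include <cstdio>

int main() {
    const float decay = 0.9f;
    float v = 0.0f;                    // initial memory state
    std::array<float, 3> expected{};   // one value per stacked time step
    for (std::size_t t = 0; t < expected.size(); ++t) {
        v = 1.0f + decay * v;          // pop[t] = 1.0 at every step
        expected[t] = v;               // 1.0, 1.9, 2.71
    }
    for (float e : expected) {
        std::printf("%f\n", e);
    }
    return 0;
}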
TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic2]") {
auto input = Producer(std::make_shared<Tensor>(1.0f));
auto init = Producer(std::make_shared<Tensor>(1.0f));
auto add = Add();
//auto mem = PerMemorize(3);
auto mem = Memorize(3);
input->addChild(add,0,0);
add->addChild(mem,0,0);
mem->addChild(add, 1,1);
init->addChild(mem, 0, 1);
auto gv = getConnectedGraphView(mem);
gv->compile();
auto scheduler = SequentialScheduler(gv);
scheduler.forward();
std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
REQUIRE(true);
}
} // namespace Aidge
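In this second test each scheduled step adds the constant input (1.0) to the memorized value, starting from the init value (1.0). If Memorize(3) runs exactly three accumulation steps, an assumption since the count depends on how endStep is interpreted, the printed scalar should end up at 4.0. A plain sanity check of that arithmetic:

// Hypothetical expectation for the test above; the step count is an assumption.
#include <cstdio>

int main() {
    float state = 1.0f;        // "init" producer
    const float input = 1.0f;  // "input" producer
    const int steps = 3;       // assumed number of Memorize steps (endStep = 3)
    for (int t = 0; t < steps; ++t) {
        state += input;        // Add -> Memorize feedback loop
    }
    std::printf("expected final state: %f\n", state);  // 4.0 under these assumptions
    return 0;
}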
@@ -881,7 +881,6 @@ TEST_CASE("[cpu/operator] MetaOperator", "[Leaky][CPU][Simple]") {
{1,1,1,1},
}});
auto pop = Pop("pop");
auto popOp = std::static_pointer_cast<OperatorTensor>(pop->getOperator());
auto stack = Stack(nbTimeSteps, "stack");
......