Commit 1f875305 authored by Jerome Hue

save changes

parent 7db6e417
Pipeline #70075 failed
@@ -72,6 +72,11 @@ void Aidge::FCImpl_cpu::backward()
const auto& input1grad = op_.getInput(1)->grad()->refCastFrom(input1gradFallback, *(op_.getOutput(0)));
const auto& input2grad = (op_.getInput(2)) ? op_.getInput(2)->grad()->refCastFrom(input2gradFallback, *(op_.getOutput(0))) : Tensor();
Log::info(" ");
Log::info("(FCImpl.cpp) Gradient of output 0 : {}", fc_grad->toString());
Log::info("(FCImpl.cpp) Gradient of input 0 : {}", input0grad.toString());
Log::info("(FCImpl.cpp) Gradient of input 1 : {}", input1grad.toString());
// Call kernel
const auto batchSize = (input0grad.dims().size() > 1) ? input0grad.dims()[0] : 1;
impl.backward(batchSize,
@@ -83,4 +88,8 @@ void Aidge::FCImpl_cpu::backward()
input0grad.getImpl()->rawPtr(),
input1grad.getImpl()->rawPtr(),
(op_.getInput(2)) ? input2grad.getImpl()->rawPtr() : nullptr);
Log::info("(FCImpl.cpp) Gradient of output 0 : {}", fc_grad->toString());
Log::info("(FCImpl.cpp) Gradient of input 0 : {}", input0grad.toString());
Log::info("(FCImpl.cpp) Gradient of input 1 : {}", input1grad.toString());
}
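For reference when reading the logged gradients: for a fully connected layer computing Y = X·Wᵀ + B, the backward pass yields dX = dY·W, dW = dYᵀ·X, and dB as the sum of dY over the batch. The sketch below spells out those relations in plain C++; the helper name fcBackwardRef and the row-major [out, in] weight layout are assumptions for illustration, not Aidge's actual kernel.

#include <cstddef>
#include <vector>

// Reference gradients for Y = X * W^T + B (row-major, W laid out as [out, in]).
// fcBackwardRef is a hypothetical helper, not part of Aidge's API.
void fcBackwardRef(std::size_t batch, std::size_t in, std::size_t out,
                   const std::vector<float>& X, const std::vector<float>& W,
                   const std::vector<float>& dY,
                   std::vector<float>& dX, std::vector<float>& dW, std::vector<float>& dB)
{
    dX.assign(batch * in, 0.0f);
    dW.assign(out * in, 0.0f);
    dB.assign(out, 0.0f);
    for (std::size_t b = 0; b < batch; ++b) {
        for (std::size_t o = 0; o < out; ++o) {
            const float g = dY[b * out + o];
            dB[o] += g;                              // bias grad: sum of dY over the batch
            for (std::size_t i = 0; i < in; ++i) {
                dX[b * in + i] += g * W[o * in + i]; // input grad: dY * W
                dW[o * in + i] += g * X[b * in + i]; // weight grad: dY^T * X
            }
        }
    }
}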
@@ -25,7 +25,6 @@
#include "aidge/graph/OpArgs.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/Memorize.hpp"
#include "aidge/operator/PerMemorize.hpp"
#include "aidge/operator/Producer.hpp" #include "aidge/operator/Producer.hpp"
#include "aidge/recipes/GraphViewHelper.hpp" #include "aidge/recipes/GraphViewHelper.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp" #include "aidge/scheduler/SequentialScheduler.hpp"
...@@ -169,86 +168,4 @@ TEST_CASE("[cpu/operator] Memorize(backward)", "[Memorize][CPU]") { ...@@ -169,86 +168,4 @@ TEST_CASE("[cpu/operator] Memorize(backward)", "[Memorize][CPU]") {
} }
} }
TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic]") {
auto mul = Mul();
//auto mem = Memorize(/*endStep=*/3);
//auto mem = Identity();
auto mem = PerMemorize(2);
auto add = Add();
auto pop = Pop();
auto stack = Stack(3);
// Initialization tensor for Memorize
auto input = Producer(std::make_shared<Tensor>(Array1D<float,2>({1.0f, 1.0f})), "input");
auto decay = Producer(std::make_shared<Tensor>(Array1D<float,2>({0.9f, 0.9f})), "decay");
auto init = Producer(std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})), "init");
auto back = std::make_shared<Tensor>(Array1D<float,2>({1.0f, 1.0f}));
auto initTensor = std::make_shared<Tensor>(Array2D<float,3,2>({
{
{1,1},
{1,1},
{1,1},
}
}));
std::static_pointer_cast<OperatorTensor>(pop->getOperator())->setInput(0, initTensor);
auto memOp = std::static_pointer_cast<OperatorTensor>(mem->getOperator());
memOp->setOutput(1, std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})));
memOp->setOutput(0, std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})));
//init->addChild(mem,0,0);
mem->addChild(mul,1,0);
decay->addChild(mul,0,1);
mul->addChild(add,0,1);
pop->addChild(add,0,0);
add->addChild(mem,0,0);
mem->addChild(stack,1,0);
auto graphView = getConnectedGraphView(mem);
graphView->compile();
Log::info("GraphView output nodes : {}", graphView->outputNodes().size());
for(auto node : graphView->outputNodes())
{
Log::info("output node type : {}", node->type());
}
// TODO: Set ordered outputs for this node.
auto scheduler = SequentialScheduler(graphView);
scheduler.forward();
//std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
std::static_pointer_cast<OperatorTensor>(stack->getOperator())->getOutput(0)->print();
REQUIRE(true);
}
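For orientation, the test above wires the Pop/Mul/Add/PerMemorize/Stack nodes into a leaky-accumulator recurrence: at each of the three steps, the newly memorized value is the popped input plus the previous value scaled by the decay. A minimal sketch of that recurrence outside the graph API is shown below (plain C++; the assumption that PerMemorize's delayed output exposes the previous step's value, and hence that the stacked values come out as 1, 1.9, 2.71, is mine and depends on the operator's scheduling semantics).

#include <array>
#include <cstdio>

int main() {
    // Same data as the test: init {0, 0}, decay 0.9, three popped inputs of 1.
    std::array<float, 2> v{0.0f, 0.0f};
    const float decay = 0.9f;
    for (int t = 0; t < 3; ++t) {
        for (auto& x : v) {
            x = 1.0f + decay * x;                       // add(pop(t), mul(decay, v_prev))
        }
        std::printf("step %d: {%g, %g}\n", t, v[0], v[1]);  // 1, 1.9, 2.71
    }
    return 0;
}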
TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic2]") {
auto input = Producer(std::make_shared<Tensor>(1.0f));
auto init = Producer(std::make_shared<Tensor>(1.0f));
auto add = Add();
//auto mem = PerMemorize(3);
auto mem = Memorize(3);
input->addChild(add,0,0);
add->addChild(mem,0,0);
mem->addChild(add, 1,1);
init->addChild(mem, 0, 1);
auto gv = getConnectedGraphView(mem);
gv->compile();
auto scheduler = SequentialScheduler(gv);
scheduler.forward();
std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
REQUIRE(true);
}
} // namespace Aidge
@@ -708,7 +708,8 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
auto fc2 = FC(outChannels, inChannels, true, "fc2");
// NOTE: Account for init step by adding 1 to the max timestep
// parameter.
//auto lif1 = Leaky(nbTimeSteps + 1, beta, threshold, LeakyReset::Subtraction, "leaky");
auto lif1 = Leaky(nbTimeSteps, beta, threshold, LeakyReset::Subtraction, "leaky");
// associateInput() does not work
fc1->input(1).first->getOperator()->setOutput(0, myWeights);
...
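Context for the Leaky change above: the meta-operator unrolls a per-timestep membrane update, and the NOTE in the context lines explains that an extra step was previously added to account for initialization. For reference, below is a standalone sketch of one leaky integrate-and-fire step with subtraction reset, assuming the usual snnTorch-style formulation; it is not Aidge's Leaky implementation, and the helper name leakyStep is made up.

#include <cstddef>
#include <vector>

// One unrolled timestep of a leaky integrate-and-fire neuron with
// subtraction reset, in the common snnTorch-style formulation:
//   mem[t]   = beta * mem[t-1] + in[t] - threshold * spike[t-1]
//   spike[t] = (mem[t] >= threshold) ? 1 : 0
// leakyStep is a reference sketch, not Aidge's Leaky meta-operator.
void leakyStep(std::vector<float>& mem, std::vector<float>& spike,
               const std::vector<float>& in, float beta, float threshold)
{
    for (std::size_t i = 0; i < mem.size(); ++i) {
        mem[i] = beta * mem[i] + in[i] - threshold * spike[i];
        spike[i] = (mem[i] >= threshold) ? 1.0f : 0.0f;
    }
}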