Commit c9cfde96 authored by Jerome Hue, committed by Olivier BICHLER

chore: Improve Leaky Meta-operator

parent 3b3f7548
2 merge requests: !318 [Upd] Release version 0.5.0, !283 [Add] Operator Leaky_MetaOperator
@@ -166,7 +166,7 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels,
 std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length);
 std::shared_ptr<MetaOperator_Op> LeakyOp();
-std::shared_ptr<Node> Leaky(const std::string& name = "");
+std::shared_ptr<Node> Leaky(const int nbTimeSteps, const float threshold, const float beta, const std::string& name = "");
 } // namespace Aidge
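For reference, the commit promotes the previously hardcoded constants into explicit parameters of the public signature. A minimal, hypothetical call based only on the declaration above (the parameter names come from the new signature; the variable name is illustrative):

    // Hypothetical usage: 10 time steps, firing threshold 1.0, decay rate (beta) 0.9.
    auto lif = Aidge::Leaky(/*nbTimeSteps=*/10, /*threshold=*/1.0f, /*beta=*/0.9f, "lif1");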
@@ -13,13 +13,9 @@ namespace Aidge {
 constexpr auto memorizeOpDataOutputRecIndex = 1;
 constexpr auto memorizeOpDataOutputIndex = 0;
 
-std::shared_ptr<Node> Leaky(const std::string &name) {
+std::shared_ptr<Node> Leaky(const int nbTimeSteps, const float threshold, const float beta, const std::string &name) {
 
     Log::warn("! Lots of parameters are hardcoded");
-    const auto softReset = true;
-    const auto beta = 0.9;
-    const auto thresholdValue = 1.0;
-    const auto seqLength = 2;
 
     auto microGraph = std::make_shared<GraphView>();
@@ -31,21 +27,18 @@ std::shared_ptr<Node> Leaky(const std::string &name) {
     auto subNode2 = Sub(!name.empty() ? name + "_threshold" : "");
     auto reset = Mul(!name.empty() ? name + "_reset" : "");
 
-    // auto betaTensor = std::make_shared<Tensor>(Array2D<float, 16, 32>{});
-    // FIXME: Use beta instead of a fixed value here, and put real dimensions
-    auto betaTensor = std::make_shared<Tensor>(Array2D<float, 3, 2>{});
+    auto betaTensor = std::make_shared<Tensor>(beta);
     auto uthTensor =
-        std::make_shared<Tensor>(static_cast<float>(thresholdValue));
-    uniformFiller<float>(betaTensor, beta, beta);
-    uniformFiller<float>(uthTensor, thresholdValue, thresholdValue);
+        std::make_shared<Tensor>(static_cast<float>(threshold));
+    uniformFiller<float>(uthTensor, threshold, threshold);
 
     auto decayRate = Producer(betaTensor, "leaky_beta", true);
     auto uth = Producer(uthTensor, "leaky_uth", true);
 
     auto potentialMem =
-        Memorize(seqLength, (!name.empty()) ? name + "_potential" : "");
+        Memorize(nbTimeSteps, (!name.empty()) ? name + "_potential" : "");
     auto spikeMem =
-        Memorize(seqLength, (!name.empty()) ? name + "_spike" : "");
+        Memorize(nbTimeSteps, (!name.empty()) ? name + "_spike" : "");
 
     // U[t] = Input[T] + beta * U[T-1] - S[T-1] * U_th
     // with S[T] = | 1, if U[T] - U_th > 0
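The recurrence documented in the comment above is the standard leaky integrate-and-fire update with a soft reset. As a sanity check, here is a self-contained scalar sketch of a single time step, independent of the Aidge graph API (the function name and types are illustrative):

    #include <utility>

    // One LIF time step under soft reset:
    //   U[T] = I[T] + beta * U[T-1] - S[T-1] * uth
    //   S[T] = 1 if U[T] - uth > 0, else 0
    std::pair<float, float> lifStep(float input, float uPrev, float sPrev,
                                    float beta, float uth) {
        const float u = input + beta * uPrev - sPrev * uth;
        const float s = (u - uth > 0.0f) ? 1.0f : 0.0f;
        return {u, s};
    }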
@@ -59,20 +52,16 @@ std::shared_ptr<Node> Leaky(const std::string &name) {
     mulNode->addChild(/*otherNode=*/addNode, /*outId=*/0, /*otherInId=*/1);
     inputNode->addChild(/*otherNode=*/addNode, /*outId=*/0, /*otherInId=*/0);
 
-    // S[T-1] * Uth
+    // S[T-1] * U_th
     spikeMem->addChild(reset,
                        /*outId=*/memorizeOpDataOutputRecIndex,
                        /*otherInId=*/0);
-    // TODO: Handle hard/soft reset
     uth->addChild(reset, 0, 1);
-    if (softReset) {
-        uth->addChild(reset, 0, 1);
-    } else {
-        // addNode->addChild(reset, 0, 1);
-        AIDGE_THROW_OR_ABORT(std::runtime_error,
-                             "Hard reset not implemented yet.");
-    }
 
-    // Input[T] + beta * U[T-1] - S[T-1] * Uth
+    // Input[T] + beta * U[T-1] - S[T-1] * U_th
     addNode->addChild(subNode, 0, 0);
     reset->addChild(subNode, 0, 1);
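The deleted branch confirms that only soft-reset behaviour survives: the reset term S[T-1] * U_th is always subtracted from the integrated potential, while hard reset (zeroing the potential after a spike) previously aborted at runtime and is now gone from the graph entirely. For context, the two conventions side by side (a sketch, not Aidge code):

    // Soft reset, as wired by the micrograph: subtract the threshold on spike.
    //   U[T] = I[T] + beta * U[T-1] - S[T-1] * uth
    // Hard reset, the unimplemented branch: clamp the potential to zero on spike.
    //   U[T] = (I[T] + beta * U[T-1]) * (1 - S[T-1])
    float applyReset(float integrated, float sPrev, float uth, bool soft) {
        return soft ? integrated - sPrev * uth
                    : integrated * (1.0f - sPrev);
    }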
@@ -103,6 +92,8 @@ std::shared_ptr<Node> Leaky(const std::string &name) {
     microGraph->setOrderedInputs(
         {{inputNode, 0}, {potentialMem, 1}, {spikeMem, 1}});
 
+    // NOTE: Outputs are NOT the memory nodes (as is done in LSTM), to avoid producing data during init.
+    // This way, we can plug a Stack operator after our node and get correct results.
     microGraph->setOrderedOutputs({//{potentialMem, memorizeOpDataOutputIndex},
                                    //{spikeMem, memorizeOpDataOutputIndex}
                                    {addNode, 0},
@@ -113,8 +104,6 @@ std::shared_ptr<Node> Leaky(const std::string &name) {
         /*forcedInputsCategory=*/{},
         /*name*/ "leaky");
 
-    // addProducer(metaOp, 1, {1,2}, "memorizeInit1");
-    // addProducer(metaOp, 2, {1,2}, "memorizeInit2");
     return metaOp;
 }
@@ -148,15 +148,15 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
     }
 
     SECTION("Leaky") {
-        auto myLeaky = Leaky();
+        auto myLeaky = Leaky(10, 1.0, 0.9);
         auto op = std::static_pointer_cast<OperatorTensor>(myLeaky->getOperator());
 
-        // 2 inputs :
-        //  1 for the actual input data,
-        //  1 for the Memorize init tensor.
         auto inputs = myLeaky->inputs();
 
+        // Two Memorize nodes + real data input
         REQUIRE(myLeaky->nbInputs() == 3);
+        // Outputs for spike and membrane potential + 2 Memorize nodes
+        REQUIRE(myLeaky->nbOutputs() == 4);
         REQUIRE(true);
     }
 }
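To relate the test's Leaky(10, 1.0, 0.9) arguments to the dynamics, here is a standalone simulation of the scalar recurrence over the 10 time steps with a constant input, reusing the update from the sketch above (illustrative only; this is not part of the test suite):

    #include <cstdio>

    int main() {
        const int   nbTimeSteps = 10;
        const float beta = 0.9f, uth = 1.0f, input = 0.5f;
        float u = 0.0f, s = 0.0f;
        for (int t = 0; t < nbTimeSteps; ++t) {
            // Same recurrence the micrograph unrolls: integrate, then soft reset.
            u = input + beta * u - s * uth;
            s = (u - uth > 0.0f) ? 1.0f : 0.0f;
            std::printf("t=%d U=%.3f S=%.0f\n", t, u, s);
        }
        return 0;
    }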