diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp index c4ceccf530e7fca8939aeec92067b94f7a6bde80..975fcffaa481ae7c319b58cbaf8f3d0c074578ac 100644 --- a/include/aidge/operator/MetaOperatorDefs.hpp +++ b/include/aidge/operator/MetaOperatorDefs.hpp @@ -305,10 +305,17 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels, bool noBias = false, const std::string &name = ""); +enum class LeakyReset { + Subtraction, + ToZero +}; + + std::shared_ptr<MetaOperator_Op> LeakyOp(); std::shared_ptr<Node> Leaky(const int nbTimeSteps, const float beta, const float threshold = 1.0, + const LeakyReset resetType = LeakyReset::Subtraction, const std::string &name = ""); } // namespace Aidge diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp index d1eec290741ac6477f3d75ffb4457ea4d21421cd..31e3a009953c8d5b938dfe6fb888d911bef1e066 100644 --- a/python_binding/graph/pybind_GraphView.cpp +++ b/python_binding/graph/pybind_GraphView.cpp @@ -30,6 +30,7 @@ void init_GraphView(py::module& m) { :param path: save location :type path: str )mydelimiter") + .def("set_name", &GraphView::setName, py::arg("name")) .def("inputs", (std::vector<std::pair<NodePtr, IOIndex_t>> (GraphView::*)() const) &GraphView::inputs) .def("outputs", (std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> (GraphView::*)() const) &GraphView::outputs) .def("in_view", (bool (GraphView::*)(const NodePtr&) const) &GraphView::inView) diff --git a/python_binding/operator/pybind_LRN.cpp b/python_binding/operator/pybind_LRN.cpp index f802152ba77f1506ac9d93284ecbe4a589b7de74..8807eb04081396b03db1076cf2e4b83a222b5f09 100644 --- a/python_binding/operator/pybind_LRN.cpp +++ b/python_binding/operator/pybind_LRN.cpp @@ -34,7 +34,7 @@ void init_LRN(py::module& m) { .def_static("attributes_name", []() { std::vector<std::string> result; auto attributes = LRN_Op::attributesName(); - for (size_t i = 0; attributes[i] != nullptr; ++i) { + for 
(size_t i = 0; i < std::size(EnumStrings<LRN_Op::Attr>::data); ++i) { result.emplace_back(attributes[i]); } return result; diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp index 2b2cdea12fee04e88ccb715abebf9da768758de3..9e266cfe216406d89881dd0b9e913edbea7fbaab 100644 --- a/python_binding/operator/pybind_MetaOperatorDefs.cpp +++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp @@ -400,12 +400,25 @@ void declare_LSTMOp(py::module &m) { )mydelimiter"); } +void declare_LeakyResetEnum(py::module &m) { + py::enum_<LeakyReset>(m, "leaky_reset", R"doc( + Enumeration for the Leaky neuron reset mode. + + Subtraction: Membrane potential is subtracted by threshold upon spiking. + ToZero : Membrane potential is forced to 0 upon spiking. + )doc") + .value("subtraction", LeakyReset::Subtraction) + .value("to_zero", LeakyReset::ToZero) + .export_values(); +} + void declare_LeakyOp(py::module &m) { m.def("Leaky", &Leaky, py::arg("nb_timesteps"), py::arg("beta"), py::arg("threshold") = 1.0, + py::arg("reset") = LeakyReset::Subtraction, py::arg("name") = "", R"mydelimiter( Initialize a Leaky neuron operator. 
@@ -440,6 +453,7 @@ void init_MetaOperatorDefs(py::module &m) { declare_PaddedMaxPoolingOp<2>(m); // declare_PaddedMaxPoolingOp<3>(m); declare_LSTMOp(m); + declare_LeakyResetEnum(m); declare_LeakyOp(m); py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperatorOp", py::multiple_inheritance()) diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp index a789ea4b1228345e5e334016bebbe5d773428908..b372fd3929dd3abca12956d00e2b2effcc49ce0e 100644 --- a/src/graph/GraphView.cpp +++ b/src/graph/GraphView.cpp @@ -809,18 +809,19 @@ bool Aidge::GraphView::add(std::set<std::shared_ptr<Node>> otherNodes, bool incl } bool orderUnicity = true; - // List only the nodes that are not already present in current graph std::set<NodePtr> nodesToAdd; std::set_difference(otherNodes.begin(), otherNodes.end(), mNodes.begin(), mNodes.end(), std::inserter(nodesToAdd, nodesToAdd.begin())); // Check no name is common with the name in the current Graph for (auto node : nodesToAdd) { - if (mNodeRegistry.find(node->name()) != mNodeRegistry.end()){ - std::string newName = node->createUniqueName(node->name()); - fmt::print("Warning: node name \"{}\" is a duplicate, renaming to {}.\n", node->name(), newName); - node->setName(newName); - + if (mNodeRegistry.find(node->name()) != mNodeRegistry.end()) { + std::string newName = node->createUniqueName(node->name()); + while (mNodeRegistry.find(newName) != mNodeRegistry.end()) { + newName = node->createUniqueName(newName + "_1"); + } + Log::notice("node name \"{}\" is a duplicate, renaming to {}.\n", node->name(), newName); + node->setName(newName); } } // List the nodes to rank, initially all the nodes in the GraphView diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp index 0dec30c2f2f2ffcb0f83740c863d46d7169d2f06..f791ab31ceb61b496382bf5e43e729e186257164 100644 --- a/src/graph/Node.cpp +++ b/src/graph/Node.cpp @@ -93,6 +93,7 @@ std::string Aidge::Node::createUniqueName(std::string baseName) nameAlreadyUsed = 
false; for (auto graphView : views()) { if (graphView->inView(newName)) { + Log::info("Node::createUniqueName(): name '{}' already used in graph '{}'", newName, graphView->name()); nameAlreadyUsed = true; break; } diff --git a/src/operator/MetaOperatorDefs/Leaky.cpp b/src/operator/MetaOperatorDefs/Leaky.cpp index c5e8ab3f10bf56b87479d7c5e7b5a71449884304..b5dc65cca88bc22f458ba37e1b893bab57354a8d 100644 --- a/src/operator/MetaOperatorDefs/Leaky.cpp +++ b/src/operator/MetaOperatorDefs/Leaky.cpp @@ -16,95 +16,100 @@ constexpr auto memorizeOpDataOutputRecIndex = 1; std::shared_ptr<Node> Leaky(const int nbTimeSteps, const float beta, const float threshold, + const LeakyReset resetType, const std::string &name) { auto microGraph = std::make_shared<GraphView>(); - auto inputNode = Identity((!name.empty()) ? name + "_input" : ""); - auto addNode = Add(!name.empty() ? name + "_add" : ""); - auto mulNode = Mul(!name.empty() ? name + "_mul" : ""); - auto subNode = Sub(!name.empty() ? name + "_sub" : ""); - auto hsNode = Heaviside(0, !name.empty() ? name + "_hs" : ""); - auto subNode2 = Sub(!name.empty() ? name + "_threshold" : ""); - auto reset = Mul(!name.empty() ? name + "_reset" : ""); + /* + * U[t] = Input[T] + beta * U[T-1] - S[T-1] * U_th + * with S[T] = | 1, if U[T] - U_th > 0 + * | 0 otherwise + */ + + auto input = Identity((!name.empty()) ? name + "_input" : ""); + auto decay = Mul(!name.empty() ? name + "_mul" : ""); + auto spike = Heaviside(0, !name.empty() ? name + "_hs" : ""); + auto subNode2 = Sub(!name.empty() ? name + "_threshold" : ""); auto betaTensor = std::make_shared<Tensor>(beta); auto uthTensor = std::make_shared<Tensor>(static_cast<float>(threshold)); - uniformFiller<float>(uthTensor, threshold, threshold); - auto decayRate = Producer(betaTensor, "leaky_beta", true); auto uth = Producer(uthTensor, "leaky_uth", true); + auto potentialMem = Memorize(nbTimeSteps, (!name.empty()) ? 
name + "_potential" : ""); + auto spikeMem = Memorize(nbTimeSteps, (!name.empty()) ? name + "_spike" : ""); + uniformFiller<float>(uthTensor, threshold, threshold); - auto potentialMem = - Memorize(nbTimeSteps, (!name.empty()) ? name + "_potential" : ""); - auto spikeMem = - Memorize(nbTimeSteps, (!name.empty()) ? name + "_spike" : ""); - - // U[t] = Input[T] + beta * U[T-1] - S[T-1] * U_th - // with S[T] = | 1, if U[T] - U_th > 0 - // | 0 otherwise - - // beta * U[T-1] - decayRate->addChild(/*otherNode=*/mulNode, /*outId=*/0, /*otherInId=*/1); - potentialMem->addChild(mulNode, 1, 0); - - // Input[T] + beta * U[T-1] - mulNode->addChild(/*otherNode=*/addNode, /*outId=*/0, /*otherInId=*/1); - inputNode->addChild(/*otherNode=*/addNode, /*outId=*/0, /*otherInId=*/0); - - // S[T-1] * U_th - spikeMem->addChild(reset, - /*outId=*/memorizeOpDataOutputRecIndex, - /*otherInId=*/0); - - // TODO(#219) Handle hard/soft reset - uth->addChild(reset, 0, 1); + // Common connections + decayRate->addChild(decay, 0, 1); + potentialMem->addChild(decay, 1, 0); + + std::shared_ptr<Node> potentialNode; // Node containing the final potential value + + if (resetType == LeakyReset::Subtraction) { + auto decayPlusInput = Add(!name.empty() ? name + "_add" : ""); + decay->addChild(decayPlusInput, 0, 1); + input->addChild(decayPlusInput, 0, 0); + + auto potentialSubReset = Sub(!name.empty() ? name + "_sub" : ""); + auto reset = Mul(!name.empty() ? name + "_reset" : ""); + + spikeMem->addChild(reset, 1, 0); + uth->addChild(reset, 0, 1); + + decayPlusInput->addChild(potentialSubReset, 0, 0); + reset->addChild(potentialSubReset, 0, 1); + + potentialSubReset->addChild(potentialMem, 0, 0); + + potentialNode = potentialSubReset; + microGraph->add({decayPlusInput, potentialSubReset, reset}); + + } else if (resetType == LeakyReset::ToZero) { + auto oneMinusSpike = Sub(!name.empty() ? 
name + "_one_minus_spike" : ""); + auto one = Producer(std::make_shared<Tensor>(1.0f), "one", true); + auto finalMul = Mul(!name.empty() ? name + "_final" : ""); + auto decayPlusInput = Add(!name.empty() ? name + "_add" : ""); + + one->addChild(oneMinusSpike, 0, 0); + spikeMem->addChild(oneMinusSpike, 1, 1); + + // finalMul = (1-S[t-1]) * (decay) + oneMinusSpike->addChild(finalMul, 0, 0); + decay->addChild(finalMul, 0, 1); + + // NOTE(review): removed a duplicated wiring block that reconnected + // oneMinusSpike to finalMul input 1, clobbering the decay connection above. + + // (1-S[t-1]) * (decay) + WX[t] + finalMul->addChild(decayPlusInput, 0, 0); + input->addChild(decayPlusInput, 0, 1); + + decayPlusInput->addChild(potentialMem, 0, 0); + potentialNode = decayPlusInput; + + microGraph->add({oneMinusSpike, one, finalMul, decayPlusInput}); + } + + // Threshold comparison : (U[t] - Uth) + potentialNode->addChild(subNode2, 0, 0); + uth->addChild(subNode2, 0, 1); - // Input[T] + beta * U[T-1] - S[T-1] * U_th - addNode->addChild(subNode, 0, 0); - reset->addChild(subNode, 0, 1); + // heaviside + subNode2->addChild(spike, 0, 0); + spike->addChild(spikeMem, 0, 0); - // U[t] = (Input[T] + beta * U[T-1]) - S[T-1] - subNode->addChild(potentialMem, 0, 0); + microGraph->add(input); + microGraph->add({decay, potentialMem, decayRate, + uth, spikeMem, spike, subNode2}, false); - // U[T] - U_th - subNode->addChild(subNode2, 0, 0); - uth->addChild(subNode2, 0, 1); + microGraph->setOrderedInputs( + {{input, 0}, {potentialMem, 1}, {spikeMem, 1}}); - // with S[T] = | 1, if U[T] - U_th > 0 - subNode2->addChild(hsNode, 0, 0); - hsNode->addChild(spikeMem, 0, 0); - - microGraph->add(inputNode); - microGraph->add({addNode, - mulNode, - potentialMem, - decayRate, - uth, - spikeMem, - hsNode, - subNode, - subNode2, - reset}, - false); + // Use potentialNode for membrane potential output + microGraph->setOrderedOutputs({{potentialNode, 0}, {spike, 0}}); - microGraph->setOrderedInputs( - {{inputNode, 0}, {potentialMem, 1}, {spikeMem, 1}}); - - // NOTE: Outputs
are NOT the memory nodes (as it is done in LSTM), to avoid - // producing data during init. This way, we can plug an operator after - // our node, and get correct results. - microGraph->setOrderedOutputs({//{potentialMem, memorizeOpDataOutputIndex}, - //{spikeMem, memorizeOpDataOutputIndex} - {subNode, 0}, - {hsNode, 0}}); - - auto metaOp = MetaOperator(/*type*/ "Leaky", - /*graph*/ microGraph, - /*forcedInputsCategory=*/{}, - /*name*/ "leaky"); - - return metaOp; + return MetaOperator("Leaky", microGraph, {}, name); } std::shared_ptr<MetaOperator_Op> LeakyOp() { diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp index cf44280551fb04f4636fa72a23b32a51219020da..c58eb0f644b490f8ba4e4e673ab40ea690e3f38e 100644 --- a/unit_tests/operator/Test_MetaOperator.cpp +++ b/unit_tests/operator/Test_MetaOperator.cpp @@ -159,4 +159,17 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") { REQUIRE(myLeaky->nbOutputs() == 4); REQUIRE(true); } + + SECTION("Leaky(Reset to zero)") { + auto myLeaky = Leaky(10, 1.0, 0.9, LeakyReset::ToZero); + auto op = std::static_pointer_cast<OperatorTensor>(myLeaky->getOperator()); + + auto inputs = myLeaky->inputs(); + + // Two memorize nodes + real data input + REQUIRE(myLeaky->nbInputs() == 3); + // Outputs for spike and memory + 2 Memorize node + REQUIRE(myLeaky->nbOutputs() == 4); + REQUIRE(true); + } }