diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 481a7795e24acd006acfc66a0fccde1b8da747e7..c9de36b1f032f986f6bd4461198ecff77d64d1f7 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -19,11 +19,11 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/OpArgs.hpp" // Sequential
-#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/AvgPooling.hpp"
-#include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Pad.hpp"
 #include "aidge/operator/Sigmoid.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
@@ -31,128 +31,174 @@
 namespace Aidge {
-
 template <std::array<DimSize_t, 1>::size_type DIM>
-extern std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
-                                        DimSize_t out_channels,
-                                        const std::array<DimSize_t, DIM> &kernel_dims,
-                                        const std::string& name = "",
-                                        const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                        const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                        const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                        bool no_bias = false);
+extern std::shared_ptr<Node>
+PaddedConv(DimSize_t in_channels,
+           DimSize_t out_channels,
+           const std::array<DimSize_t, DIM> &kernel_dims,
+           const std::string &name = "",
+           const std::array<DimSize_t, DIM> &stride_dims =
+               create_array<DimSize_t, DIM>(1),
+           const std::array<DimSize_t, 2 * DIM> &padding_dims =
+               create_array<DimSize_t, 2 * DIM>(0),
+           const std::array<DimSize_t, DIM> &dilation_dims =
+               create_array<DimSize_t, DIM>(1),
+           bool no_bias = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-extern std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
-    const std::array<DimSize_t, DIM> &kernel_dims,
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1));
-
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+extern std::shared_ptr<MetaOperator_Op>
+PaddedConv_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+              const std::array<DimSize_t, DIM> &stride_dims =
+                  create_array<DimSize_t, DIM>(1),
+              const std::array<DimSize_t, 2 * DIM> &padding_dims =
+                  create_array<DimSize_t, 2 * DIM>(0),
+              const std::array<DimSize_t, DIM> &dilation_dims =
+                  create_array<DimSize_t, DIM>(1));
+
+// helper with C-style array instead of std::array for kernel_dims to allow
+// automatic template DIM deduction
 template <DimSize_t DIM>
-extern std::shared_ptr<Node> PaddedConv(
-    DimSize_t in_channels,
-    DimSize_t out_channels,
-    DimSize_t const (&kernel_dims)[DIM],
-    const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-    bool no_bias = false);
+extern std::shared_ptr<Node>
+PaddedConv(DimSize_t in_channels,
+           DimSize_t out_channels,
+           DimSize_t const (&kernel_dims)[DIM],
+           const std::string &name = "",
+           const std::array<DimSize_t, DIM> &stride_dims =
+               create_array<DimSize_t, DIM>(1),
+           const std::array<DimSize_t, 2 * DIM> &padding_dims =
+               create_array<DimSize_t, 2 * DIM>(0),
+           const std::array<DimSize_t, DIM> &dilation_dims =
+               create_array<DimSize_t, DIM>(1),
+           bool no_bias = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
-                                          const std::array<DimSize_t, DIM> &kernel_dims,
-                                          const std::string& name = "",
-                                          const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                          const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                          const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                          bool no_bias = false);
+std::shared_ptr<Node>
+PaddedConvDepthWise(const DimSize_t nb_channels,
+                    const std::array<DimSize_t, DIM> &kernel_dims,
+                    const std::string &name = "",
+                    const std::array<DimSize_t, DIM> &stride_dims =
+                        create_array<DimSize_t, DIM>(1),
+                    const std::array<DimSize_t, 2 * DIM> &padding_dims =
+                        create_array<DimSize_t, 2 * DIM>(0),
+                    const std::array<DimSize_t, DIM> &dilation_dims =
+                        create_array<DimSize_t, DIM>(1),
+                    bool no_bias = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
-    const std::array<DimSize_t, DIM> &kernel_dims,
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1));
-
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+std::shared_ptr<MetaOperator_Op>
+PaddedConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                       const std::array<DimSize_t, DIM> &stride_dims =
+                           create_array<DimSize_t, DIM>(1),
+                       const std::array<DimSize_t, 2 * DIM> &padding_dims =
+                           create_array<DimSize_t, 2 * DIM>(0),
+                       const std::array<DimSize_t, DIM> &dilation_dims =
+                           create_array<DimSize_t, DIM>(1));
+
+// helper with C-style array instead of std::array for kernel_dims to allow
+// automatic template DIM deduction
 template <DimSize_t DIM>
-inline std::shared_ptr<Node> PaddedConvDepthWise(
-    const DimSize_t nb_channels,
-    DimSize_t const (&kernel_dims)[DIM],
-    const std::string& name = "",
-    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-    bool no_bias = false);
+inline std::shared_ptr<Node>
+PaddedConvDepthWise(const DimSize_t nb_channels,
+                    DimSize_t const (&kernel_dims)[DIM],
+                    const std::string &name = "",
+                    const std::array<DimSize_t, DIM> &stride_dims =
+                        create_array<DimSize_t, DIM>(1),
+                    const std::array<DimSize_t, 2 * DIM> &padding_dims =
+                        create_array<DimSize_t, 2 * DIM>(0),
+                    const std::array<DimSize_t, DIM> &dilation_dims =
+                        create_array<DimSize_t, DIM>(1),
+                    bool no_bias = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 
-
 template <std::array<DimSize_t, 1>::size_type DIM>
-extern std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
-                                              const std::string& name = "",
-                                              const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                              const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
-
+extern std::shared_ptr<Node>
+PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+                 const std::string &name = "",
+                 const std::array<DimSize_t, DIM> &stride_dims =
+                     create_array<DimSize_t, DIM>(1),
+                 const std::array<DimSize_t, 2 * DIM> &padding_dims =
+                     create_array<DimSize_t, 2 * DIM>(0));
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-extern std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
-                                                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                                            const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
-
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+extern std::shared_ptr<MetaOperator_Op>
+PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                    const std::array<DimSize_t, DIM> &stride_dims =
+                        create_array<DimSize_t, DIM>(1),
+                    const std::array<DimSize_t, 2 * DIM> &padding_dims =
+                        create_array<DimSize_t, 2 * DIM>(0));
+
+// helper with C-style array instead of std::array for kernel_dims to allow
+// automatic template DIM deduction
 template <DimSize_t DIM>
-extern std::shared_ptr<Node> PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
-                                              const std::string& name = "",
-                                              const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                              const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
+extern std::shared_ptr<Node>
+PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
+                 const std::string &name = "",
+                 const std::array<DimSize_t, DIM> &stride_dims =
+                     create_array<DimSize_t, DIM>(1),
+                 const std::array<DimSize_t, 2 * DIM> &padding_dims =
+                     create_array<DimSize_t, 2 * DIM>(0));
 
 ////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
-                                              const std::string& name = "",
-                                              const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                              const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                              bool ceil_mode = false)
-{
-    auto graph = Sequential({
-        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
-        MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, ceil_mode)
-    });
-
-    return MetaOperator(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph, {}, name);
+inline std::shared_ptr<Node>
+PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+                 const std::string &name = "",
+                 const std::array<DimSize_t, DIM> &stride_dims =
+                     create_array<DimSize_t, DIM>(1),
+                 const std::array<DimSize_t, 2 * DIM> &padding_dims =
+                     create_array<DimSize_t, 2 * DIM>(0),
+                 bool ceil_mode = false) {
+    auto graph = Sequential(
+        {Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
+         MaxPooling(kernel_dims,
+                    (!name.empty()) ? name + "_maxpooling" : "",
name + "_maxpooling" : "", + stride_dims, + ceil_mode)}); + + return MetaOperator( + ("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), + graph, + {}, + name); } template <std::array<DimSize_t, 1>::size_type DIM> -inline std::shared_ptr<MetaOperator_Op> PaddedMaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims, - const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), - const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0), - bool ceil_mode = false) -{ - auto graph = Sequential({ - Pad<DIM>(padding_dims, ""), - MaxPooling(kernel_dims, "", stride_dims, ceil_mode) - }); - - return std::make_shared<MetaOperator_Op>(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph); +inline std::shared_ptr<MetaOperator_Op> +PaddedMaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims, + const std::array<DimSize_t, DIM> &stride_dims = + create_array<DimSize_t, DIM>(1), + const std::array<DimSize_t, 2 * DIM> &padding_dims = + create_array<DimSize_t, 2 * DIM>(0), + bool ceil_mode = false) { + auto graph = + Sequential({Pad<DIM>(padding_dims, ""), + MaxPooling(kernel_dims, "", stride_dims, ceil_mode)}); + + return std::make_shared<MetaOperator_Op>( + ("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), + graph); } -// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction +// helper with C-style array instead of std::array for kernel_dims to allow +// automatic template DIM deduction template <DimSize_t DIM> -inline std::shared_ptr<Node> PaddedMaxPooling( - DimSize_t const (&kernel_dims)[DIM], - const std::string& name = "", - const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), - const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0), - bool ceil_mode= false) -{ - return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims, ceil_mode); +inline std::shared_ptr<Node> +PaddedMaxPooling(DimSize_t const (&kernel_dims)[DIM], + const std::string &name = "", + const std::array<DimSize_t, DIM> &stride_dims = + create_array<DimSize_t, DIM>(1), + const std::array<DimSize_t, 2 * DIM> &padding_dims = + create_array<DimSize_t, 2 * DIM>(0), + bool ceil_mode = false) { + return PaddedMaxPooling(to_array(kernel_dims), + name, + stride_dims, + padding_dims, + ceil_mode); } //////////////////////////////////////////////////////////////////////////////// @@ -161,10 +207,16 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels, DimSize_t hidden_channels, DimSize_t seq_length, bool noBias = false, - const std::string& name = ""); + const std::string &name = ""); std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length); -} // namespace Aidge +std::shared_ptr<MetaOperator_Op> LeakyOp(); +std::shared_ptr<Node> Leaky(const int nbTimeSteps, + const float beta, + const float threshold = 1.0, + const std::string &name = ""); + +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */ diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp index 5f173068af0f1140830d458979ec924c38ade078..b2811fbaab2b6cd33dc2b105f0044cd8a5edbbc7 100644 --- a/python_binding/operator/pybind_MetaOperatorDefs.cpp +++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp @@ -179,6 +179,14 @@ void declare_LSTMOp(py::module &m) { py::arg("seq_length")); } +void declare_LeakyOp(py::module &m) { + m.def("Leaky", &Leaky, + py::arg("nb_timesteps"), + py::arg("beta"), + 
py::arg("threshold") = 1.0, + py::arg("name") = ""); +} + void init_MetaOperatorDefs(py::module &m) { declare_PaddedConvOp<1>(m); declare_PaddedConvOp<2>(m); @@ -193,6 +201,7 @@ void init_MetaOperatorDefs(py::module &m) { declare_PaddedMaxPoolingOp<2>(m); // declare_PaddedMaxPoolingOp<3>(m); declare_LSTMOp(m); + declare_LeakyOp(m); py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperatorOp", py::multiple_inheritance()) .def(py::init<const char *, const std::shared_ptr<GraphView>&, const std::vector<InputCategory>&>(), diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp index 2ed548805010a6cc87950c4d1f7b89edbea4f75c..22c0469b34b52670a910f63604d02f3f8bf6eab7 100644 --- a/src/operator/MetaOperatorDefs/LSTM.cpp +++ b/src/operator/MetaOperatorDefs/LSTM.cpp @@ -11,7 +11,6 @@ #include "aidge/operator/MetaOperatorDefs.hpp" -#include <array> #include <memory> #include <string> @@ -20,7 +19,6 @@ #include "aidge/operator/Mul.hpp" #include "aidge/operator/FC.hpp" #include "aidge/operator/Identity.hpp" -#include "aidge/operator/Concat.hpp" #include "aidge/operator/Tanh.hpp" namespace Aidge { diff --git a/src/operator/MetaOperatorDefs/Leaky.cpp b/src/operator/MetaOperatorDefs/Leaky.cpp new file mode 100644 index 0000000000000000000000000000000000000000..33b2535329e9522b30120102204f2e34f755b9c5 --- /dev/null +++ b/src/operator/MetaOperatorDefs/Leaky.cpp @@ -0,0 +1,113 @@ +#include "aidge/filler/Filler.hpp" +#include "aidge/operator/Add.hpp" +#include "aidge/operator/Heaviside.hpp" +#include "aidge/operator/Identity.hpp" +#include "aidge/operator/Memorize.hpp" +#include "aidge/operator/MetaOperatorDefs.hpp" +#include "aidge/operator/Mul.hpp" +#include "aidge/operator/Producer.hpp" +#include "aidge/operator/Sub.hpp" + +namespace Aidge { + +constexpr auto memorizeOpDataOutputRecIndex = 1; +constexpr auto memorizeOpDataOutputIndex = 0; + +std::shared_ptr<Node> Leaky(const int nbTimeSteps, + const float beta, + const float threshold, + const std::string &name) { + + auto microGraph = std::make_shared<GraphView>(); + + auto inputNode = Identity((!name.empty()) ? name + "_input" : ""); + auto addNode = Add(!name.empty() ? name + "_add" : ""); + auto mulNode = Mul(!name.empty() ? name + "_mul" : ""); + auto subNode = Sub(!name.empty() ? name + "_sub" : ""); + auto hsNode = Heaviside(0, !name.empty() ? name + "_hs" : ""); + auto subNode2 = Sub(!name.empty() ? name + "_threshold" : ""); + auto reset = Mul(!name.empty() ? name + "_reset" : ""); + + auto betaTensor = std::make_shared<Tensor>(beta); + auto uthTensor = std::make_shared<Tensor>(static_cast<float>(threshold)); + uniformFiller<float>(uthTensor, threshold, threshold); + + auto decayRate = Producer(betaTensor, "leaky_beta", true); + auto uth = Producer(uthTensor, "leaky_uth", true); + + auto potentialMem = + Memorize(nbTimeSteps, (!name.empty()) ? name + "_potential" : ""); + auto spikeMem = + Memorize(nbTimeSteps, (!name.empty()) ? 
name + "_spike" : ""); + + // U[t] = Input[T] + beta * U[T-1] - S[T-1] * U_th + // with S[T] = | 1, if U[T] - U_th > 0 + // | 0 otherwise + + // beta * U[T-1] + decayRate->addChild(/*otherNode=*/mulNode, /*outId=*/0, /*otherInId=*/1); + potentialMem->addChild(mulNode, 1, 0); + + // Input[T] + beta * U[T-1] + mulNode->addChild(/*otherNode=*/addNode, /*outId=*/0, /*otherInId=*/1); + inputNode->addChild(/*otherNode=*/addNode, /*outId=*/0, /*otherInId=*/0); + + // S[T-1] * U_th + spikeMem->addChild(reset, + /*outId=*/memorizeOpDataOutputRecIndex, + /*otherInId=*/0); + + // TODO(#219) Handle hard/soft reset + uth->addChild(reset, 0, 1); + + // Input[T] + beta * U[T-1] - S[T-1] * U_th + addNode->addChild(subNode, 0, 0); + reset->addChild(subNode, 0, 1); + + // U[t] = (Input[T] + beta * U[T-1]) - S[T-1] + subNode->addChild(potentialMem, 0, 0); + + // U[T] - U_th + subNode->addChild(subNode2, 0, 0); + uth->addChild(subNode2, 0, 1); + + // with S[T] = | 1, if U[T] - U_th > 0 + subNode2->addChild(hsNode, 0, 0); + hsNode->addChild(spikeMem, 0, 0); + + microGraph->add(inputNode); + microGraph->add({addNode, + mulNode, + potentialMem, + decayRate, + uth, + spikeMem, + hsNode, + subNode, + subNode2, + reset}, + false); + + microGraph->setOrderedInputs( + {{inputNode, 0}, {potentialMem, 1}, {spikeMem, 1}}); + + // NOTE: Outputs are NOT the memory nodes (as it is done in LSTM), to avoid + // producing data during init. This way, we can plug an operator after + // our node, and get correct results. + microGraph->setOrderedOutputs({//{potentialMem, memorizeOpDataOutputIndex}, + //{spikeMem, memorizeOpDataOutputIndex} + {subNode, 0}, + {hsNode, 0}}); + + auto metaOp = MetaOperator(/*type*/ "Leaky", + /*graph*/ microGraph, + /*forcedInputsCategory=*/{}, + /*name*/ "leaky"); + + return metaOp; +} + +std::shared_ptr<MetaOperator_Op> LeakyOp() { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Not implemented yet"); +} +} // namespace Aidge diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp index 6711e1524fe0595b4effd68397f8cb684df590a9..042b04f01bdc1430c8b9f1b9df6951f12b821ed1 100644 --- a/unit_tests/operator/Test_MetaOperator.cpp +++ b/unit_tests/operator/Test_MetaOperator.cpp @@ -23,6 +23,7 @@ #include "aidge/graph/GraphView.hpp" #include "aidge/graph/Testing.hpp" #include "aidge/recipes/Recipes.hpp" +#include "aidge/utils/ErrorHandling.hpp" using namespace Aidge; @@ -145,4 +146,17 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") { REQUIRE(g->getNodes().size() == 33); } + + SECTION("Leaky") { + auto myLeaky = Leaky(10, 1.0, 0.9); + auto op = std::static_pointer_cast<OperatorTensor>(myLeaky->getOperator()); + + auto inputs = myLeaky->inputs(); + + // Two memorize nodes + real data input + REQUIRE(myLeaky->nbInputs() == 3); + // Outputs for spike and memory + 2 Memorize node + REQUIRE(myLeaky->nbOutputs() == 4); + REQUIRE(true); + } }