Commit 51abf9e6 authored by Olivier BICHLER

Merge branch 'metaoperator-leaky' into 'dev'

[Add] Operator Leaky_MetaOperator

See merge request !283
parents e82fd3fc d8b5b716
2 merge requests: !318 [Upd] release version 0.5.0, !283 [Add] Operator Leaky_MetaOperator
Pipeline #63134 passed
@@ -19,11 +19,11 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/OpArgs.hpp" // Sequential
-#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/AvgPooling.hpp"
-#include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Pad.hpp"
 #include "aidge/operator/Sigmoid.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
@@ -31,128 +31,174 @@
(Formatting-only hunk: the existing Padded* helper declarations are reflowed; the post-merge text is shown.)

namespace Aidge {

template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<Node>
PaddedConv(DimSize_t in_channels,
           DimSize_t out_channels,
           const std::array<DimSize_t, DIM> &kernel_dims,
           const std::string &name = "",
           const std::array<DimSize_t, DIM> &stride_dims =
               create_array<DimSize_t, DIM>(1),
           const std::array<DimSize_t, 2 * DIM> &padding_dims =
               create_array<DimSize_t, 2 * DIM>(0),
           const std::array<DimSize_t, DIM> &dilation_dims =
               create_array<DimSize_t, DIM>(1),
           bool no_bias = false);

template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<MetaOperator_Op>
PaddedConv_Op(const std::array<DimSize_t, DIM> &kernel_dims,
              const std::array<DimSize_t, DIM> &stride_dims =
                  create_array<DimSize_t, DIM>(1),
              const std::array<DimSize_t, 2 * DIM> &padding_dims =
                  create_array<DimSize_t, 2 * DIM>(0),
              const std::array<DimSize_t, DIM> &dilation_dims =
                  create_array<DimSize_t, DIM>(1));

// helper with C-style array instead of std::array for kernel_dims to allow
// automatic template DIM deduction
template <DimSize_t DIM>
extern std::shared_ptr<Node>
PaddedConv(DimSize_t in_channels,
           DimSize_t out_channels,
           DimSize_t const (&kernel_dims)[DIM],
           const std::string &name = "",
           const std::array<DimSize_t, DIM> &stride_dims =
               create_array<DimSize_t, DIM>(1),
           const std::array<DimSize_t, 2 * DIM> &padding_dims =
               create_array<DimSize_t, 2 * DIM>(0),
           const std::array<DimSize_t, DIM> &dilation_dims =
               create_array<DimSize_t, DIM>(1),
           bool no_bias = false);

////////////////////////////////////////////////////////////////////////////////

template <std::array<DimSize_t, 1>::size_type DIM>
std::shared_ptr<Node>
PaddedConvDepthWise(const DimSize_t nb_channels,
                    const std::array<DimSize_t, DIM> &kernel_dims,
                    const std::string &name = "",
                    const std::array<DimSize_t, DIM> &stride_dims =
                        create_array<DimSize_t, DIM>(1),
                    const std::array<DimSize_t, 2 * DIM> &padding_dims =
                        create_array<DimSize_t, 2 * DIM>(0),
                    const std::array<DimSize_t, DIM> &dilation_dims =
                        create_array<DimSize_t, DIM>(1),
                    bool no_bias = false);

template <std::array<DimSize_t, 1>::size_type DIM>
std::shared_ptr<MetaOperator_Op>
PaddedConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                       const std::array<DimSize_t, DIM> &stride_dims =
                           create_array<DimSize_t, DIM>(1),
                       const std::array<DimSize_t, 2 * DIM> &padding_dims =
                           create_array<DimSize_t, 2 * DIM>(0),
                       const std::array<DimSize_t, DIM> &dilation_dims =
                           create_array<DimSize_t, DIM>(1));

// helper with C-style array instead of std::array for kernel_dims to allow
// automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node>
PaddedConvDepthWise(const DimSize_t nb_channels,
                    DimSize_t const (&kernel_dims)[DIM],
                    const std::string &name = "",
                    const std::array<DimSize_t, DIM> &stride_dims =
                        create_array<DimSize_t, DIM>(1),
                    const std::array<DimSize_t, 2 * DIM> &padding_dims =
                        create_array<DimSize_t, 2 * DIM>(0),
                    const std::array<DimSize_t, DIM> &dilation_dims =
                        create_array<DimSize_t, DIM>(1),
                    bool no_bias = false);

////////////////////////////////////////////////////////////////////////////////

template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<Node>
PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                 const std::string &name = "",
                 const std::array<DimSize_t, DIM> &stride_dims =
                     create_array<DimSize_t, DIM>(1),
                 const std::array<DimSize_t, 2 * DIM> &padding_dims =
                     create_array<DimSize_t, 2 * DIM>(0));

template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<MetaOperator_Op>
PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                    const std::array<DimSize_t, DIM> &stride_dims =
                        create_array<DimSize_t, DIM>(1),
                    const std::array<DimSize_t, 2 * DIM> &padding_dims =
                        create_array<DimSize_t, 2 * DIM>(0));

// helper with C-style array instead of std::array for kernel_dims to allow
// automatic template DIM deduction
template <DimSize_t DIM>
extern std::shared_ptr<Node>
PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
                 const std::string &name = "",
                 const std::array<DimSize_t, DIM> &stride_dims =
                     create_array<DimSize_t, DIM>(1),
                 const std::array<DimSize_t, 2 * DIM> &padding_dims =
                     create_array<DimSize_t, 2 * DIM>(0));

////////////////////////////////////////////////////////////////////////////////

template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node>
PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                 const std::string &name = "",
                 const std::array<DimSize_t, DIM> &stride_dims =
                     create_array<DimSize_t, DIM>(1),
                 const std::array<DimSize_t, 2 * DIM> &padding_dims =
                     create_array<DimSize_t, 2 * DIM>(0),
                 bool ceil_mode = false) {
    auto graph = Sequential(
        {Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
         MaxPooling(kernel_dims,
                    (!name.empty()) ? name + "_maxpooling" : "",
                    stride_dims,
                    ceil_mode)});

    return MetaOperator(
        ("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(),
        graph,
        {},
        name);
}

template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<MetaOperator_Op>
PaddedMaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                    const std::array<DimSize_t, DIM> &stride_dims =
                        create_array<DimSize_t, DIM>(1),
                    const std::array<DimSize_t, 2 * DIM> &padding_dims =
                        create_array<DimSize_t, 2 * DIM>(0),
                    bool ceil_mode = false) {
    auto graph =
        Sequential({Pad<DIM>(padding_dims, ""),
                    MaxPooling(kernel_dims, "", stride_dims, ceil_mode)});

    return std::make_shared<MetaOperator_Op>(
        ("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(),
        graph);
}

// helper with C-style array instead of std::array for kernel_dims to allow
// automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node>
PaddedMaxPooling(DimSize_t const (&kernel_dims)[DIM],
                 const std::string &name = "",
                 const std::array<DimSize_t, DIM> &stride_dims =
                     create_array<DimSize_t, DIM>(1),
                 const std::array<DimSize_t, 2 * DIM> &padding_dims =
                     create_array<DimSize_t, 2 * DIM>(0),
                 bool ceil_mode = false) {
    return PaddedMaxPooling(to_array(kernel_dims),
                            name,
                            stride_dims,
                            padding_dims,
                            ceil_mode);
}

////////////////////////////////////////////////////////////////////////////////
@@ -161,10 +207,16 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels,
                            DimSize_t hidden_channels,
                            DimSize_t seq_length,
                            bool noBias = false,
-                           const std::string& name = "");
+                           const std::string &name = "");
 
 std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length);
 
+std::shared_ptr<MetaOperator_Op> LeakyOp();
+
+std::shared_ptr<Node> Leaky(const int nbTimeSteps,
+                            const float beta,
+                            const float threshold = 1.0,
+                            const std::string &name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */
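
As a usage illustration (an editor's sketch, not part of this merge request): given the Leaky declaration above, the meta-operator could be built and inspected from C++ roughly as follows. The expected input/output counts are taken from the unit test at the bottom of this diff; the node name "lif0" is an arbitrary example.

#include <memory>

#include "aidge/operator/MetaOperatorDefs.hpp"

using namespace Aidge;

int main() {
    // 10 timesteps, decay rate beta = 0.9, firing threshold U_th = 1.0
    auto lif = Leaky(/*nbTimeSteps=*/10, /*beta=*/0.9f, /*threshold=*/1.0f, "lif0");

    // One data input plus two Memorize loop inputs; spike and potential
    // outputs plus two Memorize outputs (cf. the test case below).
    return (lif->nbInputs() == 3 && lif->nbOutputs() == 4) ? 0 : 1;
}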
@@ -179,6 +179,14 @@ void declare_LSTMOp(py::module &m) {
         py::arg("seq_length"));
 }
 
+void declare_LeakyOp(py::module &m) {
+    m.def("Leaky", &Leaky,
+          py::arg("nb_timesteps"),
+          py::arg("beta"),
+          py::arg("threshold") = 1.0,
+          py::arg("name") = "");
+}
+
 void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedConvOp<1>(m);
   declare_PaddedConvOp<2>(m);
@@ -193,6 +201,7 @@ void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedMaxPoolingOp<2>(m);
   // declare_PaddedMaxPoolingOp<3>(m);
   declare_LSTMOp(m);
+  declare_LeakyOp(m);
 
   py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperatorOp", py::multiple_inheritance())
       .def(py::init<const char *, const std::shared_ptr<GraphView>&, const std::vector<InputCategory>&>(),
......
@@ -11,7 +11,6 @@
 
 #include "aidge/operator/MetaOperatorDefs.hpp"
 
-#include <array>
 #include <memory>
 #include <string>
@@ -20,7 +19,6 @@
 #include "aidge/operator/Mul.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/Identity.hpp"
-#include "aidge/operator/Concat.hpp"
 #include "aidge/operator/Tanh.hpp"
 
 namespace Aidge {
......
#include "aidge/filler/Filler.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/Heaviside.hpp"
#include "aidge/operator/Identity.hpp"
#include "aidge/operator/Memorize.hpp"
#include "aidge/operator/MetaOperatorDefs.hpp"
#include "aidge/operator/Mul.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/operator/Sub.hpp"
namespace Aidge {
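// Memorize exposes two outputs: the data at index 0 and the recurrent
// state at index 1 (hence the two constants below).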
constexpr auto memorizeOpDataOutputRecIndex = 1;
constexpr auto memorizeOpDataOutputIndex = 0;
std::shared_ptr<Node> Leaky(const int nbTimeSteps,
const float beta,
const float threshold,
const std::string &name) {
auto microGraph = std::make_shared<GraphView>();
auto inputNode = Identity((!name.empty()) ? name + "_input" : "");
auto addNode = Add(!name.empty() ? name + "_add" : "");
auto mulNode = Mul(!name.empty() ? name + "_mul" : "");
auto subNode = Sub(!name.empty() ? name + "_sub" : "");
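// Heaviside step used for spike generation; the first argument is the
// value the step function takes at input == 0.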
auto hsNode = Heaviside(0, !name.empty() ? name + "_hs" : "");
auto subNode2 = Sub(!name.empty() ? name + "_threshold" : "");
auto reset = Mul(!name.empty() ? name + "_reset" : "");
auto betaTensor = std::make_shared<Tensor>(beta);
auto uthTensor = std::make_shared<Tensor>(static_cast<float>(threshold));
uniformFiller<float>(uthTensor, threshold, threshold);
auto decayRate = Producer(betaTensor, "leaky_beta", true);
auto uth = Producer(uthTensor, "leaky_uth", true);
auto potentialMem =
Memorize(nbTimeSteps, (!name.empty()) ? name + "_potential" : "");
auto spikeMem =
Memorize(nbTimeSteps, (!name.empty()) ? name + "_spike" : "");
// U[T] = Input[T] + beta * U[T-1] - S[T-1] * U_th
// with S[T] = | 1, if U[T] - U_th > 0
// | 0 otherwise
// beta * U[T-1]
decayRate->addChild(/*otherNode=*/mulNode, /*outId=*/0, /*otherInId=*/1);
potentialMem->addChild(mulNode, 1, 0);
// Input[T] + beta * U[T-1]
mulNode->addChild(/*otherNode=*/addNode, /*outId=*/0, /*otherInId=*/1);
inputNode->addChild(/*otherNode=*/addNode, /*outId=*/0, /*otherInId=*/0);
// S[T-1] * U_th
spikeMem->addChild(reset,
/*outId=*/memorizeOpDataOutputRecIndex,
/*otherInId=*/0);
// TODO(#219) Handle hard/soft reset
uth->addChild(reset, 0, 1);
// Input[T] + beta * U[T-1] - S[T-1] * U_th
addNode->addChild(subNode, 0, 0);
reset->addChild(subNode, 0, 1);
// U[T] = (Input[T] + beta * U[T-1]) - S[T-1] * U_th
subNode->addChild(potentialMem, 0, 0);
// U[T] - U_th
subNode->addChild(subNode2, 0, 0);
uth->addChild(subNode2, 0, 1);
// with S[T] = | 1, if U[T] - U_th > 0
subNode2->addChild(hsNode, 0, 0);
hsNode->addChild(spikeMem, 0, 0);
microGraph->add(inputNode);
microGraph->add({addNode,
mulNode,
potentialMem,
decayRate,
uth,
spikeMem,
hsNode,
subNode,
subNode2,
reset},
false);
microGraph->setOrderedInputs(
{{inputNode, 0}, {potentialMem, 1}, {spikeMem, 1}});
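// Graph inputs: #0 is the data; #1 and #2 feed the initial states of the
// two Memorize nodes (cf. the unit test below).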
// NOTE: Outputs are NOT the memory nodes (as it is done in LSTM), to avoid
// producing data during init. This way, we can plug an operator after
// our node, and get correct results.
microGraph->setOrderedOutputs({//{potentialMem, memorizeOpDataOutputIndex},
//{spikeMem, memorizeOpDataOutputIndex}
{subNode, 0},
{hsNode, 0}});
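// NOTE: the factory's "name" argument is not forwarded here; the
// resulting meta-operator node is always named "leaky".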
auto metaOp = MetaOperator(/*type*/ "Leaky",
/*graph*/ microGraph,
/*forcedInputsCategory=*/{},
/*name*/ "leaky");
return metaOp;
}
std::shared_ptr<MetaOperator_Op> LeakyOp() {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Not implemented yet");
}
} // namespace Aidge
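
For context, a stand-alone sketch (editor's addition, not from the merge) of the hard-reset LIF recurrence that the micro-graph above assembles from Mul/Add/Sub/Heaviside/Memorize nodes; the constants, input sequence, and zero initial state are illustrative assumptions:

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    const float beta = 0.9f; // decay rate (the "leaky_beta" Producer)
    const float uth = 1.0f;  // firing threshold (the "leaky_uth" Producer)
    const std::vector<float> input = {0.4f, 0.4f, 0.4f, 0.4f, 0.4f};

    float u = 0.0f; // membrane potential U[T-1], assumed zero-initialized
    float s = 0.0f; // previous spike S[T-1]
    for (std::size_t t = 0; t < input.size(); ++t) {
        // U[T] = Input[T] + beta * U[T-1] - S[T-1] * U_th
        u = input[t] + beta * u - s * uth;
        // S[T] = 1 if U[T] - U_th > 0, else 0 (Heaviside)
        s = (u - uth > 0.0f) ? 1.0f : 0.0f;
        std::printf("T=%zu U=%.3f S=%.0f\n", t, u, s);
    }
    return 0;
}

With these numbers the neuron charges for two steps, fires at T=2, and the subtraction of U_th resets the potential at T=3, which is the reset path wired through the spikeMem and reset nodes above.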
@@ -23,6 +23,7 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Testing.hpp"
 #include "aidge/recipes/Recipes.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 using namespace Aidge;
@@ -145,4 +146,17 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         REQUIRE(g->getNodes().size() == 33);
     }
 
+    SECTION("Leaky") {
+        auto myLeaky = Leaky(10, 1.0, 0.9);
+        auto op =
+            std::static_pointer_cast<OperatorTensor>(myLeaky->getOperator());
+
+        auto inputs = myLeaky->inputs();
+
+        // Two Memorize nodes + the real data input
+        REQUIRE(myLeaky->nbInputs() == 3);
+        // Outputs for spike and membrane potential + the two Memorize nodes
+        REQUIRE(myLeaky->nbOutputs() == 4);
+        REQUIRE(true);
+    }
 }