Skip to content
Snippets Groups Projects
Commit be665712 authored by Jerome Hue's avatar Jerome Hue Committed by Olivier BICHLER
Browse files

chore: Format modified files

parent a570e3c0
No related branches found
No related tags found
2 merge requests!318[Upd] release version 0.5.0,!283[Add] Operator Leaky_MetaOperator
......@@ -19,11 +19,11 @@
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/graph/OpArgs.hpp" // Sequential
#include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/AvgPooling.hpp"
#include "aidge/operator/MaxPooling.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/ConvDepthWise.hpp"
#include "aidge/operator/MaxPooling.hpp"
#include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/Pad.hpp"
#include "aidge/operator/Sigmoid.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
......@@ -31,128 +31,174 @@
namespace Aidge {
/// Factory for a Pad+Conv meta-operator node (definition elsewhere).
/// @param in_channels   number of input channels of the convolution
/// @param out_channels  number of output channels of the convolution
/// @param kernel_dims   convolution kernel size per spatial dimension
/// @param name          node name; empty for anonymous
/// @param stride_dims   convolution stride per spatial dimension (default 1)
/// @param padding_dims  2*DIM padding values — presumably begin/end per
///                      spatial dim; confirm in the definition (default 0)
/// @param dilation_dims convolution dilation per spatial dimension (default 1)
/// @param no_bias       disable the bias input when true
template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<Node>
PaddedConv(DimSize_t in_channels,
           DimSize_t out_channels,
           const std::array<DimSize_t, DIM> &kernel_dims,
           const std::string &name = "",
           const std::array<DimSize_t, DIM> &stride_dims =
               create_array<DimSize_t, DIM>(1),
           const std::array<DimSize_t, 2 * DIM> &padding_dims =
               create_array<DimSize_t, 2 * DIM>(0),
           const std::array<DimSize_t, DIM> &dilation_dims =
               create_array<DimSize_t, DIM>(1),
           bool no_bias = false);
/// Operator-only variant of PaddedConv: returns the MetaOperator_Op
/// (no Node wrapper, no channel counts — weights come from the inputs).
/// Parameter semantics match PaddedConv.
template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<MetaOperator_Op>
PaddedConv_Op(const std::array<DimSize_t, DIM> &kernel_dims,
              const std::array<DimSize_t, DIM> &stride_dims =
                  create_array<DimSize_t, DIM>(1),
              const std::array<DimSize_t, 2 * DIM> &padding_dims =
                  create_array<DimSize_t, 2 * DIM>(0),
              const std::array<DimSize_t, DIM> &dilation_dims =
                  create_array<DimSize_t, DIM>(1));

// helper with C-style array instead of std::array for kernel_dims to allow
// automatic template DIM deduction
/// C-style-array overload of PaddedConv so DIM is deduced from the
/// braced kernel size, e.g. PaddedConv(3, 16, {3, 3}).
template <DimSize_t DIM>
extern std::shared_ptr<Node>
PaddedConv(DimSize_t in_channels,
           DimSize_t out_channels,
           DimSize_t const (&kernel_dims)[DIM],
           const std::string &name = "",
           const std::array<DimSize_t, DIM> &stride_dims =
               create_array<DimSize_t, DIM>(1),
           const std::array<DimSize_t, 2 * DIM> &padding_dims =
               create_array<DimSize_t, 2 * DIM>(0),
           const std::array<DimSize_t, DIM> &dilation_dims =
               create_array<DimSize_t, DIM>(1),
           bool no_bias = false);
////////////////////////////////////////////////////////////////////////////////
/// Factory for a Pad+ConvDepthWise meta-operator node.
/// @param nb_channels   number of channels (depthwise: in == out)
/// @param kernel_dims   kernel size per spatial dimension
/// @param name          node name; empty for anonymous
/// @param stride_dims   stride per spatial dimension (default 1)
/// @param padding_dims  2*DIM padding values (default 0)
/// @param dilation_dims dilation per spatial dimension (default 1)
/// @param no_bias       disable the bias input when true
template <std::array<DimSize_t, 1>::size_type DIM>
std::shared_ptr<Node>
PaddedConvDepthWise(const DimSize_t nb_channels,
                    const std::array<DimSize_t, DIM> &kernel_dims,
                    const std::string &name = "",
                    const std::array<DimSize_t, DIM> &stride_dims =
                        create_array<DimSize_t, DIM>(1),
                    const std::array<DimSize_t, 2 * DIM> &padding_dims =
                        create_array<DimSize_t, 2 * DIM>(0),
                    const std::array<DimSize_t, DIM> &dilation_dims =
                        create_array<DimSize_t, DIM>(1),
                    bool no_bias = false);
/// Operator-only variant of PaddedConvDepthWise: returns the
/// MetaOperator_Op without wrapping it in a Node.
template <std::array<DimSize_t, 1>::size_type DIM>
std::shared_ptr<MetaOperator_Op>
PaddedConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                       const std::array<DimSize_t, DIM> &stride_dims =
                           create_array<DimSize_t, DIM>(1),
                       const std::array<DimSize_t, 2 * DIM> &padding_dims =
                           create_array<DimSize_t, 2 * DIM>(0),
                       const std::array<DimSize_t, DIM> &dilation_dims =
                           create_array<DimSize_t, DIM>(1));

// helper with C-style array instead of std::array for kernel_dims to allow
// automatic template DIM deduction
/// C-style-array overload of PaddedConvDepthWise so DIM is deduced from
/// the braced kernel size.
template <DimSize_t DIM>
inline std::shared_ptr<Node>
PaddedConvDepthWise(const DimSize_t nb_channels,
                    DimSize_t const (&kernel_dims)[DIM],
                    const std::string &name = "",
                    const std::array<DimSize_t, DIM> &stride_dims =
                        create_array<DimSize_t, DIM>(1),
                    const std::array<DimSize_t, 2 * DIM> &padding_dims =
                        create_array<DimSize_t, 2 * DIM>(0),
                    const std::array<DimSize_t, DIM> &dilation_dims =
                        create_array<DimSize_t, DIM>(1),
                    bool no_bias = false);
////////////////////////////////////////////////////////////////////////////////
/// Factory for a Pad+AvgPooling meta-operator node.
/// @param kernel_dims  pooling window size per spatial dimension
/// @param name         node name; empty for anonymous
/// @param stride_dims  stride per spatial dimension (default 1)
/// @param padding_dims 2*DIM padding values (default 0)
template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<Node>
PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                 const std::string &name = "",
                 const std::array<DimSize_t, DIM> &stride_dims =
                     create_array<DimSize_t, DIM>(1),
                 const std::array<DimSize_t, 2 * DIM> &padding_dims =
                     create_array<DimSize_t, 2 * DIM>(0));
/// Operator-only variant of PaddedAvgPooling: returns the
/// MetaOperator_Op without wrapping it in a Node.
template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<MetaOperator_Op>
PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                    const std::array<DimSize_t, DIM> &stride_dims =
                        create_array<DimSize_t, DIM>(1),
                    const std::array<DimSize_t, 2 * DIM> &padding_dims =
                        create_array<DimSize_t, 2 * DIM>(0));

// helper with C-style array instead of std::array for kernel_dims to allow
// automatic template DIM deduction
/// C-style-array overload of PaddedAvgPooling so DIM is deduced from
/// the braced kernel size.
template <DimSize_t DIM>
extern std::shared_ptr<Node>
PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
                 const std::string &name = "",
                 const std::array<DimSize_t, DIM> &stride_dims =
                     create_array<DimSize_t, DIM>(1),
                 const std::array<DimSize_t, 2 * DIM> &padding_dims =
                     create_array<DimSize_t, 2 * DIM>(0));
////////////////////////////////////////////////////////////////////////////////
/// Builds a Pad->MaxPooling sequential micro-graph and wraps it in a
/// "PaddedMaxPooling<DIM>D" meta-operator node.
/// @param kernel_dims  pooling window size per spatial dimension
/// @param name         node name; inner nodes get "_pad"/"_maxpooling"
///                     suffixes when non-empty
/// @param stride_dims  stride per spatial dimension (default 1)
/// @param padding_dims 2*DIM padding values (default 0)
/// @param ceil_mode    forwarded to MaxPooling
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node>
PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                 const std::string &name = "",
                 const std::array<DimSize_t, DIM> &stride_dims =
                     create_array<DimSize_t, DIM>(1),
                 const std::array<DimSize_t, 2 * DIM> &padding_dims =
                     create_array<DimSize_t, 2 * DIM>(0),
                 bool ceil_mode = false) {
    auto graph = Sequential(
        {Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
         MaxPooling(kernel_dims,
                    (!name.empty()) ? name + "_maxpooling" : "",
                    stride_dims,
                    ceil_mode)});

    return MetaOperator(
        ("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(),
        graph,
        {},
        name);
}
/// Operator-only variant of PaddedMaxPooling: builds the same
/// Pad->MaxPooling micro-graph (with anonymous inner nodes) and returns
/// the MetaOperator_Op without wrapping it in a Node.
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<MetaOperator_Op>
PaddedMaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                    const std::array<DimSize_t, DIM> &stride_dims =
                        create_array<DimSize_t, DIM>(1),
                    const std::array<DimSize_t, 2 * DIM> &padding_dims =
                        create_array<DimSize_t, 2 * DIM>(0),
                    bool ceil_mode = false) {
    auto graph =
        Sequential({Pad<DIM>(padding_dims, ""),
                    MaxPooling(kernel_dims, "", stride_dims, ceil_mode)});

    return std::make_shared<MetaOperator_Op>(
        ("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(),
        graph);
}

// helper with C-style array instead of std::array for kernel_dims to allow
// automatic template DIM deduction
/// C-style-array overload of PaddedMaxPooling so DIM is deduced from the
/// braced kernel size; forwards to the std::array overload via to_array.
template <DimSize_t DIM>
inline std::shared_ptr<Node>
PaddedMaxPooling(DimSize_t const (&kernel_dims)[DIM],
                 const std::string &name = "",
                 const std::array<DimSize_t, DIM> &stride_dims =
                     create_array<DimSize_t, DIM>(1),
                 const std::array<DimSize_t, 2 * DIM> &padding_dims =
                     create_array<DimSize_t, 2 * DIM>(0),
                 bool ceil_mode = false) {
    return PaddedMaxPooling(to_array(kernel_dims),
                            name,
                            stride_dims,
                            padding_dims,
                            ceil_mode);
}
////////////////////////////////////////////////////////////////////////////////
......@@ -161,13 +207,16 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels,
DimSize_t hidden_channels,
DimSize_t seq_length,
bool noBias = false,
const std::string& name = "");
const std::string &name = "");
std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length);
std::shared_ptr<MetaOperator_Op> LeakyOp();
std::shared_ptr<Node> Leaky(const int nbTimeSteps, const float threshold, const float beta, const std::string& name = "");
std::shared_ptr<Node> Leaky(const int nbTimeSteps,
const float threshold,
const float beta,
const std::string &name = "");
} // namespace Aidge
} // namespace Aidge
#endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */
......@@ -13,7 +13,10 @@ namespace Aidge {
constexpr auto memorizeOpDataOutputRecIndex = 1;
constexpr auto memorizeOpDataOutputIndex = 0;
std::shared_ptr<Node> Leaky(const int nbTimeSteps, const float threshold, const float beta, const std::string &name) {
std::shared_ptr<Node> Leaky(const int nbTimeSteps,
const float threshold,
const float beta,
const std::string &name) {
Log::warn("! Lots of parameters are hardcoded");
......@@ -28,8 +31,7 @@ std::shared_ptr<Node> Leaky(const int nbTimeSteps, const float threshold, const
auto reset = Mul(!name.empty() ? name + "_reset" : "");
auto betaTensor = std::make_shared<Tensor>(beta);
auto uthTensor =
std::make_shared<Tensor>(static_cast<float>(threshold));
auto uthTensor = std::make_shared<Tensor>(static_cast<float>(threshold));
uniformFiller<float>(uthTensor, threshold, threshold);
auto decayRate = Producer(betaTensor, "leaky_beta", true);
......@@ -59,7 +61,6 @@ std::shared_ptr<Node> Leaky(const int nbTimeSteps, const float threshold, const
// TODO: Handle hard/soft reset
uth->addChild(reset, 0, 1);
// Input[T] + beta * U[T-1] - S[T-1] * U_th
addNode->addChild(subNode, 0, 0);
......@@ -92,8 +93,9 @@ std::shared_ptr<Node> Leaky(const int nbTimeSteps, const float threshold, const
microGraph->setOrderedInputs(
{{inputNode, 0}, {potentialMem, 1}, {spikeMem, 1}});
// NOTE: Outputs are NOT the memory nodes (as it is done in LSTM), to avoid producing data during init
// This way, we can plug a stack operator after or node, and get correct results
// NOTE: Outputs are NOT the memory nodes (as it is done in LSTM), to avoid
// producing data during init. This way, we can plug an operator after
// our node, and get correct results.
microGraph->setOrderedOutputs({//{potentialMem, memorizeOpDataOutputIndex},
//{spikeMem, memorizeOpDataOutputIndex}
{subNode, 0},
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment