Skip to content
Snippets Groups Projects
Commit f08fe6c5 authored by Cyril Moineau's avatar Cyril Moineau
Browse files

Merge branch 'main' into PythonOpImpl

parents 7849d468 4feec190
No related branches found
No related tags found
1 merge request!19[OperatorImpl] Python OperatorImpl
Pipeline #33095 passed
Showing
with 880 additions and 139 deletions
/**
* @file
* @brief
* @version file 1.0.0
* @author vl241552
* @copyright
* Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
* rights reserved.
*/
#ifndef AIDGE_CORE_CONDITIONAL_LEXER_H_
#define AIDGE_CORE_CONDITIONAL_LEXER_H_
#include <string>
#include <regex>
#include <memory> // for shared_ptr
#include <stdexcept> //error
#include <sstream>
#include "aidge/nodeTester/ConditionalTypes.hpp"
#include "aidge/utilsParsing/ParsingToken.hpp"
namespace Aidge{
class ConditionalLexer
{

public:
/**
 * @brief Construct a lexer over a conditional expression string.
 * @param ConditionalExpressions The expression to be tokenized.
 */
ConditionalLexer( const std::string ConditionalExpressions );

/**
 * @brief Get the next token on the ConditionalExpressions
 * @return ParsingToken<ConditionalTokenTypes>
 */
std::shared_ptr<ParsingToken<ConditionalTokenTypes>> getNextToken(void);

/**
 * @brief Restart at the start of the ConditionalExpressions
 */
void rstPosition(void);

/**
 * @brief Test if the string is completely read
 * @return bool
 */
bool isEnd(void);

/**
 * @brief Get the representation of the class (the stored expression).
 * @return Const reference to the expression; avoids the copy the previous
 *         by-value signature forced. Valid as long as the lexer is alive.
 */
const std::string& rep() const noexcept {
    return mConditionalExpressions;
}

private:

/**
 * @brief Constructs an error message to display the character not understood by the lexer
 * @return error message wrapped in a std::runtime_error, ready to be thrown
 */
std::runtime_error badTokenError(const std::string& currentChars, std::size_t position);

/**
 * @brief The expression of the test to be performed on the nodes
 */
const std::string mConditionalExpressions;

/**
 * @brief The lexer's current position in mConditionalExpressions
 */
std::size_t mPosition;

};
/////////////////////////////////////
}
#endif //AIDGE_CORE_CONDITIONAL_LEXER_H_
#ifndef AIDGE_CORE_CONDITIONAL_PARSER_H_
#define AIDGE_CORE_CONDITIONAL_PARSER_H_
#include <memory> // for shared_ptr
#include <map>
#include <vector>
#include "aidge/nodeTester/ConditionalLexer.hpp"
#include "aidge/nodeTester/ConditionalTypes.hpp"
#include "aidge/utilsParsing/ParsingToken.hpp"
#include "aidge/utilsParsing/AstNode.hpp"
namespace Aidge{
/**
 * @brief Binding precedence of the boolean operators used while building the AST:
 * AND (2) binds tighter than OR (1). Higher value = higher precedence.
 */
const std::map<ConditionalTokenTypes, std::size_t> ConditionalPrec{
{ConditionalTokenTypes::AND,2},
{ConditionalTokenTypes::OR,1}
};
/// Shorthand for a list of AST child nodes.
using ASTNodeCh = std::vector<std::shared_ptr<AstNode<ConditionalTokenTypes>>>;
/**
* @brief this class uses the lexer to create an AST according to a set of gramer rules
*/
/**
 * @brief Recursive-descent parser: uses ConditionalLexer to build an AST
 *        according to the grammar rules documented on each constructAst* method.
 */
class ConditionalParser{

public:
/**
 * @brief Construct the parser over an expression string.
 * @param ConditionalExpressions String representing the logical function to be performed.
 */
ConditionalParser(const std::string ConditionalExpressions);

virtual ~ConditionalParser() = default;

/**
 * @brief AST graph creation function.
 * @return Root of the AST tree.
 */
std::shared_ptr<AstNode<ConditionalTokenTypes>> parse(void);

private:
/**
 * @brief Restart the lexer at the start of the ConditionalExpressions and reset mCurrentToken.
 */
void rstParser(void);

//////////////////

/**
 * @defgroup ParsingFunctions Function for creating AST
 * @brief Functions for recursive construction of the AST representing grammar rules
 */

/**
 * @ingroup ParsingFunctions
 * @brief Check that the current token has the expected type, then advance to the next token.
 */
void ackToken(ConditionalTokenTypes tokenType);

/**
 * @ingroup ParsingFunctions
 * @brief Grammar rule for values : (KEY|INTEGER|FLOAT|STRING|LAMBDA lambda)
 * @return AST node
 */
std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstVal(void);

/**
 * @ingroup ParsingFunctions
 * @brief Grammar rule for comparison : val (EQ|NEQ) val | LPAREN expr RPAREN
 * @return AST node
 */
std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstCmpr(void);

/**
 * @ingroup ParsingFunctions
 * @brief Grammar rule for the arguments of a lambda : LAMBDA val (ARGSEP val)* RPAREN
 * @return AST node
 */
std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstLambda(void);

/**
 * @ingroup ParsingFunctions
 * @brief Grammar rule for an expression : cmpr ((AND | OR) cmpr)*
 *        precLimit implements operator-precedence climbing (see ConditionalPrec).
 * @return AST node
 */
std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstExpr(std::size_t precLimit = 0);

/**
 * @brief The token currently being examined by the parser.
 */
std::shared_ptr<ParsingToken<ConditionalTokenTypes>> mCurrentToken;

/**
 * @brief The lexer supplying the token stream.
 */
ConditionalLexer mLexer;

};
}
#endif //AIDGE_CORE_CONDITIONAL_PARSER_H_
#ifndef AIDGE_CORE_CONDITIONAL_TYPES_H_
#define AIDGE_CORE_CONDITIONAL_TYPES_H_
namespace Aidge{
/**
* @brief enum for all types of token use in the parsing
* 7-5 type
* 4-0 id
*/
/**
 * @brief Token categories produced by the conditional-expression lexer.
 * NOTE(review): the original header mentioned a "7-5 type / 4-0 id" bit layout,
 * but this plain enum does not encode any such layout — presumably a leftover
 * from an earlier token encoding; do not rely on the numeric values.
 */
enum class ConditionalTokenTypes
{
STOP, /**< end of input */

NOT, /**< ! */
AND, /**< && */
OR, /**< || */

EQ, /**< == */
NEQ, /**< != */

KEY, /**< [A-Za-z][A-Za-z0-9_]* */
INTEGER, /**< [0-9]+ */
FLOAT, /**< [0-9]+\.[0-9]* */
STRING , /**< \'.*\' */
BOOL, /**< true|false */
NODE, /**< \$ */
LAMBDA , /**< [A-Za-z][A-Za-z0-9_]*\( */

ARGSEP, /**< , */
LPAREN, /**< \( */
RPAREN, /**< \) */

};
}
#endif // AIDGE_CORE_CONDITIONAL_TYPES_H_
......@@ -81,14 +81,14 @@ public:
// return *in;
// }
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
const auto expectedDims = mInputs[0]->dims();
std::size_t nonEmptyInputTensor = 1;
......@@ -140,7 +140,7 @@ public:
}
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<Add_Op<NUM>>::create(name)(*this);
mOutput->setBackend(name);
......@@ -150,7 +150,7 @@ public:
}
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......@@ -162,6 +162,12 @@ public:
inline IOIndex_t nbInputs() const noexcept override final { return NUM; }
inline IOIndex_t nbDataInputs() const noexcept override final { return NUM; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
/**
 * @brief Names of this operator's input slots, in input-index order.
 * @return Vector of human-readable input identifiers.
 */
static const std::vector<std::string> getInputsName(){
    std::vector<std::string> inputNames;
    inputNames.reserve(2);
    inputNames.emplace_back("data_input_0");
    inputNames.emplace_back("data_input_n");
    return inputNames;
}
/**
 * @brief Names of this operator's output slots, in output-index order.
 * @return Vector of human-readable output identifiers.
 */
static const std::vector<std::string> getOutputsName(){
    const std::vector<std::string> outputNames{"data_output"};
    return outputNames;
}
};
template <std::size_t NUM>
......
......@@ -26,15 +26,14 @@
#include "aidge/utils/Types.h"
namespace Aidge {
enum class AvgPoolingAttr { StrideDims, KernelDims, PaddingDims };
enum class AvgPoolingAttr { StrideDims, KernelDims };
template <DimIdx_t DIM>
class AvgPooling_Op : public Operator,
public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
public StaticAttributes<AvgPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, (DIM<<1) >> {
std::array<DimSize_t, DIM>> {
private:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
......@@ -47,18 +46,15 @@ public:
using Attributes_ = StaticAttributes<AvgPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, (DIM<<1)> >;
std::array<DimSize_t, DIM>>;
template <AvgPoolingAttr e>
using attr = typename Attributes_::template attr<e>;
constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
: Operator(Type),
Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
attr<AvgPoolingAttr::KernelDims>(kernel_dims),
attr<AvgPoolingAttr::PaddingDims>(padding_dims)) {
attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {
setDatatype(DataType::Float32);
}
......@@ -84,7 +80,7 @@ public:
return std::make_shared<AvgPooling_Op<DIM>>(*this);
}
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 1 && "operators supports only 3 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
......@@ -92,16 +88,14 @@ public:
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
void computeOutputDims() override final {
if (!mInput->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
std::floor(static_cast<float>(mInput->dims()[dim+2] -
this->template getAttr<AvgPoolingAttr::KernelDims>()[dim] +
this->template getAttr<AvgPoolingAttr::PaddingDims>()[dim] +
this->template getAttr<AvgPoolingAttr::PaddingDims>()[dim+DIM]) /
this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
}
outputDims[1] = mInput->dims()[1];
......@@ -145,7 +139,7 @@ public:
}
void setBackend(const std::string &name) {
void setBackend(const std::string &name) override {
mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name);
......@@ -153,7 +147,7 @@ public:
mInput->setBackend(name);
}
void setDatatype(const DataType &datatype) {
void setDatatype(const DataType &datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......@@ -163,34 +157,37 @@ public:
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
/**
 * @brief Names of this operator's input slots, in input-index order.
 * @return Vector of human-readable input identifiers.
 */
static const std::vector<std::string> getInputsName(){
    const std::vector<std::string> inputNames{"data_input"};
    return inputNames;
}
/**
 * @brief Names of this operator's output slots, in output-index order.
 * @return Vector of human-readable output identifiers.
 */
static const std::vector<std::string> getOutputsName(){
    const std::vector<std::string> outputNames{"data_output"};
    return outputNames;
}
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
// FIXME: properly handle default w&b initialization in every cases
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
auto avgPool = std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
return avgPool;
return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> AvgPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
return AvgPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
return AvgPooling(to_array(kernel_dims), name, stride_dims);
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
"KernelDims", "PaddingDims"};
"KernelDims"};
}
#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
......@@ -87,14 +87,14 @@ public:
// return *in;
// }
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 5 && "operators supports only 5 inputs");
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) {
if(mInputs[i]->size() != mInputs[0]->dims()[1]) {
......@@ -136,7 +136,7 @@ public:
}
void setBackend(const std::string &name) {
void setBackend(const std::string &name) override {
mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name);
......@@ -147,7 +147,7 @@ public:
mInputs[4]->setBackend(name);
}
void setDatatype(const DataType &datatype) {
void setDatatype(const DataType &datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......@@ -160,6 +160,12 @@ public:
inline IOIndex_t nbInputs() const noexcept override final { return 5; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
/**
 * @brief Names of this operator's input slots, in input-index order
 *        (one data input followed by the four learned/statistic tensors).
 * @return Vector of human-readable input identifiers.
 */
static const std::vector<std::string> getInputsName(){
    std::vector<std::string> inputNames;
    inputNames.reserve(5);
    inputNames.emplace_back("data_input");
    inputNames.emplace_back("scale");
    inputNames.emplace_back("shift");
    inputNames.emplace_back("mean");
    inputNames.emplace_back("variance");
    return inputNames;
}
/**
 * @brief Names of this operator's output slots, in output-index order.
 * @return Vector of human-readable output identifiers.
 */
static const std::vector<std::string> getOutputsName(){
    const std::vector<std::string> outputNames{"data_output"};
    return outputNames;
}
};
template <DimSize_t DIM>
......
......@@ -26,13 +26,13 @@
#include "aidge/utils/Types.h"
namespace Aidge {
enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, PaddingDims };
enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
template <DimIdx_t DIM>
class Conv_Op : public Operator,
public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
DimSize_t, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1) >> {
DimSize_t, std::array<DimSize_t, DIM>> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
......@@ -45,7 +45,7 @@ public:
Conv_Op() = delete;
using Attributes_ = StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
DimSize_t, DimSize_t, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1) >>;
DimSize_t, DimSize_t, std::array<DimSize_t, DIM>>;
template <ConvAttr e>
using attr = typename Attributes_::template attr<e>;
......@@ -53,15 +53,13 @@ public:
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
: Operator(Type),
Attributes_(attr<ConvAttr::StrideDims>(stride_dims),
attr<ConvAttr::DilationDims>(dilation_dims),
attr<ConvAttr::InChannels>(in_channels),
attr<ConvAttr::OutChannels>(out_channels),
attr<ConvAttr::KernelDims>(kernel_dims),
attr<ConvAttr::PaddingDims>(padding_dims)) {
attr<ConvAttr::DilationDims>(dilation_dims),
attr<ConvAttr::InChannels>(in_channels),
attr<ConvAttr::OutChannels>(out_channels),
attr<ConvAttr::KernelDims>(kernel_dims)) {
setDatatype(DataType::Float32);
}
......@@ -100,14 +98,14 @@ public:
// }
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
......@@ -117,9 +115,7 @@ public:
1;
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent +
this->template getAttr<ConvAttr::PaddingDims>()[dim] +
this->template getAttr<ConvAttr::PaddingDims>()[dim+DIM]) /
floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
}
......@@ -160,7 +156,7 @@ public:
}
void setBackend(const std::string &name) {
void setBackend(const std::string &name) override {
mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name);
......@@ -169,7 +165,7 @@ public:
mInputs[2]->setBackend(name);
}
void setDatatype(const DataType &datatype) {
void setDatatype(const DataType &datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......@@ -181,6 +177,12 @@ public:
inline IOIndex_t nbInputs() const noexcept override final { return 3; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
/**
 * @brief Names of this operator's input slots, in input-index order
 *        (data input, then the weight and bias parameter tensors).
 * @return Vector of human-readable input identifiers.
 */
static const std::vector<std::string> getInputsName(){
    std::vector<std::string> inputNames;
    inputNames.reserve(3);
    inputNames.emplace_back("data_input");
    inputNames.emplace_back("weight");
    inputNames.emplace_back("bias");
    return inputNames;
}
/**
 * @brief Names of this operator's output slots, in output-index order.
 * @return Vector of human-readable output identifiers.
 */
static const std::vector<std::string> getOutputsName(){
    const std::vector<std::string> outputNames{"data_output"};
    return outputNames;
}
};
template <std::array<DimSize_t, 1>::size_type DIM>
......@@ -189,17 +191,17 @@ inline std::shared_ptr<Node> Conv(DimSize_t in_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, padding_dims, dilation_dims), name);
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), name);
// addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
addProducer(conv, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
addProducer(conv, 2, {out_channels}, "b");
addProducer(conv, 2, std::array<DimSize_t, 1>({out_channels}), "b");
return conv;
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> Conv(
DimSize_t in_channels,
......@@ -207,10 +209,9 @@ inline std::shared_ptr<Node> Conv(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
return Conv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
return Conv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, dilation_dims);
}
} // namespace Aidge
......@@ -221,8 +222,7 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
"DilationDims",
"InChannels",
"OutChannels",
"KernelDims",
"PaddingDims"
"KernelDims"
};
}
......
......@@ -26,7 +26,7 @@
#include "aidge/utils/Types.h"
namespace Aidge {
enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, PaddingDims };
enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
template <DimIdx_t DIM>
class ConvDepthWise_Op : public Operator,
......@@ -35,8 +35,7 @@ class ConvDepthWise_Op : public Operator,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
DimSize_t,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, (DIM<<1) >> {
std::array<DimSize_t, DIM>> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
......@@ -52,21 +51,18 @@ class ConvDepthWise_Op : public Operator,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
DimSize_t,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, (DIM<<1) >>;
std::array<DimSize_t, DIM>>;
template <ConvDepthWiseAttr e>
using attr = typename Attributes_::template attr<e>;
constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
: Operator(Type),
Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
attr<ConvDepthWiseAttr::Channels>(0),
attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
attr<ConvDepthWiseAttr::PaddingDims>(padding_dims)) {
attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
attr<ConvDepthWiseAttr::Channels>(0),
attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {
setDatatype(DataType::Float32);
}
......@@ -92,14 +88,14 @@ class ConvDepthWise_Op : public Operator,
return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
}
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
......@@ -109,9 +105,7 @@ class ConvDepthWise_Op : public Operator,
1;
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent +
this->template getAttr<ConvDepthWiseAttr::PaddingDims>()[dim] +
this->template getAttr<ConvDepthWiseAttr::PaddingDims>()[dim+DIM]) /
floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
}
this->template getAttr<ConvDepthWiseAttr::Channels>() = mInputs[0]->dims()[1];
......@@ -161,7 +155,7 @@ class ConvDepthWise_Op : public Operator,
void setBackend(const std::string &name) {
void setBackend(const std::string &name) override {
mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name);
......@@ -170,7 +164,7 @@ class ConvDepthWise_Op : public Operator,
mInputs[2]->setBackend(name);
}
void setDatatype(const DataType &datatype) {
void setDatatype(const DataType &datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......@@ -182,38 +176,43 @@ class ConvDepthWise_Op : public Operator,
inline IOIndex_t nbInputs() const noexcept override final { return 3; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
/**
 * @brief Names of this operator's input slots, in input-index order
 *        (data input, then the weight and bias parameter tensors).
 * @return Vector of human-readable input identifiers.
 */
static const std::vector<std::string> getInputsName(){
    const std::vector<std::string> inputNames{"data_input", "weight", "bias"};
    return inputNames;
}
/**
 * @brief Names of this operator's output slots, in output-index order.
 * @return Vector of human-readable output identifiers.
 */
static const std::vector<std::string> getOutputsName(){
    const std::vector<std::string> outputNames{"data_output"};
    return outputNames;
}
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims, dilation_dims), name);
auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), name);
addProducer(convDW, 1, std::array<DimSize_t,0>({}), "w");
addProducer(convDW, 2, std::array<DimSize_t,0>({}), "b");
return convDW;
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> ConvDepthWise(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
return ConvDepthWise(to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
return ConvDepthWise(to_array(kernel_dims), name, stride_dims, dilation_dims);
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels",
"KernelDims", "PaddingDims"};
"KernelDims"};
}
#endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
......@@ -135,7 +135,7 @@ public:
}
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<FC_Op>::create(name)(*this);
mOutput->setBackend(name);
......@@ -145,7 +145,7 @@ public:
mInputs[2]->setBackend(name);
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......@@ -158,13 +158,19 @@ public:
inline IOIndex_t nbInputs() const noexcept override final { return 3; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
/**
 * @brief Names of this operator's input slots, in input-index order
 *        (data input, then the weight and bias parameter tensors).
 * @return Vector of human-readable input identifiers.
 */
static const std::vector<std::string> getInputsName(){
    std::vector<std::string> inputNames;
    inputNames.reserve(3);
    inputNames.emplace_back("data_input");
    inputNames.emplace_back("weight");
    inputNames.emplace_back("bias");
    return inputNames;
}
/**
 * @brief Names of this operator's output slots, in output-index order.
 * @return Vector of human-readable output identifiers.
 */
static const std::vector<std::string> getOutputsName(){
    const std::vector<std::string> outputNames{"data_output"};
    return outputNames;
}
};
inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases
auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
addProducer(fc, 1, {out_channels, 1}, "w");
addProducer(fc, 2, {(noBias ? 0 : out_channels)}, "b"); // already sets bias dims
addProducer(fc, 1, std::array<DimSize_t, 2>({out_channels, 1}), "w");
addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({out_channels})), "b"); // already sets bias dims
return fc;
}
} // namespace Aidge
......@@ -175,4 +181,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
"NoBias"};
}
#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
......@@ -24,6 +24,7 @@
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
class GenericOperator_Op
: public Operator,
......@@ -165,8 +166,8 @@ class GenericOperator_Op
~GenericOperator_Op() = default;
void setBackend(const std::string & /*name*/) { printf("setBackend: not available yet.\n"); }
void setDatatype(const DataType & /*datatype*/) { printf("setDatatype: not available yet.\n"); }
void setBackend(const std::string & /*name*/) override { printf("setBackend: not available yet.\n"); }
void setDatatype(const DataType & /*datatype*/) override { printf("setDatatype: not available yet.\n"); }
void forward() override final {
if(mImpl){
mImpl->forward();
......@@ -181,7 +182,6 @@ class GenericOperator_Op
printf("backward: No implementation is linked.\n");
}
}
inline IOIndex_t nbInputs() const noexcept override final { return mNbIn; };
inline IOIndex_t nbDataInputs() const noexcept override final { return mNbDataIn; };
inline IOIndex_t nbOutputs() const noexcept override final { return mNbOut; };
......
......@@ -120,14 +120,14 @@ public:
}
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......@@ -137,10 +137,15 @@ public:
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
/**
 * @brief Names of this operator's input slots, in input-index order.
 * @return Vector of human-readable input identifiers.
 */
static const std::vector<std::string> getInputsName(){
    const std::vector<std::string> inputNames{"data_input"};
    return inputNames;
}
/**
 * @brief Names of this operator's output slots, in output-index order.
 * @return Vector of human-readable output identifiers.
 */
static const std::vector<std::string> getOutputsName(){
    const std::vector<std::string> outputNames{"data_output"};
    return outputNames;
}
};
/**
 * @brief Create a node wrapping a LeakyReLU_Op.
 * @param negativeSlope slope parameter forwarded to LeakyReLU_Op (0.0f by default);
 *        presumably applied to negative inputs — confirm in LeakyReLU_Op.
 * @param name optional node name.
 * @return The freshly created node.
 */
inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") {
// NOTE(review): the former "FIXME: properly handle default w&b initialization"
// comment looks copy-pasted from weighted operators — no weight/bias producer
// is registered here.
return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
}
}
......
......@@ -127,7 +127,7 @@ public:
}
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<MatMul_Op>::create(name)(*this);
mOutput->setBackend(name);
......@@ -136,7 +136,7 @@ public:
mInputs[1]->setBackend(name);
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......@@ -148,12 +148,18 @@ public:
inline IOIndex_t nbInputs() const noexcept override final { return 2; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
/**
 * @brief Names of this operator's input slots, in input-index order
 *        (data input followed by the weight tensor).
 * @return Vector of human-readable input identifiers.
 */
static const std::vector<std::string> getInputsName(){
    std::vector<std::string> inputNames;
    inputNames.reserve(2);
    inputNames.emplace_back("data_input");
    inputNames.emplace_back("weight");
    return inputNames;
}
/**
 * @brief Names of this operator's output slots, in output-index order.
 * @return Vector of human-readable output identifiers.
 */
static const std::vector<std::string> getOutputsName(){
    const std::vector<std::string> outputNames{"data_output"};
    return outputNames;
}
};
inline std::shared_ptr<Node> MatMul(DimSize_t out_channels, const std::string& name = "") {
// FIXME: properly handle default w initialization in every cases
auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(out_channels), name);
addProducer(matmul, 1, {out_channels, 1}, "w");
addProducer(matmul, 1, std::array<DimSize_t, 2>({out_channels, 1}), "w");
return matmul;
}
} // namespace Aidge
......
......@@ -26,15 +26,14 @@
#include "aidge/utils/Types.h"
namespace Aidge {
enum class MaxPoolingAttr { StrideDims, KernelDims, PaddingDims };
enum class MaxPoolingAttr { StrideDims, KernelDims };
template <DimIdx_t DIM>
class MaxPooling_Op : public Operator,
public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
public StaticAttributes<MaxPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, (DIM<<1) >> {
std::array<DimSize_t, DIM>> {
private:
// FIXME: change accessibility
std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
......@@ -47,18 +46,15 @@ public:
using Attributes_ = StaticAttributes<MaxPoolingAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, (DIM<<1)> >;
std::array<DimSize_t, DIM>>;
template <MaxPoolingAttr e>
using attr = typename Attributes_::template attr<e>;
constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
: Operator(Type),
Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
attr<MaxPoolingAttr::KernelDims>(kernel_dims),
attr<MaxPoolingAttr::PaddingDims>(padding_dims)),
attr<MaxPoolingAttr::KernelDims>(kernel_dims)),
mOutput(std::make_shared<Tensor>()) {
setDatatype(DataType::Float32);
}
......@@ -85,7 +81,7 @@ public:
return std::make_shared<MaxPooling_Op<DIM>>(*this);
}
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 1 && "operators supports only 3 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
......@@ -93,16 +89,14 @@ public:
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
void computeOutputDims() override final {
if (!mInput->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
std::floor(static_cast<float>(mInput->dims()[dim+2] -
this->template getAttr<MaxPoolingAttr::KernelDims>()[dim] +
this->template getAttr<MaxPoolingAttr::PaddingDims>()[dim] +
this->template getAttr<MaxPoolingAttr::PaddingDims>()[dim+DIM]) /
this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
}
outputDims[1] = mInput->dims()[1];
......@@ -146,7 +140,7 @@ public:
}
void setBackend(const std::string &name) {
void setBackend(const std::string &name) override {
mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
mOutput->setBackend(name);
......@@ -154,7 +148,7 @@ public:
mInput->setBackend(name);
}
void setDatatype(const DataType &datatype) {
void setDatatype(const DataType &datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......@@ -164,33 +158,36 @@ public:
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
// FIXME: properly handle default w&b initialization in every cases
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
auto avgPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
return avgPool;
return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> MaxPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
return MaxPooling(to_array(kernel_dims), name, stride_dims);
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"StrideDims", "KernelDims"};
}
#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
......@@ -13,21 +13,38 @@
#define AIDGE_CORE_OPERATOR_METAOPERATOR_H_
#include "aidge/operator/Operator.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/OpArgs.hpp"
#include "aidge/scheduler/Scheduler.hpp"
namespace Aidge {
class MetaOperator : public Operator {
class MetaOperator_Op : public Operator,
public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)> {
public:
MetaOperator()
: Operator("MetaOp")
{
}
std::vector<std::shared_ptr<Tensor>> mInputs;
std::vector<std::shared_ptr<Tensor>> mOutputs; // These are shared with micro-graph outputs tensors
// Micro-graph handling:
std::shared_ptr<GraphView> mGraph; // Meta operator micro-graph
std::shared_ptr<SequentialScheduler> mScheduler;
// Need to store an ordored list of input/output operators for the micro-graph,
// because input/output nodes in a GraphView are unordered.
// TODO: refactor GraphView to handle ordered input/output?
std::vector<std::pair<std::shared_ptr<Operator>, IOIndex_t>> mInputOps;
std::vector<std::pair<std::shared_ptr<Operator>, IOIndex_t>> mOutputOps;
public:
MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph,
std::vector<NodePtr> inputNodes = std::vector<NodePtr>(),
std::vector<NodePtr> outputNodes = std::vector<NodePtr>());
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
MetaOperator(const MetaOperator& op)
: Operator("MetaOp")
MetaOperator_Op(const MetaOperator_Op& op)
: Operator(op.type().c_str()),
mGraph(op.mGraph->clone())
{
// cpy-ctor
}
......@@ -37,11 +54,114 @@ public:
* @see Operator::MatMul_Op
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<MetaOperator>(*this);
return std::make_shared<MetaOperator_Op>(*this);
}
const std::shared_ptr<GraphView>& getMicroGraph() const {
return mGraph;
}
const std::shared_ptr<SequentialScheduler>& getMicroGraphScheduler() const {
return mScheduler;
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
const auto& inputOp = mInputOps[inputIdx];
inputOp.first->associateInput(inputOp.second, data);
// Associate inputs for custom implementation
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void computeOutputDims() override final {
// Forward dims of micro-graph
mGraph->forwardDims();
// Associate outputs to micro-graph outputs for custom implementation
for (size_t outputIdx = 0; outputIdx < mOutputOps.size(); ++outputIdx) {
const auto& outputOp = mOutputOps[outputIdx];
mOutputs[outputIdx] = outputOp.first->getOutput(outputOp.second);
}
}
bool outputDimsForwarded() const override final { return !(mOutputs[0]->empty()); }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx < mInputs.size() && "inputIdx out of range");
return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t outputIdx) const override final {
assert(outputIdx < mOutputs.size() && "outputIdx out of range");
return *(mOutputs[outputIdx].get());
}
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < mInputs.size() && "inputIdx out of range");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx < mOutputs.size() && "outputIdx out of range");
return mOutputs[outputIdx];
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < mInputs.size() && "inputIdx out of range");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx < mOutputs.size() && "outputIdx out of range");
return std::static_pointer_cast<Data>(mOutputs[outputIdx]);
}
void setBackend(const std::string &name) override {
if (Registrar<MetaOperator_Op>::exists({name, type()})) {
// A custom implementation exists for this meta operator
mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
}
// The micro-graph should always be set to the right backend, since it
// shares input/output tensors.
// Input/output tensors backend are updated here.
mGraph->setBackend(name);
}
void setDatatype(const DataType &datatype) override {
// The micro-graph should always be set to the right data type, since it
// shares input/output tensors.
// Input/output tensors data type are updated here.
mGraph->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return mGraph->inputs().size(); }
inline IOIndex_t nbDataInputs() const noexcept override final { return mGraph->dataInputs().size(); }
inline IOIndex_t nbOutputs() const noexcept override final { return mGraph->outputs().size(); }
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override;
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override;
void updateConsummerProducer() override;
void forward() override;
void backward() override {
assert(false && "not implemented");
}
~MetaOperator() = default;
};
/**
 * @brief Wrap an existing micro-graph into a meta-operator Node.
 * @param type Registered type name for the meta-operator.
 * @param graph Micro-graph implementing the operator's computation.
 * @param name Optional name for the created Node.
 * @param inputNodes Ordered input nodes of the micro-graph (GraphView inputs are unordered).
 * @param outputNodes Ordered output nodes of the micro-graph.
 * @return std::shared_ptr<Node> Node holding the MetaOperator_Op.
 */
inline std::shared_ptr<Node> MetaOperator(const char *type,
                                          const std::shared_ptr<GraphView>& graph,
                                          const std::string& name = "",
                                          std::vector<NodePtr> inputNodes = std::vector<NodePtr>(),
                                          std::vector<NodePtr> outputNodes = std::vector<NodePtr>())
{
    auto metaOp = std::make_shared<MetaOperator_Op>(type, graph, inputNodes, outputNodes);
    return std::make_shared<Node>(metaOp, name);
}
} // namespace Aidge
#endif /* MetaOperator_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
#define AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
#include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/AvgPooling.hpp"
#include "aidge/operator/MaxPooling.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/ConvDepthWise.hpp"
#include "aidge/operator/Pad.hpp"
namespace Aidge {
// Meta-operator factory: builds a "PaddedConv" node whose micro-graph is Pad -> Conv.
// @param in_channels   Number of convolution input channels.
// @param out_channels  Number of convolution output channels.
// @param kernel_dims   Convolution kernel size per spatial dimension.
// @param name          Optional node name; sub-nodes are named "<name>_pad" / "<name>_conv".
// @param stride_dims   Convolution strides (default: 1 per dimension).
// @param padding_dims  Begin/end padding, 2*DIM values (default: 0).
// @param dilation_dims Convolution dilations (default: 1 per dimension).
// @return std::shared_ptr<Node> the meta-operator node, with weight ("w") and bias ("b") producers attached.
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
{
// Construct micro-graph
// Padding uses a constant border filled with 0.0.
auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
// Need to specify the ordered list of input operators
// (GraphView inputs are unordered; the meta-operator needs a deterministic input order:
//  0 = pad data input, 1 = conv weight, 2 = conv bias).
const std::vector<NodePtr> orderedInputNodes = {pad, conv};
auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name, orderedInputNodes);
// Weight producer shape is {out_channels, in_channels, kernel_dims...} (built via append).
addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
// Bias producer shape is {out_channels}.
addProducer(metaOp, 2, {out_channels}, "b");
return metaOp;
}
/// Overload taking kernel_dims as a C-style array so DIM is deduced automatically by the compiler.
template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedConv(
    DimSize_t in_channels,
    DimSize_t out_channels,
    DimSize_t const (&kernel_dims)[DIM],
    const std::string& name = "",
    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
{
    // Wrap the C array into a std::array and delegate to the primary overload.
    const auto kernelArray = to_array(kernel_dims);
    return PaddedConv(in_channels, out_channels, kernelArray, name, stride_dims, padding_dims, dilation_dims);
}
// Meta-operator factory: builds a "PaddedConvDepthWise" node whose micro-graph is Pad -> ConvDepthWise.
// @param kernel_dims   Kernel size per spatial dimension.
// @param name          Optional node name; sub-nodes are named "<name>_pad" / "<name>_conv".
// @param stride_dims   Strides (default: 1 per dimension).
// @param padding_dims  Begin/end padding, 2*DIM values (default: 0).
// @param dilation_dims Dilations (default: 1 per dimension).
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
{
// Construct micro-graph
auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
// Need to specify the ordered list of input operators
const std::vector<NodePtr> orderedInputNodes = {pad, conv};
auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name, orderedInputNodes);
// NOTE(review): unlike PaddedConv, weight and bias producers are created with empty (rank-0)
// dims here — presumably a placeholder since channel count is unknown at construction;
// TODO confirm intended producer shapes for the depthwise case.
addProducer(metaOp, 1, std::array<DimSize_t,0>({}), "w");
addProducer(metaOp, 2, std::array<DimSize_t,0>({}), "b");
return metaOp;
}
/// Overload taking kernel_dims as a C-style array so DIM is deduced automatically by the compiler.
template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedConvDepthWise(
    DimSize_t const (&kernel_dims)[DIM],
    const std::string& name = "",
    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
{
    // Wrap the C array into a std::array and delegate to the primary overload.
    const auto kernelArray = to_array(kernel_dims);
    return PaddedConvDepthWise(kernelArray, name, stride_dims, padding_dims, dilation_dims);
}
/**
 * @brief Create a "PaddedAvgPooling" meta-operator node (Pad followed by AvgPooling).
 * @param kernel_dims Pooling window size per spatial dimension.
 * @param name Optional node name; sub-nodes are named "<name>_pad" / "<name>_avgpooling".
 * @param stride_dims Pooling strides (default: 1 per dimension).
 * @param padding_dims Begin/end padding, 2*DIM values (default: 0).
 */
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                              const std::string& name = "",
                                              const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                              const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
{
    // Derive sub-node names only when a meta-operator name was provided.
    const std::string padName  = name.empty() ? "" : name + "_pad";
    const std::string poolName = name.empty() ? "" : name + "_avgpooling";

    auto padNode  = Pad<DIM>(padding_dims, padName);
    auto poolNode = AvgPooling(kernel_dims, poolName, stride_dims);

    return MetaOperator("PaddedAvgPooling", Sequential({padNode, poolNode}), name);
}
/// Overload taking kernel_dims as a C-style array so DIM is deduced automatically by the compiler.
template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedAvgPooling(
    DimSize_t const (&kernel_dims)[DIM],
    const std::string& name = "",
    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
{
    // Wrap the C array into a std::array and delegate to the primary overload.
    const auto kernelArray = to_array(kernel_dims);
    return PaddedAvgPooling(kernelArray, name, stride_dims, padding_dims);
}
/**
 * @brief Create a "PaddedMaxPooling" meta-operator node (Pad followed by MaxPooling).
 * @param kernel_dims Pooling window size per spatial dimension.
 * @param name Optional node name; sub-nodes are named "<name>_pad" / "<name>_maxpooling".
 * @param stride_dims Pooling strides (default: 1 per dimension).
 * @param padding_dims Begin/end padding, 2*DIM values (default: 0).
 */
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                              const std::string& name = "",
                                              const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                              const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
{
    // Derive sub-node names only when a meta-operator name was provided.
    const std::string padName  = name.empty() ? "" : name + "_pad";
    const std::string poolName = name.empty() ? "" : name + "_maxpooling";

    auto padNode  = Pad<DIM>(padding_dims, padName);
    auto poolNode = MaxPooling(kernel_dims, poolName, stride_dims);

    return MetaOperator("PaddedMaxPooling", Sequential({padNode, poolNode}), name);
}
/// Overload taking kernel_dims as a C-style array so DIM is deduced automatically by the compiler.
template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedMaxPooling(
    DimSize_t const (&kernel_dims)[DIM],
    const std::string& name = "",
    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
{
    // Wrap the C array into a std::array and delegate to the primary overload.
    const auto kernelArray = to_array(kernel_dims);
    return PaddedMaxPooling(kernelArray, name, stride_dims, padding_dims);
}
} // namespace Aidge
#endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */
......@@ -20,7 +20,7 @@
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/Types.h"
#include "aidge/hook/hook.hpp"
#include "aidge/hook/Hook.hpp"
namespace Aidge {
......@@ -89,7 +89,7 @@ public:
* @param inputIdx Index of the input analysed.
* @return NbElts_t
*/
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
virtual NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
/**
* @brief Amount of data from a specific input actually used in one computation pass.
......@@ -97,7 +97,7 @@ public:
* @param inputIdx Index of the input analysed.
* @return NbElts_t
*/
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
/**
* @brief Amount of data ready to be used on a specific output.
......@@ -105,9 +105,9 @@ public:
* @param outputIdx Index of the output analysed.
* @return NbElts_t
*/
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
void updateConsummerProducer();
virtual void updateConsummerProducer();
virtual void forward();
......@@ -124,6 +124,12 @@ public:
virtual IOIndex_t nbInputs() const noexcept = 0;
virtual IOIndex_t nbDataInputs() const noexcept = 0;
virtual IOIndex_t nbOutputs() const noexcept = 0;
static const std::vector<std::string> getInputsName(){
return {};
}
static const std::vector<std::string> getOutputsName(){
return {};
}
};
} // namespace Aidge
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_PAD_H_
#define AIDGE_CORE_OPERATOR_PAD_H_
#include <array>
#include <numeric>
#include <vector>
#include <cmath>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
enum class PadBorderType { Constant, Edge, Reflect, Wrap };

/**
 * @brief Padding operator over the spatial dimensions of a (batch, channel, spatial...) tensor.
 *
 * Attributes:
 *  - BeginEndBorders: 2*DIM values interleaved as {begin_0, end_0, begin_1, end_1, ...};
 *    element 2*d pads the start and element 2*d+1 the end of spatial dimension d
 *    (see computeOutputDims()).
 *  - BorderType: fill policy for the padded area (Constant, Edge, Reflect, Wrap).
 *  - BorderValue: value used when BorderType is Constant.
 *
 * Single input, single output. Defaults to Float32 data type at construction.
 */
template <DimIdx_t DIM>
class Pad_Op : public Operator,
                public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
                public StaticAttributes<PadAttr,
                                        std::array<DimSize_t, 2*DIM>,
                                        PadBorderType,
                                        double> {
private:
    // FIXME: change accessibility
    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();

public:
    static constexpr const char *Type = "Pad";

    Pad_Op() = delete;

    using Attributes_ = StaticAttributes<PadAttr,
                                         std::array<DimSize_t, 2*DIM>,
                                         PadBorderType,
                                         double>;
    template <PadAttr e>
    using attr = typename Attributes_::template attr<e>;

    /**
     * @brief Construct a Pad operator.
     * @param beginEndTuples Interleaved begin/end padding per spatial dimension (2*DIM values).
     * @param borderType Fill policy for the padded area (default: Constant).
     * @param borderValue Fill value for Constant borders (default: 0.0).
     */
    constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                     const PadBorderType &borderType = PadBorderType::Constant,
                     double borderValue = 0.0)
        : Operator(Type),
          Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
                      attr<PadAttr::BorderType>(borderType),
                      attr<PadAttr::BorderValue>(borderValue)) {
        setDatatype(DataType::Float32);
    }

    /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
     * @param op Operator to copy.
     */
    Pad_Op(const Pad_Op& op)
        : Operator(Type),
          Attributes_(op),
          mOutput(std::make_shared<Tensor>(*op.mOutput))
    {
        // cpy-ctor
        setDatatype(op.mOutput->dataType());
    }

    /**
     * @brief Clone the operator using its copy-constructor.
     * @see Operator::Pad_Op
     */
    std::shared_ptr<Operator> clone() const override {
        return std::make_shared<Pad_Op<DIM>>(*this);
    }

    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
        // Fixed assertion message: the guard enforces a single input (the original message said "3 inputs").
        assert(inputIdx < 1 && "Pad operator supports only 1 input");
        (void) inputIdx; // avoid unused warning
        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
        mInput = std::dynamic_pointer_cast<Tensor>(data);
    }

    void computeOutputDims() override final {
        if (!mInput->empty()) {
            std::array<DimSize_t, DIM + 2> outputDims = {};
            // Each spatial dimension grows by its begin + end padding amounts.
            for (std::size_t dim = 0; dim < DIM; ++dim) {
                outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
                    + mInput->dims()[dim+2]
                    + this->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
            }
            // Batch and channel dimensions pass through unchanged.
            outputDims[1] = mInput->dims()[1];
            outputDims[0] = mInput->dims()[0];
            mOutput->resize(outputDims);
        }
    }

    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }

    inline Tensor& input(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "Pad operator supports only 1 input");
        (void) inputIdx; // avoid unused warning
        return *(mInput.get());
    }
    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }

    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "Pad operator supports only 1 input");
        (void) inputIdx; // avoid unused warning
        return mInput;
    }
    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "Pad operator has only 1 output");
        (void) outputIdx; // avoid unused warning
        return mOutput;
    }

    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "Pad operator supports only 1 input");
        (void) inputIdx; // avoid unused warning
        return std::static_pointer_cast<Data>(mInput);
    }
    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "Pad operator supports only 1 output");
        (void) outputIdx; // avoid unused warning
        return std::static_pointer_cast<Data>(mOutput);
    }

    void setBackend(const std::string &name) override {
        mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
        mOutput->setBackend(name);

        // FIXME: temporary workaround
        mInput->setBackend(name);
    }

    void setDatatype(const DataType &datatype) override {
        mOutput->setDatatype(datatype);

        // FIXME: temporary workaround
        mInput->setDatatype(datatype);
    }

    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }

    static const std::vector<std::string> getInputsName(){
        return {"data_input"};
    }
    static const std::vector<std::string> getOutputsName(){
        return {"data_output"};
    }
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
const std::string& name = "",
const PadBorderType &borderType = PadBorderType::Constant,
double borderValue = 0.0)
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
}
/// Overload taking beginEndTuples as a C-style array so DIM is deduced automatically by the compiler.
template <DimSize_t DIM>
inline std::shared_ptr<Node> Pad(
    DimSize_t const (&beginEndTuples)[2*DIM],
    const std::string& name = "",
    const PadBorderType &borderType = PadBorderType::Constant,
    double borderValue = 0.0)
{
    // Wrap the C array into a std::array and delegate to the primary overload.
    const auto borders = to_array(beginEndTuples);
    return Pad<DIM>(borders, name, borderType, borderValue);
}
} // namespace Aidge
namespace {
// String names for PadAttr values, indexed in the enum's declaration order.
template <>
const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndBorders", "BorderType", "BorderValue"};
// String names for PadBorderType values, indexed in the enum's declaration order.
template <>
const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Edge", "Reflect", "Wrap"};
}
#endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
......@@ -79,7 +79,7 @@ public:
* @brief Set the Output Tensor of the Producer operator.
* This method will create a copy of the Tensor.
*
* @param newOutput Tensor containing the values to copy
* @param newOutput Tensor containing the values to copy
*/
void setOutputTensor(const Tensor& newOutput) {
*mOutput = newOutput;
......@@ -121,17 +121,23 @@ public:
inline const std::vector<DimSize_t> dims() const noexcept { return mOutput->dims(); }
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<Producer_Op>::create(name)(*this);
mOutput->setBackend(name);
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 0; };
inline IOIndex_t nbDataInputs() const noexcept override final { return 0; };
inline IOIndex_t nbOutputs() const noexcept override final { return 1; };
static const std::vector<std::string> getInputsName(){
return {};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
public:
void forward() override final {
......@@ -148,6 +154,7 @@ inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, co
return std::make_shared<Node>(std::make_shared<Producer_Op>(dims), name);
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <std::size_t DIM>
inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::string& name = "") {
return Producer(to_array(dims), name);
......@@ -167,6 +174,7 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, con
otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <std::size_t DIM>
void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
addProducer(otherNode, inputIdx, to_array(dims), extension);
......
......@@ -108,14 +108,14 @@ public:
}
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<ReLU_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......@@ -125,10 +125,15 @@ public:
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases
return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
}
}
......
......@@ -130,13 +130,13 @@ public:
}
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<Scaling_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......@@ -146,6 +146,12 @@ public:
inline IOIndex_t nbInputs() const noexcept override final { return 1; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::string& name = "") {
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment