Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • eclipse/aidge/aidge_core
  • hrouis/aidge_core
  • mszczep/aidge_core
  • oantoni/aidge_core
  • cguillon/aidge_core
  • jeromeh/aidge_core
  • axelfarr/aidge_core
  • cmoineau/aidge_core
  • noamzerah/aidge_core
  • lrakotoarivony/aidge_core
  • silvanosky/aidge_core
  • maab05/aidge_core
  • mick94/aidge_core
  • lucaslopez/aidge_core_ll
  • wboussella/aidge_core
  • farnez/aidge_core
  • mnewson/aidge_core
17 results
Show changes
Commits on Source (14)
Showing
with 3596 additions and 207 deletions
......@@ -88,8 +88,7 @@ build:ubuntu_python:
- virtualenv venv
- source venv/bin/activate
# Numpy dependency for unit tests
- python3 -m pip install numpy
- export AIDGE_INSTALL=`pwd`/install
- python3 -m pip install -r requirements.txt
- python3 -m pip install .
artifacts:
expire_in: 1 week
......@@ -147,8 +146,7 @@ build:windows_python:
- virtualenv venv
- venv\Scripts\Activate.ps1
# Numpy dependency for unit tests
- python -m pip install numpy
- $env:AIDGE_INSTALL = "$pwd" + "install"
- python -m pip install -r requirements.txt
- python -m pip install .
artifacts:
expire_in: 1 week
......
......@@ -38,6 +38,7 @@
#include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/MetaOperatorDefs.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Pad.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/operator/ReLU.hpp"
#include "aidge/operator/Softmax.hpp"
......
......@@ -169,12 +169,11 @@ template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
auto avgPool = std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
return avgPool;
return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> AvgPooling(
DimSize_t const (&kernel_dims)[DIM],
......
......@@ -201,6 +201,7 @@ inline std::shared_ptr<Node> Conv(DimSize_t in_channels,
return conv;
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> Conv(
DimSize_t in_channels,
......
......@@ -197,6 +197,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &ker
return convDW;
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> ConvDepthWise(
DimSize_t const (&kernel_dims)[DIM],
......
......@@ -146,7 +146,6 @@ public:
};
inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases
return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
}
}
......
......@@ -170,12 +170,11 @@ template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
auto avgPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
return avgPool;
return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> MaxPooling(
DimSize_t const (&kernel_dims)[DIM],
......
......@@ -156,9 +156,11 @@ public:
inline std::shared_ptr<Node> MetaOperator(const char *type,
const std::shared_ptr<GraphView>& graph,
const std::string& name = "")
const std::string& name = "",
std::vector<NodePtr> inputNodes = std::vector<NodePtr>(),
std::vector<NodePtr> outputNodes = std::vector<NodePtr>())
{
return std::make_shared<Node>(std::make_shared<MetaOperator_Op>(type, graph), name);
return std::make_shared<Node>(std::make_shared<MetaOperator_Op>(type, graph, inputNodes, outputNodes), name);
}
} // namespace Aidge
......
......@@ -26,21 +26,22 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
{
// Construct micro-graph
auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(padding_dims, PadBorderType::Constant, 0.0), (!name.empty()) ? name + "_pad" : "");
auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
// Need to specify the ordered list of input operators
const std::vector<NodePtr> orderedInputNodes = {pad, conv};
auto metaOp = std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}), orderedInputNodes), name);
auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name, orderedInputNodes);
addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
addProducer(metaOp, 2, {out_channels}, "b");
return metaOp;
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedConv(
DimSize_t in_channels,
......@@ -48,76 +49,91 @@ inline std::shared_ptr<Node> PaddedConv(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
{
return PaddedConv<DIM>(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
}
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedConvDepthWise(DimSize_t in_channels,
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
inline std::shared_ptr<Node> PaddedConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
{
// Construct micro-graph
auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(padding_dims, PadBorderType::Constant, 0.0), (!name.empty()) ? name + "_pad" : "");
auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
// Need to specify the ordered list of input operators
const std::vector<NodePtr> orderedInputNodes = {pad, conv};
auto metaOp = std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}), orderedInputNodes), name);
addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
addProducer(metaOp, 2, {out_channels}, "b");
auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name, orderedInputNodes);
addProducer(metaOp, 1, std::array<DimSize_t,0>({}), "w");
addProducer(metaOp, 2, std::array<DimSize_t,0>({}), "b");
return metaOp;
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedConvDepthWise(
DimSize_t in_channels,
DimSize_t out_channels,
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
{
return PaddedConvDepthWise<DIM>(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
return PaddedConvDepthWise(to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
}
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedAvgPooling(DimSize_t in_channels,
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
inline std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0})
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
{
auto graph = Sequential({
Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
AvgPooling_Op<DIM>(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
});
return std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedAvgPooling", graph), name);
return MetaOperator("PaddedAvgPooling", graph, name);
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedAvgPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
{
return PaddedAvgPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
}
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedMaxPooling(DimSize_t in_channels,
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0})
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
{
auto graph = Sequential({
Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
MaxPooling_Op<DIM>(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims)
MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims)
});
return std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedMaxPooling", graph), name);
return MetaOperator("PaddedMaxPooling", graph, name);
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedMaxPooling(
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
{
return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
}
} // namespace Aidge
......
......@@ -27,13 +27,13 @@
namespace Aidge {
enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
enum class PadBorderType { Constant, Replicate, Reflect, Wrap };
enum class PadBorderType { Constant, Edge, Reflect, Wrap };
template <DimIdx_t DIM>
class Pad_Op : public Operator,
public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
public StaticAttributes<PadAttr,
std::array<std::array<DimSize_t, 2>, DIM>,
std::array<DimSize_t, 2*DIM>,
PadBorderType,
double> {
private:
......@@ -47,13 +47,13 @@ public:
Pad_Op() = delete;
using Attributes_ = StaticAttributes<PadAttr,
std::array<std::array<DimSize_t, 2>, DIM>,
std::array<DimSize_t, 2*DIM>,
PadBorderType,
double>;
template <PadAttr e>
using attr = typename Attributes_::template attr<e>;
constexpr Pad_Op(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
const PadBorderType &borderType = PadBorderType::Constant,
double borderValue = 0.0)
: Operator(Type),
......@@ -97,9 +97,9 @@ public:
std::array<DimSize_t, DIM + 2> outputDims = {};
for (std::size_t dim = 0; dim < DIM; ++dim) {
outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[dim][0]
outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
+ mInput->dims()[dim+2]
+ this->template getAttr<PadAttr::BeginEndBorders>()[dim][1];
+ this->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
}
outputDims[1] = mInput->dims()[1];
outputDims[0] = mInput->dims()[0];
......@@ -169,62 +169,24 @@ public:
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Pad(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
const std::string& name = "",
const PadBorderType &borderType = PadBorderType::Constant,
double borderValue = 0.0)
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
return pad;
}
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, DIM> &dimBeginEnd,
const std::string& name = "",
const PadBorderType &borderType = PadBorderType::Constant,
double borderValue = 0.0)
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
std::array<std::array<DimSize_t, 2>, DIM> beginEndTuples;
for (size_t i = 0; i < DIM; ++i) {
beginEndTuples[i] = {dimBeginEnd[i], dimBeginEnd[i]};
}
auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
return pad;
}
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ZeroPad(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
const std::string& name = "")
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadBorderType::Constant, 0.0), name);
return pad;
}
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ZeroPad(const std::array<DimSize_t, DIM> &dimBeginEnd,
const std::string& name = "")
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
std::array<std::array<DimSize_t, 2>, DIM> beginEndTuples;
for (size_t i = 0; i < DIM; ++i) {
beginEndTuples[i] = {dimBeginEnd[i], dimBeginEnd[i]};
}
auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadBorderType::Constant, 0.0), name);
return pad;
return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
}
// helper with C-style array instead of std::array for beginEndTuples to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> Pad(
std::array<DimSize_t, 2> const (&beginEndTuples)[DIM],
DimSize_t const (&beginEndTuples)[2*DIM],
const std::string& name = "",
const PadBorderType &borderType = PadBorderType::Constant,
double borderValue = 0.0)
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
return Pad(to_array(beginEndTuples), name, borderType, borderValue);
return Pad<DIM>(to_array(beginEndTuples), name, borderType, borderValue);
}
} // namespace Aidge
......@@ -233,7 +195,7 @@ template <>
const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndBorders", "BorderType", "BorderValue"};
template <>
const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Replicate", "Reflect", "Wrap"};
const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Edge", "Reflect", "Wrap"};
}
#endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
......@@ -133,7 +133,7 @@ public:
inline IOIndex_t nbDataInputs() const noexcept override final { return 0; };
inline IOIndex_t nbOutputs() const noexcept override final { return 1; };
static const std::vector<std::string> getInputsName(){
return {""};
return {};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
......@@ -154,6 +154,7 @@ inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, co
return std::make_shared<Node>(std::make_shared<Producer_Op>(dims), name);
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <std::size_t DIM>
inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::string& name = "") {
return Producer(to_array(dims), name);
......@@ -173,6 +174,7 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, con
otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <std::size_t DIM>
void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
addProducer(otherNode, inputIdx, to_array(dims), extension);
......
......@@ -134,7 +134,6 @@ public:
};
inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases
return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
}
}
......
......@@ -134,7 +134,6 @@ public:
};
inline std::shared_ptr<Node> Softmax(const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases
return std::make_shared<Node>(std::make_shared<Softmax_Op>(), name);
}
}
......
......@@ -19,7 +19,7 @@
#include <cassert>
#include <string>
#include "aidge/utils/Any.hpp"
#include "aidge/utils/future_std/any.hpp"
#include "aidge/utils/Attributes.hpp"
#ifdef PYBIND
......@@ -54,12 +54,12 @@ public:
auto itPy = mAttrsPy.find(name);
if (itPy != mAttrsPy.end()) {
// Insert the attribute back in C++
mAttrs.emplace(std::make_pair(name, libany::any(itPy->second.cast<T>())));
mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
}
}
#endif
return libany::any_cast<T&>(mAttrs.at(name));
return future_std::any_cast<T&>(mAttrs.at(name));
}
template<class T> const T& getAttr(const std::string& name) const
......@@ -71,12 +71,12 @@ public:
auto itPy = mAttrsPy.find(name);
if (itPy != mAttrsPy.end()) {
// Insert the attribute back in C++
mAttrs.emplace(std::make_pair(name, libany::any(itPy->second.cast<T>())));
mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
}
}
#endif
return libany::any_cast<const T&>(mAttrs.at(name));
return future_std::any_cast<const T&>(mAttrs.at(name));
}
///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
......@@ -85,7 +85,7 @@ public:
///\param value Attribute value
template<class T> void addAttr(const std::string& name, const T& value)
{
const auto& res = mAttrs.emplace(std::make_pair(name, libany::any(value)));
const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
assert(res.second && "attribute already exists");
#ifdef PYBIND
......@@ -103,9 +103,9 @@ public:
///\param value Attribute value
template<class T> void setAttr(const std::string& name, const T& value)
{
auto res = mAttrs.emplace(std::make_pair(name, libany::any(value)));
auto res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
if (!res.second)
res.first->second = libany::any(value);
res.first->second = future_std::any(value);
#ifdef PYBIND
// We cannot handle Python object if the Python interpreter is not running
......@@ -210,9 +210,9 @@ private:
std::map<std::string, py::object> mAttrsPy;
// Stores C++ attributes only
// mutable because it may be updated in getAttr() from Python
mutable std::map<std::string, libany::any> mAttrs;
mutable std::map<std::string, future_std::any> mAttrs;
#else
std::map<std::string, libany::any> mAttrs;
std::map<std::string, future_std::any> mAttrs;
#endif
};
......
......@@ -14,11 +14,11 @@
* Copyright (c) 2018 Claudio Fantacci
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE.md or copy at http://www.boost.org/LICENSE_1_0.txt)
* (See copy at http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef AIDGE_CORE_UTILS_ANY_H_
#define AIDGE_CORE_UTILS_ANY_H_
#ifndef AIDGE_CORE_UTILS_FUTURE_STD_ANY_H_
#define AIDGE_CORE_UTILS_FUTURE_STD_ANY_H_
#include <stdexcept>
#include <typeinfo>
......@@ -26,7 +26,7 @@
#include <utility>
namespace libany
namespace future_std
{
class bad_any_cast : public std::bad_cast
......@@ -549,4 +549,4 @@ inline void swap(any& lhs, any& rhs) noexcept
}
#endif /* AIDGE_CORE_UTILS_ANY_H_ */
#endif /* AIDGE_CORE_UTILS_FUTURE_STD_ANY_H_ */
This diff is collapsed.
......@@ -8,7 +8,7 @@
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifdef PYBIND
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
......@@ -39,25 +39,10 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
const std::vector<DimSize_t> &stride_dims) {
// Lambda function wrapper because PyBind fails to convert const array.
// So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
if (kernel_dims.size() != DIM) {
throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
if (stride_dims.size() != DIM) {
throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
DimSize_t tmp_kernel_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_kernel_dims_array[i] = kernel_dims[i];
}
DimSize_t tmp_stride_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_stride_dims_array[i] = stride_dims[i];
}
const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
return AvgPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array));
AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
return AvgPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()));
}, py::arg("kernel_dims"),
py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
......@@ -75,4 +60,3 @@ void init_AvgPooling(py::module &m) {
// (&)[1])>(&AvgPooling));
}
} // namespace Aidge
#endif
......@@ -48,33 +48,11 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
const std::string& name,
const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &dilation_dims) {
// Lambda function wrapper because PyBind fails to convert const array.
// So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
if (kernel_dims.size() != DIM) {
throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
if (stride_dims.size() != DIM) {
throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
if (dilation_dims.size() != DIM) {
throw std::runtime_error("dilation_dims size [" + std::to_string(dilation_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
DimSize_t tmp_kernel_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_kernel_dims_array[i] = kernel_dims[i];
}
DimSize_t tmp_stride_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_stride_dims_array[i] = stride_dims[i];
}
DimSize_t tmp_dilation_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_dilation_dims_array[i] = dilation_dims[i];
}
const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
const DimSize_t (&dilation_dims_array)[DIM] = tmp_dilation_dims_array;
return Conv<DIM>(in_channels, out_channels, to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(dilation_dims_array));
AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
return Conv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
}, py::arg("in_channels"),
py::arg("out_channels"),
py::arg("kernel_dims"),
......
......@@ -42,33 +42,11 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
const std::string& name,
const std::vector<DimSize_t> &stride_dims,
const std::vector<DimSize_t> &dilation_dims) {
// Lambda function wrapper because PyBind fails to convert const array.
// So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
if (kernel_dims.size() != DIM) {
throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
if (stride_dims.size() != DIM) {
throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
if (dilation_dims.size() != DIM) {
throw std::runtime_error("dilation_dims size [" + std::to_string(dilation_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
DimSize_t tmp_kernel_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_kernel_dims_array[i] = kernel_dims[i];
}
DimSize_t tmp_stride_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_stride_dims_array[i] = stride_dims[i];
}
DimSize_t tmp_dilation_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_dilation_dims_array[i] = dilation_dims[i];
}
const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
const DimSize_t (&dilation_dims_array)[DIM] = tmp_dilation_dims_array;
return ConvDepthWise<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(dilation_dims_array));
AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
return ConvDepthWise<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
}, py::arg("kernel_dims"),
py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
......
......@@ -8,7 +8,7 @@
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifdef PYBIND
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
......@@ -39,25 +39,10 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
const std::vector<DimSize_t> &stride_dims) {
// Lambda function wrapper because PyBind fails to convert const array.
// So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
if (kernel_dims.size() != DIM) {
throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
if (stride_dims.size() != DIM) {
throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
}
DimSize_t tmp_kernel_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_kernel_dims_array[i] = kernel_dims[i];
}
DimSize_t tmp_stride_dims_array[DIM];
for (size_t i = 0; i < DIM; ++i) {
tmp_stride_dims_array[i] = stride_dims[i];
}
const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array));
AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
return MaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()));
}, py::arg("kernel_dims"),
py::arg("name") = "",
py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
......@@ -75,4 +60,3 @@ void init_MaxPooling(py::module &m) {
// (&)[1])>(&MaxPooling));
}
} // namespace Aidge
#endif