Commit 894b770e authored by Olivier BICHLER

Merge remote-tracking branch 'origin/dev' into scheduling

parents 69351afb c0a5d97a
Showing 482 additions and 86 deletions
@@ -64,6 +64,7 @@
 #include "aidge/stimuli/Stimulus.hpp"
 #include "aidge/recipes/Recipes.hpp"
+#include "aidge/filler/Filler.hpp"
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
......
@@ -52,6 +52,7 @@ public:
         return mType;
     }
     virtual ~Data() = default;
+    virtual std::string toString() const = 0;
 private:
     const std::string mType;
@@ -84,4 +85,4 @@ namespace Aidge {
 inline auto format_as(DataType dt) { return EnumStrings<Aidge::DataType>::data[static_cast<int>(dt)]; }
 }
 #endif /* AIDGE_DATA_H_ */
\ No newline at end of file
@@ -445,7 +445,7 @@ public:
         set<expectedType>(getStorageIdx(coordIdx), value);
     }
-    std::string toString() const;
+    std::string toString() const override;
     inline void print() const { fmt::print("{}\n", toString()); }
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_FILLER_H_
#define AIDGE_CORE_FILLER_H_
#include <memory>
#include <random> // normal_distribution, uniform_real_distribution
#include "aidge/data/Tensor.hpp"
namespace Aidge {
inline void calculateFanInFanOut(std::shared_ptr<Tensor> tensor,
unsigned int& fanIn, unsigned int& fanOut) {
AIDGE_ASSERT(
tensor->nbDims() == 4,
"Tensor need to have 4 dimensions to compute FanIn and FanOut.");
    // Warning: this function assumes an NCXX data layout.
    // Aidge currently only supports NCHW, but this may not be true in the
    // future.
DimSize_t batchSize = tensor->dims()[0];
DimSize_t channelSize = tensor->dims()[1];
AIDGE_ASSERT(batchSize != 0,
"Cannot calculate FanIn if tensor batch size is 0.");
AIDGE_ASSERT(channelSize != 0,
"Cannot calculate FanOut if tensor channel size is 0.");
fanIn = static_cast<unsigned int>(tensor->size() / batchSize);
fanOut = static_cast<unsigned int>(tensor->size() / channelSize);
}
enum VarianceNorm { FanIn, Average, FanOut };
template <typename T>
void constantFiller(std::shared_ptr<Tensor> tensor, T constantValue);
template <typename T>
void normalFiller(std::shared_ptr<Tensor> tensor, double mean = 0.0,
double stdDev = 1.0);
template <typename T>
void uniformFiller(std::shared_ptr<Tensor> tensor, T min, T max);
template <typename T>
void xavierUniformFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
VarianceNorm varianceNorm = FanIn);
template <typename T>
void xavierNormalFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
VarianceNorm varianceNorm = FanIn);
template <typename T>
void heFiller(std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm = FanIn,
T meanNorm = 0.0, T scaling = 1.0);
} // namespace Aidge
#endif /* AIDGE_CORE_FILLER_H_ */
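A minimal usage sketch for the fillers declared above, assuming a 4-D float weight tensor obtained elsewhere (for instance from a Producer node); creating the tensor itself is outside the scope of this header:

#include <memory>

#include "aidge/data/Tensor.hpp"
#include "aidge/filler/Filler.hpp"

void initWeights(std::shared_ptr<Aidge::Tensor> weights) {
    // Fan-in/fan-out are derived from the (assumed) 4-D NCHW dimensions.
    unsigned int fanIn = 0, fanOut = 0;
    Aidge::calculateFanInFanOut(weights, fanIn, fanOut);

    // Zero-initialize, then overwrite with a Xavier-uniform initialization.
    Aidge::constantFiller<float>(weights, 0.0f);
    Aidge::xavierUniformFiller<float>(weights, /*scaling=*/1.0f,
                                      Aidge::VarianceNorm::FanIn);
}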
@@ -98,6 +98,8 @@ public:
      */
     void save(const std::string& path, bool verbose = false, bool showProducers = true) const;
+
+    void logOutputs(const std::string& dirName) const;
     /**
      * Check that a node is in the current GraphView.
      * @param nodePtr Node to check
@@ -283,7 +285,7 @@ public:
     * added to the list, and so on.
     * - Any remaining nodes have no path to the root node and are added in
     * arbitrary order. In this case, the ranking is not garanteed to be unique.
     *
     * If the ranking cannot be garanteed to be unique, the second item indicates
     * the rank from which unicity cannot be garanteed.
     * @return std::pair<std::vector<NodePtr>, size_t> Pair with the list of ranked
......
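A short sketch of how the new logOutputs entry point could sit next to the existing save method. The GraphView is assumed to already exist, the header path is an assumption, and exactly what logOutputs writes is not shown in this diff:

#include <memory>

#include "aidge/graph/GraphView.hpp"  // header path assumed

void dumpGraph(const std::shared_ptr<Aidge::GraphView>& graph) {
    // Existing entry point: serialize the graph structure to the given path.
    graph->save("model", /*verbose=*/false, /*showProducers=*/true);
    // Added by this merge: log the graph outputs under the given directory.
    graph->logOutputs("log_outputs");
}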
@@ -27,21 +27,31 @@
 #include "aidge/utils/Types.h"

 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
+enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, NoBias };

 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
                 public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
-                public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
-                                        DimSize_t, std::array<DimSize_t, DIM>> {
+                public StaticAttributes<ConvAttr,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>,
+                                        DimSize_t,
+                                        DimSize_t,
+                                        std::array<DimSize_t, DIM>,
+                                        bool> {
 public:
     static const std::string Type;

     Conv_Op() = delete;

-    using Attributes_ = StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
-                                         DimSize_t, DimSize_t, std::array<DimSize_t, DIM>>;
+    using Attributes_ = StaticAttributes<ConvAttr,
+                                         std::array<DimSize_t, DIM>,
+                                         std::array<DimSize_t, DIM>,
+                                         DimSize_t,
+                                         DimSize_t,
+                                         std::array<DimSize_t, DIM>,
+                                         bool>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
@@ -49,13 +59,15 @@ public:
                       DimSize_t outChannels,
                       const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+                      bool noBias = false)
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvAttr::StrideDims>(strideDims),
                       attr<ConvAttr::DilationDims>(dilationDims),
                       attr<ConvAttr::InChannels>(inChannels),
                       attr<ConvAttr::OutChannels>(outChannels),
-                      attr<ConvAttr::KernelDims>(kernelDims)) {}
+                      attr<ConvAttr::KernelDims>(kernelDims),
+                      attr<ConvAttr::NoBias>(noBias)) {}

     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -163,15 +175,17 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> co
             std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
             weightIdxDims[0] = firstEltDims[1];

-            // Bias
-            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
-
             // Result
             std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
             res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
             res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
+
+            // Bias
+            if (! this->template getAttr<ConvAttr::NoBias>()){
+                const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
+                const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
+                res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+            }
             return res;
         }
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
@@ -215,12 +229,14 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+                                  bool noBias = false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims, noBias), name);
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    addProducer(conv, 2, {outChannels}, "b");
+    addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
     return conv;
 }
@@ -232,9 +248,10 @@ inline std::shared_ptr<Node> Conv(
     DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+    bool noBias = false) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims);
+    return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
 }
 } // namespace Aidge
@@ -245,7 +262,8 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
     "DilationDims",
     "InChannels",
     "OutChannels",
-    "KernelDims"
+    "KernelDims",
+    "NoBias"
 };
 }
......
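A minimal sketch of building a 2-D convolution node with the new noBias flag. The header path and graph wiring are assumptions; the behavioural change is only that the "b" Producer is created with 0 elements and the bias pair is dropped from the receptive-field result shown above:

#include <array>
#include <memory>

#include "aidge/operator/Conv.hpp"  // header path assumed

std::shared_ptr<Aidge::Node> makeConvWithoutBias() {
    const std::array<Aidge::DimSize_t, 2> kernel{3, 3};
    const std::array<Aidge::DimSize_t, 2> stride{1, 1};
    const std::array<Aidge::DimSize_t, 2> dilation{1, 1};
    // noBias = true: the "b" Producer gets 0 elements instead of outChannels.
    return Aidge::Conv(/*inChannels=*/3, /*outChannels=*/16, kernel, "conv1",
                       stride, dilation, /*noBias=*/true);
}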
@@ -26,7 +26,7 @@
 #include "aidge/utils/Types.h"

 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, NoBias };

 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
@@ -35,7 +35,8 @@ class ConvDepthWise_Op : public OperatorTensor,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        DimSize_t,
-                                       std::array<DimSize_t, DIM>> {
+                                       std::array<DimSize_t, DIM>,
+                                       bool> {
 public:
     static const std::string Type;
@@ -45,19 +46,22 @@ public:
                                          std::array<DimSize_t, DIM>,
                                          std::array<DimSize_t, DIM>,
                                          DimSize_t,
-                                         std::array<DimSize_t, DIM>>;
+                                         std::array<DimSize_t, DIM>,
+                                         bool>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;

     constexpr ConvDepthWise_Op(const DimSize_t nbChannels,
                                const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+                               bool no_bias=false)
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
                       attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
                       attr<ConvDepthWiseAttr::Channels>(nbChannels),
-                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}
+                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
+                      attr<ConvDepthWiseAttr::NoBias>(no_bias)) {}

     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -157,15 +161,17 @@ public:
             std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
             weightIdxDims[0] = firstEltDims[1];

-            // Bias
-            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
-
             // Result
             std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
             res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
             res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
+
+            // Bias
+            if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
+                const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
+                const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
+                res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+            }
             return res;
         }
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
@@ -196,12 +202,13 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            const std::array<DimSize_t, DIM> &kernelDims,
                                            const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                           const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+                                           const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+                                           bool noBias=false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims), name);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims, noBias), name);
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    addProducer(convDW, 2, {nbChannels}, "b");
+    addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
     return convDW;
 }
@@ -212,16 +219,17 @@ inline std::shared_ptr<Node> ConvDepthWise(
     DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
+    bool noBias=false) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims);
+    return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
 }
 } // namespace Aidge

 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels",
-                                                                   "KernelDims"};
+                                                                   "KernelDims", "NoBias"};
 }

 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
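The same flag exists on the depthwise variant; a sketch under the same assumptions (header path assumed, graph wiring not shown):

#include <array>
#include <memory>

#include "aidge/operator/ConvDepthWise.hpp"  // header path assumed

std::shared_ptr<Aidge::Node> makeDepthwiseWithoutBias() {
    const std::array<Aidge::DimSize_t, 2> kernel{3, 3};
    const std::array<Aidge::DimSize_t, 2> stride{1, 1};
    const std::array<Aidge::DimSize_t, 2> dilation{1, 1};
    // The weight Producer keeps its (nbChannels, 1, kH, kW) shape; only the
    // bias Producer is affected by noBias (0 channels instead of nbChannels).
    return Aidge::ConvDepthWise(/*nbChannels=*/32, kernel, "dw_conv1",
                                stride, dilation, /*noBias=*/true);
}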
@@ -35,11 +35,12 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
                                         const std::string& name = "",
                                         const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                         const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                        const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+                                        const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+                                        bool no_bias = false)
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
     auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
@@ -56,9 +57,10 @@ inline std::shared_ptr<Node> PaddedConv(
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+    bool no_bias = false)
 {
-    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
 }

 template <std::array<DimSize_t, 1>::size_type DIM>
@@ -67,11 +69,12 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
                                                  const std::string& name = "",
                                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                                 const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+                                                 const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+                                                 bool no_bias = false)
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
     auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w");
@@ -87,9 +90,10 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+    bool no_bias = false)
 {
-    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
+    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
 }

 template <std::array<DimSize_t, 1>::size_type DIM>
......
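A sketch of the padded variant under the same assumptions (the header path for these meta-operator helpers is an assumption); the no_bias flag is simply forwarded to the inner Conv_Op of the Pad then Conv micro-graph:

#include <array>
#include <memory>

#include "aidge/operator/MetaOperatorDefs.hpp"  // header path assumed

std::shared_ptr<Aidge::Node> makePaddedConvWithoutBias() {
    const std::array<Aidge::DimSize_t, 2> kernel{3, 3};
    const std::array<Aidge::DimSize_t, 2> stride{1, 1};
    const std::array<Aidge::DimSize_t, 4> padding{1, 1, 1, 1};  // 2*DIM border values
    const std::array<Aidge::DimSize_t, 2> dilation{1, 1};
    return Aidge::PaddedConv(/*in_channels=*/3, /*out_channels=*/16, kernel, "conv1",
                             stride, padding, dilation, /*no_bias=*/true);
}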
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_DIRECTORIES_H_
#define AIDGE_DIRECTORIES_H_
#include <string> // std::string
#include <sstream> // std::stringstream
#include <iostream>
#include <sys/stat.h>
#include <errno.h>
#ifdef WIN32
#include <direct.h>
#else
#include <sys/types.h>
#include <unistd.h>
#endif
namespace Aidge {
bool isNotValidFilePath(int c) {
return (iscntrl(c)
|| c == '<'
|| c == '>'
|| c == ':'
|| c == '"'
|| c == '|'
|| c == '?'
|| c == '*');
}
std::string filePath(const std::string& str) {
std::string filePath(str);
std::replace_if(filePath.begin(), filePath.end(),
isNotValidFilePath, '_');
return filePath;
}
bool createDirectories(const std::string& dirName)
{
std::stringstream path(dirName);
std::string dir;
std::string pathToDir("");
int status = 0;
while (std::getline(path, dir, '/') && status == 0) {
pathToDir += dir + '/';
struct stat fileStat;
if (stat(pathToDir.c_str(), &fileStat) != 0) {
// Directory does not exist
#ifdef WIN32
status = _mkdir(pathToDir.c_str());
#else
#if defined(S_IRWXU)
status = mkdir(pathToDir.c_str(), S_IRWXU | S_IRWXG | S_IRWXO);
#else
status = mkdir(pathToDir.c_str());
#endif
#endif
} else if (!S_ISDIR(fileStat.st_mode)) {
status = -1;
}
}
return (status == 0 || errno == EEXIST);
}
}
#endif //AIDGE_DIRECTORIES_H_
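A small sketch of how these helpers can be combined to prepare an output directory (the header path is an assumption; createDirectories returns true when the directory chain already exists or was created):

#include <string>

#include "aidge/utils/Directories.hpp"  // header path assumed

int main() {
    // Sanitize a user-provided name before using it as a directory name:
    // invalid characters are replaced by '_', e.g. "run:01?" becomes "run_01_".
    const std::string dir = Aidge::filePath("run:01?");
    // Create the directory chain (each '/'-separated component in turn).
    if (!Aidge::createDirectories("outputs/" + dir)) {
        return 1;
    }
    return 0;
}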
@@ -9,23 +9,53 @@
  *
  ********************************************************************************/

 #ifndef AIDGE_RANDOM_H_
 #define AIDGE_RANDOM_H_

 #include <algorithm>
-#include <vector>
 #include <random>
+#include <vector>

+namespace Aidge {
 namespace Random {

-void randShuffle(std::vector<unsigned int>& vec) {
-    std::random_device rd;
-    std::mt19937 g(rd());
-    std::shuffle(vec.begin(), vec.end(), g);
-}
-
-}
-
-#endif //AIDGE_RANDOM_H_
\ No newline at end of file
+/**
+ * @brief Generator is a class created to handle only one Mersenne Twister
+ * pseudo-random number generator for the whole Aidge framework.
+ *
+ * All of its methods are static. You can set a random seed and access the
+ * generator.
+ * By default, the random seed is set to 0 but selected randomly.
+ *
+ */
+class Generator {
+ public:
+    /**
+     * @brief Set a seed to the pseudo-random number generator.
+     */
+    static void setSeed(unsigned int seed);
+    static unsigned int getSeed() { return seed; };
+    /**
+     * @brief Return a Mersenne Twister pseudo-random number generator.
+     * You can set the seed of this generator using the ``setSeed`` method.
+     *
+     * @return std::mt19937&
+     */
+    static std::mt19937& get() { return generator; };
+
+ private:
+    // Mersenne Twister pseudo-random number generator
+    static std::mt19937 generator;
+    static unsigned int seed;
+};
+
+inline void randShuffle(std::vector<unsigned int>& vec) {
+    std::shuffle(vec.begin(), vec.end(), Aidge::Random::Generator::get());
+}
+
+}  // namespace Random
+}  // namespace Aidge
+
+#endif  // AIDGE_RANDOM_H_
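A minimal sketch of the intended usage of the new Generator class together with randShuffle; the seed value is arbitrary:

#include <random>
#include <vector>

#include "aidge/utils/Random.hpp"

int main() {
    // Fix the framework-wide seed so that every shuffle below is reproducible.
    Aidge::Random::Generator::setSeed(42);

    std::vector<unsigned int> indices{0, 1, 2, 3, 4};
    Aidge::Random::randShuffle(indices);  // uses the shared Mersenne Twister

    // The same generator can be reused directly with any distribution.
    std::uniform_int_distribution<int> dist(0, 9);
    const int r = dist(Aidge::Random::Generator::get());
    return r >= 0 ? 0 : 1;
}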
@@ -26,12 +26,11 @@ void init_Data(py::module& m){
     .value("Int64", DataType::Int64)
     .value("UInt8", DataType::UInt8)
     .value("UInt32", DataType::UInt32)
     .value("UInt64", DataType::UInt64)
     ;

-    py::class_<Data, std::shared_ptr<Data>>(m,"Data")
-    .def(py::init<const std::string&>());
+    py::class_<Data, std::shared_ptr<Data>>(m,"Data");
 }
 }
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/data/Tensor.hpp"
#include "aidge/filler/Filler.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Filler(py::module &m) {
py::enum_<enum VarianceNorm>(m, "VarianceNorm")
.value("FanIn", VarianceNorm::FanIn)
.value("Average", VarianceNorm::Average)
.value("FanOut", VarianceNorm::FanOut)
.export_values();
m.def(
"constant_filler",
[](std::shared_ptr<Tensor> tensor, py::object value) -> void {
switch (tensor->dataType()) {
case DataType::Float64:
constantFiller<double>(tensor, value.cast<double>());
break;
case DataType::Float32:
constantFiller<float>(tensor, value.cast<float>());
break;
default:
AIDGE_THROW_OR_ABORT(
py::value_error,
"Data type is not supported for Constant filler.");
}
},
py::arg("tensor"), py::arg("value"))
.def(
"normal_filler",
[](std::shared_ptr<Tensor> tensor, double mean,
double stdDev) -> void {
switch (tensor->dataType()) {
case DataType::Float64:
normalFiller<double>(tensor, mean, stdDev);
break;
case DataType::Float32:
normalFiller<float>(tensor, mean, stdDev);
break;
default:
AIDGE_THROW_OR_ABORT(
py::value_error,
"Data type is not supported for Normal filler.");
}
},
py::arg("tensor"), py::arg("mean") = 0.0, py::arg("stdDev") = 1.0)
.def(
"uniform_filler",
[](std::shared_ptr<Tensor> tensor, double min, double max) -> void {
switch (tensor->dataType()) {
case DataType::Float64:
uniformFiller<double>(tensor, min, max);
break;
case DataType::Float32:
uniformFiller<float>(tensor, min, max);
break;
default:
AIDGE_THROW_OR_ABORT(
py::value_error,
"Data type is not supported for Uniform filler.");
}
},
py::arg("tensor"), py::arg("min"), py::arg("max"))
.def(
"xavier_uniform_filler",
[](std::shared_ptr<Tensor> tensor, py::object scaling,
VarianceNorm varianceNorm) -> void {
switch (tensor->dataType()) {
case DataType::Float64:
xavierUniformFiller<double>(
tensor, scaling.cast<double>(), varianceNorm);
break;
case DataType::Float32:
xavierUniformFiller<float>(
tensor, scaling.cast<float>(), varianceNorm);
break;
default:
AIDGE_THROW_OR_ABORT(
py::value_error,
"Data type is not supported for Uniform filler.");
}
},
py::arg("tensor"), py::arg("scaling") = 1.0,
py::arg("varianceNorm") = VarianceNorm::FanIn)
.def(
"xavier_normal_filler",
[](std::shared_ptr<Tensor> tensor, py::object scaling,
VarianceNorm varianceNorm) -> void {
switch (tensor->dataType()) {
case DataType::Float64:
xavierNormalFiller<double>(
tensor, scaling.cast<double>(), varianceNorm);
break;
case DataType::Float32:
xavierNormalFiller<float>(tensor, scaling.cast<float>(),
varianceNorm);
break;
default:
AIDGE_THROW_OR_ABORT(
py::value_error,
"Data type is not supported for Uniform filler.");
}
},
py::arg("tensor"), py::arg("scaling") = 1.0,
py::arg("varianceNorm") = VarianceNorm::FanIn)
.def(
"he_filler",
[](std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm,
py::object meanNorm, py::object scaling) -> void {
switch (tensor->dataType()) {
case DataType::Float64:
heFiller<double>(tensor, varianceNorm,
meanNorm.cast<double>(),
scaling.cast<double>());
break;
case DataType::Float32:
heFiller<float>(tensor, varianceNorm,
meanNorm.cast<float>(),
scaling.cast<float>());
break;
default:
AIDGE_THROW_OR_ABORT(
py::value_error,
"Data type is not supported for Uniform filler.");
}
},
py::arg("tensor"), py::arg("varianceNorm") = VarianceNorm::FanIn,
py::arg("meanNorm") = 0.0, py::arg("scaling") = 1.0)
;
}
} // namespace Aidge
@@ -30,7 +30,7 @@ void init_GraphView(py::module& m) {
           :param path: save location
           :type path: str
           )mydelimiter")
+          .def("log_outputs", &GraphView::logOutputs, py::arg("path"))

           .def("get_output_nodes", &GraphView::outputNodes,
           R"mydelimiter(
           Get set of output Nodes.
......
@@ -23,6 +23,9 @@ template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
     const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
     py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), py::multiple_inheritance())
+        .def(py::init<float, float>(),
+            py::arg("epsilon"),
+            py::arg("momentum"))
         .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
         .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
         .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
......
@@ -33,12 +33,14 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
                       DimSize_t,
                       const std::array<DimSize_t, DIM> &,
                       const std::array<DimSize_t, DIM> &,
-                      const std::array<DimSize_t, DIM> &>(),
+                      const std::array<DimSize_t, DIM> &,
+                      bool>(),
             py::arg("in_channels"),
             py::arg("out_channels"),
             py::arg("kernel_dims"),
             py::arg("stride_dims"),
-            py::arg("dilation_dims"))
+            py::arg("dilation_dims"),
+            py::arg("no_bias"))
         .def("get_inputs_name", &Conv_Op<DIM>::getInputsName)
         .def("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
         .def("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
@@ -51,18 +53,20 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
                                                   const std::vector<DimSize_t>& kernel_dims,
                                                   const std::string& name,
                                                   const std::vector<DimSize_t> &stride_dims,
-                                                  const std::vector<DimSize_t> &dilation_dims) {
+                                                  const std::vector<DimSize_t> &dilation_dims,
+                                                  bool noBias) {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);

-        return Conv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        return Conv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), noBias);
     }, py::arg("in_channels"),
        py::arg("out_channels"),
        py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias") = false);
 }
......
@@ -33,11 +33,13 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         .def(py::init<const DimSize_t,
                       const std::array<DimSize_t, DIM> &,
                       const std::array<DimSize_t, DIM> &,
-                      const std::array<DimSize_t, DIM> &>(),
+                      const std::array<DimSize_t, DIM> &,
+                      bool>(),
             py::arg("nb_channels"),
             py::arg("kernel_dims"),
             py::arg("stride_dims"),
-            py::arg("dilation_dims"))
+            py::arg("dilation_dims"),
+            py::arg("no_bias"))
         .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
         .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
         .def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
@@ -46,17 +48,19 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
                                                            const std::vector<DimSize_t>& kernel_dims,
                                                            const std::string& name,
                                                            const std::vector<DimSize_t> &stride_dims,
-                                                           const std::vector<DimSize_t> &dilation_dims) {
+                                                           const std::vector<DimSize_t> &dilation_dims,
+                                                           bool no_bias) {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);

-        return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
     }, py::arg("nb_channenls"),
        py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias")= false);
 }
......
@@ -30,21 +30,23 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
                                                          const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
-                                                         const std::vector<DimSize_t> &dilation_dims)
+                                                         const std::vector<DimSize_t> &dilation_dims,
+                                                         bool no_bias)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);

-        return PaddedConv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        return PaddedConv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
    }, py::arg("in_channels"),
       py::arg("out_channels"),
       py::arg("kernel_dims"),
       py::arg("name") = "",
       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-      py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+      py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+      py::arg("no_bias")= false);
 }

 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
@@ -53,20 +55,22 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
                                                                    const std::string& name,
                                                                    const std::vector<DimSize_t> &stride_dims,
                                                                    const std::vector<DimSize_t> &padding_dims,
-                                                                   const std::vector<DimSize_t> &dilation_dims)
+                                                                   const std::vector<DimSize_t> &dilation_dims,
+                                                                   bool no_bias)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);

-        return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
+        return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
    }, py::arg("nb_channels"),
       py::arg("kernel_dims"),
       py::arg("name") = "",
       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-      py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
+      py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+      py::arg("no_bias") = false);
 }
......
@@ -11,12 +11,12 @@
 #include <pybind11/pybind11.h>

 #include "aidge/backend/cpu/data/TensorImpl.hpp" // This include add Tensor

 namespace py = pybind11;

 namespace Aidge {
+void init_Random(py::module&);
 void init_Data(py::module&);
 void init_Database(py::module&);
 void init_DataProvider(py::module&);
@@ -71,9 +71,11 @@ void init_Recipes(py::module&);
 void init_Scheduler(py::module&);
 void init_TensorUtils(py::module&);
+void init_Filler(py::module&);

-void init_Aidge(py::module& m){
+void init_Aidge(py::module& m) {
+    init_Random(m);
     init_Data(m);
     init_Database(m);
     init_DataProvider(m);
@@ -129,9 +131,8 @@ void init_Aidge(py::module& m){
     init_Recipes(m);
     init_Scheduler(m);
     init_TensorUtils(m);
+    init_Filler(m);
 }

-PYBIND11_MODULE(aidge_core, m) {
-    init_Aidge(m);
-}
-}
+PYBIND11_MODULE(aidge_core, m) { init_Aidge(m); }
+}  // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/utils/Random.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Random(py::module &m) {
auto mRand = m.def_submodule("random", "Random module.");
py::class_<Random::Generator>(mRand, "Generator")
.def_static("set_seed", Random::Generator::setSeed);
}
} // namespace Aidge
@@ -41,8 +41,8 @@ Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::si
     }

     // Compute the number of bacthes depending on mDropLast boolean
     mNbBatch = (mDropLast) ?
         static_cast<std::size_t>(std::floor(mNbItems / mBatchSize)) :
         static_cast<std::size_t>(std::ceil(mNbItems / mBatchSize));
 }
@@ -98,7 +98,7 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() con
 void Aidge::DataProvider::setBatches(){

     mBatches.clear();
     mBatches.resize(mNbItems);
     std::iota(mBatches.begin(),
@@ -106,7 +106,7 @@ void Aidge::DataProvider::setBatches(){
               0U);

     if (mShuffle){
-        Random::randShuffle(mBatches);
+        Aidge::Random::randShuffle(mBatches);
     }

     if (mNbItems % mBatchSize !=0){ // The last batch is not full
......