Skip to content
Snippets Groups Projects
Commit e1c3d326 authored by Olivier BICHLER's avatar Olivier BICHLER
Browse files

Merged with dev

parents ef381497 43b467f6
No related branches found
No related tags found
3 merge requests!1190.2.1,!113Draft: Fix slice,!104Make forwardDims() optional and handle data dependency
Pipeline #43561 passed
Showing
with 215 additions and 246 deletions
...@@ -39,7 +39,11 @@ class ExportNode(ABC): ...@@ -39,7 +39,11 @@ class ExportNode(ABC):
if parent_node is not None: if parent_node is not None:
self.inputs_dims.append(self.operator.get_input(idx).dims()) self.inputs_dims.append(self.operator.get_input(idx).dims())
else: else:
self.inputs_dims.append(None) print(self.operator.get_input(idx))
if self.operator.get_input(idx) is not None:
self.inputs_dims.append(self.operator.get_input(idx).dims())
else:
self.inputs_dims.append(None)
for idx, child_node in enumerate(self.node.get_children()): for idx, child_node in enumerate(self.node.get_children()):
self.outputs.append(child_node) self.outputs.append(child_node)
......
...@@ -331,6 +331,8 @@ class Tensor : public Data, ...@@ -331,6 +331,8 @@ class Tensor : public Data,
return div_.getOutput(0)->clone(); return div_.getOutput(0)->clone();
} }
~Tensor() noexcept;
public: public:
/** /**
* @brief Perform a deep copy of the tensor. * @brief Perform a deep copy of the tensor.
......
...@@ -9,34 +9,20 @@ ...@@ -9,34 +9,20 @@
* *
********************************************************************************/ ********************************************************************************/
#ifndef AIDGE_CORE_FILLER_H_ #ifndef AIDGE_CORE_FILLER_FILLER_H_
#define AIDGE_CORE_FILLER_H_ #define AIDGE_CORE_FILLER_FILLER_H_
#include <cstdint> // std::uint32_t
#include <memory> #include <memory>
#include <random> // normal_distribution, uniform_real_distribution
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
namespace Aidge { namespace Aidge {
inline void calculateFanInFanOut(std::shared_ptr<Tensor> tensor, void calculateFanInFanOut(std::shared_ptr<Tensor> tensor,
unsigned int& fanIn, unsigned int& fanOut) { std::uint32_t& fanIn, std::uint32_t& fanOut);
AIDGE_ASSERT(
tensor->nbDims() == 4, enum class VarianceNorm { FanIn, Average, FanOut };
"Tensor need to have 4 dimensions to compute FanIn and FanOut.");
// Warning: This function suppose NCXX data layout.
// Aidge currently only support NCHW but this maybe not be true in the
// future.
DimSize_t batchSize = tensor->dims()[0];
DimSize_t channelSize = tensor->dims()[1];
AIDGE_ASSERT(batchSize != 0,
"Cannot calculate FanIn if tensor batch size is 0.");
AIDGE_ASSERT(channelSize != 0,
"Cannot calculate FanOut if tensor channel size is 0.");
fanIn = static_cast<unsigned int>(tensor->size() / batchSize);
fanOut = static_cast<unsigned int>(tensor->size() / channelSize);
}
enum VarianceNorm { FanIn, Average, FanOut };
template <typename T> template <typename T>
void constantFiller(std::shared_ptr<Tensor> tensor, T constantValue); void constantFiller(std::shared_ptr<Tensor> tensor, T constantValue);
...@@ -50,14 +36,15 @@ void uniformFiller(std::shared_ptr<Tensor> tensor, T min, T max); ...@@ -50,14 +36,15 @@ void uniformFiller(std::shared_ptr<Tensor> tensor, T min, T max);
template <typename T> template <typename T>
void xavierUniformFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0, void xavierUniformFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
VarianceNorm varianceNorm = FanIn); VarianceNorm varianceNorm = VarianceNorm::FanIn);
template <typename T> template <typename T>
void xavierNormalFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0, void xavierNormalFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
VarianceNorm varianceNorm = FanIn); VarianceNorm varianceNorm = VarianceNorm::FanIn);
template <typename T> template <typename T>
void heFiller(std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm = FanIn, void heFiller(std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm = VarianceNorm::FanIn,
T meanNorm = 0.0, T scaling = 1.0); T meanNorm = 0.0, T scaling = 1.0);
} // namespace Aidge } // namespace Aidge
#endif /* AIDGE_CORE_FILLER_H_ */ #endif /* AIDGE_CORE_FILLER_FILLER_H_ */
...@@ -13,18 +13,12 @@ ...@@ -13,18 +13,12 @@
#define AIDGE_CORE_OPERATOR_AVGPOOLING_H_ #define AIDGE_CORE_OPERATOR_AVGPOOLING_H_
#include <array> #include <array>
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <string> #include <string>
#include <utility> // std::pair
#include <vector> #include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/ArrayHelpers.hpp" #include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
...@@ -60,107 +54,36 @@ public: ...@@ -60,107 +54,36 @@ public:
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
AvgPooling_Op(const AvgPooling_Op<DIM>& op) AvgPooling_Op(const AvgPooling_Op<DIM>& op);
: OperatorTensor(op),
Attributes_(op)
{
if (op.mImpl) {
SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend());
} else {
mImpl = nullptr;
}
}
/** /**
* @brief Clone the operator using its copy-constructor. * @brief Clone the operator using its copy-constructor.
* @see Operator::AvgPooling_Op * @see Operator::AvgPooling_Op
*/ */
std::shared_ptr<Operator> clone() const override { std::shared_ptr<Operator> clone() const override final {
return std::make_shared<AvgPooling_Op<DIM>>(*this); return std::make_shared<AvgPooling_Op<DIM>>(*this);
} }
bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { bool computeOutputDims(bool /*allowDataDependency*/ = false) override final;
// check inputs have been associated
if (!getInput(0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
}
if (!(getInput(0)->empty())) {
std::array<DimSize_t, DIM + 2> outputDims;
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
outputDims[0] = inputDims[0];
outputDims[1] = inputDims[1];
for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
std::floor(static_cast<float>(inputDims[dim+2] -
this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
}
getOutput(0)->resize(outputDims);
return true;
}
return false;
}
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
const std::vector<DimSize_t>& outputDims, const std::vector<DimSize_t>& outputDims,
const IOIndex_t outputIdx = 0) const override final { const IOIndex_t outputIdx = 0) const override final;
if (outputIdx != 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
}
if (firstEltDims.size() != outputDims.size()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
}
if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
// Offset
std::vector<DimSize_t> inputIdxDims = firstEltDims;
for (DimIdx_t i = 0; i < (DIM+2); ++i) {
if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
}
}
// padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
// Width
std::vector<DimSize_t> inputDims;
inputDims.push_back(outputDims[0]); // same batch value
inputDims.push_back(outputDims[1]); // same channel value
for (DimIdx_t i = 0; i < DIM; ++i) {
inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
* this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
+ 1
+ (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
}
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
return res;
}
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
}
void setBackend(const std::string &name, DeviceIdx_t device = 0) override { void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, name);
mOutputs[0]->setBackend(name, device);
}
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName() {
return {"data_input"}; return {"data_input"};
} }
static const std::vector<std::string> getOutputsName(){ static const std::vector<std::string> getOutputsName() {
return {"data_output"}; return {"data_output"};
} }
}; };
template <Aidge::DimIdx_t DIM>
const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling";
template <std::array<DimSize_t, 1>::size_type DIM> template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims, inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "", const std::string& name = "",
...@@ -178,6 +101,12 @@ inline std::shared_ptr<Node> AvgPooling( ...@@ -178,6 +101,12 @@ inline std::shared_ptr<Node> AvgPooling(
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported"); static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
return AvgPooling(to_array(kernel_dims), name, stride_dims); return AvgPooling(to_array(kernel_dims), name, stride_dims);
} }
extern template class Aidge::AvgPooling_Op<1>;
extern template class Aidge::AvgPooling_Op<2>;
extern template class Aidge::AvgPooling_Op<3>;
extern template class Aidge::AvgPooling_Op<4>;
} // namespace Aidge } // namespace Aidge
namespace { namespace {
......
...@@ -16,13 +16,11 @@ ...@@ -16,13 +16,11 @@
#include <memory> #include <memory>
#include <vector> #include <vector>
#include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge { namespace Aidge {
...@@ -50,16 +48,7 @@ public: ...@@ -50,16 +48,7 @@ public:
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
BatchNorm_Op(const BatchNorm_Op<DIM>& op) BatchNorm_Op(const BatchNorm_Op<DIM>& op);
: OperatorTensor(op),
Attributes_(op)
{
if (op.mImpl){
SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend());
}else{
mImpl = nullptr;
}
}
/** /**
* @brief Clone the operator using its copy-constructor. * @brief Clone the operator using its copy-constructor.
...@@ -79,36 +68,9 @@ public: ...@@ -79,36 +68,9 @@ public:
// } // }
bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { bool computeOutputDims(bool /*allowDataDependency*/ = false) override final;
// check inputs have been associated
bool associated = true;
for (IOIndex_t i = 0; i < nbInputs(); ++i) {
associated &= !(getInput(i)->empty());
}
if (associated) {
const DimSize_t nbFeatures = getInput(0)->dims()[1];
for (std::size_t i = nbData(); i < nbInputs(); ++i) {
if(getInput(i)->size() != nbFeatures) {
// /!\ Input size should be handled BEFORE calling this function
// This should raise an error
getInput(i)->resize({getInput(0)->dims()[1]});
}
}
mOutputs[0]->resize(getInput(0)->dims());
}
return associated;
}
void setBackend(const std::string &name, DeviceIdx_t device = 0) override { void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, name);
mOutputs[0]->setBackend(name, device);
// By default, automatically set backend for scale, shift, mean and variance
getInput(1)->setBackend(name, device);
getInput(2)->setBackend(name, device);
getInput(3)->setBackend(name, device);
getInput(4)->setBackend(name, device);
}
static const std::vector<std::string> getInputsName() { static const std::vector<std::string> getInputsName() {
return {"data_input", "scale", "shift", "mean", "variance"}; return {"data_input", "scale", "shift", "mean", "variance"};
...@@ -118,22 +80,19 @@ public: ...@@ -118,22 +80,19 @@ public:
} }
}; };
template <DimIdx_t DIM> extern template class Aidge::BatchNorm_Op<2>;
const std::string BatchNorm_Op<DIM>::Type = "BatchNorm"; extern template class Aidge::BatchNorm_Op<3>;
extern template class Aidge::BatchNorm_Op<4>;
template <DimSize_t DIM> template <DimSize_t DIM>
inline std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures, std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures,
const float epsilon = 1.0e-5F, const float epsilon = 1.0e-5F,
const float momentum = 0.1F, const float momentum = 0.1F,
const std::string& name = "") { const std::string& name = "");
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by BatchNorm, not supported");
auto batchNorm = std::make_shared<Node>(std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum), name); extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&);
addProducer(batchNorm, 1, {nbFeatures}, "scale"); extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&);
addProducer(batchNorm, 2, {nbFeatures}, "shift"); extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&);
addProducer(batchNorm, 3, {nbFeatures}, "batch_mean");
addProducer(batchNorm, 4, {nbFeatures}, "batch_variance");
return batchNorm;
}
} // namespace Aidge } // namespace Aidge
namespace { namespace {
......
...@@ -75,10 +75,10 @@ public: ...@@ -75,10 +75,10 @@ public:
void setBackend(const std::string& name, DeviceIdx_t device = 0) override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName() {
return {"data_input", "weight", "bias"}; return {"data_input", "weight", "bias"};
} }
static const std::vector<std::string> getOutputsName(){ static const std::vector<std::string> getOutputsName() {
return {"data_output"}; return {"data_output"};
} }
}; };
......
...@@ -187,10 +187,10 @@ public: ...@@ -187,10 +187,10 @@ public:
inline IOIndex_t nbParam() const noexcept { return mNbParam; }; inline IOIndex_t nbParam() const noexcept { return mNbParam; };
inline IOIndex_t nbOutputs() const noexcept { return mNbOut; }; inline IOIndex_t nbOutputs() const noexcept { return mNbOut; };
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName() {
return {}; return {};
} }
static const std::vector<std::string> getOutputsName(){ static const std::vector<std::string> getOutputsName() {
return {}; return {};
} }
}; };
......
...@@ -9,8 +9,8 @@ ...@@ -9,8 +9,8 @@
* *
********************************************************************************/ ********************************************************************************/
#ifndef AIDGE_MEMORY_MANAGER_H #ifndef AIDGE_CORE_SCHEDULER_MEMORY_MANAGER_H
#define AIDGE_MEMORY_MANAGER_H #define AIDGE_CORE_SCHEDULER_MEMORY_MANAGER_H
#include <memory> #include <memory>
#include <vector> #include <vector>
...@@ -75,12 +75,12 @@ public: ...@@ -75,12 +75,12 @@ public:
count(count_) count(count_)
{ {
assert(offset <= memSpace->size); assert(offset <= memSpace->size);
// The preceding assert should allow offset == memSpace->size (see // The preceding assert should allow offset == memSpace->size (see
// issue #63). This means immediate wrapping. // issue #63). This means immediate wrapping.
// It appends if the final offset computed in reallocate() is at // It appends if the final offset computed in reallocate() is at
// the end of the previous memPlane and is also at the end of the // the end of the previous memPlane and is also at the end of the
// memSpace (in case for example of in-place memory op.). // memSpace (in case for example of in-place memory op.).
// Instead of bringing the offset back to the beginning of the // Instead of bringing the offset back to the beginning of the
// memSpace, we stay attached to this offset in case the memSpace // memSpace, we stay attached to this offset in case the memSpace
// grows when a new memPlane is added. // grows when a new memPlane is added.
...@@ -128,7 +128,7 @@ public: ...@@ -128,7 +128,7 @@ public:
// Limit is computed dynamically, as memSpace->size may increase after // Limit is computed dynamically, as memSpace->size may increase after
// the creation of this memory space. This is actually necessary to // the creation of this memory space. This is actually necessary to
// ensure that the memory wrapping works correctly, because when // ensure that the memory wrapping works correctly, because when
// computing the margin required for the wrapping, it is assumed that // computing the margin required for the wrapping, it is assumed that
// the previous layer wrapping extends to the full memory space size. // the previous layer wrapping extends to the full memory space size.
inline unsigned int getLimit() const { inline unsigned int getLimit() const {
...@@ -193,7 +193,11 @@ public: ...@@ -193,7 +193,11 @@ public:
typedef std::map<std::shared_ptr<Node>, std::vector<MemoryPlane>, typedef std::map<std::shared_ptr<Node>, std::vector<MemoryPlane>,
CompByNodeName> MemMap_T; CompByNodeName> MemMap_T;
public:
MemoryManager(): mClock(0) {} MemoryManager(): mClock(0) {}
~MemoryManager() noexcept;
public:
/// Generates a new MemorySpace /// Generates a new MemorySpace
std::shared_ptr<MemorySpace> reserve(unsigned int size, std::shared_ptr<MemorySpace> reserve(unsigned int size,
const std::set<std::shared_ptr<Node> >& const std::set<std::shared_ptr<Node> >&
...@@ -246,7 +250,7 @@ public: ...@@ -246,7 +250,7 @@ public:
unsigned int stride = 0, unsigned int stride = 0,
unsigned int length = 1, unsigned int length = 1,
unsigned int count = 1); unsigned int count = 1);
/// Generate a new MemoryPlane in an existing MemorySpace, associated to a /// Generate a new MemoryPlane in an existing MemorySpace, associated to a
/// Node /// Node
unsigned int reallocate(std::shared_ptr<MemorySpace> memSpace, unsigned int reallocate(std::shared_ptr<MemorySpace> memSpace,
const std::shared_ptr<Node>& node, const std::shared_ptr<Node>& node,
...@@ -321,4 +325,4 @@ const char* const EnumStrings<Aidge::MemoryManager::OptimizeStrategy>::data[] ...@@ -321,4 +325,4 @@ const char* const EnumStrings<Aidge::MemoryManager::OptimizeStrategy>::data[]
"OptimizeMaxHoleMaxLifetimeFirst"}; "OptimizeMaxHoleMaxLifetimeFirst"};
} }
#endif // AIDGE_MEMORY_MANAGER_H #endif // AIDGE_CORE_SCHEDULER_MEMORY_MANAGER_H
...@@ -9,8 +9,8 @@ ...@@ -9,8 +9,8 @@
* *
********************************************************************************/ ********************************************************************************/
#ifndef AIDGE_PARALLELSCHEDULER_H_ #ifndef AIDGE_CORE_SCHEDULER_PARALLELSCHEDULER_H_
#define AIDGE_PARALLELSCHEDULER_H_ #define AIDGE_CORE_SCHEDULER_PARALLELSCHEDULER_H_
#include <chrono> #include <chrono>
#include <memory> #include <memory>
...@@ -41,4 +41,4 @@ public: ...@@ -41,4 +41,4 @@ public:
}; };
} // namespace Aidge } // namespace Aidge
#endif /* AIDGE_PARALLELSCHEDULER_H_ */ #endif /* AIDGE_CORE_SCHEDULER_PARALLELSCHEDULER_H_ */
...@@ -9,20 +9,20 @@ ...@@ -9,20 +9,20 @@
* *
********************************************************************************/ ********************************************************************************/
#ifndef AIDGE_SCHEDULER_H_ #ifndef AIDGE_CORE_SCHEDULER_SCHEDULER_H_
#define AIDGE_SCHEDULER_H_ #define AIDGE_CORE_SCHEDULER_SCHEDULER_H_
#include <cstddef> // std::size_t
#include <chrono> #include <chrono>
#include <map>
#include <memory> #include <memory>
#include <set> #include <set>
#include <string> #include <string>
#include <vector> #include <vector>
#include <map>
#include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/scheduler/MemoryManager.hpp" #include "aidge/scheduler/MemoryManager.hpp"
#include "aidge/utils/Types.h"
namespace Aidge { namespace Aidge {
class Node; class Node;
...@@ -33,30 +33,36 @@ protected: ...@@ -33,30 +33,36 @@ protected:
struct StaticSchedulingElement { struct StaticSchedulingElement {
StaticSchedulingElement( StaticSchedulingElement(
std::shared_ptr<Node> node_, std::shared_ptr<Node> node_,
size_t early_ = static_cast<size_t>(-1), std::size_t early_ = static_cast<std::size_t>(-1),
size_t late_ = static_cast<size_t>(-1)) std::size_t late_ = static_cast<std::size_t>(-1))
: node(node_), early(early_), late(late_) {} : node(node_), early(early_), late(late_) {}
std::shared_ptr<Node> node; std::shared_ptr<Node> node;
size_t early; std::size_t early;
size_t late; std::size_t late;
std::vector<std::shared_ptr<StaticSchedulingElement>> earlierThan; std::vector<std::shared_ptr<StaticSchedulingElement>> earlierThan;
std::vector<std::shared_ptr<StaticSchedulingElement>> laterThan; std::vector<std::shared_ptr<StaticSchedulingElement>> laterThan;
}; };
/**
* @brief Node with its start/end execution time stored for later display.
*/
struct SchedulingElement { struct SchedulingElement {
SchedulingElement( SchedulingElement(
std::shared_ptr<Node> node_, std::shared_ptr<Node> node_,
std::chrono::time_point<std::chrono::high_resolution_clock> start_, std::chrono::time_point<std::chrono::high_resolution_clock> start_,
std::chrono::time_point<std::chrono::high_resolution_clock> end_) std::chrono::time_point<std::chrono::high_resolution_clock> end_)
: node(node_), start(start_), end(end_) {} : node(node_), start(start_), end(end_) {}
~SchedulingElement() noexcept = default;
std::shared_ptr<Node> node; std::shared_ptr<Node> node;
std::chrono::time_point<std::chrono::high_resolution_clock> start; std::chrono::time_point<std::chrono::high_resolution_clock> start;
std::chrono::time_point<std::chrono::high_resolution_clock> end; std::chrono::time_point<std::chrono::high_resolution_clock> end;
}; };
public:
struct PriorProducersConsumers { struct PriorProducersConsumers {
PriorProducersConsumers();
PriorProducersConsumers(const PriorProducersConsumers&);
~PriorProducersConsumers() noexcept;
bool isPrior = false; bool isPrior = false;
std::set<std::shared_ptr<Aidge::Node>> requiredProducers; std::set<std::shared_ptr<Aidge::Node>> requiredProducers;
std::set<std::shared_ptr<Aidge::Node>> priorConsumers; std::set<std::shared_ptr<Aidge::Node>> priorConsumers;
...@@ -69,10 +75,22 @@ public: ...@@ -69,10 +75,22 @@ public:
{ {
// ctor // ctor
}; };
virtual ~Scheduler() = default;
virtual ~Scheduler() noexcept;
public:
/**
* @brief Return a vector of Node ordered by the order they are called by the scheduler.
* @return std::vector<std::shared_ptr<Node>>
*/
std::vector<std::shared_ptr<Node>> getStaticScheduling(std::size_t step = 0) const;
inline std::shared_ptr<GraphView> graphView() const noexcept {
return mGraphView;
}
/** /**
* Generate full static scheduling of the GraphView. * @brief Generate full static scheduling of the GraphView.
* For each node, an earliest and latest possible execution logical step * For each node, an earliest and latest possible execution logical step
* is specified. Nodes that may be scheduled at the same logical step have * is specified. Nodes that may be scheduled at the same logical step have
* no data dependency and can be run in parallel. * no data dependency and can be run in parallel.
...@@ -110,18 +128,21 @@ public: ...@@ -110,18 +128,21 @@ public:
*/ */
void saveSchedulingDiagram(const std::string& fileName) const; void saveSchedulingDiagram(const std::string& fileName) const;
protected:
/** /**
* @brief Return a vector of Node ordered by the order they are called by the scheduler * @brief Getter for the set of children Nodes of the given input Nodes.
* @return std::vector<std::shared_ptr<Node>> * @param producers Set of Nodes for which we want to obtain the set of children Nodes.
* @return std::set<std::shared_ptr<Node>> Children Nodes.
*/ */
std::vector<std::shared_ptr<Node>> getStaticScheduling(size_t step = 0) const; std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const;
inline std::shared_ptr<GraphView> getGraphView() const noexcept {
return mGraphView; Elts_t getNbAvailableData(const std::shared_ptr<Node>& node, const IOIndex_t inputIdx) const;
}
PriorProducersConsumers getPriorProducersConsumers(const std::shared_ptr<Node>& node) const;
protected:
/** /**
* Generate an initial base scheduling for the GraphView. * @brief Generate an initial base scheduling for the GraphView.
* The scheduling is entirely sequential and garanteed to be valid w.r.t. * The scheduling is entirely sequential and garanteed to be valid w.r.t.
* each node producer-consumer model. * each node producer-consumer model.
*/ */
...@@ -129,21 +150,15 @@ protected: ...@@ -129,21 +150,15 @@ protected:
/** /**
* Fill-in early and late scheduling step from initial base scheduling. * Fill-in early and late scheduling step from initial base scheduling.
* For each node, specifies the earliest and latest possible execution * For each node, specifies the earliest and latest possible execution
* logical step. * logical step.
*/ */
void generateEarlyLateScheduling(std::vector<std::shared_ptr<StaticSchedulingElement>>& schedule) const; void generateEarlyLateScheduling(std::vector<std::shared_ptr<StaticSchedulingElement>>& schedule) const;
/** private:
* @brief Set of layers receiving an input from currently processing layers void summarizeConsumerState(const std::shared_ptr<Node>& consumer, const std::string& nodeName) const;
*
* @param producers Set of layers ready to run.
* @return std::set<std::shared_ptr<Node>>
*/
std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const;
Elts_t getNbAvailableData(const std::shared_ptr<Node>& node, const IOIndex_t inputIdx) const;
PriorProducersConsumers getPriorProducersConsumers(const std::shared_ptr<Node>& node) const;
protected:
/** @brief Shared ptr to the scheduled graph view */ /** @brief Shared ptr to the scheduled graph view */
std::shared_ptr<GraphView> mGraphView; std::shared_ptr<GraphView> mGraphView;
/** @brief Shared ptr to the upper node containing the graph view */ /** @brief Shared ptr to the upper node containing the graph view */
...@@ -152,9 +167,9 @@ protected: ...@@ -152,9 +167,9 @@ protected:
std::vector<SchedulingElement> mScheduling; std::vector<SchedulingElement> mScheduling;
/** @brief List of nodes ordered by their */ /** @brief List of nodes ordered by their */
std::vector<std::vector<std::shared_ptr<StaticSchedulingElement>>> mStaticSchedule; std::vector<std::vector<std::shared_ptr<StaticSchedulingElement>>> mStaticSchedule;
size_t mStaticScheduleStep = 0; std::size_t mStaticScheduleStep = 0;
mutable std::map<std::shared_ptr<Node>, PriorProducersConsumers> mPriorCache; mutable std::map<std::shared_ptr<Node>, PriorProducersConsumers> mPriorCache;
}; };
} // namespace Aidge } // namespace Aidge
#endif /* AIDGE_SCHEDULER_H_ */ #endif /* AIDGE_CORE_SCHEDULER_SCHEDULER_H_ */
...@@ -9,16 +9,15 @@ ...@@ -9,16 +9,15 @@
* *
********************************************************************************/ ********************************************************************************/
#ifndef AIDGE_SEQUENTIALSCHEDULER_H_ #ifndef AIDGE_CORE_SCHEDULER_SEQUENTIALSCHEDULER_H_
#define AIDGE_SEQUENTIALSCHEDULER_H_ #define AIDGE_CORE_SCHEDULER_SEQUENTIALSCHEDULER_H_
#include <chrono>
#include <memory> #include <memory>
#include <set>
#include <string>
#include <vector> #include <vector>
#include <map>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/scheduler/Scheduler.hpp" #include "aidge/scheduler/Scheduler.hpp"
namespace Aidge { namespace Aidge {
...@@ -27,23 +26,26 @@ namespace Aidge { ...@@ -27,23 +26,26 @@ namespace Aidge {
*/ */
class SequentialScheduler : public Scheduler { class SequentialScheduler : public Scheduler {
public: public:
enum SchedulingPolicy { enum class SchedulingPolicy {
Default, Default,
AsSoonAsPossible, AsSoonAsPossible,
AsLateAsPossible AsLateAsPossible
}; };
public:
SequentialScheduler(std::shared_ptr<GraphView> graphView, std::shared_ptr<Node> upperNode = nullptr) SequentialScheduler(std::shared_ptr<GraphView> graphView, std::shared_ptr<Node> upperNode = nullptr)
: Scheduler(graphView, upperNode), : Scheduler(graphView, upperNode),
mSchedulingPolicy(Default) mSchedulingPolicy(SchedulingPolicy::Default)
{ {
// ctor // ctor
}; };
~SequentialScheduler() = default;
public:
inline void setSchedulingPolicy(SchedulingPolicy policy) { inline void setSchedulingPolicy(SchedulingPolicy policy) {
mSchedulingPolicy = policy; mSchedulingPolicy = policy;
} }
~SequentialScheduler() = default;
/** /**
* @brief Run the provided Computational Graph with a batch of data * @brief Run the provided Computational Graph with a batch of data
*/ */
...@@ -59,4 +61,4 @@ private: ...@@ -59,4 +61,4 @@ private:
}; };
} // namespace Aidge } // namespace Aidge
#endif /* AIDGE_SEQUENTIALSCHEDULER_H_ */ #endif /* AIDGE_CORE_SCHEDULER_SEQUENTIALSCHEDULER_H_ */
...@@ -9,8 +9,8 @@ ...@@ -9,8 +9,8 @@
* *
********************************************************************************/ ********************************************************************************/
#ifndef AIDGE_THREADPOOL_H_ #ifndef AIDGE_CORE_SCHEDULER_THREADPOOL_H_
#define AIDGE_THREADPOOL_H_ #define AIDGE_CORE_SCHEDULER_THREADPOOL_H_
#include <thread> #include <thread>
#include <mutex> #include <mutex>
...@@ -39,4 +39,4 @@ private: ...@@ -39,4 +39,4 @@ private:
}; };
} // namespace Aidge } // namespace Aidge
#endif /* AIDGE_THREADPOOL_H_ */ #endif /* AIDGE_CORE_SCHEDULER_THREADPOOL_H_ */
...@@ -27,6 +27,17 @@ namespace Aidge { ...@@ -27,6 +27,17 @@ namespace Aidge {
*/ */
#define AIDGE_LOG_CONTEXT(...) const Log::Context logContext_##__LINE__(__VA_ARGS__) #define AIDGE_LOG_CONTEXT(...) const Log::Context logContext_##__LINE__(__VA_ARGS__)
template<class U>
static void discard_args(U parg) {
(void)parg;
}
template<class U, class... Us>
static void discard_args(U parg, Us... pargs) {
(void)parg;
discard_args(pargs...);
}
/** /**
* Aidge logging class, for displaying and file logging of events. * Aidge logging class, for displaying and file logging of events.
*/ */
...@@ -54,7 +65,7 @@ public: ...@@ -54,7 +65,7 @@ public:
}; };
/** /**
* Detailed messages for debugging purposes, providing information helpful * Detailed messages for debugging purposes, providing information helpful
* for developers to trace and identify issues. * for developers to trace and identify issues.
* Detailed insights of what is appening in an operation, not useful for the * Detailed insights of what is appening in an operation, not useful for the
* end-user. The operation is performed nominally. * end-user. The operation is performed nominally.
...@@ -66,11 +77,13 @@ public: ...@@ -66,11 +77,13 @@ public:
#ifndef NDEBUG #ifndef NDEBUG
// only when compiled in Debug // only when compiled in Debug
log(Debug, fmt::format(std::forward<Args>(args)...)); log(Debug, fmt::format(std::forward<Args>(args)...));
#else
discard_args(&args...);
#endif #endif
} }
/** /**
* Messages that provide a record of the normal operation, about * Messages that provide a record of the normal operation, about
* the application's state, progress, or important events. * the application's state, progress, or important events.
* Reports normal start, end and key steps in an operation. The operation is * Reports normal start, end and key steps in an operation. The operation is
* performed nominally. * performed nominally.
...@@ -103,7 +116,7 @@ public: ...@@ -103,7 +116,7 @@ public:
} }
/** /**
* Signifies a problem or unexpected condition that the application can * Signifies a problem or unexpected condition that the application can
* recover from, but attention is needed to prevent further issues. * recover from, but attention is needed to prevent further issues.
* The operation could not be performed, but it does not prevent potential * The operation could not be performed, but it does not prevent potential
* further operations. * further operations.
......
...@@ -9,19 +9,18 @@ ...@@ -9,19 +9,18 @@
* *
********************************************************************************/ ********************************************************************************/
#include <pybind11/pybind11.h> #include <array>
#include <pybind11/stl.h>
#include <string> #include <string>
#include <vector> #include <vector>
#include <array>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/operator/AvgPooling.hpp" #include "aidge/operator/AvgPooling.hpp"
#include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp"
namespace py = pybind11; namespace py = pybind11;
namespace Aidge { namespace Aidge {
......
...@@ -10,11 +10,14 @@ ...@@ -10,11 +10,14 @@
* *
********************************************************************************/ ********************************************************************************/
#include <memory>
#include <string>
#include <pybind11/pybind11.h> #include <pybind11/pybind11.h>
#include <pybind11/stl.h> #include <pybind11/stl.h>
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Data.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/Operator.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
......
...@@ -9,13 +9,17 @@ ...@@ -9,13 +9,17 @@
* *
********************************************************************************/ ********************************************************************************/
#include <memory>
#include <string>
#include <pybind11/pybind11.h> #include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/Operator.hpp"
#include <pybind11/stl.h>
namespace py = pybind11; namespace py = pybind11;
namespace Aidge { namespace Aidge {
......
...@@ -15,7 +15,7 @@ void init_Log(py::module& m){ ...@@ -15,7 +15,7 @@ void init_Log(py::module& m){
py::class_<Log>(m, "Log") py::class_<Log>(m, "Log")
.def_static("debug", [](const std::string& msg) { Log::debug(msg); }, py::arg("msg"), .def_static("debug", [](const std::string& msg) { Log::debug(msg); }, py::arg("msg"),
R"mydelimiter( R"mydelimiter(
Detailed messages for debugging purposes, providing information helpful Detailed messages for debugging purposes, providing information helpful
for developers to trace and identify issues. for developers to trace and identify issues.
Detailed insights of what is appening in an operation, not useful for the Detailed insights of what is appening in an operation, not useful for the
end-user. The operation is performed nominally. end-user. The operation is performed nominally.
...@@ -27,7 +27,7 @@ void init_Log(py::module& m){ ...@@ -27,7 +27,7 @@ void init_Log(py::module& m){
)mydelimiter") )mydelimiter")
.def_static("info", [](const std::string& msg) { Log::info(msg); }, py::arg("msg"), .def_static("info", [](const std::string& msg) { Log::info(msg); }, py::arg("msg"),
R"mydelimiter( R"mydelimiter(
Messages that provide a record of the normal operation, about Messages that provide a record of the normal operation, about
the application's state, progress, or important events. the application's state, progress, or important events.
Reports normal start, end and key steps in an operation. The operation is Reports normal start, end and key steps in an operation. The operation is
performed nominally. performed nominally.
...@@ -57,7 +57,7 @@ void init_Log(py::module& m){ ...@@ -57,7 +57,7 @@ void init_Log(py::module& m){
)mydelimiter") )mydelimiter")
.def_static("error",[](const std::string& msg) { Log::error(msg); }, py::arg("msg"), .def_static("error",[](const std::string& msg) { Log::error(msg); }, py::arg("msg"),
R"mydelimiter( R"mydelimiter(
Signifies a problem or unexpected condition that the application can Signifies a problem or unexpected condition that the application can
recover from, but attention is needed to prevent further issues. recover from, but attention is needed to prevent further issues.
The operation could not be performed, but it does not prevent potential The operation could not be performed, but it does not prevent potential
further operations. further operations.
...@@ -75,21 +75,21 @@ void init_Log(py::module& m){ ...@@ -75,21 +75,21 @@ void init_Log(py::module& m){
:param msg: Fatal message. :param msg: Fatal message.
:type msg: str :type msg: str
)mydelimiter") )mydelimiter")
.def_static("setConsoleLevel", &Log::setConsoleLevel, py::arg("level"), .def_static("set_console_level", &Log::setConsoleLevel, py::arg("level"),
R"mydelimiter( R"mydelimiter(
Set the minimum log level displayed in the console. Set the minimum log level displayed in the console.
:param level: Log level. :param level: Log level.
:type level: Level :type level: Level
)mydelimiter") )mydelimiter")
.def_static("setFileLevel", &Log::setFileLevel, py::arg("level"), .def_static("set_file_level", &Log::setFileLevel, py::arg("level"),
R"mydelimiter( R"mydelimiter(
Set the minimum log level saved in the log file. Set the minimum log level saved in the log file.
:param level: Log level. :param level: Log level.
:type level: Level :type level: Level
)mydelimiter") )mydelimiter")
.def_static("setFileName", &Log::setFileName, py::arg("fileName"), .def_static("set_file_name", &Log::setFileName, py::arg("fileName"),
R"mydelimiter( R"mydelimiter(
Set the log file name. Set the log file name.
Close the current log file and open the one with the new file name. Close the current log file and open the one with the new file name.
......
...@@ -39,6 +39,10 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) { ...@@ -39,6 +39,10 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
return *this; return *this;
} }
Aidge::Tensor::~Tensor() noexcept = default;
void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) { void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
// TODO: scalar Tensor not handled // TODO: scalar Tensor not handled
if (dims.empty()) { // scalar if (dims.empty()) { // scalar
......
...@@ -8,15 +8,19 @@ ...@@ -8,15 +8,19 @@
* SPDX-License-Identifier: EPL-2.0 * SPDX-License-Identifier: EPL-2.0
* *
********************************************************************************/ ********************************************************************************/
#include <memory>
#include <random> // normal_distribution, uniform_real_distribution
#include "aidge/filler/Filler.hpp" #include "aidge/filler/Filler.hpp"
#include <cstddef> // std::size_t
#include <memory>
#include <string>
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
template<typename T> template<typename T>
void Aidge::constantFiller(std::shared_ptr<Aidge::Tensor> tensor, T constantValue){ void Aidge::constantFiller(std::shared_ptr<Aidge::Tensor> tensor, T constantValue) {
AIDGE_ASSERT(tensor->getImpl(), AIDGE_ASSERT(tensor->getImpl(),
"Tensor got no implementation, cannot fill it."); "Tensor got no implementation, cannot fill it.");
AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type"); AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/filler/Filler.hpp"
#include <cstdint> // std::uint32_t
#include <memory>
#include <string>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
void Aidge::calculateFanInFanOut(std::shared_ptr<Aidge::Tensor> tensor,
                                 std::uint32_t& fanIn, std::uint32_t& fanOut) {
    // Fan-in / fan-out are only defined here for 4-D tensors.
    AIDGE_ASSERT(
        tensor->nbDims() == 4,
        "Tensor need to have 4 dimensions to compute FanIn and FanOut.");
    // Warning: this function assumes an NCXX data layout. Aidge currently
    // only supports NCHW, but that may not stay true in the future.
    const DimSize_t dimN = tensor->dims()[0];
    const DimSize_t dimC = tensor->dims()[1];
    AIDGE_ASSERT(dimN != 0,
                 "Cannot calculate FanIn if tensor batch size is 0.");
    AIDGE_ASSERT(dimC != 0,
                 "Cannot calculate FanOut if tensor channel size is 0.");
    // fanIn  = elements per batch item; fanOut = elements per channel.
    const auto nbElements = tensor->size();
    fanIn  = static_cast<std::uint32_t>(nbElements / dimN);
    fanOut = static_cast<std::uint32_t>(nbElements / dimC);
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment