Commit b9e912b3 authored by Maxence Naud

Merge branch 'dev' into fix_gather_and_slice

parents 2e2423e1 cc3a7008
Showing 236 additions and 77 deletions
......@@ -554,16 +554,11 @@ public:
inline void print() const { fmt::print("{}\n", toString()); }
std::shared_ptr<Tensor> grad() {
// if (!mGrad && mImpl) {
// mGrad = std::make_shared<Tensor>(mDims);
// mGrad->setDataType(mDataType);
// mGrad->setBackend(mImpl->backend());
// // if (mImpl) mGrad->setBackend(mImpl->backend());
// }
return mGrad;
}
void setGrad(std::shared_ptr<Tensor> newGrad) {
mGrad = newGrad;
}
/**
* @brief Associate the gradient with a Tensor instance and set its implementation
......@@ -574,7 +569,7 @@ public:
* @note If Tensor instance and implementation already existed for the gradient
* nothing is done.
*/
void initGradient() {
void initGrad() {
if (!mGrad) {
mGrad = std::make_shared<Tensor>(mDims);
}
......
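The net effect of this hunk is that `grad()` is a plain accessor again and allocation is explicit via the renamed `initGrad()`. A minimal sketch of the resulting usage, assuming a `Tensor` can be constructed from a dims vector as the hunk above suggests (the helper function is illustrative):

```cpp
#include <memory>
#include "aidge/data/Tensor.hpp"

// Sketch: explicit gradient management with the renamed initGrad().
void touchGrad(const std::shared_ptr<Aidge::Tensor>& t) {
    t->initGrad();                                  // allocates the gradient only if missing
    std::shared_ptr<Aidge::Tensor> g = t->grad();   // pure accessor, no hidden allocation
    t->setGrad(std::make_shared<Aidge::Tensor>(t->dims()));  // or attach one explicitly
}
```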
......@@ -210,7 +210,7 @@ public:
* @brief Compute dimensions of input/output Tensors for each Operator of the
* GraphView object's Nodes.
*/
bool forwardDims(const std::vector<std::vector<DimSize_t>> dims = {}, bool allowDataDependency = false);
bool forwardDims(const std::vector<std::vector<DimSize_t>>& dims = {}, bool allowDataDependency = false);
/** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
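Since `forwardDims()` now takes the dimensions by const reference, the caller's vector is no longer copied. A minimal sketch under the assumption of a graph with a single NCHW input (the shape and helper function are illustrative):

```cpp
#include <memory>
#include <vector>
#include "aidge/graph/GraphView.hpp"

// Sketch: propagate dimensions through a GraphView with one 1x3x224x224 input.
bool propagate(const std::shared_ptr<Aidge::GraphView>& graph) {
    const std::vector<std::vector<Aidge::DimSize_t>> dims = {{1, 3, 224, 224}};
    return graph->forwardDims(dims, /*allowDataDependency=*/false);
}
```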
......@@ -486,6 +486,14 @@ public:
*/
IOIndex_t getNbFreeDataInputs() const;
/**
* @brief Force update of GraphView inputs/outputs.
* It may be necessary to force the update of GraphView inputs/outputs when
* connections are added or removed inside the GraphView **after** the nodes
* were added.
*/
void updateInputsOutputs();
private:
///////////////////////////////////////////////////////
// TENSOR MANAGEMENT
......
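A sketch of the scenario the new `updateInputsOutputs()` documentation describes: a connection is added between two nodes that already belong to the view, so its input/output lists must be refreshed by hand. The `addChild()` wiring with default ports is an assumption about the surrounding code:

```cpp
#include <memory>
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"

// Sketch: rewiring nodes after they were added to the view.
void rewire(const std::shared_ptr<Aidge::GraphView>& graph,
            const std::shared_ptr<Aidge::Node>& producer,
            const std::shared_ptr<Aidge::Node>& consumer) {
    producer->addChild(consumer);   // new connection made inside the view
    graph->updateInputsOutputs();   // force the view to recompute its I/O lists
}
```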
......@@ -70,16 +70,9 @@ public:
return mScheduler;
}
void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx);
const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
inputOp.first->getOperator()->associateInput(inputOp.second, data);
// Associate inputs for custom implementation
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final;
bool forwardDims(bool allowDataDependency = false) override final {
// Check first that all required inputs are available, otherwise
......
......@@ -56,8 +56,8 @@ public:
///////////////////////////////////////////////////
// Tensor access
// input management
void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final;
void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override;
void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override;
const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const;
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final;
......
......@@ -105,7 +105,7 @@ public:
void forward() override final;
void backward() override final {
fmt::print("Basic Producer backward() function.\n");
// fmt::print("Basic Producer backward() function.\n");
}
void setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) override {
if (getAttr<ProdAttr::Constant>()) {
......
......@@ -37,7 +37,7 @@ public:
/**
* @brief Run the provided Computational Graph with a batch of data
*/
virtual void forward(bool forwardDims = true, std::vector<std::shared_ptr<Aidge::Tensor>> data = {});
virtual void forward(bool forwardDims = true, const std::vector<std::shared_ptr<Aidge::Tensor>>& data = {});
};
} // namespace Aidge
......
......@@ -114,7 +114,7 @@ public:
*
* @param data data input tensors
*/
void connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data);
void connectInputs(const std::vector<std::shared_ptr<Aidge::Tensor>>& data);
/**
* @brief Save in a Markdown file the static scheduling with early and late relative order for the nodes.
......
......@@ -49,12 +49,12 @@ public:
/**
* @brief Run the provided Computational Graph with a batch of data
*/
virtual void forward(bool forwardDims = true, std::vector<std::shared_ptr<Aidge::Tensor>> data = {});
virtual void forward(bool forwardDims = true, const std::vector<std::shared_ptr<Aidge::Tensor>>& data = {});
/**
* @brief Run the provided Computational Graph with a batch of data
*/
void backward(std::vector<std::shared_ptr<Aidge::Tensor>> data, bool instantiateGrad = true);
void backward(bool instantiateGrad = true);
private:
SchedulingPolicy mSchedulingPolicy;
......
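Taken together with the `Scheduler::forward()` change above, one training step now looks like the following sketch: the batch is only fed to `forward()` (by const reference), while `backward()` works from the tensors already held by the graph. The surrounding function name is illustrative:

```cpp
#include <memory>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp"

// Sketch: forward takes the batch by const reference; backward takes no data anymore.
void trainStep(Aidge::SequentialScheduler& scheduler,
               const std::vector<std::shared_ptr<Aidge::Tensor>>& batch) {
    scheduler.forward(/*forwardDims=*/true, batch);
    scheduler.backward(/*instantiateGrad=*/true);
}
```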
......@@ -21,6 +21,7 @@
#include "aidge/utils/future_std/any.hpp"
#include "aidge/utils/Attributes.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#ifdef PYBIND
#include <pybind11/pybind11.h>
......@@ -86,7 +87,7 @@ public:
template<class T> void addAttr(const std::string& name, const T& value)
{
const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
assert(res.second && "attribute already exists");
AIDGE_ASSERT(res.second, "attribute already exists");
#ifdef PYBIND
// We cannot handle Python object if the Python interpreter is not running
......@@ -129,10 +130,10 @@ public:
void addAttrPy(const std::string& name, py::object&& value)
{
auto it = mAttrs.find(name);
assert(it == mAttrs.end() && "attribute already exists");
AIDGE_ASSERT(it == mAttrs.end(), "attribute already exists");
const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
assert(res.second && "attribute already exists");
AIDGE_ASSERT(res.second, "attribute already exists");
}
void setAttrPy(const std::string& name, py::object&& value) override final
......@@ -199,6 +200,8 @@ public:
};
#endif
virtual ~DynamicAttributes() {}
private:
#ifdef PYBIND
// Stores C++ attributes (copy) and Python-only attributes
......
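The swap from `assert()` to `AIDGE_ASSERT` means a duplicate attribute now raises a formatted error in release builds too. A minimal sketch (the header path and surrounding function are assumptions):

```cpp
#include "aidge/utils/DynamicAttributes.hpp"  // assumed header for DynamicAttributes

// Sketch: the uniqueness check now fires through AIDGE_ASSERT.
void attrExample() {
    Aidge::DynamicAttributes attrs;
    attrs.addAttr<int>("radius", 5);
    // attrs.addAttr<int>("radius", 6);  // would now fail: "attribute already exists"
}
```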
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "aidge/data/Database.hpp"
#include "aidge/data/Tensor.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Database(py::module& m){
    py::class_<Database, std::shared_ptr<Database>>(m,"Database");
}
/**
 * @brief Trampoline class for binding
 *
 */
class pyDatabase : public Database {
public:
    using Database::Database;  // Inherit constructors

    std::vector<std::shared_ptr<Tensor>> getItem(
        const std::size_t index) const override {
        PYBIND11_OVERRIDE_PURE_NAME(std::vector<std::shared_ptr<Tensor>>, Database,
                                    "get_item", getItem, index);
    }
    std::size_t getLen() const noexcept override {
        PYBIND11_OVERRIDE_PURE_NAME(std::size_t, Database, "len", getLen);
    }
    std::size_t getNbModalities() const noexcept override {
        PYBIND11_OVERRIDE_PURE_NAME(std::size_t, Database, "get_nb_modalities",
                                    getNbModalities);
    }
};

void init_Database(py::module& m) {
    py::class_<Database, std::shared_ptr<Database>, pyDatabase>(
        m, "Database", py::dynamic_attr())
        .def(py::init<>())
        .def("get_item", &Database::getItem)
        .def("len", &Database::getLen)
        .def("get_nb_modalities", &Database::getNbModalities);
}
} // namespace Aidge
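The trampoline exists so Python subclasses can override the three pure virtuals. On the C++ side the same contract looks like this sketch (the dummy class and its trivial bodies are illustrative):

```cpp
#include <cstddef>
#include <memory>
#include <vector>
#include "aidge/data/Database.hpp"
#include "aidge/data/Tensor.hpp"

// Sketch: the interface that pyDatabase forwards to "get_item", "len",
// and "get_nb_modalities" on the Python side.
class DummyDatabase : public Aidge::Database {
public:
    std::vector<std::shared_ptr<Aidge::Tensor>> getItem(const std::size_t /*index*/) const override {
        return {};  // one Tensor per modality in a real dataset
    }
    std::size_t getLen() const noexcept override { return 0; }
    std::size_t getNbModalities() const noexcept override { return 1; }
};
```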
......@@ -77,7 +77,9 @@ void init_Tensor(py::module& m){
.def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
.def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
.def("grad", &Tensor::grad)
.def("set_grad", &Tensor::setGrad)
.def("dtype", &Tensor::dataType)
.def("init_grad", &Tensor::initGrad)
.def("size", &Tensor::size)
.def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize)
.def("has_impl", &Tensor::hasImpl)
......
......@@ -73,6 +73,7 @@ void init_Recipes(py::module&);
void init_GraphViewHelper(py::module&);
void init_Scheduler(py::module&);
void init_MemoryManager(py::module&);
void init_TensorUtils(py::module&);
void init_Filler(py::module&);
......@@ -136,6 +137,7 @@ void init_Aidge(py::module& m) {
init_Recipes(m);
init_GraphViewHelper(m);
init_Scheduler(m);
init_MemoryManager(m);
init_TensorUtils(m);
init_Filler(m);
}
......
......@@ -24,5 +24,6 @@ namespace py = pybind11;
namespace Aidge {
void init_GraphViewHelper(py::module &m) {
m.def("producers", &producers, py::arg("graphview"));
m.def("compile_gradient", &compile_gradient, py::arg("graphview"));
}
} // namespace Aidge
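For context, `compile_gradient` is the helper that prepares every gradient Tensor of a view before a backward pass. A sketch of its intended use (the header path and surrounding function are assumptions):

```cpp
#include <memory>
#include "aidge/graph/GraphView.hpp"
#include "aidge/recipes/GraphViewHelper.hpp"  // assumed header for these helpers

// Sketch: set up gradient Tensors (backend, data type) for the whole view.
void prepareBackward(std::shared_ptr<Aidge::GraphView> graph) {
    Aidge::compile_gradient(graph);
}
```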
......@@ -21,66 +21,70 @@
namespace py = pybind11;
namespace Aidge {
void init_Recipes(py::module &m) {
void init_Recipes(py::module &m)
{
m.def("fuse_mul_add", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseMulAdd), py::arg("graph_view"), R"mydelimiter(
Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
Recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
:param graph_view: Graph view on which we want to apply the recipie
:param graph_view: Graph view on which we want to apply the recipe
:type graph_view: :py:class:`aidge_core.GraphView`
)mydelimiter");
// m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
// Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
// recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
// :param nodes: The MatMul and Add nodes to fuse.
// :type nodes: list of :py:class:`aidge_core.Node`
// )mydelimiter");
m.def("remove_dropout",static_cast<void(*)(std::shared_ptr<GraphView>)>(removeDropout), py::arg("graph_view"), R"mydelimiter(
Recipie to remove a dropout operator.
Recipe to remove a dropout operator.
:param graph_view: Graph view on which we want to apply the recipie
:param graph_view: Graph view on which we want to apply the recipe
:type graph_view: :py:class:`aidge_core.GraphView`
)mydelimiter");
m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
Recipie to remove a flatten operator.
Recipe to remove a flatten operator.
:param graph_view: Graph view on which we want to apply the recipie
:param graph_view: Graph view on which we want to apply the recipe
:type graph_view: :py:class:`aidge_core.GraphView`
)mydelimiter");
// m.def("remove_flatten", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(removeFlatten), py::arg("nodes"), R"mydelimiter(
// Recipie to remove a flatten operator.
// Recipe to remove a flatten operator.
// :param nodes: The flatten operator to remove.
// :type nodes: list of :py:class:`aidge_core.Node`
// )mydelimiter");
// m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
// Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
// Recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
// :param nodes: The MatMul and Add nodes to fuse.
// :type nodes: list of :py:class:`aidge_core.Node`
// )mydelimiter");
m.def("fuse_batchnorm", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseBatchNorm), py::arg("graph_view"), R"mydelimiter(
Recipie to remove a flatten operator.
Recipe to remove a flatten operator.
:param graph_view: Graph view on which we want to apply the recipie
:param graph_view: Graph view on which we want to apply the recipe
:type graph_view: :py:class:`aidge_core.GraphView`
)mydelimiter");
m.def("get_conv_horizontal_tiling", static_cast<std::set<std::shared_ptr<Node>>(*)(const std::shared_ptr<Node>&, const DimIdx_t, const std::size_t)>(getConvHorizontalTiling),
m.def("get_conv_horizontal_tiling", static_cast<std::set<std::shared_ptr<Node>>(*)(const std::shared_ptr<Node>&, const DimIdx_t, const std::size_t)>(getConvHorizontalTiling),
py::arg("node"), py::arg("axis"), py::arg("nb_slices"));
// m.def("fuse_batchnorm", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseBatchNorm), py::arg("nodes"), R"mydelimiter(
// Recipie to remove a flatten operator.
// recipe to remove a flatten operator.
// :param nodes: The flatten operator to remove.
// :type nodes: list of :py:class:`aidge_core.Node`
// )mydelimiter");
m.def("expand_metaops", static_cast<void(*)(std::shared_ptr<GraphView>, bool)>(expandMetaOps), py::arg("graph_view"), py::arg("recursive") = false);
}
} // namespace Aidge
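On the C++ side the bound recipes are free functions over a `GraphView`; a typical post-import clean-up pass might look like this sketch (the header path and function name are assumptions):

```cpp
#include <memory>
#include "aidge/graph/GraphView.hpp"
#include "aidge/recipes/Recipes.hpp"  // assumed header for the recipe declarations

// Sketch: the usual simplification sequence these bindings expose.
void simplify(std::shared_ptr<Aidge::GraphView> graph) {
    Aidge::fuseMulAdd(graph);      // MatMul + Add -> FC
    Aidge::removeFlatten(graph);   // drop redundant Flatten operators
    Aidge::fuseBatchNorm(graph);   // fold BatchNorm into the preceding layer
    Aidge::expandMetaOps(graph, /*recursive=*/false);
}
```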
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "aidge/scheduler/MemoryManager.hpp"
namespace py = pybind11;
namespace Aidge {
void init_MemoryManager(py::module& m)
{
py::enum_<MemoryManager::OptimizeStrategy>(m, "OptimizeStrategy")
.value("None", MemoryManager::OptimizeStrategy::None)
.value("OptimizeMaxLifetimeMinSizeFirst", MemoryManager::OptimizeStrategy::OptimizeMaxLifetimeMinSizeFirst)
.value("OptimizeMaxLifetimeMaxSizeFirst", MemoryManager::OptimizeStrategy::OptimizeMaxLifetimeMaxSizeFirst)
.value("OptimizeMaxHoleMaxLifetimeFirst", MemoryManager::OptimizeStrategy::OptimizeMaxHoleMaxLifetimeFirst)
.export_values();
py::class_<MemoryManager::MemorySpace, std::shared_ptr<MemoryManager::MemorySpace>>(m, "MemorySpace")
.def(py::init<MemoryManager::Clock_T, unsigned int, unsigned int, std::set<std::shared_ptr<Node>> >(), py::arg("clock"), py::arg("offset"), py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>())
.def_readwrite("offset", &MemoryManager::MemorySpace::offset)
.def_readwrite("size", &MemoryManager::MemorySpace::size)
.def_readwrite("dependencies", &MemoryManager::MemorySpace::dependencies)
.def_readwrite("allocated", &MemoryManager::MemorySpace::allocated)
.def_readwrite("released", &MemoryManager::MemorySpace::released);
py::class_<MemoryManager::MemoryPlane, std::shared_ptr<MemoryManager::MemoryPlane>>(m, "MemoryPlane")
.def(py::init<std::shared_ptr<MemoryManager::MemorySpace>,
MemoryManager::Clock_T, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int>(),
py::arg("mem_space"), py::arg("clock"), py::arg("offset"),
py::arg("size"), py::arg("stride"), py::arg("length"), py::arg("count"))
.def_readwrite("mem_space", &MemoryManager::MemoryPlane::memSpace)
.def_readwrite("allocated", &MemoryManager::MemoryPlane::allocated)
.def_readwrite("offset", &MemoryManager::MemoryPlane::offset)
.def_readwrite("size", &MemoryManager::MemoryPlane::size)
.def_readwrite("stride", &MemoryManager::MemoryPlane::stride)
.def_readwrite("length", &MemoryManager::MemoryPlane::length)
.def_readwrite("count", &MemoryManager::MemoryPlane::count)
.def("get_size", &MemoryManager::MemoryPlane::getSize)
.def("get_useful_size", &MemoryManager::MemoryPlane::getUsefulSize)
.def("get_contiguous_offset", &MemoryManager::MemoryPlane::getContiguousOffset)
.def("get_contiguous_size", &MemoryManager::MemoryPlane::getContiguousSize)
.def("get_wrapped_offset", &MemoryManager::MemoryPlane::getWrappedOffset)
.def("get_wrapped_size", &MemoryManager::MemoryPlane::getWrappedSize)
.def("get_final_offset", &MemoryManager::MemoryPlane::getFinalOffset)
.def("get_upper_offset", &MemoryManager::MemoryPlane::getUpperOffset)
.def("get_limit", &MemoryManager::MemoryPlane::getLimit);
py::class_<MemoryManager::MaxLifetimeMinSizeFirst>(m, "MaxLifetimeMinSizeFirst")
.def(py::init<unsigned int>(), py::arg("max_lifetime"))
.def_readonly("max_lifetime", &MemoryManager::MaxLifetimeMinSizeFirst::maxLifetime)
.def("__call__", &MemoryManager::MaxLifetimeMinSizeFirst::operator(), py::arg("p0"), py::arg("p1"));
py::class_<MemoryManager::MaxLifetimeMaxSizeFirst>(m, "MaxLifetimeMaxSizeFirst")
.def(py::init<unsigned int>(), py::arg("max_lifetime"))
.def_readonly("max_lifetime", &MemoryManager::MaxLifetimeMaxSizeFirst::maxLifetime)
.def("__call__", &MemoryManager::MaxLifetimeMaxSizeFirst::operator(), py::arg("p0"), py::arg("p1"));
py::class_<MemoryManager::MaxHoleMaxLifetimeFirst>(m, "MaxHoleMaxLifetimeFirst")
.def(py::init<unsigned int, MemoryManager*>(), py::arg("max_lifetime"), py::arg("inst"))
.def_readonly("max_lifetime", &MemoryManager::MaxHoleMaxLifetimeFirst::maxLifetime)
.def_readwrite("inst", &MemoryManager::MaxHoleMaxLifetimeFirst::inst)
.def("__call__", &MemoryManager::MaxHoleMaxLifetimeFirst::operator(), py::arg("p0"), py::arg("p1"));
py::class_<MemoryManager, std::shared_ptr<MemoryManager>>(m, "MemoryManager")
.def(py::init<>())
.def("reserve", (std::shared_ptr<MemoryManager::MemorySpace> (MemoryManager::*)(unsigned int, const std::set<std::shared_ptr<Node>>&)) &MemoryManager::reserve, py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>())
.def("expand", &MemoryManager::expand, py::arg("mem_space"), py::arg("required_size"))
.def("allocate", (MemoryManager::MemoryPlane (MemoryManager::*)(unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::allocate, py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
.def("allocate", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::allocate, py::arg("node"), py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
.def("is_wrap_around", &MemoryManager::isWrapAround, py::arg("mem_space"), py::arg("offset"), py::arg("size"), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
.def("reallocate", (MemoryManager::MemoryPlane (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("mem_space"), py::arg("offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
.def("reallocate", (MemoryManager::MemoryPlane (MemoryManager::*)(const MemoryManager::MemoryPlane&, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("memPlane"), py::arg("extra_offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
.def("reallocate", (unsigned int (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>, const std::shared_ptr<Node>&, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("mem_space"), py::arg("node"), py::arg("offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
.def("reallocate", (unsigned int (MemoryManager::*)(const MemoryManager::MemoryPlane&, const std::shared_ptr<Node>&, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("mem_plane"), py::arg("node"), py::arg("extra_offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
.def("release", (unsigned int (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>)) &MemoryManager::release, py::arg("mem_space"))
.def("release", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&)) &MemoryManager::release, py::arg("node"))
.def("release_dependencies", &MemoryManager::releaseDependencies, py::arg("node"))
.def("optimize", &MemoryManager::optimize, py::arg("strategy"))
.def("get_offset", &MemoryManager::getOffset, py::arg("node"), py::arg("plane") = 0)
.def("get_size", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&, unsigned int) const) &MemoryManager::getSize, py::arg("node"), py::arg("plane"))
.def("get_size", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&) const) &MemoryManager::getSize, py::arg("node"))
.def("get_peak_usage", &MemoryManager::getPeakUsage)
.def("get_max_lifetime", &MemoryManager::getMaxLifetime)
.def("get_planes", (const std::vector<MemoryManager::MemoryPlane>& (MemoryManager::*)(const std::shared_ptr<Node>&) const) &MemoryManager::getPlanes, py::arg("node"))
.def("get_planes", (const MemoryManager::MemMap_T& (MemoryManager::*)() const) &MemoryManager::getPlanes)
.def("get_planes", (MemoryManager::MemMap_T (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>) const) &MemoryManager::getPlanes, py::arg("mem_space"))
.def("get_nb_planes", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&) const) &MemoryManager::getNbPlanes, py::arg("node"))
.def("get_nb_planes", (unsigned int (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>) const) &MemoryManager::getNbPlanes, py::arg("mem_space"))
.def("get_current_tick", &MemoryManager::getCurrentTick)
.def("tick", &MemoryManager::tick)
.def("log", &MemoryManager::log, py::arg("file_name"))
;
}
} // Aidge
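These bindings mirror a clock-driven C++ API: memory is reserved against the current tick, the clock advances with `tick()`, and spaces are released per node. A condensed sketch of that lifecycle, assuming the C++ defaults match the Python-side ones shown above (sizes and the helper function are illustrative):

```cpp
#include <memory>
#include "aidge/graph/Node.hpp"
#include "aidge/scheduler/MemoryManager.hpp"

// Sketch: allocate a plane for a node, advance the clock, release, optimize, dump.
void planMemory(const std::shared_ptr<Aidge::Node>& node) {
    Aidge::MemoryManager mem;
    mem.allocate(node, /*size=*/1024);  // dependencies/stride/length/count use defaults
    mem.tick();                         // next logical time step
    mem.release(node);                  // space becomes reusable after this tick
    mem.optimize(Aidge::MemoryManager::OptimizeStrategy::OptimizeMaxLifetimeMinSizeFirst);
    mem.log("memory_mapping");          // write the final layout to a file
}
```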
......@@ -11,6 +11,7 @@
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "aidge/scheduler/MemoryManager.hpp"
#include "aidge/scheduler/Scheduler.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp"
#include "aidge/scheduler/ParallelScheduler.hpp"
......@@ -22,16 +23,18 @@ namespace Aidge {
void init_Scheduler(py::module& m){
py::class_<Scheduler, std::shared_ptr<Scheduler>>(m, "Scheduler")
.def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
.def("graph_view", &Scheduler::graphView)
.def("save_scheduling_diagram", &Scheduler::saveSchedulingDiagram, py::arg("file_name"))
.def("resetScheduling", &Scheduler::resetScheduling)
.def("generate_scheduling", &Scheduler::generateScheduling)
.def("get_static_scheduling", &Scheduler::getStaticScheduling, py::arg("step") = 0)
.def("generate_memory", &Scheduler::generateMemory, py::arg("inc_producers") = false, py::arg("wrap_around_buffer") = false)
;
py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>, Scheduler>(m, "SequentialScheduler")
.def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
.def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("data")=std::vector<Tensor>())
.def("backward", &SequentialScheduler::backward, py::arg("data"), py::arg("instanciate_grad")=true)
.def("backward", &SequentialScheduler::backward, py::arg("instanciate_grad")=true)
;
py::class_<ParallelScheduler, std::shared_ptr<ParallelScheduler>, Scheduler>(m, "ParallelScheduler")
......
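With the new `MemoryManager` include, a scheduler can hand its static schedule to the memory planner. A sketch, assuming `generateMemory()` returns the `MemoryManager` it builds:

```cpp
#include "aidge/scheduler/MemoryManager.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp"

// Sketch: derive a static schedule, then a memory plan from it.
void plan(Aidge::SequentialScheduler& scheduler) {
    scheduler.generateScheduling();
    Aidge::MemoryManager mem = scheduler.generateMemory(/*incProducers=*/false,
                                                        /*wrapAroundBuffer=*/false);
    mem.log("memory_info");
}
```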
......@@ -20,12 +20,12 @@
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
void Aidge::calculateFanInFanOut(std::shared_ptr<Aidge::Tensor> tensor,
std::uint32_t& fanIn, std::uint32_t& fanOut) {
AIDGE_ASSERT(
tensor->nbDims() == 4,
"Tensor need to have 4 dimensions to compute FanIn and FanOut.");
AIDGE_ASSERT(tensor->nbDims() == 4 || tensor->nbDims() == 2,
"Tensor need to have 4 or 2 dimensions to compute FanIn and "
"FanOut, but found a tensor with {} dims.",
tensor->nbDims());
// Warning: This function suppose NCXX data layout.
// Aidge currently only support NCHW but this maybe not be true in the
// future.
......@@ -35,6 +35,6 @@ void Aidge::calculateFanInFanOut(std::shared_ptr<Aidge::Tensor> tensor,
"Cannot calculate FanIn if tensor batch size is 0.");
AIDGE_ASSERT(channelSize != 0,
"Cannot calculate FanOut if tensor channel size is 0.");
fanIn = static_cast<std::uint32_t>(tensor->size() / batchSize);
fanOut = static_cast<std::uint32_t>(tensor->size() / channelSize);
}
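Concretely, for a 4-D NCHW weight tensor of shape [N, C, H, W] the two lines above give fanIn = size / N = C·H·W and fanOut = size / C = N·H·W; for a newly supported 2-D [N, C] tensor they reduce to fanIn = C and fanOut = N.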
......@@ -29,7 +29,9 @@ void Aidge::heFiller(std::shared_ptr<Aidge::Tensor> tensor,
: (varianceNorm == Aidge::VarianceNorm::Average)
? (fanIn + fanOut) / 2.0
: fanOut);
AIDGE_ASSERT(n > 0,
"Something went wrong division by zero or square root of "
"negative value.");
const T stdDev(std::sqrt(2.0 / n));
const T mean(varianceNorm == Aidge::VarianceNorm::FanIn ? meanNorm / fanIn
......
......@@ -29,6 +29,9 @@ void Aidge::xavierUniformFiller(std::shared_ptr<Aidge::Tensor> tensor,
: (varianceNorm == Aidge::VarianceNorm::Average)
? (fanIn + fanOut) / 2.0
: fanOut);
AIDGE_ASSERT(n > 0,
"Something went wrong division by zero or square root of "
"negative value.");
const T scale(std::sqrt(3.0 / n));
std::uniform_real_distribution<T> uniformDist(-scale, scale);
......
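As a worked example of what the new guards protect: with `VarianceNorm::FanIn` and a 2-D weight of shape [64, 128], n = fanIn = 128, so the He filler draws from a normal with stdDev = sqrt(2/128) = 0.125 and the Xavier-uniform filler from [-scale, scale] with scale = sqrt(3/128) ≈ 0.153. Without the asserts, n = 0 (e.g., an empty tensor) would divide by zero before the square root.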
......@@ -83,6 +83,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
}
fmt::print(fp.get(),
"```mermaid\n"
"%%{{init: {{'flowchart': {{ 'curve': 'monotoneY'}}, "
"'fontFamily': 'Verdana' }} }}%%\nflowchart TB\n\n");
......@@ -204,6 +205,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
fmt::print(fp.get(), "classDef producerCls_rootCls stroke:#f00,fill:#ccf\n");
fmt::print(fp.get(), "classDef genericCls_rootCls stroke:#f00,fill:#f9f9ff,stroke-width:1px,stroke-dasharray: 5 5\n");
fmt::print(fp.get(), "classDef metaCls_rootCls stroke:#f00,stroke-width:5px\n");
fmt::print(fp.get(), "```\n");
fmt::print(fp.get(), "\n");
}
......@@ -391,7 +393,7 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
forwardDims(dims);
}
bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims, bool allowDataDependency) {
bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>>& dims, bool allowDataDependency) {
// setInputs
// Link every tensor to the right pointer
// following parent - children informations
......@@ -414,9 +416,10 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
} else {
// Input is missing
AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i)
&& !std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(),
AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i),
"Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(),
"Empty input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
}
}
......@@ -907,7 +910,7 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
newGraph->getOrderedOutputs();
auto inputParents = std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(oldOIn.size());
auto outputChildren = std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(oldOOut.size());
auto outputChildren = std::vector<std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>>(oldOOut.size());
// keep in memory every node related to the node to replace :
// Parent
......@@ -918,19 +921,12 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
// inputParent.first -> addChild(newOI[i].first, inputParent.second, newOI[i].second);
}
// Children
for (std::size_t i = 0; i < oldOOut.size();) {
    std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> outputChild =
        oldOOut[i].first -> output(oldOOut[i].second);
    if (outputChild.empty()) {
        outputChildren[i] = std::pair<std::shared_ptr<Node>, IOIndex_t>({nullptr, gk_IODefaultIndex});
        ++i;
    }
    else {
        for (const auto& child : outputChild) {
            if (oldNodes.find(child.first) == oldNodes.cend()) {
                outputChildren[i] = child;
                ++i;
            }
        }
    }
}
for (std::size_t i = 0; i < oldOOut.size(); ++i) {
    std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> outputChild =
        oldOOut[i].first -> output(oldOOut[i].second);
    for (const auto& child : outputChild) {
        if (oldNodes.find(child.first) == oldNodes.cend()) {
            outputChildren[i].push_back(child);
        }
    }
}
......@@ -968,8 +964,8 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
}
}
for (std::size_t o = 0; o < oldOOut.size(); ++o) {
if (outputChildren[o].first) {
    newOOut[o].first -> addChild(outputChildren[o].first, newOOut[o].second, outputChildren[o].second);
}
for (const auto& child : outputChildren[o]) {
    newOOut[o].first -> addChild(child.first, newOOut[o].second, child.second);
}
}
}
......@@ -979,15 +975,21 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
if (newNodes.size() == 0) {
// Case 3
if (oldOIn.size() == oldOOut.size()) {
// Same number of inputs and outputs: connect each input to the corresponding output
for (std::size_t i = 0; i < oldOIn.size(); ++i) {
if (inputParents[i].first) {
inputParents[i].first -> addChild(outputChildren[i].first, inputParents[i].second, outputChildren[i].second);
for (const auto& child : outputChildren[i]) {
inputParents[i].first -> addChild(child.first, inputParents[i].second, child.second);
}
}
}
}
else if ((oldOIn.size() == 1) && (inputParents[0].first)) {
    for (std::size_t i = 0; i < oldOIn.size(); ++i) {
        inputParents[0].first -> addChild(outputChildren[i].first, inputParents[0].second, outputChildren[i].second);
    }
}
else if ((oldOIn.size() == 1) && (inputParents[0].first)) {
    // Single input: connect the only input to all the outputs
    for (std::size_t i = 0; i < oldOOut.size(); ++i) {
        for (const auto& child : outputChildren[i]) {
            inputParents[0].first -> addChild(child.first, inputParents[0].second, child.second);
        }
    }
}
}
......@@ -1008,8 +1010,8 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
}
}
for (std::size_t o = 0; o < oldOOut.size(); ++o) {
if (outputChildren[o].first) {
    newOOut[o].first -> addChild(outputChildren[o].first, newOOut[o].second, outputChildren[o].second);
}
for (const auto& child : outputChildren[o]) {
    newOOut[o].first -> addChild(child.first, newOOut[o].second, child.second);
}
}
}
......@@ -1058,6 +1060,12 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
return true;
}
void Aidge::GraphView::updateInputsOutputs() {
for (auto node : mNodes) {
updateInputsOutputsNew(node);
}
}
void Aidge::GraphView::updateInputsOutputsNew(std::shared_ptr<Node> newNode) {
// Can be called several times with the same node, e.g. when addChild() is
// called on a node already part of the GraphView. In this case, inputs/outputs
......