diff --git a/aidge_core/__init__.py b/aidge_core/__init__.py index c65dcc6cfc4be8825d1213854014718fb7170854..4b5c448355a17fd4274ba45f5cd98afa70b1ae53 100644 --- a/aidge_core/__init__.py +++ b/aidge_core/__init__.py @@ -8,4 +8,5 @@ http://www.eclipse.org/legal/epl-2.0. SPDX-License-Identifier: EPL-2.0 """ from aidge_core.aidge_core import * # import so generated by PyBind -from aidge_core.export import ExportNode +from aidge_core.export import ExportNode, generate_file, generate_str +import aidge_core.utils diff --git a/aidge_core/export/__init__.py b/aidge_core/export/__init__.py index 00b44121d68af06171525fdf953bf50e53328421..6fc846d93301f45b0635cd9b2fabae65fa7be8ab 100644 --- a/aidge_core/export/__init__.py +++ b/aidge_core/export/__init__.py @@ -1 +1,2 @@ from .node_export import * +from .code_generation import * diff --git a/aidge_core/export/code_generation.py b/aidge_core/export/code_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..b18b5476f8e083bcbe3d4f6c4a57132ebe7b780f --- /dev/null +++ b/aidge_core/export/code_generation.py @@ -0,0 +1,47 @@ +import os +from jinja2 import Environment, FileSystemLoader + + +def generate_file(file_path: str, template_path: str, **kwargs) -> None: + """Generate a file at `file_path` using the jinja template located at `template_path`. + + kwargs are used to fill the template. 
+ + :param file_path: path where to generate the file + :type file_path: str + :param template_path: Path to the template to use for code generation + :type template_path: str + """ + # Get directory name of the file + dirname = os.path.dirname(file_path) + + # If directory doesn't exist, create it + if not os.path.exists(dirname): + os.makedirs(dirname) + + # Get directory name and name of the template + template_dir = os.path.dirname(template_path) + template_name = os.path.basename(template_path) + + # Select template + template = Environment(loader=FileSystemLoader( + template_dir)).get_template(template_name) + + # Generate file + content = template.render(kwargs) + with open(file_path, mode="w", encoding="utf-8") as message: + message.write(content) + +def generate_str(template_path:str, **kwargs) -> str: + """Generate a string using the jinja template located at `template_path`. + kwargs are used to fill the template. + + :param template_path: Path to the template to use for code generation + :type template_path: str + :return: A string of the interpreted template + :rtype: str + """ + dirname = os.path.dirname(template_path) + filename = os.path.basename(template_path) + template = Environment(loader=FileSystemLoader(dirname)).get_template(filename) + return template.render(kwargs) diff --git a/aidge_core/utils.py b/aidge_core/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d82d524b7e886ed396507376a5934a748a89e44c --- /dev/null +++ b/aidge_core/utils.py @@ -0,0 +1,16 @@ +def template_docstring(template_keyword, text_to_replace): + """Method to template docstring + + :param template_keyword: Template keyword to replace, in the documentation you template word must be between `{` `}` + :type template_keyword: str + :param text_to_replace: Text to replace your template with. 
+ :type text_to_replace: str + """ + def dec(func): + if "{"+template_keyword+"}" not in func.__doc__: + raise RuntimeError( + f"The function {func.__name__} docstring does not contain the template keyword: {template_keyword}.") + func.__doc__ = func.__doc__.replace( + "{"+template_keyword+"}", text_to_replace) + return func + return dec diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp index 59c538ce640f9fb8a45c26a29b0c2599d883553e..c9a4c11d780a41a1620518047d66a7de2d7b55fa 100644 --- a/include/aidge/graph/GraphView.hpp +++ b/include/aidge/graph/GraphView.hpp @@ -160,7 +160,7 @@ public: /** * @brief List outside input connections of the GraphView. The vector - * size is garanteed to match the number of outside inputs of the GraphView. If there is + * size is guaranteed to match the number of outside inputs of the GraphView. If there is * no external connection to a given input, a pair of nullptr and gk_IODefaultIndex is returned. * @return std::vector<std::pair<NodePtr, IOIndex_t>> */ @@ -210,7 +210,7 @@ public: * @brief Compute dimensions of input/output Tensors for each Operator of the * GraphView object's Nodes. */ - bool forwardDims(const std::vector<std::vector<DimSize_t>> dims = {}, bool allowDataDependency = false); + bool forwardDims(const std::vector<std::vector<DimSize_t>>& dims = {}, bool allowDataDependency = false); /** @brief Set the same backend for each Operator of the GraphView object's Nodes. 
*/ void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const; @@ -376,6 +376,12 @@ public: addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor); } + inline void updateNodeName(const std::string& oldName, const std::string& newName){ + AIDGE_ASSERT(mNodeRegistry.find(oldName) != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted !", oldName, name()); + mNodeRegistry[newName] = mNodeRegistry[oldName]; + mNodeRegistry.erase(oldName); + } + /** * @brief Include a GraphView content in the current GraphView and link * the two sets by linking one Node from each GraphView. @@ -480,6 +486,14 @@ public: */ IOIndex_t getNbFreeDataInputs() const; + /** + * @brief Force update of GraphView inputs/outputs. + * It may be necessary to force the update of GraphView inputs/outputs when + * connections are added or removed inside the GraphView **after** the nodes + * were added. + */ + void updateInputsOutputs(); + private: /////////////////////////////////////////////////////// // TENSOR MANAGEMENT diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp index 908f56295887bd2fbed3350a026045a4ab6b21d9..2a0a4a3b703670c8ace05e03fc5c797fe861a423 100644 --- a/include/aidge/graph/Node.hpp +++ b/include/aidge/graph/Node.hpp @@ -235,8 +235,8 @@ public: /////////////////////////////////////////////////////// /** - * @brief Vector of pointers to each GraphView containing the object - * @return std::vector<GraphView> + * @brief Set of pointers to each GraphView containing this Node + * @return std::set<GraphView> */ inline std::set<std::shared_ptr<GraphView>> views() const noexcept { std::set<std::shared_ptr<GraphView>> res; @@ -460,10 +460,10 @@ private: // OPERATOR FUNCTIONNAL but commented out to avoid iostream inclusion // /** // * @brief operator<< overload to ease print & debug of nodes - // * @param[inout] ostream to print to + // * @param[inout] ostream to print to // * @param[in] n node to 
print // */ - // friend std::ostream& operator << (std::ostream& os, Node& n); + // friend std::ostream& operator << (std::ostream& os, Node& n); }; } // namespace Aidge diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp index c677da0f2e34a299ddec6ee85f5a84616206193d..a411101618a5f4acaf070516d67691a6b55e3ff5 100644 --- a/include/aidge/operator/MetaOperator.hpp +++ b/include/aidge/operator/MetaOperator.hpp @@ -70,16 +70,9 @@ public: return mScheduler; } - void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final { - AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type"); - AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx); - - const auto& inputOp = mGraph->getOrderedInputs()[inputIdx]; - inputOp.first->getOperator()->associateInput(inputOp.second, data); - - // Associate inputs for custom implementation - mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data); - } + void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; + void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; + void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final; bool forwardDims(bool allowDataDependency = false) override final { // Check first that all required inputs are available, otherwise diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp index 6086c5145eb39cee081468ba91473dc983cfa35f..a493793278d42904d8a62e31571720f94ff1655d 100644 --- a/include/aidge/operator/OperatorTensor.hpp +++ b/include/aidge/operator/OperatorTensor.hpp @@ -56,8 +56,8 @@ public: /////////////////////////////////////////////////// // Tensor access // input management - void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; - void 
setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final; + void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override; + void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override; const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const; std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final; diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp index 8f54ab217631ac69a4e16555f8e58f550ab0156c..c864bd045d8a5a1fc5f4ee591d1d81fcaf241bac 100644 --- a/include/aidge/operator/Scaling.hpp +++ b/include/aidge/operator/Scaling.hpp @@ -27,9 +27,10 @@ enum class ScalingAttr { scalingFactor, quantizedNbBits, isOutputUnsigned }; -class Scaling_Op : public OperatorTensor, - public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>, - public StaticAttributes<ScalingAttr, float, size_t, bool> { +class Scaling_Op + : public OperatorTensor, + public Registrable<Scaling_Op, std::string, std::shared_ptr<OperatorImpl>(const Scaling_Op&)>, + public StaticAttributes<ScalingAttr, float, size_t, bool> { public: static const std::string Type; @@ -84,7 +85,11 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name); } */ -inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, std::size_t quantizedNbBits=8, bool isOutputUnsigned=true, const std::string& name = "") { +inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, + std::size_t quantizedNbBits=8, + bool isOutputUnsigned=true, + const std::string& name = "") +{ return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name); } } // namespace Aidge diff --git a/include/aidge/scheduler/ParallelScheduler.hpp b/include/aidge/scheduler/ParallelScheduler.hpp index 
0b6f963d61bf0079a9a32bd335ba765788aba2a5..abacebf4e0c45130bb0e41872577052cfe0a176c 100644 --- a/include/aidge/scheduler/ParallelScheduler.hpp +++ b/include/aidge/scheduler/ParallelScheduler.hpp @@ -37,7 +37,7 @@ public: /** * @brief Run the provided Computational Graph with a batch of data */ - virtual void forward(bool forwardDims = true, std::vector<std::shared_ptr<Aidge::Tensor>> data = {}); + virtual void forward(bool forwardDims = true, const std::vector<std::shared_ptr<Aidge::Tensor>>& data = {}); }; } // namespace Aidge diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp index 2f8fbb7aeb6562e0dd309f8f53def6d0fed5a08a..792d73693be0780f2e938d828b0f29889216631b 100644 --- a/include/aidge/scheduler/Scheduler.hpp +++ b/include/aidge/scheduler/Scheduler.hpp @@ -114,7 +114,7 @@ public: * * @param data data input tensors */ - void connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data); + void connectInputs(const std::vector<std::shared_ptr<Aidge::Tensor>>& data); /** * @brief Save in a Markdown file the static scheduling with early and late relative order for the nodes. 
diff --git a/include/aidge/scheduler/SequentialScheduler.hpp b/include/aidge/scheduler/SequentialScheduler.hpp index 9cf0c2c1877bbbe5930c6b1e39f2a46c33e21d93..7201601254b779d64f23e9c0d1d00f5c6c23532a 100644 --- a/include/aidge/scheduler/SequentialScheduler.hpp +++ b/include/aidge/scheduler/SequentialScheduler.hpp @@ -49,7 +49,7 @@ public: /** * @brief Run the provided Computational Graph with a batch of data */ - virtual void forward(bool forwardDims = true, std::vector<std::shared_ptr<Aidge::Tensor>> data = {}); + virtual void forward(bool forwardDims = true, const std::vector<std::shared_ptr<Aidge::Tensor>>& data = {}); /** * @brief Run the provided Computational Graph with a batch of data diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp index 44c3b1f5e8df833344fa9b7fe72bdb4ef1e0ec12..113377b33d9827c3428eeb0adc92111f75c22abb 100644 --- a/include/aidge/utils/DynamicAttributes.hpp +++ b/include/aidge/utils/DynamicAttributes.hpp @@ -21,6 +21,7 @@ #include "aidge/utils/future_std/any.hpp" #include "aidge/utils/Attributes.hpp" +#include "aidge/utils/ErrorHandling.hpp" #ifdef PYBIND #include <pybind11/pybind11.h> @@ -86,7 +87,7 @@ public: template<class T> void addAttr(const std::string& name, const T& value) { const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value))); - assert(res.second && "attribute already exists"); + AIDGE_ASSERT(res.second, "attribute already exists"); #ifdef PYBIND // We cannot handle Python object if the Python interpreter is not running @@ -129,10 +130,10 @@ public: void addAttrPy(const std::string& name, py::object&& value) { auto it = mAttrs.find(name); - assert(it == mAttrs.end() && "attribute already exists"); + AIDGE_ASSERT(it == mAttrs.end(), "attribute already exists"); const auto& res = mAttrsPy.emplace(std::make_pair(name, value)); - assert(res.second && "attribute already exists"); + AIDGE_ASSERT(res.second, "attribute already exists"); } void setAttrPy(const 
std::string& name, py::object&& value) override final @@ -199,6 +200,8 @@ public: }; #endif + virtual ~DynamicAttributes() {} + private: #ifdef PYBIND // Stores C++ attributes (copy) and Python-only attributes diff --git a/python_binding/data/pybind_Database.cpp b/python_binding/data/pybind_Database.cpp index 903e692ca3d14d6ae25f0d6f151b1b08d557d924..4bc28a19d350236933c3b6c139e9e3a4d980fa3f 100644 --- a/python_binding/data/pybind_Database.cpp +++ b/python_binding/data/pybind_Database.cpp @@ -1,13 +1,40 @@ #include <pybind11/pybind11.h> +#include <pybind11/stl.h> + #include "aidge/data/Database.hpp" +#include "aidge/data/Tensor.hpp" namespace py = pybind11; namespace Aidge { -void init_Database(py::module& m){ +/** + * @brief Trampoline class for binding + * + */ +class pyDatabase : public Database { + public: + using Database::Database; // Inherit constructors - py::class_<Database, std::shared_ptr<Database>>(m,"Database"); + std::vector<std::shared_ptr<Tensor>> getItem( + const std::size_t index) const override { + PYBIND11_OVERRIDE_PURE_NAME(std::vector<std::shared_ptr<Tensor>>, Database, + "get_item", getItem, index); + } + std::size_t getLen() const noexcept override { + PYBIND11_OVERRIDE_PURE_NAME(std::size_t, Database, "len", getLen); + } + std::size_t getNbModalities() const noexcept override { + PYBIND11_OVERRIDE_PURE_NAME(std::size_t, Database, "get_nb_modalities", + getNbModalities); + } +}; - -} +void init_Database(py::module& m) { + py::class_<Database, std::shared_ptr<Database>, pyDatabase>( + m, "Database", py::dynamic_attr()) + .def(py::init<>()) + .def("get_item", &Database::getItem) + .def("len", &Database::getLen) + .def("get_nb_modalities", &Database::getNbModalities); } +} // namespace Aidge diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f091ea70f9b5e9927e535bd527cd84cf081d9823 --- /dev/null +++ 
b/python_binding/operator/pybind_Scaling.cpp @@ -0,0 +1,32 @@ +/******************************************************************************** + * Copyright (c) 2024 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <pybind11/pybind11.h> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Scaling.hpp" +#include "aidge/operator/OperatorTensor.hpp" + +namespace py = pybind11; + +namespace Aidge { + +void init_Scaling(py::module& m) +{ + py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, Attributes, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance()) + .def("get_inputs_name", &Scaling_Op::getInputsName) + .def("get_outputs_name", &Scaling_Op::getOutputsName) + .def("attributes_name", &Scaling_Op::staticGetAttrsName); + declare_registrable<Scaling_Op>(m, "ScalingOp"); + m.def("Scaling", &Scaling, py::arg("scaling_factor") = 1.0f, py::arg("nb_bits") = 8, py::arg("is_output_unsigned") = true, py::arg("name") = ""); +} + +} // namespace Aidge diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp index 63e5100ac65b5582c7236c2b3467a7d1debcaa36..7b38c2d72d5f4b2eed8d8bbf9f41f47144b51060 100644 --- a/python_binding/pybind_core.cpp +++ b/python_binding/pybind_core.cpp @@ -51,6 +51,7 @@ void init_Pow(py::module&); void init_ReduceMean(py::module&); void init_ReLU(py::module&); void init_Reshape(py::module&); +void init_Scaling(py::module&); void init_Sigmoid(py::module&); void init_Slice(py::module&); void init_Softmax(py::module&); @@ -72,6 +73,7 @@ void init_Recipes(py::module&); void init_GraphViewHelper(py::module&); void init_Scheduler(py::module&); +void init_MemoryManager(py::module&); void init_TensorUtils(py::module&); void 
init_Filler(py::module&); @@ -117,6 +119,7 @@ void init_Aidge(py::module& m) { init_ReduceMean(m); init_ReLU(m); init_Reshape(m); + init_Scaling(m); init_Sigmoid(m); init_Slice(m); init_Softmax(m); @@ -134,6 +137,7 @@ void init_Aidge(py::module& m) { init_Recipes(m); init_GraphViewHelper(m); init_Scheduler(m); + init_MemoryManager(m); init_TensorUtils(m); init_Filler(m); } diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp index f122c411618ce28a641fd46ee568f99cc48e9f58..b85d1c41ed90a5774a9b24062dfda4186c2294d5 100644 --- a/python_binding/recipes/pybind_Recipes.cpp +++ b/python_binding/recipes/pybind_Recipes.cpp @@ -21,66 +21,70 @@ namespace py = pybind11; namespace Aidge { -void init_Recipes(py::module &m) { +void init_Recipes(py::module &m) +{ m.def("fuse_mul_add", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseMulAdd), py::arg("graph_view"), R"mydelimiter( - Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator. + Recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator. - :param graph_view: Graph view on which we want to apply the recipie + :param graph_view: Graph view on which we want to apply the recipe :type graph_view: :py:class:`aidge_core.GraphView` )mydelimiter"); // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter( - // Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator. + // recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator. // :param nodes: The MatMul and Add nodes to fuse. // :type nodes: list of :py:class:`aidge_core.Node` // )mydelimiter"); m.def("remove_dropout",static_cast<void(*)(std::shared_ptr<GraphView>)>(removeDropout), py::arg("graph_view"), R"mydelimiter( - Recipie to remove a dropout operator. + Recipe to remove a dropout operator. 
- :param graph_view: Graph view on which we want to apply the recipie + :param graph_view: Graph view on which we want to apply the recipe :type graph_view: :py:class:`aidge_core.GraphView` )mydelimiter"); m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter( - Recipie to remove a flatten operator. + Recipe to remove a flatten operator. - :param graph_view: Graph view on which we want to apply the recipie + :param graph_view: Graph view on which we want to apply the recipe :type graph_view: :py:class:`aidge_core.GraphView` )mydelimiter"); // m.def("remove_flatten", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(removeFlatten), py::arg("nodes"), R"mydelimiter( - // Recipie to remove a flatten operator. + // Recipe to remove a flatten operator. // :param nodes: The flatten operator to remove. // :type nodes: list of :py:class:`aidge_core.Node` // )mydelimiter"); // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter( - // Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator. + // Recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator. // :param nodes: The MatMul and Add nodes to fuse. // :type nodes: list of :py:class:`aidge_core.Node` // )mydelimiter"); m.def("fuse_batchnorm", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseBatchNorm), py::arg("graph_view"), R"mydelimiter( - Recipie to remove a flatten operator. + Recipe to remove a flatten operator. 
- :param graph_view: Graph view on which we want to apply the recipie + :param graph_view: Graph view on which we want to apply the recipe :type graph_view: :py:class:`aidge_core.GraphView` )mydelimiter"); - m.def("get_conv_horizontal_tiling", static_cast<std::set<std::shared_ptr<Node>>(*)(const std::shared_ptr<Node>&, const DimIdx_t, const std::size_t)>(getConvHorizontalTiling), + m.def("get_conv_horizontal_tiling", static_cast<std::set<std::shared_ptr<Node>>(*)(const std::shared_ptr<Node>&, const DimIdx_t, const std::size_t)>(getConvHorizontalTiling), py::arg("node"), py::arg("axis"), py::arg("nb_slices")); // m.def("fuse_batchnorm", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseBatchNorm), py::arg("nodes"), R"mydelimiter( - // Recipie to remove a flatten operator. + // recipe to remove a flatten operator. // :param nodes: The flatten operator to remove. // :type nodes: list of :py:class:`aidge_core.Node` // )mydelimiter"); + + m.def("expand_metaops", static_cast<void(*)(std::shared_ptr<GraphView>, bool)>(expandMetaOps), py::arg("graph_view"), py::arg("recursive") = false); } + } // namespace Aidge diff --git a/python_binding/scheduler/pybind_MemoryManager.cpp b/python_binding/scheduler/pybind_MemoryManager.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0f18db405bec0aee9637f2e5f2ecc7b71e502cc5 --- /dev/null +++ b/python_binding/scheduler/pybind_MemoryManager.cpp @@ -0,0 +1,108 @@ +/******************************************************************************** + * Copyright (c) 2024 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <pybind11/pybind11.h> +#include <pybind11/stl.h> + +#include "aidge/scheduler/MemoryManager.hpp" + +namespace py = pybind11; + +namespace Aidge { + +void init_MemoryManager(py::module& m) +{ + py::enum_<MemoryManager::OptimizeStrategy>(m, "OptimizeStrategy") + .value("None", MemoryManager::OptimizeStrategy::None) + .value("OptimizeMaxLifetimeMinSizeFirst", MemoryManager::OptimizeStrategy::OptimizeMaxLifetimeMinSizeFirst) + .value("OptimizeMaxLifetimeMaxSizeFirst", MemoryManager::OptimizeStrategy::OptimizeMaxLifetimeMaxSizeFirst) + .value("OptimizeMaxHoleMaxLifetimeFirst", MemoryManager::OptimizeStrategy::OptimizeMaxHoleMaxLifetimeFirst) + .export_values(); + + py::class_<MemoryManager::MemorySpace, std::shared_ptr<MemoryManager::MemorySpace>>(m, "MemorySpace") + .def(py::init<MemoryManager::Clock_T, unsigned int, unsigned int, std::set<std::shared_ptr<Node>> >(), py::arg("clock"), py::arg("offset"), py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>()) + .def_readwrite("offset", &MemoryManager::MemorySpace::offset) + .def_readwrite("size", &MemoryManager::MemorySpace::size) + .def_readwrite("dependencies", &MemoryManager::MemorySpace::dependencies) + .def_readwrite("allocated", &MemoryManager::MemorySpace::allocated) + .def_readwrite("released", &MemoryManager::MemorySpace::released); + + py::class_<MemoryManager::MemoryPlane, std::shared_ptr<MemoryManager::MemoryPlane>>(m, "MemoryPlane") + .def(py::init<std::shared_ptr<MemoryManager::MemorySpace>, + MemoryManager::Clock_T, unsigned int, unsigned int, + unsigned int, unsigned int, unsigned int>(), + py::arg("mem_space"), py::arg("clock"), py::arg("offset"), + py::arg("size"), py::arg("stride"), py::arg("length"), py::arg("count")) + .def_readwrite("mem_space", &MemoryManager::MemoryPlane::memSpace) + .def_readwrite("allocated", 
&MemoryManager::MemoryPlane::allocated) + .def_readwrite("offset", &MemoryManager::MemoryPlane::offset) + .def_readwrite("size", &MemoryManager::MemoryPlane::size) + .def_readwrite("stride", &MemoryManager::MemoryPlane::stride) + .def_readwrite("length", &MemoryManager::MemoryPlane::length) + .def_readwrite("count", &MemoryManager::MemoryPlane::count) + .def("get_size", &MemoryManager::MemoryPlane::getSize) + .def("get_useful_size", &MemoryManager::MemoryPlane::getUsefulSize) + .def("get_contiguous_offset", &MemoryManager::MemoryPlane::getContiguousOffset) + .def("get_contiguous_size", &MemoryManager::MemoryPlane::getContiguousSize) + .def("get_wrapped_offset", &MemoryManager::MemoryPlane::getWrappedOffset) + .def("get_wrapped_size", &MemoryManager::MemoryPlane::getWrappedSize) + .def("get_final_offset", &MemoryManager::MemoryPlane::getFinalOffset) + .def("get_upper_offset", &MemoryManager::MemoryPlane::getUpperOffset) + .def("get_limit", &MemoryManager::MemoryPlane::getLimit); + + py::class_<MemoryManager::MaxLifetimeMinSizeFirst>(m, "MaxLifetimeMinSizeFirst") + .def(py::init<unsigned int>(), py::arg("max_lifetime")) + .def_readonly("max_lifetime", &MemoryManager::MaxLifetimeMinSizeFirst::maxLifetime) + .def("__call__", &MemoryManager::MaxLifetimeMinSizeFirst::operator(), py::arg("p0"), py::arg("p1")); + + py::class_<MemoryManager::MaxLifetimeMaxSizeFirst>(m, "MaxLifetimeMaxSizeFirst") + .def(py::init<unsigned int>(), py::arg("max_lifetime")) + .def_readonly("max_lifetime", &MemoryManager::MaxLifetimeMaxSizeFirst::maxLifetime) + .def("__call__", &MemoryManager::MaxLifetimeMaxSizeFirst::operator(), py::arg("p0"), py::arg("p1")); + + py::class_<MemoryManager::MaxHoleMaxLifetimeFirst>(m, "MaxHoleMaxLifetimeFirst") + .def(py::init<unsigned int, MemoryManager*>(), py::arg("max_lifetime"), py::arg("inst")) + .def_readonly("max_lifetime", &MemoryManager::MaxHoleMaxLifetimeFirst::maxLifetime) + .def_readwrite("inst", &MemoryManager::MaxHoleMaxLifetimeFirst::inst) + 
.def("__call__", &MemoryManager::MaxHoleMaxLifetimeFirst::operator(), py::arg("p0"), py::arg("p1")); + + py::class_<MemoryManager, std::shared_ptr<MemoryManager>>(m, "MemoryManager") + .def(py::init<>()) + .def("reserve", (std::shared_ptr<MemoryManager::MemorySpace> (MemoryManager::*)(unsigned int, const std::set<std::shared_ptr<Node>>&)) &MemoryManager::reserve, py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>()) + .def("expand", &MemoryManager::expand, py::arg("mem_space"), py::arg("required_size")) + .def("allocate", (MemoryManager::MemoryPlane (MemoryManager::*)(unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::allocate, py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1) + .def("allocate", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::allocate, py::arg("node"), py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1) + .def("is_wrap_around", &MemoryManager::isWrapAround, py::arg("mem_space"), py::arg("offset"), py::arg("size"), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1) + .def("reallocate", (MemoryManager::MemoryPlane (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("mem_space"), py::arg("offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1) + .def("reallocate", (MemoryManager::MemoryPlane 
(MemoryManager::*)(const MemoryManager::MemoryPlane&, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("memPlane"), py::arg("extra_offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1) + .def("reallocate", (unsigned int (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>, const std::shared_ptr<Node>&, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("mem_space"), py::arg("node"), py::arg("offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1) + .def("reallocate", (unsigned int (MemoryManager::*)(const MemoryManager::MemoryPlane&, const std::shared_ptr<Node>&, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("mem_plane"), py::arg("node"), py::arg("extra_offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1) + .def("release", (unsigned int (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>)) &MemoryManager::release, py::arg("mem_space")) + .def("release", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&)) &MemoryManager::release, py::arg("node")) + .def("release_dependencies", &MemoryManager::releaseDependencies, py::arg("node")) + .def("optimize", &MemoryManager::optimize, py::arg("strategy")) + 
.def("get_offset", &MemoryManager::getOffset, py::arg("node"), py::arg("plane") = 0) + .def("get_size", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&, unsigned int) const) &MemoryManager::getSize, py::arg("node"), py::arg("plane")) + .def("get_size", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&) const) &MemoryManager::getSize, py::arg("node")) + .def("get_peak_usage", &MemoryManager::getPeakUsage) + .def("get_max_lifetime", &MemoryManager::getMaxLifetime) + .def("get_planes", (const std::vector<MemoryManager::MemoryPlane>& (MemoryManager::*)(const std::shared_ptr<Node>&) const) &MemoryManager::getPlanes, py::arg("node")) + .def("get_planes", (const MemoryManager::MemMap_T& (MemoryManager::*)() const) &MemoryManager::getPlanes) + .def("get_planes", (MemoryManager::MemMap_T (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>) const) &MemoryManager::getPlanes, py::arg("mem_space")) + .def("get_nb_planes", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&) const) &MemoryManager::getNbPlanes, py::arg("node")) + .def("get_nb_planes", (unsigned int (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>) const) &MemoryManager::getNbPlanes, py::arg("mem_space")) + .def("get_current_tick", &MemoryManager::getCurrentTick) + .def("tick", &MemoryManager::tick) + .def("log", &MemoryManager::log, py::arg("file_name")) + ; +} + +} // Aidge diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp index c0966e54d4f025a607aa9763a3657de5b39d2ff4..3f763c8ff0717fb07c1b6c1f85b6aba06c1dc8f1 100644 --- a/python_binding/scheduler/pybind_Scheduler.cpp +++ b/python_binding/scheduler/pybind_Scheduler.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> #include <pybind11/stl.h> +#include "aidge/scheduler/MemoryManager.hpp" #include "aidge/scheduler/Scheduler.hpp" #include "aidge/scheduler/SequentialScheduler.hpp" #include "aidge/scheduler/ParallelScheduler.hpp" @@ -22,10 +23,12 @@ 
namespace Aidge { void init_Scheduler(py::module& m){ py::class_<Scheduler, std::shared_ptr<Scheduler>>(m, "Scheduler") .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view")) + .def("graph_view", &Scheduler::graphView) .def("save_scheduling_diagram", &Scheduler::saveSchedulingDiagram, py::arg("file_name")) .def("resetScheduling", &Scheduler::resetScheduling) .def("generate_scheduling", &Scheduler::generateScheduling) .def("get_static_scheduling", &Scheduler::getStaticScheduling, py::arg("step") = 0) + .def("generate_memory", &Scheduler::generateMemory, py::arg("inc_producers") = false, py::arg("wrap_around_buffer") = false) ; py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>, Scheduler>(m, "SequentialScheduler") diff --git a/requirements.txt b/requirements.txt index 24ce15ab7ead32f98c7ac3edcd34bb2010ff4326..32ec29bb9b826038eb21ce2927f2fef08973b2b8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1,2 @@ numpy +Jinja2 diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp index b6aa4f2e50a5a3db8c3965a8e618fcf4f0299fe8..677bd0246e145ebf760f210000728bd2d99a3807 100644 --- a/src/data/Tensor.cpp +++ b/src/data/Tensor.cpp @@ -23,29 +23,26 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) { return *this; } resize(other.dims(), other.strides()); - setDataType(other.dataType(), false); // do not convert existing data + setDataType(other.dataType(), false); // do not convert existing data if (other.hasImpl()) { if (hasImpl()) { copyFrom(other); - } - else { + } else { // Perform a shallow copy only setImpl(other.mImpl, other.mImplOffset); } - } - else { + } else { setImpl(nullptr); } return *this; } - Aidge::Tensor::~Tensor() noexcept = default; - -void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) { +void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims, + std::vector<Aidge::DimSize_t> strides) { // TODO: scalar Tensor not handled - if 
(dims.empty()) { // scalar + if (dims.empty()) { // scalar mDims = std::vector<DimSize_t>(0); mStrides = std::vector<DimSize_t>({1}); mContiguous = true; @@ -63,20 +60,21 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto size_t expectedStride = 1; for (int dim = dims.size() - 1; dim >= 0; --dim) { strides[dim] = expectedStride; - expectedStride*= dims[dim]; + expectedStride *= dims[dim]; } checkContiguous = false; - } - else { - AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims"); + } else { + AIDGE_ASSERT(strides.size() == dims.size(), + "Number of strides must match number of dims"); } if (mImpl && mImpl.use_count() > 1) { // Here we could also create a new storage for this tensor in this case - // But, is it more likely that the user really wants this, or that he did a mistake? - AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage"); - } - else { + // But, is it more likely that the user really wants this, or that he + // did a mistake? 
+ AIDGE_ASSERT(dims == mDims && strides == mStrides, + "Cannot resize Tensor with shared storage"); + } else { mDims = dims; mStrides = strides; @@ -88,12 +86,12 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto // mContiguous&= (strides[i] == expectedStride); // expectedStride*= dims[i]; // } - for (std::size_t i = dims.size()-1; i > 0; --i) { + for (std::size_t i = dims.size() - 1; i > 0; --i) { if (strides[i] != expectedStride) { mContiguous = false; break; } - expectedStride*= dims[i]; + expectedStride *= dims[i]; } mContiguous &= (strides[0] == expectedStride); } @@ -106,53 +104,59 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto } std::string Aidge::Tensor::toString() const { - AIDGE_ASSERT(mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) || (mImpl->hostPtr() != nullptr)), "tensor should have a valid host pointer"); + AIDGE_ASSERT( + mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) || + (mImpl->hostPtr() != nullptr)), + "tensor should have a valid host pointer"); // TODO: move lambda elsewhere? 
auto ptrToString = [](DataType dt, void* ptr, std::size_t idx) { switch (dt) { - case DataType::Float64: - return std::to_string(static_cast<double*>(ptr)[idx]); - case DataType::Float32: - return std::to_string(static_cast<float*>(ptr)[idx]); - case DataType::Float16: - return std::to_string(static_cast<half_float::half*>(ptr)[idx]); - case DataType::Int8: - return std::to_string(static_cast<int8_t*>(ptr)[idx]); - case DataType::Int16: - return std::to_string(static_cast<int16_t*>(ptr)[idx]); - case DataType::Int32: - return std::to_string(static_cast<int32_t*>(ptr)[idx]); - case DataType::Int64: - return std::to_string(static_cast<int64_t*>(ptr)[idx]); - case DataType::UInt8: - return std::to_string(static_cast<uint8_t*>(ptr)[idx]); - case DataType::UInt16: - return std::to_string(static_cast<uint16_t*>(ptr)[idx]); - case DataType::UInt32: - return std::to_string(static_cast<uint32_t*>(ptr)[idx]); - case DataType::UInt64: - return std::to_string(static_cast<uint64_t*>(ptr)[idx]); - default: - AIDGE_ASSERT(true, "unsupported type to convert to string"); + case DataType::Float64: + return std::to_string(static_cast<double*>(ptr)[idx]); + case DataType::Float32: + return std::to_string(static_cast<float*>(ptr)[idx]); + case DataType::Float16: + return std::to_string(static_cast<half_float::half*>(ptr)[idx]); + case DataType::Int8: + return std::to_string(static_cast<int8_t*>(ptr)[idx]); + case DataType::Int16: + return std::to_string(static_cast<int16_t*>(ptr)[idx]); + case DataType::Int32: + return std::to_string(static_cast<int32_t*>(ptr)[idx]); + case DataType::Int64: + return std::to_string(static_cast<int64_t*>(ptr)[idx]); + case DataType::UInt8: + return std::to_string(static_cast<uint8_t*>(ptr)[idx]); + case DataType::UInt16: + return std::to_string(static_cast<uint16_t*>(ptr)[idx]); + case DataType::UInt32: + return std::to_string(static_cast<uint32_t*>(ptr)[idx]); + case DataType::UInt64: + return std::to_string(static_cast<uint64_t*>(ptr)[idx]); + default: 
+ AIDGE_ASSERT(true, "unsupported type to convert to string"); } return std::string("?"); // To make Clang happy }; - if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); } + if (dims().empty()) { + return ptrToString(mDataType, mImpl->hostPtr(), 0); + } std::string res; std::size_t dim = 0; std::size_t counter = 0; - if (nbDims()>=2) { + if (nbDims() >= 2) { std::vector<std::size_t> dimVals(nbDims(), 0); res += "{\n"; while (counter < mSize) { - std::string spaceString = std::string((dim+1)<<1,' '); - if (dim < nbDims()-2) { + std::string spaceString = std::string((dim + 1) << 1, ' '); + if (dim < nbDims() - 2) { if (dimVals[dim] == 0) { res += spaceString + "{\n"; ++dim; - } else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) { + } else if (dimVals[dim] < + static_cast<std::size_t>(dims()[dim])) { res += spaceString + "},\n" + spaceString + "{\n"; ++dim; } else { @@ -161,13 +165,22 @@ std::string Aidge::Tensor::toString() const { dimVals[dim]++; } } else { - for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) { + for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); + ++dimVals[dim]) { res += spaceString + "{"; for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) { - res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + ","; + res += + " " + + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), + counter++) + + ","; } - res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + "}"; - if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) { + res += " " + + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), + counter++) + + "}"; + if (dimVals[dim] < + static_cast<std::size_t>(dims()[dim] - 1)) { res += ","; } res += "\n"; @@ -179,35 +192,45 @@ std::string Aidge::Tensor::toString() const { dimVals[dim]++; } } - - for(int i = static_cast<int>(dim); i > 0; --i) { - res += std::string((dim+1)<<1,' ') + "}\n"; + if (nbDims() != 2) { // If nbDims == 2, 
parenthesis is already closed + for (int i = static_cast<int>(dim); i >= 0; --i) { + res += std::string((i + 1) << 1, ' ') + "}\n"; + } } } else { res += "{"; for (DimSize_t j = 0; j < dims()[0]; ++j) { - res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + ((j < dims()[0]-1) ? "," : " "); + res += " " + + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + + ((j < dims()[0] - 1) ? "," : " "); } } res += "}"; return res; } -Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& fixedCoord) const { +Aidge::Tensor Aidge::Tensor::extract( + const std::vector<std::size_t>& fixedCoord) const { AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous"); - AIDGE_ASSERT(fixedCoord.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions"); + AIDGE_ASSERT(fixedCoord.size() <= mDims.size(), + "Number of coordinates is higher than number of dimensions"); Tensor subTensor(mDataType); - subTensor.resize(std::vector<size_t>(mDims.cbegin() + fixedCoord.size(), mDims.cend()), - std::vector<size_t>(mStrides.cbegin() + fixedCoord.size(), mStrides.cend())); + subTensor.resize( + std::vector<size_t>(mDims.cbegin() + fixedCoord.size(), mDims.cend()), + std::vector<size_t>(mStrides.cbegin() + fixedCoord.size(), + mStrides.cend())); subTensor.setBackend(mImpl->backend(), mImpl->device().second); subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(fixedCoord)); return subTensor; } -Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& startCoord, const std::vector<std::size_t>& dims) const { +Aidge::Tensor Aidge::Tensor::extract( + const std::vector<std::size_t>& startCoord, + const std::vector<std::size_t>& dims) const { AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous"); - AIDGE_ASSERT(startCoord.size() == mDims.size(), "Coordinates does not match number of dimensions"); + AIDGE_ASSERT(startCoord.size() == mDims.size(), + "Coordinates does not match number of dimensions"); Tensor subTensor(mDataType); 
subTensor.resize(dims, mStrides); @@ -224,7 +247,8 @@ void Aidge::Tensor::makeContiguous() { // Block so that mImpl ref count is 1 for resize() { // Create a new storage that will be contiguous - std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims); + std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create( + {mImpl->backend(), mDataType})(mImpl->device().second, mDims); // Copy elements from old to new storage std::size_t idx = 0; while (idx < mSize) { @@ -233,13 +257,14 @@ void Aidge::Tensor::makeContiguous() { // Determine the size of the contiguous chunk std::size_t copySize = 1; while (idx + copySize < mSize && - getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize) - { + getStorageIdx(getCoord(idx + copySize)) == + storageIdx + copySize) { ++copySize; } // Perform a single copy for the contiguous chunk - newImpl->copy(mImpl->rawPtr(mImplOffset + storageIdx), copySize, idx); + newImpl->copy(mImpl->rawPtr(mImplOffset + storageIdx), copySize, + idx); // Move to the next index after the contiguous chunk idx += copySize; @@ -267,8 +292,10 @@ void Aidge::Tensor::copyCast(const Tensor& src) { } resize(src.dims()); - AIDGE_ASSERT(src.getImpl()->device() == getImpl()->device(), "cannot copy-cast from a different backend/device"); - getImpl()->copyCast(src.getImpl()->rawPtr(src.mImplOffset), src.dataType(), src.size(), mImplOffset); + AIDGE_ASSERT(src.getImpl()->device() == getImpl()->device(), + "cannot copy-cast from a different backend/device"); + getImpl()->copyCast(src.getImpl()->rawPtr(src.mImplOffset), src.dataType(), + src.size(), mImplOffset); } void Aidge::Tensor::copyFrom(const Tensor& src) { @@ -286,16 +313,20 @@ void Aidge::Tensor::copyFrom(const Tensor& src) { } resize(src.dims()); - AIDGE_ASSERT(src.dataType() == dataType(), "cannot copy from a different data type"); - getImpl()->copyFrom(*(src.getImpl()), src.size(), src.mImplOffset, mImplOffset); + 
AIDGE_ASSERT(src.dataType() == dataType(), + "cannot copy from a different data type"); + getImpl()->copyFrom(*(src.getImpl()), src.size(), src.mImplOffset, + mImplOffset); } -void Aidge::Tensor::copyCastFrom(const Tensor& src, std::shared_ptr<Tensor>& movedSrcPtr) { +void Aidge::Tensor::copyCastFrom(const Tensor& src, + std::shared_ptr<Tensor>& movedSrcPtr) { if (&src == this) { return; } - AIDGE_ASSERT(src.isContiguous(), "cannot copy-cast from non-contiguous tensor"); + AIDGE_ASSERT(src.isContiguous(), + "cannot copy-cast from non-contiguous tensor"); // Current Tensor has necessarily a data type, but may not have backend if (!getImpl()) { @@ -308,29 +339,33 @@ void Aidge::Tensor::copyCastFrom(const Tensor& src, std::shared_ptr<Tensor>& mov if (dataType() != src.dataType()) { // First move data to the target device (only if needed) const auto device = getImpl()->device(); - const Tensor& movedSrc = src.refFrom(movedSrcPtr, device.first, device.second); + const Tensor& movedSrc = + src.refFrom(movedSrcPtr, device.first, device.second); // Second, copy-cast data (necessary) - getImpl()->copyCast(movedSrc.getImpl()->rawPtr(movedSrc.mImplOffset), movedSrc.dataType(), movedSrc.size(), mImplOffset); - } - else { + getImpl()->copyCast(movedSrc.getImpl()->rawPtr(movedSrc.mImplOffset), + movedSrc.dataType(), movedSrc.size(), mImplOffset); + } else { // Directly copy, no conversion necessary // Avoid making a double copy if both data type and device are the same - getImpl()->copyFrom(*(src.getImpl()), src.size(), src.mImplOffset, mImplOffset); + getImpl()->copyFrom(*(src.getImpl()), src.size(), src.mImplOffset, + mImplOffset); } } Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) { // Scott Meyers' solution to avoid code duplication - return const_cast<Tensor&>(static_cast<const Tensor&>(*this).refContiguous(fallback)); + return const_cast<Tensor&>( + static_cast<const Tensor&>(*this).refContiguous(fallback)); } -const Aidge::Tensor& 
Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) const { - AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot refCast() it"); +const Aidge::Tensor& Aidge::Tensor::refContiguous( + std::shared_ptr<Tensor>& fallback) const { + AIDGE_ASSERT(getImpl(), + "no backend was set for tensor, cannot refCast() it"); if (isContiguous()) { return *this; - } - else { + } else { if (this != fallback.get()) { // Shallow copy to fallback *fallback = *this; @@ -342,96 +377,117 @@ const Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallb } } -Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt) { +Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, + const Aidge::DataType& dt) { // Scott Meyers' solution to avoid code duplication - return const_cast<Tensor&>(static_cast<const Tensor&>(*this).refCast(fallback, dt)); + return const_cast<Tensor&>( + static_cast<const Tensor&>(*this).refCast(fallback, dt)); } -const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt) const { - AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot refCast() it"); +const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, + const Aidge::DataType& dt) const { + AIDGE_ASSERT(getImpl(), + "no backend was set for tensor, cannot refCast() it"); if (dt == dataType()) { return *this; - } - else { + } else { if (this == fallback.get()) { // if refFrom() was called before, just change the type fallback->setDataType(dt); - } - else { - AIDGE_ASSERT(isContiguous(), "cannot refCast non-contiguous tensor"); + } else { + AIDGE_ASSERT(isContiguous(), + "cannot refCast non-contiguous tensor"); if (!fallback) { fallback = std::make_shared<Tensor>(dt); - } - else { - fallback->setDataType(dt, false); // don't keep previous data (no copy) + } else { + fallback->setDataType( + dt, false); // don't keep previous data (no copy) } const 
auto device = getImpl()->device(); - fallback->setBackend(device.first, device.second, false); // don't keep previous data (no copy) + fallback->setBackend(device.first, device.second, + false); // don't keep previous data (no copy) fallback->resize(dims()); - fallback->getImpl()->copyCast(getImpl()->rawPtr(mImplOffset), dataType(), size(), fallback->mImplOffset); + fallback->getImpl()->copyCast(getImpl()->rawPtr(mImplOffset), + dataType(), size(), + fallback->mImplOffset); } return *fallback; } } -Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, const std::string &backend, DeviceIdx_t device) { +Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, + const std::string& backend, + DeviceIdx_t device) { // Scott Meyers' solution to avoid code duplication - return const_cast<Tensor&>(static_cast<const Tensor&>(*this).refFrom(fallback, backend, device)); + return const_cast<Tensor&>( + static_cast<const Tensor&>(*this).refFrom(fallback, backend, device)); } -const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, const std::string &backend, DeviceIdx_t device) const { - AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot refFrom() it"); +const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, + const std::string& backend, + DeviceIdx_t device) const { + AIDGE_ASSERT(getImpl(), + "no backend was set for tensor, cannot refFrom() it"); if (std::make_pair(backend, device) == getImpl()->device()) { return *this; - } - else { + } else { if (this == fallback.get()) { // if refCast() was called before, just change the backend fallback->setBackend(backend, device); - } - else { - AIDGE_ASSERT(isContiguous(), "cannot refFrom non-contiguous tensor"); + } else { + AIDGE_ASSERT(isContiguous(), + "cannot refFrom non-contiguous tensor"); if (!fallback) { fallback = std::make_shared<Tensor>(dataType()); - } - else { - fallback->setDataType(dataType(), false); // don't keep previous 
data (no copy) + } else { + fallback->setDataType( + dataType(), false); // don't keep previous data (no copy) } - fallback->setBackend(backend, device, false); // don't keep previous data (no copy) + fallback->setBackend(backend, device, + false); // don't keep previous data (no copy) fallback->resize(dims()); - fallback->getImpl()->copyFrom(*getImpl(), size(), mImplOffset, fallback->mImplOffset); + fallback->getImpl()->copyFrom(*getImpl(), size(), mImplOffset, + fallback->mImplOffset); } return *fallback; } } -Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device) { +Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, + const Aidge::DataType& dt, + const std::string& backend, + DeviceIdx_t device) { // Scott Meyers' solution to avoid code duplication - return const_cast<Tensor&>(static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device)); + return const_cast<Tensor&>( + static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device)); } -const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device) const { +const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, + const Aidge::DataType& dt, + const std::string& backend, + DeviceIdx_t device) const { AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot ref() it"); - if (dt == dataType() && std::make_pair(backend, device) == getImpl()->device()) { + if (dt == dataType() && + std::make_pair(backend, device) == getImpl()->device()) { return *this; - } - else { + } else { // Change fallback type, backend & device, without any data copy if (!fallback) { fallback = std::make_shared<Tensor>(dt); - } - else { - fallback->setDataType(dt, false); // don't keep previous data (no copy) + } else { + fallback->setDataType(dt, + false); // don't keep previous data (no copy) } - 
fallback->setBackend(backend, device, false); // don't keep previous data (no copy) + fallback->setBackend(backend, device, + false); // don't keep previous data (no copy) fallback->resize(dims()); return *fallback; } @@ -439,7 +495,7 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const std::set<std::string> Aidge::Tensor::getAvailableBackends() { std::set<std::string> backendsList; - for(const auto& tupleKey : Registrar<Tensor>::getKeys()) + for (const auto& tupleKey : Registrar<Tensor>::getKeys()) backendsList.insert(std::get<0>(tupleKey)); return backendsList; } diff --git a/src/filler/Filler.cpp b/src/filler/Filler.cpp index 34e04c2ba84ad493429bceadd54f4fa27df69bcd..f5839087c2e37c5e0288f08716595a0ed66e869e 100644 --- a/src/filler/Filler.cpp +++ b/src/filler/Filler.cpp @@ -20,12 +20,12 @@ #include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/Types.h" - void Aidge::calculateFanInFanOut(std::shared_ptr<Aidge::Tensor> tensor, std::uint32_t& fanIn, std::uint32_t& fanOut) { - AIDGE_ASSERT( - tensor->nbDims() == 4, - "Tensor need to have 4 dimensions to compute FanIn and FanOut."); + AIDGE_ASSERT(tensor->nbDims() == 4 || tensor->nbDims() == 2, + "Tensor need to have 4 or 2 dimensions to compute FanIn and " + "FanOut, but found a tensor with {} dims.", + tensor->nbDims()); // Warning: This function suppose NCXX data layout. // Aidge currently only support NCHW but this maybe not be true in the // future. 
@@ -35,6 +35,6 @@ void Aidge::calculateFanInFanOut(std::shared_ptr<Aidge::Tensor> tensor, "Cannot calculate FanIn if tensor batch size is 0."); AIDGE_ASSERT(channelSize != 0, "Cannot calculate FanOut if tensor channel size is 0."); - fanIn = static_cast<std::uint32_t>(tensor->size() / batchSize); + fanIn = static_cast<std::uint32_t>(tensor->size() / batchSize); fanOut = static_cast<std::uint32_t>(tensor->size() / channelSize); } diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp index df2177cf6910a3c40ef269d18bf148d60b5faa66..55fe69678d7d6582f13c48a285fb4f7bfa2a1419 100644 --- a/src/graph/GraphView.cpp +++ b/src/graph/GraphView.cpp @@ -83,6 +83,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd } fmt::print(fp.get(), + "```mermaid\n" "%%{{init: {{'flowchart': {{ 'curve': 'monotoneY'}}, " "'fontFamily': 'Verdana' }} }}%%\nflowchart TB\n\n"); @@ -204,6 +205,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd fmt::print(fp.get(), "classDef producerCls_rootCls stroke:#f00,fill:#ccf\n"); fmt::print(fp.get(), "classDef genericCls_rootCls stroke:#f00,fill:#f9f9ff,stroke-width:1px,stroke-dasharray: 5 5\n"); fmt::print(fp.get(), "classDef metaCls_rootCls stroke:#f00,stroke-width:5px\n"); + fmt::print(fp.get(), "```\n"); fmt::print(fp.get(), "\n"); } @@ -391,7 +393,7 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType forwardDims(dims); } -bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims, bool allowDataDependency) { +bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>>& dims, bool allowDataDependency) { // setInputs // Link every tensor to the right pointer // following parent - children informations @@ -414,9 +416,10 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_ i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), 
inputI.first->type()); } else { // Input is missing - AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i) - && !std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(), + AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i), "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type()); + AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(), + "Empty input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type()); } } @@ -907,7 +910,7 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const newGraph->getOrderedOutputs(); auto inputParents = std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(oldOIn.size()); - auto outputChildren = std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(oldOOut.size()); + auto outputChildren = std::vector<std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>>(oldOOut.size()); // keep in memory every node related to the node to replace : // Parent @@ -918,19 +921,12 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const // inputParent.first -> addChild(newOI[i].first, inputParent.second, newOI[i].second); } // Children - for (std::size_t i = 0; i < oldOOut.size();) { + for (std::size_t i = 0; i < oldOOut.size(); ++i) { std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> outputChild = oldOOut[i].first -> output(oldOOut[i].second); - if (outputChild.empty()) { - outputChildren[i] = std::pair<std::shared_ptr<Node>, IOIndex_t>({nullptr, gk_IODefaultIndex}); - ++i; - } - else { - for (const auto& child : outputChild) { - if (oldNodes.find(child.first) == oldNodes.cend()) { - outputChildren[i] = child; - ++i; - } + for (const auto& child : outputChild) { + if (oldNodes.find(child.first) == oldNodes.cend()) { + outputChildren[i].push_back(child); } } } @@ -968,8 +964,8 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const } } for (std::size_t o = 0; 
o < oldOOut.size(); ++o) { - if (outputChildren[o].first) { - newOOut[o].first -> addChild(outputChildren[o].first, newOOut[o].second, outputChildren[o].second); + for (const auto& child : outputChildren[o]) { + newOOut[o].first -> addChild(child.first, newOOut[o].second, child.second); } } } @@ -979,15 +975,21 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const if (newNodes.size() == 0) { // Case 3 if (oldOIn.size() == oldOOut.size()) { + // Same number of inputs and outputs: connect each input to the corresponding output for (std::size_t i = 0; i < oldOIn.size(); ++i) { if (inputParents[i].first) { - inputParents[i].first -> addChild(outputChildren[i].first, inputParents[i].second, outputChildren[i].second); + for (const auto& child : outputChildren[i]) { + inputParents[i].first -> addChild(child.first, inputParents[i].second, child.second); + } } } } else if ((oldOIn.size() == 1) && (inputParents[0].first)) { - for (std::size_t i = 0; i < oldOIn.size(); ++i) { - inputParents[0].first -> addChild(outputChildren[i].first, inputParents[0].second, outputChildren[i].second); + // Single input: connect the only input to all the outputs + for (std::size_t i = 0; i < oldOOut.size(); ++i) { + for (const auto& child : outputChildren[i]) { + inputParents[0].first -> addChild(child.first, inputParents[0].second, child.second); + } } } } @@ -1008,8 +1010,8 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const } } for (std::size_t o = 0; o < oldOOut.size(); ++o) { - if (outputChildren[o].first) { - newOOut[o].first -> addChild(outputChildren[o].first, newOOut[o].second, outputChildren[o].second); + for (const auto& child : outputChildren[o]) { + newOOut[o].first -> addChild(child.first, newOOut[o].second, child.second); } } } @@ -1058,6 +1060,12 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const return true; } +void Aidge::GraphView::updateInputsOutputs() { + for (auto node : 
mNodes) { + updateInputsOutputsNew(node); + } +} + void Aidge::GraphView::updateInputsOutputsNew(std::shared_ptr<Node> newNode) { // Can be called several times with the same node, e.g. when addChild() is // called on a node already part of the GraphView. In this case, inputs/outputs diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp index 149691f796d1d84212e9d7842a28e4cb79469e6a..b08bb4c2056e8c14f5b1dd3aae62fbacf8d8c14e 100644 --- a/src/graph/Node.cpp +++ b/src/graph/Node.cpp @@ -57,7 +57,10 @@ Aidge::Connector Aidge::Node::operator()(const std::vector<Connector>& ctors) { // INNER /////////////////////////////////////////////////////// -void Aidge::Node::setName(const std::string& name) { mName = name; } +void Aidge::Node::setName(const std::string& name) { + for (auto graphView : views()) graphView->updateNodeName(mName, name); + mName = name; +} /////////////////////////////////////////////////////// // OPERATORS diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp index 6bafb3b7905ae36e23af32f8d60be33a4ba178bf..9b77ffcbe0117292ed0aa520309febf709e8dd68 100644 --- a/src/operator/Add.cpp +++ b/src/operator/Add.cpp @@ -63,7 +63,8 @@ bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) { *it = dim; } else if ((dim != *it) && (dim != 1)) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Add operation: {}", outDims); + AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Add Operation: {} for previous inputs vs {} for input#{}", + outDims, getInput(i)->dims(), i); } } } diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp index 813ab774b11cd72f440d28f61843500686d7df2d..e6300d08c2c792c8a3eb66b307aca53f9d2acc73 100644 --- a/src/operator/Div.cpp +++ b/src/operator/Div.cpp @@ -44,7 +44,8 @@ bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) { outDims[out_id] = lowDims[low_id]; } else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) { - AIDGE_THROW_OR_ABORT(std::runtime_error, 
"Unsupported Tensor shape for Div Operation: {}", outDims); + AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Div Operation: {} for input#0 vs {} for input#1", + inputsDims0, inputsDims1); } --out_id; --low_id; diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp index 46e9e1173af98ed5711aa0bbce54705fb61dc03c..36ff1854703d015980a1943390eb87d0863d877f 100644 --- a/src/operator/MetaOperator.cpp +++ b/src/operator/MetaOperator.cpp @@ -37,6 +37,37 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar } } +void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) { + AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type"); + AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx); + + const auto& inputOp = mGraph->getOrderedInputs()[inputIdx]; + inputOp.first->getOperator()->associateInput(inputOp.second, data); + + // Associate inputs for custom implementation + mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second)); +} + +void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Data>& data) { + AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type()); + + const auto& inputOp = mGraph->getOrderedInputs()[inputIdx]; + inputOp.first->getOperator()->setInput(inputOp.second, data); + + // Associate inputs for custom implementation + mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second)); +} + +void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, std::shared_ptr<Data>&& data) { + AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type()); + + const auto& inputOp = mGraph->getOrderedInputs()[inputIdx]; + 
inputOp.first->getOperator()->setInput(inputOp.second, std::forward<std::shared_ptr<Data>>(data)); + + // Associate inputs for custom implementation + mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second)); +} + Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const { if (mImpl) { return mImpl->getNbRequiredData(inputIdx); diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp index 5a25e4dd447f44220dbe4124e63f567520ad8d1e..426de388f31391fb5e59446d50e50de94ca5f8a1 100644 --- a/src/operator/Mul.cpp +++ b/src/operator/Mul.cpp @@ -45,7 +45,8 @@ bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) { outDims[out_id] = lowDims[low_id]; } else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Mul Operation: {}", outDims); + AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Mul Operation: {} for input#0 vs {} for input#1", + inputsDims0, inputsDims1); } --out_id; --low_id; @@ -53,9 +54,6 @@ bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) { mOutputs[0]->resize(outDims); return true; } - else if (!getInput(0)->empty() && !getInput(1)->empty()) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible input dimensions for Operator Mul: {} and {}", getInput(0)->dims(), getInput(1)->dims()); - } return false; } diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp index 42715516e6804c1a48ef848fbda8f9d596f0e69e..135c792345b0caf1166e671a8dad7d5b49b42ee7 100644 --- a/src/operator/Pow.cpp +++ b/src/operator/Pow.cpp @@ -44,7 +44,8 @@ bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) { outDims[out_id] = lowDims[low_id]; } else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Pow Operation: {}", outDims); + AIDGE_THROW_OR_ABORT(std::runtime_error, 
"Incompatible Tensor shape for Pow Operation: {} for input#0 vs {} for input#1", + inputsDims0, inputsDims1); } --out_id; --low_id; diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp index 8b0d6f9db698e36d232dec38fd8cdd0fad5f8c59..dc5e272210feb09fd5dac6ba4b16f9ba8dc93bf0 100644 --- a/src/operator/Scaling.cpp +++ b/src/operator/Scaling.cpp @@ -21,6 +21,6 @@ const std::string Aidge::Scaling_Op::Type = "Scaling"; void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { - mImpl = Registrar<Scaling_Op>::create(name)(*this); + SET_IMPL_MACRO(Scaling_Op, *this, name); mOutputs[0]->setBackend(name, device); } \ No newline at end of file diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp index 50e556ad97a90b7a9868594cebe350d955983fd7..b977f4ee7ccce32d7f7929cbee99140aea36cd2f 100644 --- a/src/operator/Sub.cpp +++ b/src/operator/Sub.cpp @@ -46,7 +46,8 @@ bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) { outDims[out_id] = lowDims[low_id]; } else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Sub Operation: {}", outDims); + AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Sub Operation: {} for input#0 vs {} for input#1", + inputsDims0, inputsDims1); } --out_id; --low_id; diff --git a/src/scheduler/ParallelScheduler.cpp b/src/scheduler/ParallelScheduler.cpp index 1dd13fe2100122002d4ed068ada4851b1bfba463..4e515099006b9e0588eafc7e981c5f5e80bbe97d 100644 --- a/src/scheduler/ParallelScheduler.cpp +++ b/src/scheduler/ParallelScheduler.cpp @@ -28,7 +28,7 @@ #include "aidge/operator/Memorize.hpp" #include "aidge/operator/MetaOperator.hpp" -void Aidge::ParallelScheduler::forward(bool forwardDims, std::vector<std::shared_ptr<Aidge::Tensor>> data) { +void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std::shared_ptr<Aidge::Tensor>>& data) { // Collect all data input of the graph 
(that are producers) if (!data.empty()){ connectInputs(data); diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp index 4e3f9978837120bd01a3de2cfe2d22e33f9d7828..af10e3dcd3ead044f8619c40570936f53039d9a2 100644 --- a/src/scheduler/Scheduler.cpp +++ b/src/scheduler/Scheduler.cpp @@ -195,7 +195,9 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S // be put back in the consumers list once the remaining consumers // have been exhausted. bool isStillConsumer = false; - for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) { + // Only look for data inputs. If no data is available on data input, + // by definition, no parameter can be consumed on parameter inputs. + for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbData(); ++inputIdx) { AIDGE_LOG_CONTEXT("Consumer node {} input #{}", namePtrTable.at(consumer), inputIdx); if (consumer->getOperator()->getNbConsumedData(inputIdx) < @@ -280,7 +282,12 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S mPriorCache.clear(); if (!consumers.empty()) { - Log::warn("Remaining consumers: possible dead-lock"); + std::vector<std::string> consumersName; + std::transform(consumers.begin(), consumers.end(), + std::back_inserter(consumersName), + [&namePtrTable](auto val){ return namePtrTable.at(val); }); + + Log::warn("Remaining consumers: {}. Possible dead-lock.", consumersName); } return schedule; @@ -491,17 +498,17 @@ Aidge::MemoryManager Aidge::Scheduler::generateMemory(bool incProducers, bool wr const MemoryManager::MemoryPlane& memPlane = (wrapAroundBuffer && wrapAroundSize > 0) ? 
(*wrapAroundMemPlane[outputIdx]) : - memManager.allocate(requiredSize.data, childs, stride, length, count); + memManager.allocate(size, childs, stride, length, count); if (wrapAroundBuffer && wrapAroundSize > 0) { memManager.reallocate(memPlane, node, 0, - requiredSize.data, true, wrapAroundExtra, childs, stride, length, count); + size, true, wrapAroundExtra, childs, stride, length, count); } else { memManager.reallocate(memPlane.memSpace, node, memPlane.offset, - requiredSize.data, false, 0, childs, stride, length, count); + size, false, 0, childs, stride, length, count); } } @@ -513,12 +520,23 @@ Aidge::MemoryManager Aidge::Scheduler::generateMemory(bool incProducers, bool wr return memManager; } -void Aidge::Scheduler::connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data){ +void Aidge::Scheduler::connectInputs(const std::vector<std::shared_ptr<Aidge::Tensor>>& data){ // This version of connect inputs only connects tensor inputs in input data producers. auto inputNodes = mGraphView->getOrderedInputs(); // Assert that the number of input data producers corresponds to the number of data input - assert(data.size() == inputNodes.size() && "Scheduler connectInput error - Inconsistent number of graph inputs and inputs passed to the graph"); + if (data.size() != inputNodes.size()) { + const std::map<std::shared_ptr<Node>, std::string> namePtrTable + = mGraphView->getRankedNodesName("{0} ({1}#{3})"); + + std::vector<std::pair<std::string, IOIndex_t>> inputNodesName; + std::transform(inputNodes.begin(), inputNodes.end(), + std::back_inserter(inputNodesName), + [&namePtrTable](auto val){ return std::make_pair(namePtrTable.at(val.first), val.second); }); + + AIDGE_THROW_OR_ABORT(std::runtime_error, "Provided {} inputs to the scheduler, but graph has {} inputs (required inputs in order: {})", + data.size(), inputNodes.size(), inputNodesName); + } for (std::size_t i = 0; i < data.size(); ++i){ // TODO : maybe shallow copy instead of deepcopy diff --git 
a/src/scheduler/SequentialScheduler.cpp b/src/scheduler/SequentialScheduler.cpp index 801f46ffb0293696dad8a84908bdda2bbd789bfc..f044603fb8b1316ec71728acec520204bb5361b8 100644 --- a/src/scheduler/SequentialScheduler.cpp +++ b/src/scheduler/SequentialScheduler.cpp @@ -28,7 +28,7 @@ #include "aidge/operator/MetaOperator.hpp" #include "aidge/recipes/GraphViewHelper.hpp" -void Aidge::SequentialScheduler::forward(bool forwardDims, std::vector<std::shared_ptr<Aidge::Tensor>> data) { +void Aidge::SequentialScheduler::forward(bool forwardDims, const std::vector<std::shared_ptr<Aidge::Tensor>>& data) { // Collect all data input of the graph (that are producers) if (!data.empty()){ connectInputs(data);