diff --git a/include/aidge/graphRegex/GraphRegex.hpp b/include/aidge/graphRegex/GraphRegex.hpp index b62a42fcfeb258e5c659eaeb6681190482f37aa4..573447cf934b196e8b0c32d7a58e1977f5aa5f9a 100644 --- a/include/aidge/graphRegex/GraphRegex.hpp +++ b/include/aidge/graphRegex/GraphRegex.hpp @@ -12,13 +12,12 @@ namespace Aidge{ /** - * type for recipes function use in query and resolve -*/ + * @brief Type of the recipe functions used in query and resolve + */ using RecipesFunctionType = std::function<void(std::shared_ptr<MatchSolution>)>; /** - * @brief class which is the hight level interface for graph matching, used to simplify match definition - * + * @brief high level interface for graph matching, used to simplify match definition */ class GraphRegex{ diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp index 1097454fce62f645eb83c491498031738847e96c..c8cdd93810e18bd3cdd0a2d080e54aae2d787c66 100644 --- a/include/aidge/operator/OperatorTensor.hpp +++ b/include/aidge/operator/OperatorTensor.hpp @@ -40,6 +40,14 @@ protected: public: OperatorTensor() = delete; + /** + * @brief Operator tensor constructor. This function is not meant to be called directly but through a derived class constructor; + * every operator class derives from this class. + * + * @param[in] type : type of operator (e.g. "Add", "AveragePool",...) + * @param[in] inputsCategory : describes the type of each input. + * @param[in] nbOut : Number of tensors this operator will output + */ OperatorTensor(const std::string& type, const std::vector<InputCategory>& inputsCategory, const IOIndex_t nbOut); @@ -79,6 +87,15 @@ public: * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area. 
*/ virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const; + + /** + * @brief Compute the dimensions of the operator's output tensors given the input sizes. + * If the output dimensions cannot be computed because they depend on some undefined inputs, then forwardDims will return false and enter TOKEN mode for subsequent tensors. + * - TOKEN mode means that forwardDims will only ensure that all inputs and outputs of the graph the node is within are connected. + * @param[in] allowDataDependency if set to true, this means that this operator's output dimensions depend on the dimensions of optional parameter tensors. + * @return true if dims have been properly forwarded. false otherwise, in which case forwardDims will enter TOKEN mode. + * + */ virtual bool forwardDims(bool allowDataDependency = false); virtual bool dimsForwarded() const; /////////////////////////////////////////////////// diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp index 257a6965be4c08735f23ae575ffe104bb706593a..c52e779cbfec04f9ae6796c3bb6f21407c0cb0fb 100644 --- a/include/aidge/operator/Producer.hpp +++ b/include/aidge/operator/Producer.hpp @@ -103,7 +103,7 @@ public: void forward() override final; void backward() override final { - // fmt::print("Basic Producer backward() function.\n"); + Log::debug("Basic Producer backward() function."); } void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const override; diff --git a/python_binding/utils/pybind_Log.cpp b/python_binding/utils/pybind_Log.cpp index f70a4bfab54ee14194ea04f96efa33a6b8e04201..ca8d1f33086fb5093c76826e5a2f53df873badf5 100644 --- a/python_binding/utils/pybind_Log.cpp +++ b/python_binding/utils/pybind_Log.cpp @@ -78,6 +78,13 @@ void init_Log(py::module& m){ .def_static("set_console_level", 
&Log::setConsoleLevel, py::arg("level"), R"mydelimiter( Set the minimum log level displayed in the console. + Available `Level`s in ascending order: + - Level.Debug + - Level.Info + - Level.Notice + - Level.Warn + - Level.Error + - Level.Fatal :param level: Log level. :type level: Level @@ -93,6 +100,13 @@ void init_Log(py::module& m){ .def_static("set_file_level", &Log::setFileLevel, py::arg("level"), R"mydelimiter( Set the minimum log level saved in the log file. + Available `Level`s in ascending order: + - Level.Debug + - Level.Info + - Level.Notice + - Level.Warn + - Level.Error + - Level.Fatal :param level: Log level. :type level: Level diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp index 1035deb366a9c5df6ff08cd87ebd65a11c2b6e78..382052535cc6b5cd8089f720b8fa9f8d3a0ebce1 100644 --- a/src/graph/Node.cpp +++ b/src/graph/Node.cpp @@ -211,7 +211,7 @@ void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId) "Input index ({}) is out of bound ({}) for node {} (of type {})", inId, nbInputs(), name(), type()); if (mIdOutParents[inId] != gk_IODefaultIndex) { - Log::notice("Notice: filling a Tensor already attributed"); + Log::notice("Filling a Tensor already attributed."); auto originalParent = input(inId); // remove original parent reference to child // find the output ID for original Parent @@ -279,7 +279,7 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) { if (getParent(inId) != nullptr) { - Log::notice("Notice: you are replacing an existing parent for node {} (of type {})", name(), type()); + Log::notice("You are replacing an existing parent for node {} (of type {}).", name(), type()); } AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(), "Input index ({}) is out of bound ({}) for node {} (of type {})", diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp index 
9c2109bf6fdfb1a1fbc57afc3fd09e08a1dfc2db..851f1895c3862ed3deedc73f2ee70f6835b4a8a3 100644 --- a/src/scheduler/Scheduler.cpp +++ b/src/scheduler/Scheduler.cpp @@ -33,6 +33,7 @@ #include "aidge/operator/MetaOperator.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" +#include "aidge/utils/Log.hpp" #include "aidge/utils/Types.h" @@ -665,7 +666,7 @@ Aidge::Elts_t Aidge::Scheduler::getNbAvailableData(const std::shared_ptr<Node>& // => This means data was fed manually to the input, without a Producer // In this case, we assume a single-use data (unlike a Producer, which // keep producing the data each time it is needed). - fmt::print("No producer node attached to input#{} for node {} ({})\n", inputIdx, node->name(), node->type()); + Log::warn("No producer node attached to input#{} for node {} ({})", inputIdx, node->name(), node->type()); return Elts_t::DataElts(std::static_pointer_cast<Tensor>(node->getOperator()->getRawInput(inputIdx))->size()); }