From 16afb2d426a443531aabad6ee7ef31e7c94307f7 Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Thu, 23 Jan 2025 22:13:20 +0000
Subject: [PATCH] UPD: some includes and log

---
 include/aidge/scheduler/ProdConso.hpp |  1 +
 src/graph/GraphView.cpp               | 61 +++++++++++++++++----------
 src/graph/Matching.cpp                |  3 +-
 src/recipes/FuseBatchNorm.cpp         |  2 +-
 src/scheduler/ProdConso.cpp           | 12 ++++--
 5 files changed, 51 insertions(+), 28 deletions(-)

diff --git a/include/aidge/scheduler/ProdConso.hpp b/include/aidge/scheduler/ProdConso.hpp
index cfc83cbf9..f30e00afa 100644
--- a/include/aidge/scheduler/ProdConso.hpp
+++ b/include/aidge/scheduler/ProdConso.hpp
@@ -12,6 +12,7 @@
 #ifndef AIDGE_SCHEDULER_PRODCONSO_H_
 #define AIDGE_SCHEDULER_PRODCONSO_H_
 
+#include <memory>
 #include <string>
 #include <vector>
 
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index f7390facd..6220f5558 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -438,23 +438,32 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
 }
 
 bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>>& dims, bool allowDataDependency) {
+    Log::debug("Starting dimension forward propagation for GraphView");
     // remove current Data connections and use dummy inputs to propagate dimensions
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children information
     if (!dims.empty()){
-        Log::debug("forwardDims(): setting graph input dims ({} dims provided).", dims.size());
+        auto msg = fmt::format("Manually setting GraphView input dims with provided parameters:");
+        for (std::size_t i = 0; i < dims.size(); ++i)
+            msg = fmt::format("{}\n\t* input#{} {}", msg, i, dims[i]);
+        Log::info("{}", msg);
+
+        Log::debug("Validating input dimensions against existing graph inputs");
         std::size_t i = 0;
         for (auto& input : mInputNodes) {
             const auto& currentTensorPtr = std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator())->getInput(input.second);
             if (i < dims.size() && !dims[i].empty()) {
                 if (currentTensorPtr) { // tensor detected
-                    AIDGE_ASSERT(currentTensorPtr->dims() == dims[i],
-                        "forwardDims(): mismatch between existing and provided size for graph input#{} (existing size: {}, provided size: {})",
-                        i, currentTensorPtr->dims(), dims[i])
+                    if (currentTensorPtr->dims() != dims[i]) {
+                        Log::error("Dimension mismatch for input#{} - Expected: {}, Provided: {}",
+                            i, currentTensorPtr->dims(), dims[i]);
+                        return false;
+                    }
+                    Log::debug("Input#{} dimensions match existing tensor", i);
                 } else {
+                    Log::debug("Creating new tensor for input#{} with dims {}", i, dims[i]);
                     auto tensor = std::make_shared<Tensor>(dims[i]);
                     input.first->getOperator()->setInput(input.second, tensor);
                 }
@@ -464,12 +473,12 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                     || input.first->inputCategory(input.second) == InputCategory::OptionalParam);
                 if (currentTensorPtr) {
-                    Log::debug("forwardDims(): existing dims are {} for graph input#{} for input#{} of node {} (of type {})",
-                        i, input.second, input.first->name(), input.first->type(), currentTensorPtr->dims());
+                    Log::debug("Using existing dimensions {} for graph input#{} (matching input#{} of node [\033[1m\033[3m{}\033[0m] - [\033[1m\033[3m{}\033[0m])",
+                        currentTensorPtr->dims(), i, input.second, input.first->name(), input.first->type());
                 } else if (!optional) {
-                    Log::warn("forwardDims(): did not specify dims for mandatory graph input#{} for input#{} of node {} (of type {})",
-                        i, input.second, input.first->name(), input.first->type());
+                    Log::warn("Missing dimensions for mandatory graph input#{} (matching input#{} of node [\033[1m\033[3m{}\033[0m] - [\033[1m\033[3m{}\033[0m])",
+                        i, input.second, input.first->name(), input.first->type());
                 }
             }
             ++i;
         }
     }
@@ -477,29 +486,35 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
     }
     // Ensure every node in the graph is correctly connected
+    Log::debug("Verifying graph connections and tensor validity");
     for (std::shared_ptr<Node> nodePtr : getNodes()) {
         for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
             std::pair<std::shared_ptr<Node>, IOIndex_t> inputI = nodePtr->input(i);
             if (inputI.first) {
-                // Check that associated Data are properly connected...
-                AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i) == inputI.first->getOperator()->getRawOutput(inputI.second),
-                    "Input#{} for node {} ({}) is not properly connected to output#{} of node {} ({}): Data or Tensor mismatch!",
-                    i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
-            } else if (nodePtr->inputCategory(i) != InputCategory::OptionalData && nodePtr->inputCategory(i) != InputCategory::OptionalParam) {
-                // Input is missing
-                AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i),
-                    "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
-                AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined(),
-                    "Undefined input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
+                if (nodePtr->getOperator()->getRawInput(i) != inputI.first->getOperator()->getRawOutput(inputI.second)) {
+                    Log::error("Connection mismatch: Input#{} of node [\033[1m\033[3m{}\033[0m (\033[1m\033[3m{}\033[0m)] -> Output#{} of node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]",
+                        i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
+                    return false;
+                }
+            } else if (nodePtr->inputCategory(i) != InputCategory::OptionalData &&
+                       nodePtr->inputCategory(i) != InputCategory::OptionalParam) {
+                if (!nodePtr->getOperator()->getRawInput(i)) {
+                    Log::error("Missing mandatory input#{} for node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]",
+                        i, nodePtr->name(), nodePtr->type());
+                    return false;
+                }
+                if (std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined()) {
+                    Log::error("Undefined mandatory input#{} for node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]",
+                        i, nodePtr->name(), nodePtr->type());
+                    return false;
+                }
             }
-        }
         }
     }
 
-    // List of nodes that are already dims forwarded
-    std::set<std::shared_ptr<Node>> dimsForwarded;
-    // Establish initial list of dims forwardable nodes:
-    // input nodes and childs from Producers
+    Log::debug("Initializing dimension propagation");
+    // Establish initial list of dims forwardable nodes: graph input node + Producers childs
+    std::set<std::shared_ptr<Node>> dimsForwarded; ///< List of nodes that are already dims forwarded
     std::set<std::shared_ptr<Node>> listNodes = inputNodes();
     for (const auto& nodePtr : getNodes()) {
         if (nodePtr->type() == Producer_Op::Type) {
diff --git a/src/graph/Matching.cpp b/src/graph/Matching.cpp
index cc1330828..282ed2020 100644
--- a/src/graph/Matching.cpp
+++ b/src/graph/Matching.cpp
@@ -24,6 +24,7 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/utils/Log.hpp"
 
 static void removeLeadingWhitespace(std::string& str) {
     str.erase(str.begin(),
@@ -84,7 +85,7 @@ std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphM
     if (disjoint) {
         matches = filterLonguestDisjoint(matches);
     }
-
+    Log::info("Graph matching complete.\nFound {} matches for the query", matches.size());
     return matches;
 }
 
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 55be9636f..0fd9d7b44 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -193,8 +193,8 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::GraphView> graphView) {
     auto matches = SinglePassGraphMatching(graphView).match("(Conv2D|ConvDepthWise2D|PaddedConv2D|PaddedConvDepthWise2D)->BatchNorm2D");
 
     for (auto match : matches) {
-        fmt::println("Match !");
         auto rootNode = match.graph->rootNode();
         fuseBatchNorm(rootNode, *rootNode->getChildren().begin());
     }
+    Log::info("[\033[1m\033[3mFuseBatchNorm\033[0m] recipe completed.");
 }
diff --git a/src/scheduler/ProdConso.cpp b/src/scheduler/ProdConso.cpp
index a3bff53c3..0e20796fe 100644
--- a/src/scheduler/ProdConso.cpp
+++ b/src/scheduler/ProdConso.cpp
@@ -9,13 +9,19 @@
  *
 ********************************************************************************/
 
-#include <cassert>
-#include <string>
-
 #include "aidge/scheduler/ProdConso.hpp"
+
+#include <algorithm> // std::fill
+#include <cstddef>   // std::size_t
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Elts.hpp"
 #include "aidge/operator/Operator.hpp"
+#include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
 
 Aidge::ProdConso::ProdConso(const Operator& op, bool inPlace):
     mOp(op),
--
GitLab