Skip to content
Snippets Groups Projects
Commit 10a0b754 authored by Grégoire Kubler's avatar Grégoire Kubler
Browse files

Merge remote-tracking branch 'EclipseRepo/dev' into feat/support_ASAN

parents 1dfbbd11 d00e9a7f
No related branches found
No related tags found
2 merge requests!105version 0.2.0,!100fix/scheduler_exec_time
......@@ -21,7 +21,7 @@ void init_Sub(py::module& m) {
py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance())
.def("get_inputs_name", &Sub_Op::getInputsName)
.def("get_outputs_name", &Sub_Op::getOutputsName);
declare_registrable<Sub_Op>(m, "SubOp");
m.def("Sub", &Sub, py::arg("name") = "");
}
} // namespace Aidge
......@@ -27,12 +27,15 @@ namespace Aidge {
template <DimIdx_t DIM>
void declare_Transpose(py::module &m) {
const std::string pyClassName("TransposeOp" + std::to_string(DIM) + "D");
py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>(
m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
.def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
.def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName)
.def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName);
declare_registrable<Transpose_Op<DIM>>(m, pyClassName);
m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
const std::string& name) {
AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [{}] does not match DIM [{}]", output_dims_order.size(), DIM);
......
......@@ -23,6 +23,7 @@ void init_DataProvider(py::module&);
void init_Tensor(py::module&);
void init_OperatorImpl(py::module&);
void init_Attributes(py::module&);
void init_Log(py::module&);
void init_Operator(py::module&);
void init_OperatorTensor(py::module&);
......@@ -85,6 +86,7 @@ void init_Aidge(py::module& m){
init_OperatorImpl(m);
init_Attributes(m);
init_Log(m);
init_Operator(m);
init_OperatorTensor(m);
init_Add(m);
......
#include <pybind11/pybind11.h>
#include "aidge/utils/Log.hpp"
namespace py = pybind11;
namespace Aidge {
// Python bindings for the Aidge logging facility: exposes the Log::Level
// enum and the static Log methods (one per severity, plus configuration).
void init_Log(py::module& m){
    // Severity levels, ordered from most verbose (Debug) to most severe (Fatal).
    py::enum_<Log::Level>(m, "Level")
        .value("Debug", Log::Debug)
        .value("Info", Log::Info)
        .value("Notice", Log::Notice)
        .value("Warn", Log::Warn)
        .value("Error", Log::Error)
        .value("Fatal", Log::Fatal);

    // Static-only class: every binding below forwards to a static C++ method.
    py::class_<Log>(m, "Log")
    .def_static("debug", [](const std::string& msg) { Log::debug(msg); }, py::arg("msg"),
          R"mydelimiter(
Detailed messages for debugging purposes, providing information helpful
for developers to trace and identify issues.
Detailed insights of what is happening in an operation, not useful for the
end-user. The operation is performed nominally.
Note: This level is disabled at compile time for Release, therefore
inducing no runtime overhead for Release.
:param msg: Debug message.
:type msg: str
)mydelimiter")
    .def_static("info", [](const std::string& msg) { Log::info(msg); }, py::arg("msg"),
          R"mydelimiter(
Messages that provide a record of the normal operation, about
the application's state, progress, or important events.
Reports normal start, end and key steps in an operation. The operation is
performed nominally.
:param msg: Info message.
:type msg: str
)mydelimiter")
    .def_static("notice", [](const std::string& msg) { Log::notice(msg); }, py::arg("msg"),
          R"mydelimiter(
Applies to normal but significant conditions that may require monitoring,
like unusual or normal fallback events.
Reports specific paths in an operation. The operation can still be
performed normally.
:param msg: Notice message.
:type msg: str
)mydelimiter")
    .def_static("warn", [](const std::string& msg) { Log::warn(msg); }, py::arg("msg"),
          R"mydelimiter(
Indicates potential issues or situations that may lead to errors but do
not necessarily cause immediate problems.
Some specific steps of the operation could not be performed, but it can
still provide an exploitable result.
:param msg: Warning message.
:type msg: str
)mydelimiter")
    .def_static("error", [](const std::string& msg) { Log::error(msg); }, py::arg("msg"),
          R"mydelimiter(
Signifies a problem or unexpected condition that the application can
recover from, but attention is needed to prevent further issues.
The operation could not be performed, but it does not prevent potential
further operations.
:param msg: Error message.
:type msg: str
)mydelimiter")
    .def_static("fatal", [](const std::string& msg) { Log::fatal(msg); }, py::arg("msg"),
          R"mydelimiter(
Represents a critical error or condition that leads to the termination of
the application, indicating a severe and unrecoverable problem.
The operation could not be performed and any further operation is
impossible.
:param msg: Fatal message.
:type msg: str
)mydelimiter")
    .def_static("setConsoleLevel", &Log::setConsoleLevel, py::arg("level"),
          R"mydelimiter(
Set the minimum log level displayed in the console.
:param level: Log level.
:type level: Level
)mydelimiter")
    .def_static("setFileLevel", &Log::setFileLevel, py::arg("level"),
          R"mydelimiter(
Set the minimum log level saved in the log file.
:param level: Log level.
:type level: Level
)mydelimiter")
    .def_static("setFileName", &Log::setFileName, py::arg("fileName"),
          R"mydelimiter(
Set the log file name.
Close the current log file and open the one with the new file name.
If empty, stop logging into a file.
:param fileName: Log file name.
:type fileName: str
)mydelimiter");
}
}
......@@ -25,14 +25,18 @@ Aidge::OperatorImpl::OperatorImpl(const Operator& op):
}
// Number of input elements required before this operator can run.
// Defaults to the full input tensor size.
Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
    // Diff residue removed: the legacy assert() duplicated this check.
    AIDGE_ASSERT(mOp.getRawInput(inputIdx),
        "a valid input is required at index {} for operator type {}",
        inputIdx, mOp.type());
    // Requires the whole tensor by default
    return std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->size();
}
Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
assert(mOp.getRawInput(inputIdx) && "requires valid input");
AIDGE_ASSERT(mOp.getRawInput(inputIdx),
"a valid input is required at index {} for operator type {}",
inputIdx, mOp.type());
// Protect the whole tensor by default
return std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->size();
......@@ -40,19 +44,25 @@ Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx)
// Amount of memory required to hold the output at outputIdx.
// Defaults to the full output tensor size; inputsSize is ignored here.
Aidge::NbElts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                       const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
    // Diff residue removed: the legacy assert() duplicated this check.
    AIDGE_ASSERT(mOp.getRawOutput(outputIdx),
        "a valid output is required at index {} for operator type {}",
        outputIdx, mOp.type());
    // Requires the whole tensor by default, regardless of available data on inputs
    return std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx))->size();
}
// Number of elements already consumed on the given input (scheduler bookkeeping).
Aidge::NbElts_t Aidge::OperatorImpl::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
    // Diff residue removed: the legacy assert() duplicated this check.
    AIDGE_ASSERT(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(),
        "input index ({}) is out of bound ({}) for operator type {}",
        inputIdx, mNbConsumedData.size(), mOp.type());
    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
}
// Number of elements already produced on the given output (scheduler bookkeeping).
Aidge::NbElts_t Aidge::OperatorImpl::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
    // Diff residue removed: the legacy assert() duplicated this check.
    AIDGE_ASSERT(static_cast<std::size_t>(outputIdx) < mNbProducedData.size(),
        "output index ({}) is out of bound ({}) for operator type {}",
        outputIdx, mNbProducedData.size(), mOp.type());
    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
......
......@@ -328,19 +328,18 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
}
void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims) {
std::set<NodePtr> startNodes = inputNodes();
// setInputs
// Link every tensor to the right pointer
// following parent - children informations
if (!dims.empty()){
AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of dimensions and graph inputs");
AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of given dimensions ({}) and graph inputs ({})", dims.size(), mInputNodes.size());
for (std::size_t i = 0; i < dims.size(); ++i){
auto tensor = std::make_shared<Tensor>(dims[i]);
mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
}
}
// Ensure every node in the graph is correctly connected
for (std::shared_ptr<Node> nodePtr : getNodes()) {
for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
// assess if the input was not already set and is a Tensor then link it to parent output
......@@ -352,7 +351,7 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
nodePtr->getOperator()->associateInput(i, inputI.first->getOperator()->getRawOutput(inputI.second));
}
else {
AIDGE_ASSERT(false, "Non-tensor entries not handled yet.\n");
AIDGE_ASSERT(false, "Non-tensor entries not handled yet, for node {} (of type {}).", nodePtr->name(), nodePtr->type());
}
}
} else {
......@@ -362,54 +361,37 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
}
}
if (nodePtr->type() == Producer_Op::Type) {
startNodes.insert(nodePtr);
}
}
// Compute dimensions of every node
_forwardDims(startNodes);
}
void Aidge::GraphView::_forwardDims(std::set<std::shared_ptr<Node>> listNodes) {
// TODO: support multi-inputs/outputs
std::set<std::shared_ptr<Node>> nextList = std::set<std::shared_ptr<Node>>();
for (std::shared_ptr<Node> nodePtr : listNodes) {
if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
if (!op->outputDimsForwarded()) {
op->computeOutputDims();
}
if (!op->outputDimsForwarded()) { // try to compute output dimensions again later
nextList.insert(nodePtr);
} else { // compute output dimensions of children
std::set<std::shared_ptr<Node>> children = nodePtr->getChildren();
for (auto child : children) {
const auto childOp = std::static_pointer_cast<OperatorTensor>(child->getOperator());
if (!childOp->outputDimsForwarded()) {
nextList.insert(child);
}
}
}
}
}
if (nextList.empty()) {
for (std::shared_ptr<Node> nodePtr : getNodes()) {
// Compute dimensions of every node
std::set<std::shared_ptr<Node>> listNodes = getNodes();
do {
std::set<std::shared_ptr<Node>> nextList;
for (std::shared_ptr<Node> nodePtr : listNodes) {
if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
if (!std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator())->outputDimsForwarded()) {
nextList.insert(nodePtr);
}
const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
// Recompute everytime, even if it was already computed in a
// previous call of forwardDims(), as the graph may have changed!
op->computeOutputDims();
if (!op->outputDimsForwarded()) {
nextList.insert(nodePtr);
}
}
}
}
// Internal check to make sure we won't enter in an infinite loop!
AIDGE_ASSERT(nextList != listNodes, "Unable to forward dimensions (circular dependency and/or wrong dimensions?)");
// Internal check to make sure we won't enter in an infinite loop!
if (nextList == listNodes) {
// We are stuck!
std::vector<std::string> nodesName;
std::transform(nextList.begin(), nextList.end(),
std::back_inserter(nodesName),
[](auto val){ return val->name() + " (" + val->type() + ")"; });
AIDGE_THROW_OR_ABORT(std::runtime_error, "Unable to forward dimensions (circular dependency and/or wrong dimensions?). Unable to compute output dims for nodes {}.", nodesName);
}
if (!nextList.empty()) {
_forwardDims(nextList);
listNodes.swap(nextList);
}
while (!listNodes.empty());
}
void Aidge::GraphView::setBackend(const std::string &backend, DeviceIdx_t device) {
......@@ -458,7 +440,7 @@ Aidge::GraphView::outputs(const std::string& nodeName) const {
// Not implemented yet: fail loudly rather than silently printing and returning.
void Aidge::GraphView::setInputId(Aidge::IOIndex_t /*inID*/,
                                  Aidge::IOIndex_t /*newNodeOutID*/) {
    // Diff residue removed: the old fmt::print() was superseded by the abort below.
    AIDGE_THROW_OR_ABORT(std::runtime_error, "Not implemented yet.");
}
void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnableParam) {
......@@ -714,10 +696,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents() const {
// Look up a node by name and return its parents.
// Asserts (instead of the old print + exit(-1)) when the name is unknown.
std::vector<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents(const std::string nodeName) const {
    std::map<std::string, std::shared_ptr<Node>>::const_iterator it = mNodeRegistry.find(nodeName);
    // Diff residue removed: the legacy `if (...) { fmt::print; exit(-1); }` block
    // duplicated this check with a hard process exit.
    AIDGE_ASSERT(it != mNodeRegistry.end(), "No node named {} in graph {}.", nodeName, name());
    return (it->second)->getParents();
}
......@@ -743,20 +722,15 @@ std::vector<std::vector<std::shared_ptr<Aidge::Node>>>
// Look up a node by name and return its ordered children.
// Asserts (instead of the old print + exit(-1)) when the name is unknown.
Aidge::GraphView::getChildren(const std::string nodeName) const {
    std::map<std::string, std::shared_ptr<Node>>::const_iterator it =
        mNodeRegistry.find(nodeName);
    // Diff residue removed: the legacy `if (...) { fmt::print; exit(-1); }` block
    // duplicated this check with a hard process exit.
    AIDGE_ASSERT(it != mNodeRegistry.end(), "No node named {} in graph {}.", nodeName, name());
    return (it->second)->getOrderedChildren();
}
// Return the children of a node known to belong to this graph.
// Asserts (instead of the old print + exit(-1)) when the node is not in the graph.
std::set<std::shared_ptr<Aidge::Node>>
Aidge::GraphView::getChildren(const std::shared_ptr<Node> otherNode) const {
    std::set<std::shared_ptr<Node>>::const_iterator it = mNodes.find(otherNode);
    // Diff residue removed: the legacy `if (...) { fmt::print; exit(-1); }` block
    // duplicated this check with a hard process exit.
    AIDGE_ASSERT(it != mNodes.end(), "The node {} (of type {}) is not in graph {}.",
        (otherNode) ? otherNode->name() : "#nullptr", (otherNode) ? otherNode->type() : "", name());
    return (*it)->getChildren();
}
......@@ -768,7 +742,7 @@ Aidge::GraphView::getNode(const std::string& nodeName) const {
if (it != mNodeRegistry.cend()) {
return it->second;
} else {
fmt::print("No Node named {} in the current GraphView.\n", nodeName);
Log::warn("No Node named {} in the current GraphView {}.", nodeName, name());
return nullptr;
}
}
......
......@@ -169,7 +169,9 @@ Aidge::IOIndex_t Aidge::Node::nbValidOutputs() const {
}
void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId) {
assert(inId != gk_IODefaultIndex && (inId < nbInputs()) && "Must be a valid index");
AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(),
"Input index ({}) is out of bound ({}) for node {} (of type {})",
inId, nbInputs(), name(), type());
if (mIdOutParents[inId] != gk_IODefaultIndex) {
fmt::print("Warning: filling a Tensor already attributed\n");
auto originalParent = input(inId);
......@@ -194,7 +196,7 @@ void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t ou
"Output index (#{}) of the node {} (of type {}) is out of bound (it has {} outputs), when trying to add the child node {} (of type {})",
outId, name(), type(), nbOutputs(), otherNode->name(), otherNode->type());
if (otherNode->input(otherInId).second != gk_IODefaultIndex) {
fmt::print("Warning, the {}-th Parent of the child node already existed.\n", otherInId);
Log::notice("Notice: the {}-th Parent of the child node {} (of type {}) already existed", otherInId, otherNode->name(), otherNode->type());
}
// manage tensors and potential previous parent
otherNode->setInputId(otherInId, outId);
......@@ -239,23 +241,29 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t
// Attach (or replace) the parent node connected to input inId.
void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) {
    if (getParent(inId) != nullptr) {
        // Diff residue removed: the legacy fmt::print warning duplicated this notice.
        Log::notice("Notice: you are replacing an existing parent for node {} (of type {})", name(), type());
    }
    // Diff residue removed: the legacy assert() duplicated this bound check.
    AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(),
        "Input index ({}) is out of bound ({}) for node {} (of type {})",
        inId, nbInputs(), name(), type());
    mParents[inId] = other_node;
}
// Return all parents of this node, one entry per input (may contain nullptr).
std::vector<std::shared_ptr<Aidge::Node>> Aidge::Node::getParents() const { return mParents; }
// Detach and return the parent connected to input inId.
std::shared_ptr<Aidge::Node> Aidge::Node::popParent(const IOIndex_t inId) {
    // Diff residue removed: the legacy assert() duplicated this bound check.
    AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(),
        "Input index ({}) is out of bound ({}) for node {} (of type {})",
        inId, nbInputs(), name(), type());
    std::shared_ptr<Node> val = mParents[inId];
    removeParent(inId);
    return val;
}
bool Aidge::Node::removeParent(const IOIndex_t inId) {
assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Parent index out of bound.");
AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(),
"Input index ({}) is out of bound ({}) for node {} (of type {})",
inId, nbInputs(), name(), type());
if (mParents[inId]) {
mParents[inId] = nullptr;
mIdOutParents[inId] = gk_IODefaultIndex;
......
......@@ -75,4 +75,7 @@ void Aidge::Operator::forward() {
runHooks();
}
// Run the backward pass through the attached implementation.
// (Diff residue removed: the old one-line definition appeared alongside this one,
// producing a duplicate definition of Operator::backward.)
void Aidge::Operator::backward() {
    // Fail with a clear message instead of dereferencing a null implementation.
    AIDGE_ASSERT(mImpl != nullptr, "backward(): an implementation is required for {}!", type());
    mImpl->backward();
}
......@@ -64,7 +64,7 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
{
// If both inputs are producers, there is an ambiguity, but both options
// result in a correct solution.
fmt::print("Warning: both MatMul inputs are Producers, assume data at input#0 and weights at input#1.\n");
Log::notice("Notice: both MatMul inputs are Producers, assume data at input#0 and weights at input#1.");
weight = matmulNode->getParent(1)->cloneSharedOperators();
}
AIDGE_ASSERT(weight != nullptr, "Could not deduce weight input for MatMul operator.");
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/utils/Log.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include <fmt/color.h>
#include <fmt/chrono.h>
// Minimum severity printed to the console (see Log::log()).
Aidge::Log::Level Aidge::Log::mConsoleLevel = Info;
// Minimum severity written to the log file (see Log::log()).
Aidge::Log::Level Aidge::Log::mFileLevel = Debug;
// Log file name; cleared to disable file logging (see initFile()).
std::string Aidge::Log::mFileName = "aidge.log";
// Lazily-opened file handle, closed automatically via std::fclose.
std::unique_ptr<FILE, decltype(&std::fclose)> Aidge::Log::mFile {nullptr, nullptr};
// Dispatch a message to the console and/or the log file, depending on the
// configured minimum severity of each sink.
void Aidge::Log::log(Level level, const std::string& msg) {
    if (level >= mConsoleLevel) {
        // Apply log level style only for console.
        // Styles that were already applied to msg with fmt are kept also in
        // the log file.
        const auto modifier
            = (level == Debug) ? fmt::fg(fmt::color::gray)
            : (level == Notice) ? fmt::fg(fmt::color::light_yellow)
            : (level == Warn) ? fmt::fg(fmt::color::orange)
            : (level == Error) ? fmt::fg(fmt::color::red)
            : (level == Fatal) ? fmt::bg(fmt::color::red)
            : fmt::text_style();
        fmt::println("{}", fmt::styled(msg, modifier));
    }

    if (level >= mFileLevel && !mFileName.empty()) {
        if (!mFile) {
            initFile(mFileName);
        }
        // BUGFIX: print through the "{}" format instead of passing msg as the
        // format string, so braces inside msg are written verbatim rather than
        // being (mis)parsed as format specifiers.
        fmt::println(mFile.get(), "{}", msg);
    }
}
void Aidge::Log::initFile(const std::string& fileName) {
mFile = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen(fileName.c_str(), "a"), &std::fclose);
if (!mFile) {
mFileName.clear(); // prevents AIDGE_THROW_OR_ABORT() to try to log into file
AIDGE_THROW_OR_ABORT(std::runtime_error,
"Could not create log file: {}", fileName);
}
const std::time_t t = std::time(nullptr);
fmt::println(mFile.get(), "###### {:%Y-%m-%d %H:%M:%S} ######", fmt::localtime(t));
}
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include "aidge/utils/Log.hpp"
#include <fmt/color.h>
using namespace Aidge;
// Smoke test: lower the console threshold to Debug, then emit one message per
// severity level (plus one pre-styled message) to verify none of them throws.
TEST_CASE("[core/log] Log") {
    SECTION("TestLog") {
        // Show everything from Debug upward on the console.
        Log::setConsoleLevel(Log::Debug);
        Log::debug("debug");
        // Message carrying its own fmt styling (green foreground).
        Log::debug("{}", fmt::styled("green debug", fmt::fg(fmt::color::green)));
        Log::info("info");
        Log::notice("notice");
        Log::warn("warn");
        Log::error("error");
        Log::fatal("fatal");
    }
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment